Source code for torch.distributed.elastic.rendezvous.dynamic_rendezvous
# mypy: allow-untyped-defs
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import inspect
import logging
import os
import pickle
import socket
import threading
import time
import weakref
from abc import ABC, abstractmethod
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from enum import Enum
from typing import Any, Callable, Dict, List, Optional, Set, Tuple

import torch.distributed as dist
from torch.distributed import Store
from torch.distributed.elastic.events import construct_and_record_rdzv_event, NodeState

from .api import (
    RendezvousClosedError,
    RendezvousError,
    RendezvousGracefulExitError,
    RendezvousHandler,
    RendezvousInfo,
    RendezvousParameters,
    RendezvousStateError,
    RendezvousStoreInfo,
    RendezvousTimeoutError,
)
from .utils import _delay, _PeriodicTimer


__all__ = [
    "RendezvousBackend",
    "RendezvousTimeout",
    "RendezvousSettings",
    "DynamicRendezvousHandler",
    "create_handler",
]

logger = logging.getLogger(__name__)


def get_method_name(depth=2):
    if len(inspect.stack()) > depth:
        return inspect.stack()[depth].function
    return "no_method_name"


Token = Any
"""Represent an opaque fencing token used by the rendezvous backend."""
class RendezvousBackend(ABC):
    """Represent a backend that holds the rendezvous state."""

    @property
    @abstractmethod
    def name(self) -> str:
        """Get the name of the backend."""
    @abstractmethod
    def get_state(self) -> Optional[Tuple[bytes, Token]]:
        """Get the rendezvous state.

        Returns:
            A tuple of the encoded rendezvous state and its fencing token or
            ``None`` if no state is found in the backend.

        Raises:
            RendezvousConnectionError:
                The connection to the backend has failed.
            RendezvousStateError:
                The rendezvous state is corrupt.
        """
    @abstractmethod
    def set_state(
        self, state: bytes, token: Optional[Token] = None
    ) -> Optional[Tuple[bytes, Token, bool]]:
        """Set the rendezvous state.

        The new rendezvous state is set conditionally:

          - If the specified ``token`` matches the fencing token stored in the
            backend, the state will be updated. The new state will be returned
            to the caller along with its fencing token.
          - If the specified ``token`` does not match the fencing token stored
            in the backend, the state won't be updated; instead the existing
            state along with its fencing token will be returned to the caller.
          - If the specified ``token`` is ``None``, the new state will be set
            only if there is no existing state in the backend. Either the new
            state or the existing state along with its fencing token will be
            returned to the caller.

        Args:
            state:
                The encoded rendezvous state.
            token:
                An optional fencing token that was retrieved by a previous call
                to :py:meth:`get_state` or ``set_state()``.

        Returns:
            A tuple of the serialized rendezvous state, its fencing token, and
            a boolean value indicating whether our set attempt succeeded.

        Raises:
            RendezvousConnectionError:
                The connection to the backend has failed.
            RendezvousStateError:
                The rendezvous state is corrupt.
        """
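The following is a minimal sketch (not part of this module) of what a backend satisfying the compare-and-set contract above could look like; it keeps the state in process memory and uses an integer fencing token, which is only suitable for single-process testing. It relies on the names defined earlier in this file.

class _InMemoryRendezvousBackend(RendezvousBackend):
    """Illustrative in-memory backend honoring the fencing-token contract."""

    def __init__(self) -> None:
        self._lock = threading.Lock()
        self._state: Optional[bytes] = None
        self._token = 0  # monotonically increasing fencing token

    @property
    def name(self) -> str:
        return "in-memory"

    def get_state(self) -> Optional[Tuple[bytes, Token]]:
        with self._lock:
            if self._state is None:
                return None
            return self._state, self._token

    def set_state(
        self, state: bytes, token: Optional[Token] = None
    ) -> Optional[Tuple[bytes, Token, bool]]:
        with self._lock:
            # The write succeeds only if the caller holds the current token,
            # or if no state exists yet and no token was supplied.
            if (self._state is None and token is None) or token == self._token:
                self._state = state
                self._token += 1
                return self._state, self._token, True
            if self._state is None:
                return None
            return self._state, self._token, False

Real deployments use the C10d or etcd backends shipped with torch.distributed.elastic rather than a hand-rolled backend like this.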
class RendezvousTimeout:
    """Hold the timeout configuration of a rendezvous.

    Args:
        join:
            The time within which the rendezvous is expected to complete.
        last_call:
            An additional wait amount before completing the rendezvous once the
            rendezvous has the minimum number of required participants.
        close:
            The time within which the rendezvous is expected to close after a
            call to :py:meth:`RendezvousHandler.set_closed` or
            :py:meth:`RendezvousHandler.shutdown`.
        keep_alive:
            The time within which a keep-alive heartbeat is expected to
            complete.
    """

    _ZERO = timedelta(0)

    _DEFAULT_TIMEOUTS = {
        "join": timedelta(seconds=600),
        "last_call": timedelta(seconds=30),
        "close": timedelta(seconds=30),
        "heartbeat": timedelta(seconds=5),
    }

    _join: timedelta
    _last_call: timedelta
    _close: timedelta
    _heartbeat: timedelta

    def __init__(
        self,
        join: Optional[timedelta] = None,
        last_call: Optional[timedelta] = None,
        close: Optional[timedelta] = None,
        heartbeat: Optional[timedelta] = None,
    ) -> None:
        self._set_timeouts(
            join=join, last_call=last_call, close=close, heartbeat=heartbeat
        )

    @property
    def join(self) -> timedelta:
        """Get the join timeout."""
        return self._join

    @property
    def last_call(self) -> timedelta:
        """Get the last call timeout."""
        return self._last_call

    @property
    def close(self) -> timedelta:
        """Get the close timeout."""
        return self._close

    @property
    def heartbeat(self) -> timedelta:
        """Get the keep-alive heartbeat timeout."""
        return self._heartbeat

    def _set_timeouts(self, **timeouts: Optional[timedelta]):
        for name, timeout in timeouts.items():
            if timeout is None:
                timeout = self._DEFAULT_TIMEOUTS[name]
            if timeout <= self._ZERO:
                raise ValueError(f"The {name} timeout ({timeout}) must be positive.")
            setattr(self, "_" + name, timeout)
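For illustration only: constructing a timeout configuration with a custom join window while keeping the defaults for the other timeouts. Any timeout left as ``None`` falls back to ``_DEFAULT_TIMEOUTS``, and a non-positive ``timedelta`` raises ``ValueError`` per ``_set_timeouts()`` above.

timeouts = RendezvousTimeout(join=timedelta(minutes=20))
assert timeouts.join == timedelta(minutes=20)
assert timeouts.last_call == timedelta(seconds=30)  # default
assert timeouts.heartbeat == timedelta(seconds=5)  # default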
@dataclass(repr=False, eq=False, frozen=True)
class RendezvousSettings:
    """Hold the settings of the rendezvous.

    Attributes:
        run_id:
            The run id of the rendezvous.
        min_nodes:
            The minimum number of nodes to admit to the rendezvous.
        max_nodes:
            The maximum number of nodes to admit to the rendezvous.
        timeout:
            The timeout configuration of the rendezvous.
        keep_alive_interval:
            The amount of time a node waits before sending a heartbeat to keep
            it alive in the rendezvous.
        keep_alive_max_attempt:
            The maximum number of failed heartbeat attempts after which a node
            is considered dead.
    """

    run_id: str
    min_nodes: int
    max_nodes: int
    timeout: RendezvousTimeout
    keep_alive_interval: timedelta
    keep_alive_max_attempt: int


@dataclass(eq=True, order=True, frozen=True)
class _NodeDesc:
    """Describe a node in the rendezvous.

    Attributes:
        addr:
            The FQDN of the node or user specified local node address.
        pid:
            The id of the process in which the rendezvous handler runs.
        local_id:
            A process-wide unique id.
    """

    addr: str
    pid: int
    local_id: int

    def __repr__(self) -> str:
        return f"{self.addr}_{self.pid}_{self.local_id}"


class _NodeDescGenerator:
    """Generate node descriptors.

    A node descriptor is a combination of an FQDN, a process id, and an auto-
    incremented integer that uniquely identifies a node in the rendezvous.
    """

    _lock: threading.Lock
    _local_id: int

    def __init__(self) -> None:
        self._lock = threading.Lock()

        # An integer that is incremented with each call to generate().
        self._local_id = 0

    def generate(self, local_addr: Optional[str] = None) -> _NodeDesc:
        # This method can be called by multiple threads concurrently; therefore,
        # we must increment the integer atomically.
        with self._lock:
            local_id = self._local_id

            self._local_id += 1

        return _NodeDesc(local_addr or socket.getfqdn(), os.getpid(), local_id)


class _RendezvousState:
    """Hold the state of a rendezvous.

    Attributes:
        round:
            The current round of the rendezvous.
        complete:
            A boolean value indicating whether the current round of the
            rendezvous is complete.
        deadline:
            The time at which the current round of the rendezvous will be
            considered complete if it is still waiting for nodes to join.
        closed:
            A boolean value indicating whether the rendezvous is closed.
        participants:
            A dictionary of the participants and their corresponding ranks.
        wait_list:
            A set of nodes that are waiting to participate in the next round of
            the rendezvous.
        redundancy_list:
            A set of nodes that are redundant in the current round and can join
            the next rendezvous without triggering re-rendezvous.
        last_heartbeats:
            A dictionary containing each node's last heartbeat time.
"""round:intcomplete:booldeadline:Optional[datetime]closed:boolparticipants:Dict[_NodeDesc,int]wait_list:Set[_NodeDesc]redundancy_list:Set[_NodeDesc]last_heartbeats:Dict[_NodeDesc,datetime]def__init__(self)->None:self.round=0self.complete=Falseself.deadline=Noneself.closed=Falseself.participants={}self.wait_list=set()self.redundancy_list=set()self.last_heartbeats={}def_remove_participant_epilogue(state:_RendezvousState,settings:RendezvousSettings)->None:ifstate.complete:# If we do not have any participants left, move to the next round.ifnotstate.participants:msg="No participants left in the rendezvous, marking rendezvous as incomplete"logger.debug(msg)state.complete=Falsestate.round+=1else:iflen(state.participants)<settings.min_nodes:msg=(f"Number of participants {len(state.participants)}) less than"f"min_nodes {settings.min_nodes}, clearning deadline in state")logger.debug(msg)state.deadline=Noneclass_RendezvousStateHolder(ABC):"""Hold the shared rendezvous state synced with other nodes."""@property@abstractmethoddefstate(self)->_RendezvousState:"""Get the local state."""@abstractmethoddefsync(self)->Optional[bool]:"""Read or writes the latest state. Returns: A boolean value indicating whether the local state, in case marked as dirty, was successfully synced with other nodes. """@abstractmethoddefmark_dirty(self)->None:"""Mark the local state as dirty."""class_BackendRendezvousStateHolder(_RendezvousStateHolder):"""Hold the rendezvous state synced with other nodes via a backend. Args: backend: The rendezvous backend to use. settings: The rendezvous settings. cache_duration: The amount of time, in seconds, to cache the last rendezvous state before requesting it from the backend again. """_backend:RendezvousBackend_state:_RendezvousState_settings:RendezvousSettings_cache_duration:int_token:Token_dirty:bool_last_sync_time:float_dead_nodes:List[_NodeDesc]def__init__(self,backend:RendezvousBackend,settings:RendezvousSettings,cache_duration:int=1,)->None:self._backend=backendself._state=_RendezvousState()self._settings=settingsself._cache_duration=cache_durationself._token=Noneself._dirty=Falseself._last_sync_time=-1self._dead_nodes=[]def_record(self,message:str,node_state:NodeState=NodeState.RUNNING):construct_and_record_rdzv_event(name=f"{self.__class__.__name__}.{get_method_name()}",run_id=self._settings.run_id,message=message,node_state=node_state,)@propertydefstate(self)->_RendezvousState:"""See base class."""returnself._statedefsync(self)->Optional[bool]:"""See base class."""state_bits:Optional[bytes]=Nonetoken=Nonehas_set:Optional[bool]ifself._dirty:has_set=Falsestate_bits=pickle.dumps(self._state)set_response=self._backend.set_state(state_bits,self._token)ifset_responseisnotNone:state_bits,token,has_set=set_responseelse:has_set=Noneifself._cache_duration>0:# Avoid overloading the backend if we are asked to retrieve the# state repeatedly. Try to serve the cached state.ifself._last_sync_time>=max(time.monotonic()-self._cache_duration,0):returnNoneget_response=self._backend.get_state()ifget_responseisnotNone:state_bits,token=get_responseifstate_bitsisnotNone:try:self._state=pickle.loads(state_bits)exceptpickle.PickleErrorasexc:raiseRendezvousStateError("The rendezvous state is corrupt. 
                    "The rendezvous state is corrupt. See inner exception for details."
                ) from exc
        else:
            self._state = _RendezvousState()

        if has_set and self._dead_nodes and logger.isEnabledFor(logging.DEBUG):
            node_list = ", ".join(f"'{dead_node}'" for dead_node in self._dead_nodes)

            msg = (
                f"As part of the sync operation the node(s) {node_list} have been removed from the "
                f"rendezvous '{self._settings.run_id}' since they had no heartbeat."
            )
            self._record(message=msg)
            logger.debug(msg)

        self._token = token

        self._dirty = False

        self._last_sync_time = time.monotonic()

        self._sanitize()

        return has_set

    def _sanitize(self) -> None:
        state = self._state

        expire_time = datetime.now(timezone.utc) - (
            self._settings.keep_alive_interval * self._settings.keep_alive_max_attempt
        )

        # Filter out the dead nodes.
        self._dead_nodes = [
            node
            for node, last_heartbeat in state.last_heartbeats.items()
            if last_heartbeat < expire_time
        ]

        participant_removed = False

        for dead_node in self._dead_nodes:
            msg = f"Detected dead node '{dead_node}', removing it from the rendezvous"
            logger.debug(msg)

            del state.last_heartbeats[dead_node]

            try:
                del state.participants[dead_node]

                participant_removed = True
            except KeyError:
                pass

            try:
                state.wait_list.remove(dead_node)
            except KeyError:
                pass

            try:
                state.redundancy_list.remove(dead_node)
            except KeyError:
                pass

        if participant_removed:
            # Common epilogue shared with the _remove_from_participants()
            # function of _DistributedRendezvousOpExecutor.
            _remove_participant_epilogue(state, self._settings)

    def mark_dirty(self) -> None:
        """See base class.

        If the local rendezvous state is dirty, the next sync call will try to
        write the changes back to the backend. However this attempt might fail
        if another node, which had the same state, also made changes and wrote
        them before us.
        """
        self._dirty = True


class _Action(Enum):
    """Specifies the possible actions based on the state of the rendezvous."""

    KEEP_ALIVE = 1
    ADD_TO_PARTICIPANTS = 2
    ADD_TO_WAIT_LIST = 3
    ADD_TO_REDUNDANCY_LIST = 4
    REMOVE_FROM_PARTICIPANTS = 5
    REMOVE_FROM_WAIT_LIST = 6
    REMOVE_FROM_REDUNDANCY_LIST = 7
    MARK_RENDEZVOUS_COMPLETE = 8
    MARK_RENDEZVOUS_CLOSED = 9
    SYNC = 10
    ERROR_CLOSED = 11
    ERROR_TIMEOUT = 12
    FINISH = 13


class _RendezvousContext:
    """Holds the context of the rendezvous.

    Attributes:
        node:
            The node descriptor associated with the current rendezvous handler
            instance.
        state:
            The current state of the rendezvous.
        settings:
            The rendezvous settings.
    """

    node: _NodeDesc
    state: _RendezvousState
    settings: RendezvousSettings

    def __init__(
        self, node: _NodeDesc, state: _RendezvousState, settings: RendezvousSettings
    ) -> None:
        self.node = node
        self.state = state
        self.settings = settings


class _RendezvousOpExecutor(ABC):
    """Execute rendezvous operations."""

    @abstractmethod
    def run(
        self,
        state_handler: Callable[[_RendezvousContext, float], _Action],
        deadline: float,
        update_deadline: Optional[Callable[[timedelta], float]] = None,
    ) -> None:
        """Execute a rendezvous operation.

        An operation is run inside a state machine and is expected to
        transition the rendezvous from one state to another.

        Args:
            state_handler:
                A callable that is expected to return the next state transition
                action based on the current state of the rendezvous.
            deadline:
                The time, in seconds, at which the operation will be considered
                timed-out.
            update_deadline:
                Function to generate a new operation deadline if the current
                node may participate in the next rendezvous.
        """


class _DistributedRendezvousOpExecutor(_RendezvousOpExecutor):
    """Execute rendezvous operations using a shared state.

    Args:
        node:
            The node descriptor associated with the current rendezvous handler
            instance.
        state_holder:
            The ``RendezvousStateHolder`` to use to sync the rendezvous state
            with other nodes.
        settings:
            The rendezvous settings.
"""_node:_NodeDesc_state:_RendezvousState_state_holder:_RendezvousStateHolder_settings:RendezvousSettingsdef__init__(self,node:_NodeDesc,state_holder:_RendezvousStateHolder,settings:RendezvousSettings,)->None:self._node=nodeself._state_holder=state_holderself._settings=settingsdef_record(self,message:str,node_state:NodeState=NodeState.RUNNING)->None:construct_and_record_rdzv_event(name=f"{self.__class__.__name__}.{get_method_name()}",run_id=self._settings.run_id,message=message,node_state=node_state,hostname=self._node.addr,pid=self._node.pid,local_id=self._node.local_id,)defrun(self,state_handler:Callable[[_RendezvousContext,float],_Action],deadline:float,update_deadline:Optional[Callable[[timedelta],float]]=None,)->None:"""See base class."""action=Nonewhileaction!=_Action.FINISH:# Reads or writes the latest rendezvous state shared by all nodes in# the rendezvous. Note that our local changes might get overridden# by another node if that node synced its changes before us.has_set=self._state_holder.sync()ifhas_setisnotNone:ifhas_set:msg=(f"The node '{self._node}' has successfully synced its local changes with "f"other nodes in the rendezvous '{self._settings.run_id}'.")else:msg=(f"The node '{self._node}' has a stale state and failed to sync its local "f"changes with other nodes in the rendezvous '{self._settings.run_id}'.")self._record(message=msg)logger.debug(msg)self._state=self._state_holder.statectx=_RendezvousContext(self._node,self._state,self._settings)# Determine the next action to take based on the current state of# the rendezvous.action=state_handler(ctx,deadline)ifaction==_Action.FINISH:continueifaction==_Action.ERROR_CLOSED:raiseRendezvousClosedErrorifaction==_Action.ERROR_TIMEOUT:raiseRendezvousTimeoutErrorifaction==_Action.SYNC:# Delay the execution by one second to avoid overloading the# backend if we are asked to poll for state changes._delay(seconds=1)else:ifaction==_Action.KEEP_ALIVE:self._keep_alive()elifaction==_Action.ADD_TO_PARTICIPANTS:self._add_to_participants()elifaction==_Action.ADD_TO_WAIT_LIST:self._add_to_wait_list()elifaction==_Action.ADD_TO_REDUNDANCY_LIST:self._add_to_redundancy_list()elifaction==_Action.REMOVE_FROM_PARTICIPANTS:self._remove_from_participants()elifaction==_Action.REMOVE_FROM_WAIT_LIST:self._remove_from_wait_list()elifaction==_Action.REMOVE_FROM_REDUNDANCY_LIST:self._remove_from_redundancy_list()# update deadline since the node may participate in rendezvous processifupdate_deadline:deadline=update_deadline(self._settings.timeout.join)elifaction==_Action.MARK_RENDEZVOUS_COMPLETE:self._mark_rendezvous_complete()elifaction==_Action.MARK_RENDEZVOUS_CLOSED:self._mark_rendezvous_closed()# Attempt to sync our changes back to other nodes.self._state_holder.mark_dirty()def_keep_alive(self)->None:msg=(f"The node '{self._node}' updated its keep-alive heartbeat time for the rendezvous "f"'{self._settings.run_id}'. Pending sync.")self._record(message=msg)logger.debug(msg)self._state.last_heartbeats[self._node]=datetime.now(timezone.utc)def_add_to_participants(self)->None:msg=(f"The node '{self._node}' added itself to the participants of round "f"{self._state.round} of the rendezvous '{self._settings.run_id}'. 
            f"{self._state.round} of the rendezvous '{self._settings.run_id}'. Pending sync."
        )
        self._record(message=msg)
        logger.debug(msg)

        state = self._state

        try:
            state.wait_list.remove(self._node)
        except KeyError:
            pass

        # The ranks of the participants will be set once the rendezvous is
        # complete.
        state.participants[self._node] = 0

        self._keep_alive()

        if len(state.participants) == self._settings.min_nodes:
            state.deadline = (
                datetime.now(timezone.utc) + self._settings.timeout.last_call
            )

        if len(state.participants) == self._settings.max_nodes:
            self._mark_rendezvous_complete()

    def _add_to_wait_list(self) -> None:
        msg = (
            f"The node '{self._node}' added itself to the wait list of round "
            f"{self._state.round + 1} of the rendezvous '{self._settings.run_id}'. Pending sync."
        )
        self._record(message=msg)
        logger.debug(msg)

        if self._node in self._state.redundancy_list:
            self._state.redundancy_list.remove(self._node)
        self._state.wait_list.add(self._node)

        self._keep_alive()

    def _add_to_redundancy_list(self) -> None:
        msg = (
            f"The node '{self._node}' added itself to the redundancy list of round "
            f"{self._state.round + 1} of the rendezvous '{self._settings.run_id}'. Pending sync."
        )
        self._record(message=msg)
        logger.debug(msg)

        self._state.redundancy_list.add(self._node)

        self._keep_alive()

    def _remove_from_participants(self) -> None:
        msg = (
            f"The node '{self._node}' removed itself from the participants of round "
            f"{self._state.round} of the rendezvous '{self._settings.run_id}'. Pending sync."
        )
        self._record(message=msg)
        logger.debug(msg)

        state = self._state

        del state.participants[self._node]

        del state.last_heartbeats[self._node]

        # Common epilogue shared with the sanitizer() function of
        # _BackendRendezvousStateHolder.
        _remove_participant_epilogue(state, self._settings)

    def _remove_from_wait_list(self) -> None:
        msg = (
            f"The node '{self._node}' removed itself from the wait list of round "
            f"{self._state.round + 1} of the rendezvous '{self._settings.run_id}'. Pending sync."
        )
        self._record(message=msg)
        logger.debug(msg)

        self._state.wait_list.remove(self._node)

        del self._state.last_heartbeats[self._node]

    def _remove_from_redundancy_list(self) -> None:
        msg = (
            f"The node '{self._node}' removed itself from the redundancy list of round "
            f"{self._state.round + 1} of the rendezvous '{self._settings.run_id}'. Pending sync."
        )
        self._record(message=msg)
        logger.debug(msg)

        self._state.redundancy_list.remove(self._node)

        del self._state.last_heartbeats[self._node]

    def _mark_rendezvous_complete(self) -> None:
        msg = (
            f"The node '{self._node}' marked round {self._state.round} of the rendezvous "
            f"'{self._settings.run_id}' as complete. Pending sync."
        )
        self._record(message=msg, node_state=NodeState.SUCCEEDED)
        logger.debug(msg)

        state = self._state

        state.complete = True
        state.deadline = None

        # Assign the ranks.
        for rank, node in enumerate(sorted(state.participants)):
            state.participants[node] = rank

    def _mark_rendezvous_closed(self) -> None:
        msg = (
            f"The node '{self._node}' marked the rendezvous '{self._settings.run_id}' as closed. "
""Pending sync.")self._record(message=msg,node_state=NodeState.SUCCEEDED)logger.debug(msg)self._state.closed=Truedef_should_keep_alive(ctx:_RendezvousContext)->bool:"""Determine whether a keep-alive heartbeat should be sent."""try:last_heartbeat=ctx.state.last_heartbeats[ctx.node]exceptKeyError:returnFalsereturn(last_heartbeat<=datetime.now(timezone.utc)-ctx.settings.keep_alive_interval)class_RendezvousExitOp:"""Represent a rendezvous exit operation."""def__call__(self,ctx:_RendezvousContext,deadline:float)->_Action:ifctx.nodeinctx.state.participants:iftime.monotonic()>deadline:return_Action.ERROR_TIMEOUTreturn_Action.REMOVE_FROM_PARTICIPANTSreturn_Action.FINISHclass_RendezvousJoinOp:"""Represent a rendezvous join operation."""def__call__(self,ctx:_RendezvousContext,deadline:float)->_Action:state=ctx.state# A closed rendezvous means that it no longer accepts new nodes.ifstate.closed:ifctx.nodeinstate.redundancy_list:msg=f"The rendezvous '{ctx.settings.run_id}' is closed, terminating pending rendezvous."raiseRendezvousGracefulExitError(msg)return_Action.ERROR_CLOSEDifctx.nodeinstate.redundancy_list:msg=f"The node {ctx.node} is in redunancy list"logger.debug(msg)# don't apply the timeout logic here, since we want to allow the node to rejoiniflen(state.participants)==ctx.settings.max_nodes:if_should_keep_alive(ctx):return_Action.KEEP_ALIVEelse:return_Action.SYNCelse:# transition to waiting state that will respect timeouts.msg=f"The node {ctx.node} is removed from redunancy list"logger.debug(msg)return_Action.REMOVE_FROM_REDUNDANCY_LISTis_participant=ctx.nodeinstate.participants# If we are part of the rendezvous and it is already complete there is# no further action to take.ifstate.completeandis_participant:return_Action.FINISHnow=time.monotonic()ifnow>deadline:rollback_period=5# 5 seconds# If we still have time to rollback (a short period on top of the# operation deadline), try to remove ourself from the rendezvous.# It is okay if we can't though as our keep-alive will eventually# expire.ifnow<=deadline+rollback_period:# If we are part of the rendezvous, it means we couldn't find# enough participants to complete it on time.ifis_participant:return_Action.REMOVE_FROM_PARTICIPANTS# If we are in the wait list, it means we couldn't wait till the# next round of the rendezvous.ifctx.nodeinstate.wait_list:return_Action.REMOVE_FROM_WAIT_LISTreturn_Action.ERROR_TIMEOUTifstate.complete:# If we are here, it means we are not part of the rendezvous. In# case the rendezvous has capacity for additional participants add# ourself to the wait list for the next round.iflen(state.participants)<ctx.settings.max_nodes:ifctx.nodenotinstate.wait_list:return_Action.ADD_TO_WAIT_LISTeliflen(state.participants)>=ctx.settings.max_nodes:if(ctx.nodenotinstate.redundancy_listandctx.nodenotinstate.wait_list):return_Action.ADD_TO_REDUNDANCY_LISTelifis_participant:# If the rendezvous has enough number of participants including us,# check whether we have passed the rendezvous deadline. 
            # complete it.
            if (
                len(state.participants) >= ctx.settings.min_nodes
                and len(state.participants) <= ctx.settings.max_nodes
                and state.deadline is not None
            ):
                if state.deadline < datetime.now(timezone.utc):
                    msg = (
                        f"The node '{ctx.node}' marking the rendezvous complete, "
                        f"quorum established within deadline"
                    )
                    logger.debug(msg)
                    return _Action.MARK_RENDEZVOUS_COMPLETE
                else:
                    msg = f"The node '{ctx.node}' can't complete rendezvous: deadline reached"
                    logger.debug(msg)
            else:
                msg = f"The node '{ctx.node}' can't complete rendezvous: not enough participants"
                logger.debug(msg)
        else:
            # The rendezvous is not complete yet and we are not part of it. Try
            # to join.
            return _Action.ADD_TO_PARTICIPANTS

        if _should_keep_alive(ctx):
            return _Action.KEEP_ALIVE

        # At this point either the rendezvous is not complete, but we are part
        # of it, which means we have to wait for other participants to join; or
        # the rendezvous is complete, but we are not part of it, which means we
        # have to wait for the next round.
        return _Action.SYNC


class _RendezvousCloseOp:
    """Represent a rendezvous close operation."""

    def __call__(self, ctx: _RendezvousContext, deadline: float) -> _Action:
        if ctx.state.closed:
            return _Action.FINISH
        if time.monotonic() > deadline:
            return _Action.ERROR_TIMEOUT
        return _Action.MARK_RENDEZVOUS_CLOSED


class _RendezvousKeepAliveOp:
    """Represent a rendezvous keep-alive update operation."""

    def __call__(self, ctx: _RendezvousContext, deadline: float) -> _Action:
        if _should_keep_alive(ctx):
            if time.monotonic() > deadline:
                return _Action.ERROR_TIMEOUT
            return _Action.KEEP_ALIVE
        return _Action.FINISH
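For illustration only (these are private APIs): the handler defined next drives the operations above through the executor roughly as sketched here, assuming a ``node``, ``state_holder``, and ``settings`` built the same way as in ``DynamicRendezvousHandler.from_backend()``. Each operation is replayed against the freshly synced state until it returns ``_Action.FINISH`` or raises a rendezvous error.

op_executor = _DistributedRendezvousOpExecutor(node, state_holder, settings)

join_deadline = time.monotonic() + settings.timeout.join.total_seconds()
op_executor.run(_RendezvousExitOp(), join_deadline)  # leave any previous round first
op_executor.run(_RendezvousJoinOp(), join_deadline)  # then join/complete the current round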
class DynamicRendezvousHandler(RendezvousHandler):
    """Represent a handler that sets up a rendezvous among a set of nodes."""

    # Static
    _node_desc_generator = _NodeDescGenerator()

    _this_node: _NodeDesc
    _settings: RendezvousSettings
    _backend_name: str
    _store: Store
    _state_holder: _RendezvousStateHolder
    _op_executor: _RendezvousOpExecutor
    _heartbeat_lock: threading.Lock
    _keep_alive_timer: Optional[_PeriodicTimer]
    @classmethod
    def from_backend(
        cls,
        run_id: str,
        store: Store,
        backend: RendezvousBackend,
        min_nodes: int,
        max_nodes: int,
        local_addr: Optional[str] = None,
        timeout: Optional[RendezvousTimeout] = None,
    ):
        """Create a new :py:class:`DynamicRendezvousHandler`.

        Args:
            run_id:
                The run id of the rendezvous.
            store:
                The C10d store to return as part of the rendezvous.
            backend:
                The backend to use to hold the rendezvous state.
            min_nodes:
                The minimum number of nodes to admit to the rendezvous.
            max_nodes:
                The maximum number of nodes to admit to the rendezvous.
            local_addr:
                The local node address.
            timeout:
                The timeout configuration of the rendezvous.
        """
        # We associate each handler instance with a unique node descriptor.
        node = cls._node_desc_generator.generate(local_addr)

        settings = RendezvousSettings(
            run_id,
            min_nodes,
            max_nodes,
            timeout or RendezvousTimeout(),
            keep_alive_interval=timedelta(seconds=5),
            keep_alive_max_attempt=3,
        )

        state_holder = _BackendRendezvousStateHolder(backend, settings)

        return cls(node, settings, backend.name, store, state_holder)
    def __init__(
        self,
        node: _NodeDesc,
        settings: RendezvousSettings,
        backend_name: str,
        store: Store,
        state_holder: _RendezvousStateHolder,
    ) -> None:
        if not settings.run_id:
            raise ValueError("The run id must be a non-empty string.")

        if settings.min_nodes < 1:
            raise ValueError(
                f"The minimum number of nodes ({settings.min_nodes}) must be greater than zero."
            )

        if settings.max_nodes < settings.min_nodes:
            raise ValueError(
                f"The maximum number of nodes ({settings.max_nodes}) must be greater than or equal "
                f"to the minimum number of nodes ({settings.min_nodes})."
            )

        self._this_node = node

        self._settings = settings

        self._backend_name = backend_name

        self._store = store

        self._state_holder = state_holder

        self._op_executor = _DistributedRendezvousOpExecutor(
            self._this_node, self._state_holder, self._settings
        )

        self._heartbeat_lock = threading.Lock()

        self._keep_alive_timer = None

        # Cached shared store server reference
        self._shared_tcp_store_server: Optional[dist.Store] = None

        self._bootstrap_store_info: Optional[RendezvousStoreInfo] = None

    def _record(
        self,
        message: str,
        node_state: NodeState = NodeState.RUNNING,
        rank: Optional[int] = None,
    ) -> None:
        construct_and_record_rdzv_event(
            name=f"{self.__class__.__name__}.{get_method_name()}",
            run_id=self._settings.run_id,
            message=message,
            node_state=node_state,
            hostname=self._this_node.addr,
            pid=self._this_node.pid,
            local_id=self._this_node.local_id,
            rank=rank,
        )

    def _create_tcp_store_server(self, master_addr, master_port) -> dist.TCPStore:
        return dist.TCPStore(
            host_name=master_addr,
            port=master_port,
            is_master=True,
            multi_tenant=True,
        )

    @property
    def settings(self) -> RendezvousSettings:
        """Get the settings of the rendezvous."""
        return self._settings

    def get_backend(self) -> str:
        """See base class."""
        return self._backend_name

    @property
    def use_agent_store(self) -> bool:
        """See base class."""
        return os.getenv("TORCH_DISABLE_SHARE_RDZV_TCP_STORE", "0") != "1"

    def next_rendezvous(self) -> RendezvousInfo:
        """See base class."""
        msg = (
            f"The node '{self._this_node}' attempts to join the next round of the rendezvous "
            f"'{self._settings.run_id}'."
        )
        self._record(message=msg)
        logger.info(msg)

        try:
            self._stop_heartbeats()

            # Delay the execution for a small random amount of time if this is our
            # first run. This will slightly skew the rendezvous attempts across the
            # nodes and reduce the load on the backend.
            if self._state_holder.state.round == 0:
                _delay(seconds=(0, 0.3))

            exit_op = _RendezvousExitOp()
            join_op = _RendezvousJoinOp()

            deadline = self._get_deadline(self._settings.timeout.join)
            self._op_executor.run(exit_op, deadline)
            self._op_executor.run(join_op, deadline, self._get_deadline)

            self._start_heartbeats()

            rank, world_size = self._get_world()
            store = self._get_store()

        except Exception as e:
            self._record(
                message=f"{type(e).__name__}: {str(e)}",
                node_state=NodeState.FAILED,
            )
            raise

        msg = (
            f"The node '{self._this_node}' has joined round {self._state_holder.state.round} of "
            f"the rendezvous '{self._settings.run_id}' as rank {rank} in a world of size "
            f"{world_size}."
        )
        self._record(message=msg, rank=rank)
        logger.info(msg)

        # opt-out option of TCPStore sharing
        if os.getenv("TORCH_DISABLE_SHARE_RDZV_TCP_STORE", "0") == "1":
            bootstrap_store_info = RendezvousStoreInfo.build(
                rank, store, local_addr=self._this_node.addr
            )
            return RendezvousInfo(
                store,
                rank,
                world_size,
                bootstrap_store_info,
            )

        # This will only be hit when TCPStore sharing is enabled.
        if self._bootstrap_store_info is None:
            # To avoid race in get_free_port because we release the port after the call,
            # we want to create a TCPStore server soon afterwards.
            server_port = 0
            if rank == 0:
                self._shared_tcp_store_server = self._create_tcp_store_server(
                    self._this_node.addr, server_port
                )
                server_port = self._shared_tcp_store_server.port
            self._bootstrap_store_info = RendezvousStoreInfo.build(
                rank,
                store,
                local_addr=self._this_node.addr,
                server_port=server_port,  # For non-0 rank, this is a no-op
            )

        assert self._bootstrap_store_info is not None
        if rank == 0:
            assert self._shared_tcp_store_server is not None

        return RendezvousInfo(
            store,
            rank,
            world_size,
            self._bootstrap_store_info,  # type: ignore[assignment]
        )

    def is_closed(self) -> bool:
        """See base class."""
        try:
            with self._heartbeat_lock:
                self._state_holder.sync()

                return self._state_holder.state.closed

        except Exception as e:
            self._record(
                message=f"{type(e).__name__}: {str(e)}",
                node_state=NodeState.FAILED,
            )
            raise

    def set_closed(self) -> None:
        """See base class."""
        try:
            with self._heartbeat_lock:
                self._close()
        except Exception as e:
            self._record(
                message=f"{type(e).__name__}: {str(e)}",
                node_state=NodeState.FAILED,
            )
            raise

    def num_nodes_waiting(self) -> int:
        """See base class."""
        try:
            with self._heartbeat_lock:
                self._state_holder.sync()

                return len(self._state_holder.state.wait_list)

        except Exception as e:
            self._record(
                message=f"{type(e).__name__}: {str(e)}",
                node_state=NodeState.FAILED,
            )
            raise

    def get_run_id(self) -> str:
        """See base class."""
        return self._settings.run_id

    def shutdown(self) -> bool:
        """See base class."""
        self._stop_heartbeats()

        try:
            self._close()

            return True
        except RendezvousError as ex:
            msg = (
                f"The node '{self._this_node}' has failed to shutdown the rendezvous "
                f"'{self._settings.run_id}' due to an error of type {type(ex).__name__}."
            )
            self._record(message=msg, node_state=NodeState.FAILED)
            logger.warning(msg)

            return False
        except Exception as e:
            self._record(
                message=f"{type(e).__name__}: {str(e)}",
                node_state=NodeState.FAILED,
            )
            raise

    def _close(self) -> None:
        op = _RendezvousCloseOp()

        deadline = self._get_deadline(self._settings.timeout.close)

        self._op_executor.run(op, deadline)

        msg = f"The node '{self._this_node}' has closed the rendezvous '{self._settings.run_id}'."
'{self._settings.run_id}'."self._record(message=msg,node_state=NodeState.SUCCEEDED)logger.info(msg)@staticmethoddef_keep_alive_weak(weak_self)->None:self=weak_self()ifselfisnotNone:self._keep_alive()def_keep_alive(self)->None:self._heartbeat_lock.acquire()op=_RendezvousKeepAliveOp()deadline=self._get_deadline(self._settings.timeout.heartbeat)try:self._op_executor.run(op,deadline)msg=(f"The node '{self._this_node}' has sent a keep-alive heartbeat to the rendezvous "f"'{self._settings.run_id}'.")self._record(message=msg)logger.debug(msg)exceptRendezvousErrorasex:msg=(f"The node '{self._this_node}' has failed to send a keep-alive heartbeat to the "f"rendezvous '{self._settings.run_id}' due to an error of type {type(ex).__name__}.")self._record(message=msg,node_state=NodeState.FAILED)logger.warning(msg)finally:self._heartbeat_lock.release()def_start_heartbeats(self)->None:self._keep_alive_timer=_PeriodicTimer(self._settings.keep_alive_interval,self._keep_alive_weak,weakref.ref(self))self._keep_alive_timer.set_name(f"RendezvousKeepAliveTimer_{self._this_node.local_id}")self._keep_alive_timer.start()def_stop_heartbeats(self)->None:ifself._keep_alive_timerisNone:returnself._keep_alive_timer.cancel()def_get_world(self)->Tuple[int,int]:state=self._state_holder.statereturnstate.participants[self._this_node],len(state.participants)def_wrap_store(self,store:Store)->Store:key_prefix=(f"torch.rendezvous.{self._settings.run_id}.{self._state_holder.state.round}")returndist.PrefixStore(key_prefix,store)def_get_store(self)->Store:returnself._wrap_store(self._store)def_get_deadline(self,timeout:timedelta)->float:returntime.monotonic()+timeout.total_seconds()
def create_handler(
    store: Store, backend: RendezvousBackend, params: RendezvousParameters
) -> DynamicRendezvousHandler:
    """Create a new :py:class:`DynamicRendezvousHandler` from the specified parameters.

    Args:
        store:
            The C10d store to return as part of the rendezvous.
        backend:
            The backend to use to hold the rendezvous state.

    +-------------------+------------------------------------------------------+
    | Parameter         | Description                                          |
    +===================+======================================================+
    | join_timeout      | The total time, in seconds, within which the         |
    |                   | rendezvous is expected to complete. Defaults to 600  |
    |                   | seconds.                                             |
    +-------------------+------------------------------------------------------+
    | last_call_timeout | An additional wait amount, in seconds, before        |
    |                   | completing the rendezvous once the minimum number of |
    |                   | nodes has been reached. Defaults to 30 seconds.      |
    +-------------------+------------------------------------------------------+
    | close_timeout     | The time, in seconds, within which the rendezvous is |
    |                   | expected to close after a call to                    |
    |                   | :py:meth:`RendezvousHandler.set_closed` or           |
    |                   | :py:meth:`RendezvousHandler.shutdown`. Defaults to   |
    |                   | 30 seconds.                                          |
    +-------------------+------------------------------------------------------+
    """
    try:
        timeout = RendezvousTimeout(
            _get_timeout(params, "join"),
            _get_timeout(params, "last_call"),
            _get_timeout(params, "close"),
        )

        return DynamicRendezvousHandler.from_backend(
            params.run_id,
            store,
            backend,
            params.min_nodes,
            params.max_nodes,
            params.local_addr,
            timeout,
        )
    except Exception as e:
        construct_and_record_rdzv_event(
            message=f"{type(e).__name__}: {str(e)}",
            run_id=params.run_id,
            node_state=NodeState.FAILED,
        )
        raise
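Note that ``create_handler`` calls a module-level ``_get_timeout`` helper that does not appear in this extract. A plausible sketch of it, assuming the ``RendezvousParameters.get_as_int`` accessor, would be:

def _get_timeout(params: RendezvousParameters, key: str) -> Optional[timedelta]:
    # Read e.g. "join_timeout" from the rendezvous parameters; None means
    # "use the RendezvousTimeout default" for that field.
    timeout = params.get_as_int(key + "_timeout")
    if timeout is None:
        return None
    return timedelta(seconds=timeout)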