import io

import torch
from ._utils import _type, _cuda, _hpu
from torch.types import Storage
from typing import cast, Any, Dict as _Dict, Optional as _Optional, TypeVar, Type, Union
import copy
import collections
from functools import lru_cache
import warnings
import threading
import functools

try:
    import numpy as np
    HAS_NUMPY = True
except ModuleNotFoundError:
    np = None  # type: ignore[assignment]
    HAS_NUMPY = False  # ensure the flag is always defined when numpy is unavailable

_share_memory_lock = threading.Lock()
_share_memory_map: _Dict[int, threading.RLock] = {}

T = TypeVar('T', bound='Union[_StorageBase, TypedStorage]')


class _StorageBase:
    _cdata: Any
    is_sparse: bool = False
    is_sparse_csr: bool = False
    device: torch.device

    def __init__(self, *args, **kwargs): ...  # noqa: E704
    def __len__(self) -> int: ...  # type: ignore[empty-body] # noqa: E704
    def __getitem__(self, idx): ...  # noqa: E704
    def __setitem__(self, *args, **kwargs): ...  # noqa: E704
    def copy_(self, source: T, non_blocking: _Optional[bool] = None) -> T: ...  # type: ignore[empty-body] # noqa: E704
    def new(self) -> T: ...  # type: ignore[empty-body, misc, type-var] # noqa: E704
    def nbytes(self) -> int: ...  # type: ignore[empty-body] # noqa: E704

    def size(self) -> int:
        return self.nbytes()

    def type(self, dtype: _Optional[str] = None, non_blocking: bool = False) -> T: ...  # type: ignore[empty-body, misc, type-var] # noqa: E704
    def cuda(self, device=None, non_blocking=False, **kwargs) -> T: ...  # type: ignore[empty-body, misc, type-var] # noqa: E704
    def hpu(self, device=None, non_blocking=False, **kwargs) -> T: ...  # type: ignore[empty-body, misc, type-var] # noqa: E704
    def element_size(self) -> int: ...  # type: ignore[empty-body, type-var] # noqa: E704

    def get_device(self) -> int:
        return self.device.index

    def data_ptr(self) -> int: ...  # type: ignore[empty-body] # noqa: E704

    # Defined in torch/csrc/generic/StorageSharing.cpp
    def _share_filename_cpu_(self, *args, **kwargs): ...  # noqa: E704
    def _share_fd_cpu_(self, *args, **kwargs): ...  # noqa: E704
    @classmethod
    def _new_using_filename_cpu(cls: Type[T], size: int) -> T: ...  # type: ignore[empty-body] # noqa: E704
    @classmethod
    def _new_using_fd_cpu(cls: Type[T], size: int) -> T: ...  # type: ignore[empty-body] # noqa: E704
    @classmethod
    def from_buffer(cls: Type[T], *args, **kwargs) -> T: ...  # type: ignore[empty-body] # noqa: E704
    @classmethod
    def _new_shared_filename_cpu(cls: Type[T], manager, obj, size, *, device=None, dtype=None) -> T: ...  # type: ignore[empty-body] # noqa: E704
    @classmethod
    def _release_ipc_counter_cuda(cls: Type[T], *args, **kwargs) -> T: ...  # type: ignore[empty-body] # noqa: E704
    @classmethod
    def _new_with_weak_ptr(cls: Type[T], *args, **kwargs) -> T: ...  # type: ignore[empty-body] # noqa: E704
    def _shared_decref(self) -> T: ...  # type: ignore[empty-body, misc, type-var] # noqa: E704
    def _write_file(self, *args, **kwargs): ...  # noqa: E704
    def resize_(self, size: int): ...  # noqa: E704
    def _weak_ref(self, *args, **kwargs) -> T: ...  # type: ignore[empty-body, misc, type-var] # noqa: E704
    def _set_from_file(self, *args, **kwargs): ...  # noqa: E704
    def _set_cdata(self, *args, **kwargs): ...  # noqa: E704
    def _share_cuda_(self, *args, **kwargs): ...  # noqa: E704
    def is_shared(self) -> bool: ...  # type: ignore[empty-body] # noqa: E704
    @classmethod
    def _new_shared_cuda(cls: Type[T], *args, **kwargs) -> T: ...  # type: ignore[empty-body] # noqa: E704
    def _shared_incref(self, *args, **kwargs): ...  # noqa: E704
    @classmethod
    def _free_weak_ref(cls, *args, **kwargs): ...  # noqa: E704
    @property
    def is_cuda(self): ...  # noqa: E704
    @property
    def is_hpu(self): ...  # noqa: E704
    @classmethod
    def from_file(cls, filename, shared, nbytes) -> T: ...  # type: ignore[empty-body, misc, type-var] # noqa: E704
    @classmethod
    def _expired(cls, *args, **kwargs) -> T: ...  # type: ignore[empty-body, misc, type-var] # noqa: E704
    def _byteswap(self, *args, **kwargs): ...  # noqa: E704
    def __str__(self):
        info_str = (
            f'[{torch.typename(self)}(device={self.device}) '
            f'of size {len(self)}]')
        if self.device.type == 'meta':
            return '...\n' + info_str
        else:
            data_str = ' ' + '\n '.join(str(self[i]) for i in range(self.size()))
            return data_str + '\n' + info_str

    def __repr__(self):
        return str(self)

    def __iter__(self):
        return iter(self[i] for i in range(self.size()))

    def __copy__(self):
        return self.clone()

    def __deepcopy__(self, memo):
        memo = memo.setdefault('torch', {})
        if self._cdata in memo:
            return memo[self._cdata]
        new_storage = self.clone()
        memo[self._cdata] = new_storage
        return new_storage

    def __reduce__(self):
        b = io.BytesIO()
        torch.save(self, b, _use_new_zipfile_serialization=False)
        return (_load_from_bytes, (b.getvalue(),))

    def __sizeof__(self):
        return super().__sizeof__() + self.size()

    def clone(self):
        """Returns a copy of this storage"""
        return type(self)(self.nbytes(), device=self.device).copy_(self)

    def tolist(self):
        """Returns a list containing the elements of this storage"""
        return list(self)

    def cpu(self):
        """Returns a CPU copy of this storage if it's not already on the CPU"""
        if self.device.type != 'cpu':
            return torch.UntypedStorage(self.size()).copy_(self, False)
        else:
            return self

    def mps(self):
        """Returns an MPS copy of this storage if it's not already on MPS"""
        if self.device.type != 'mps':
            return torch.UntypedStorage(self.size(), device="mps").copy_(self, False)
        else:
            return self

    def _to(self, dtype):
        if not isinstance(dtype, torch.dtype):
            raise TypeError(f"Argument 'dtype' must be torch.dtype, not {type(dtype)}")
        storage = torch.tensor([], dtype=torch.uint8, device=self.device).set_(
            cast(Storage, self)).to(dtype)._typed_storage()
        if storage.data_ptr() == self.data_ptr():
            storage = storage.clone()
        return storage

    def double(self):
        """Casts this storage to double type"""
        return self._to(torch.double)

    def float(self):
        """Casts this storage to float type"""
        return self._to(torch.float)

    def half(self):
        """Casts this storage to half type"""
        return self._to(torch.half)

    def long(self):
        """Casts this storage to long type"""
        return self._to(torch.long)

    def int(self):
        """Casts this storage to int type"""
        return self._to(torch.int)

    def short(self):
        """Casts this storage to short type"""
        return self._to(torch.short)

    def char(self):
        """Casts this storage to char type"""
        return self._to(torch.int8)

    def byte(self):
        """Casts this storage to byte type"""
        return self._to(torch.uint8)

    def bool(self):
        """Casts this storage to bool type"""
        return self._to(torch.bool)

    def bfloat16(self):
        """Casts this storage to bfloat16 type"""
        return self._to(torch.bfloat16)

    def complex_double(self):
        """Casts this storage to complex double type"""
        return self._to(torch.cdouble)

    def complex_float(self):
        """Casts this storage to complex float type"""
        return self._to(torch.cfloat)

    def is_pinned(self, device: Union[str, torch.device] = 'cuda'):
        r"""Determine whether the CPU storage is already pinned on device.

        Args:
            device (str or torch.device): The device to pin memory on. Default: ``'cuda'``.

        Returns:
            A boolean variable.
        """
        return torch.tensor([], dtype=torch.uint8, device=self.device).set_(
            cast(Storage, self)).is_pinned(device)

    def pin_memory(self, device: Union[str, torch.device] = 'cuda'):
        r"""Copies the CPU storage to pinned memory, if it's not already pinned.

        Args:
            device (str or torch.device): The device to pin memory on. Default: ``'cuda'``.

        Returns:
            A pinned CPU storage.
        """
        if self.device.type != 'cpu':
            raise TypeError(f"cannot pin '{self.type()}' only CPU memory can be pinned")

        pinned_tensor = torch.tensor([], dtype=torch.uint8, device=self.device).set_(
            cast(Storage, self)).pin_memory(device)
        return pinned_tensor.untyped_storage()

    def share_memory_(self):
        """Moves the storage to shared memory.

        This is a no-op for storages already in shared memory and for CUDA
        storages, which do not need to be moved for sharing across processes.
        Storages in shared memory cannot be resized.

        Note that to mitigate issues like https://github.com/pytorch/pytorch/issues/95606
        it is thread safe to call this function from multiple threads on the same object.
        It is NOT thread safe though to call any other function on self without proper
        synchronization. Please see :doc:`/notes/multiprocessing` for more details.

        Returns: self
        """
        from torch.multiprocessing import get_sharing_strategy
        if self.device.type in ["cuda", torch._C._get_privateuse1_backend_name()]:
            pass  # CUDA or PrivateUse1 doesn't use POSIX shared memory
        elif get_sharing_strategy() == 'file_system':
            self._share_filename_cpu_()
        else:
            self._share_fd_cpu_()
        return self

    @classmethod
    def _new_shared(cls, size, *, device='cpu'):
        """Creates a new storage in shared memory with the same data type"""
        from torch.multiprocessing import get_sharing_strategy
        device = torch.device(device)
        if device.type in ["cuda", torch._C._get_privateuse1_backend_name()]:
            return cls(size, device=device)
        elif get_sharing_strategy() == 'file_system':
            return cls._new_using_filename_cpu(size)
        else:
            return cls._new_using_fd_cpu(size)

    def untyped(self):
        return self

    def byteswap(self, dtype):
        """Swaps bytes in underlying data"""
        elem_size = torch._utils._element_size(dtype)
        # for complex types, don't swap first and second numbers
        if dtype.is_complex:
            elem_size = max(int(elem_size / 2), 1)
        self._byteswap(elem_size)


def _share_memory_lock_protected(fn):
    @functools.wraps(fn)
    def wrapper(self, *args, **kwargs):
        to_free = None
        to_wait = None
        with _share_memory_lock:
            key = self._cdata
            if key in _share_memory_map:
                to_wait = _share_memory_map[key]
            else:
                _share_memory_map[key] = threading.RLock()
                _share_memory_map[key].acquire()
                to_free = key

        # If we're already in the process of sharing the storage, wait
        # for it to be done.
        if to_wait is not None:
            with to_wait:
                pass

        try:
            return fn(self, *args, **kwargs)
        finally:
            # If we acquired the storage lock here and we're done working on it
            # we can now release it and free the entry.
            if to_free is not None:
                # Ensure that the cdata from the storage didn't change and only
                # the data_ptr did.
                assert self._cdata == to_free
                with _share_memory_lock:
                    _share_memory_map[to_free].release()
                    del _share_memory_map[to_free]
    return wrapper
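
# Illustrative usage sketch (not part of the module): sharing a CPU storage
# across processes with share_memory_(). The values below are assumptions for
# demonstration; on CUDA storages the call is a no-op, as documented above.
#
#   >>> import torch
#   >>> s = torch.UntypedStorage(16)    # 16-byte CPU storage
#   >>> s.is_shared()
#   False
#   >>> _ = s.share_memory_()           # moves the allocation to shared memory, returns self
#   >>> s.is_shared()
#   True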

class UntypedStorage(torch._C.StorageBase, _StorageBase):
    def __getitem__(self, *args, **kwargs):
        if self.device.type == 'meta':
            raise NotImplementedError("Not available for 'meta' device type")
        return super().__getitem__(*args, **kwargs)

    @property
    def is_cuda(self):
        return self.device.type == 'cuda'

    @property
    def is_hpu(self):
        return self.device.type == 'hpu'
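
# Illustrative usage sketch (not part of the module): UntypedStorage is a flat
# byte buffer; indexing reads and writes individual byte values, unlike the
# TypedStorage indexing defined further below. Values shown are assumptions.
#
#   >>> import torch
#   >>> s = torch.UntypedStorage(4)     # 4 uninitialized bytes on CPU
#   >>> s[0] = 255
#   >>> s[0]
#   255
#   >>> s.nbytes()
#   4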

def _load_from_bytes(b):
    return torch.load(io.BytesIO(b))


_StorageBase.type = _type  # type: ignore[assignment]
_StorageBase.cuda = _cuda  # type: ignore[assignment]
_StorageBase.hpu = _hpu  # type: ignore[assignment]


@lru_cache(maxsize=None)
def _dtype_to_storage_type_map():
    # NOTE: We should no longer add dtypes to this map. This map
    # is only used for BC/FC with older PyTorch versions. Going forward,
    # new dtypes of TypedStorage should not translate to a legacy
    # <type>Storage class. Instead, new dtypes of TypedStorage should
    # be serialized as an UntypedStorage paired with a torch.dtype
    return {
        torch.double: 'DoubleStorage',
        torch.float: 'FloatStorage',
        torch.half: 'HalfStorage',
        torch.long: 'LongStorage',
        torch.int: 'IntStorage',
        torch.int16: 'ShortStorage',
        torch.int8: 'CharStorage',
        torch.uint8: 'ByteStorage',
        torch.bool: 'BoolStorage',
        torch.bfloat16: 'BFloat16Storage',
        torch.cdouble: 'ComplexDoubleStorage',
        torch.cfloat: 'ComplexFloatStorage',
        torch.qint8: 'QInt8Storage',
        torch.qint32: 'QInt32Storage',
        torch.quint8: 'QUInt8Storage',
        torch.quint4x2: 'QUInt4x2Storage',
        torch.quint2x4: 'QUInt2x4Storage',
    }


@lru_cache(maxsize=None)
def _storage_type_to_dtype_map():
    dtype_map = {val: key for key, val in _dtype_to_storage_type_map().items()}
    return dtype_map


def _get_storage_from_sequence(sequence, dtype, device):
    if dtype in [torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8]:
        interpret_dtypes = {
            torch.quint8: torch.uint8,
            torch.quint4x2: torch.uint8,
            torch.quint2x4: torch.uint8,
            torch.qint32: torch.int32,
            torch.qint8: torch.int8
        }
        tmp_tensor = torch.tensor(sequence, dtype=interpret_dtypes[dtype], device=device)
    else:
        tmp_tensor = torch.tensor(sequence, dtype=dtype, device=device)

    return tmp_tensor._typed_storage()._untyped_storage


def _isint(x):
    if HAS_NUMPY:
        return isinstance(x, (int, np.integer))
    else:
        return isinstance(x, int)


_always_warn_typed_storage_removal = False


def _get_always_warn_typed_storage_removal():
    return _always_warn_typed_storage_removal


def _set_always_warn_typed_storage_removal(always_warn):
    global _always_warn_typed_storage_removal
    assert isinstance(always_warn, bool)
    _always_warn_typed_storage_removal = always_warn


def _warn_typed_storage_removal(stacklevel=2):
    global _always_warn_typed_storage_removal

    def is_first_time():
        if not hasattr(_warn_typed_storage_removal, 'has_warned'):
            return True
        else:
            return not _warn_typed_storage_removal.__dict__['has_warned']

    if _get_always_warn_typed_storage_removal() or is_first_time():
        message = (
            "TypedStorage is deprecated. It will be removed in the future and "
            "UntypedStorage will be the only storage class. This should only matter "
            "to you if you are using storages directly. To access UntypedStorage "
            "directly, use tensor.untyped_storage() instead of tensor.storage()")
        warnings.warn(message, UserWarning, stacklevel=stacklevel + 1)
        _warn_typed_storage_removal.__dict__['has_warned'] = True


def _reset_warn_typed_storage_removal():
    _warn_typed_storage_removal.__dict__['has_warned'] = False


def _get_device_from_module(module: str):
    if module.split(".")[-1] in ["cuda", torch._C._get_privateuse1_backend_name()]:
        return module.split(".")[-1]
    else:
        return "cpu"
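
# Illustrative usage sketch (not part of the module): the two cached maps above
# translate between dtypes and legacy <type>Storage class names, which is only
# needed for backward/forward-compatible serialization.
#
#   >>> import torch
#   >>> torch.storage._dtype_to_storage_type_map()[torch.float]
#   'FloatStorage'
#   >>> torch.storage._storage_type_to_dtype_map()['FloatStorage']
#   torch.float32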

class TypedStorage:
    is_sparse = False

    dtype: torch.dtype

    def __new__(cls, *args, wrap_storage=None, dtype=None, device=None, _internal=False):
        if not _internal:
            _warn_typed_storage_removal()

        if cls == torch.storage._LegacyStorage:
            raise RuntimeError("Only child classes of _LegacyStorage can be instantiated")

        if cls == TypedStorage:
            return super().__new__(cls)

        else:
            arg_error_msg = (
                f'{cls}.__new__ received an invalid combination '
                f'of arguments. Expected one of:\n'
                ' * no arguments\n'
                ' * (int size)\n'
                ' * (Sequence data)\n'
                ' * (*, UntypedStorage wrap_storage)')

            if device is not None:
                raise RuntimeError(
                    arg_error_msg +
                    "\nKeyword argument 'device' cannot be specified")

            if dtype is not None:
                raise RuntimeError(
                    arg_error_msg +
                    "\nKeyword argument 'dtype' cannot be specified")

            if wrap_storage is None:
                if len(args) > 1:
                    raise RuntimeError(
                        arg_error_msg +
                        "\nToo many positional arguments")

                if len(args) == 1 and not _isint(args[0]) and not isinstance(args[0], collections.abc.Sequence):
                    raise TypeError(
                        arg_error_msg +
                        f"\nArgument type not recognized: {type(args[0])}")

                return TypedStorage(
                    *args,
                    dtype=cls._dtype,
                    device=_get_device_from_module(cls.__module__),
                    _internal=True)

            else:
                if len(args) != 0:
                    raise RuntimeError(
                        arg_error_msg +
                        "\nNo positional arguments should be given when using "
                        "'wrap_storage'")

                if not isinstance(wrap_storage, torch.UntypedStorage):
                    raise TypeError(
                        arg_error_msg +
                        f"\nArgument 'wrap_storage' must be UntypedStorage, but got {type(wrap_storage)}")

                cls_device = _get_device_from_module(cls.__module__)

                if wrap_storage.device.type != cls_device:
                    raise RuntimeError(
                        arg_error_msg +
                        f"\nDevice of 'wrap_storage' must be {cls_device}"
                        f", but got {wrap_storage.device.type}")

                return TypedStorage(
                    *args,
                    wrap_storage=wrap_storage,
                    dtype=cls.dtype,
                    _internal=True)

    def __init__(self, *args, device=None, dtype=None, wrap_storage=None, _internal=False):
        if not _internal:
            _warn_typed_storage_removal()
        arg_error_msg = (
            'TypedStorage.__init__ received an invalid combination '
            'of arguments. Expected one of:\n'
            ' * (*, torch.device device, torch.dtype dtype)\n'
            ' * (int size, *, torch.device device, torch.dtype dtype)\n'
            ' * (Sequence data, *, torch.device device, torch.dtype dtype)\n'
            ' * (*, UntypedStorage wrap_storage, torch.dtype dtype)')

        if wrap_storage is not None:
            if len(args) != 0:
                raise RuntimeError(
                    arg_error_msg +
                    "\nNo positional arguments should be given when using "
                    "'wrap_storage'")

            if dtype is None:
                raise RuntimeError(
                    arg_error_msg +
                    "\nArgument 'dtype' must be specified")

            if not isinstance(dtype, torch.dtype):
                raise TypeError(
                    arg_error_msg +
                    f"\nArgument 'dtype' must be torch.dtype, not {type(dtype)}")

            if device is not None:
                raise RuntimeError(
                    arg_error_msg +
                    "\nArgument 'device' should not be specified when 'wrap_storage' is given")

            self.dtype = dtype

            if not isinstance(wrap_storage, torch.UntypedStorage):
                raise TypeError(
                    arg_error_msg +
                    f"\nArgument 'wrap_storage' must be UntypedStorage, but got {type(wrap_storage)}")

            self._untyped_storage = wrap_storage

        else:
            self.dtype = torch.get_default_dtype() if dtype is None else dtype
            device = torch.device('cpu' if device is None else device)

            if self.dtype in [torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8]:
                if device.type == 'cuda':
                    raise RuntimeError("Cannot create CUDA storage with quantized dtype")

            if len(args) == 0:
                self._untyped_storage = torch.UntypedStorage(device=device)

            elif len(args) == 1:
                if _isint(args[0]):
                    self._untyped_storage = torch.UntypedStorage(int(args[0]) * self._element_size(), device=device)
                elif isinstance(args[0], collections.abc.Sequence):
                    self._untyped_storage = _get_storage_from_sequence(args[0], self.dtype, device)
                else:
                    raise TypeError(
                        arg_error_msg +
                        f"\nArgument type not recognized: {type(args[0])}")

            else:
                raise RuntimeError(
                    arg_error_msg +
                    "\nToo many positional arguments")

    @property
    def is_cuda(self):
        _warn_typed_storage_removal()
        return self._untyped_storage.device.type == 'cuda'

    @property
    def is_hpu(self):
        _warn_typed_storage_removal()
        return self._untyped_storage.device.type == 'hpu'
    def untyped(self):
        """Returns the internal :class:`torch.UntypedStorage`"""
        _warn_typed_storage_removal()
        return self._untyped_storage
    def _new_wrapped_storage(self, untyped_storage):
        assert type(untyped_storage) == torch.UntypedStorage

        if type(self) == TypedStorage:
            return TypedStorage(
                wrap_storage=untyped_storage,
                dtype=self.dtype,
                _internal=True)
        else:
            return type(self)(wrap_storage=untyped_storage)

    def __len__(self):
        _warn_typed_storage_removal()
        return self._size()

    def _maybe_wrap_index(self, idx, is_stop=False):
        if idx is None:
            if is_stop:
                return self._size()
            else:
                return 0

        else:
            if type(idx) != int:
                raise TypeError(f"can't index a {type(self)} with {type(idx)}")
            if is_stop:
                if (idx > self._size()) or (idx < -self._size()):
                    raise IndexError(
                        f'index {idx} out of range for storage of size {self.size()}')
                if idx > 0:
                    return idx
                else:
                    return idx % self._size()
            else:
                if (idx >= self._size()) or (idx < -self._size()):
                    raise IndexError(
                        f'index {idx} out of range for storage of size {self.size()}')
                return idx % self._size()

    def __setitem__(self, idx, value):
        _warn_typed_storage_removal()
        return self._setitem(idx, value)

    def _setitem(self, idx, value):
        if not isinstance(idx, (int, slice)):
            raise RuntimeError(f"can't index a {type(self)} with {type(idx)}")
        if torch.is_storage(value):
            raise RuntimeError(f'cannot set item with value type {type(value)}')
        if self.dtype in [torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8]:
            interpret_dtypes = {
                torch.quint8: torch.uint8,
                torch.quint4x2: torch.uint8,
                torch.quint2x4: torch.uint8,
                torch.qint32: torch.int32,
                torch.qint8: torch.int8
            }
            tmp_dtype = interpret_dtypes[self.dtype]
            tmp_tensor = torch.tensor([], dtype=tmp_dtype, device=self._untyped_storage.device)
            tmp_tensor.set_(TypedStorage(
                wrap_storage=self._untyped_storage,
                dtype=tmp_dtype,
                _internal=True))
        else:
            tmp_tensor = torch.tensor([], dtype=self.dtype, device=self._untyped_storage.device).set_(self)

        tmp_tensor[idx] = value

    def __getitem__(self, idx):
        _warn_typed_storage_removal()
        return self._getitem(idx)

    def _getitem(self, idx):
        if self._untyped_storage.device.type == 'meta':
            raise NotImplementedError("Not available for 'meta' device type")

        # NOTE: Before TypedStorage existed, indexing with a slice used to be
        # possible for <type>Storage objects. However, it would return
        # a storage view, which would be a hassle to implement in TypedStorage,
        # so it was disabled
        if isinstance(idx, slice):
            raise RuntimeError('slices are only supported in UntypedStorage.__getitem__')
        elif not isinstance(idx, int):
            raise RuntimeError(f"can't index a {type(self)} with {type(idx)}")

        if self.dtype in [torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8]:
            interpret_dtypes = {
                torch.quint8: torch.uint8,
                torch.quint4x2: torch.uint8,
                torch.quint2x4: torch.uint8,
                torch.qint32: torch.int32,
                torch.qint8: torch.int8
            }
            return TypedStorage(
                wrap_storage=self._untyped_storage,
                dtype=interpret_dtypes[self.dtype],
                _internal=True)._getitem(idx)

        idx_wrapped = self._maybe_wrap_index(idx)
        tmp_tensor = torch.tensor([], dtype=self.dtype, device=self._untyped_storage.device).set_(self)
        return tmp_tensor[idx_wrapped].item()
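
    # Illustrative usage sketch (not part of the module): TypedStorage indexing
    # returns plain Python numbers, and slicing is intentionally rejected (see the
    # NOTE in _getitem above). Values shown are assumptions; constructing a
    # TypedStorage directly emits the deprecation warning.
    #
    #   >>> s = torch.TypedStorage([1.0, 2.0, 3.0], dtype=torch.float)
    #   >>> s[1]
    #   2.0
    #   >>> s[0:2]    # raises RuntimeError('slices are only supported in UntypedStorage.__getitem__')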
    def cuda(self, device=None, non_blocking=False, **kwargs) -> T:  # type: ignore[misc, type-var]
        _warn_typed_storage_removal()
        if self.dtype in [torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8]:
            raise RuntimeError("Cannot create CUDA storage with quantized dtype")
        cuda_storage: torch.UntypedStorage = self._untyped_storage.cuda(device, non_blocking, **kwargs)
        return self._new_wrapped_storage(cuda_storage)
    def hpu(self, device=None, non_blocking=False, **kwargs) -> T:  # type: ignore[misc, type-var]
        _warn_typed_storage_removal()
        if self.dtype in [torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8]:
            raise RuntimeError("Cannot create HPU storage with quantized dtype")
        hpu_storage: torch.UntypedStorage = self._untyped_storage.hpu(device, non_blocking, **kwargs)
        return self._new_wrapped_storage(hpu_storage)
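
    # Illustrative usage sketch (not part of the module): cuda()/hpu() return a new
    # storage wrapping a device copy of the underlying UntypedStorage; quantized
    # dtypes are rejected. Assumes a CUDA-enabled build with at least one GPU.
    #
    #   >>> s = torch.TypedStorage(4, dtype=torch.float)
    #   >>> s_cuda = s.cuda()
    #   >>> s_cuda.device
    #   device(type='cuda', index=0)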
    def __str__(self):
        _warn_typed_storage_removal()
        info_str = (
            f'[{torch.typename(self)}(dtype={self.dtype}, '
            f'device={self.device}) of size {len(self)}]')
        if self.device.type == 'meta':
            return '...\n' + info_str
        else:
            data_str = ' ' + '\n '.join(str(self[i]) for i in range(self.size()))
            return data_str + '\n' + info_str

    def __repr__(self):
        _warn_typed_storage_removal()
        return str(self)

    def __iter__(self):
        _warn_typed_storage_removal()
        return iter(self[i] for i in range(self.size()))

    def __copy__(self):
        _warn_typed_storage_removal()
        return self._new_wrapped_storage(copy.copy(self._untyped_storage))

    def __deepcopy__(self, memo):
        _warn_typed_storage_removal()
        return self._deepcopy(memo)

    # For internal use only, to avoid deprecation warning
    def _deepcopy(self, memo):
        return self._new_wrapped_storage(copy.deepcopy(self._untyped_storage, memo))

    def __sizeof__(self):
        _warn_typed_storage_removal()
        return super().__sizeof__() + self.nbytes()
    def clone(self):
        """Returns a copy of this storage"""
        _warn_typed_storage_removal()
        return self._new_wrapped_storage(self._untyped_storage.clone())

    def tolist(self):
        """Returns a list containing the elements of this storage"""
        _warn_typed_storage_removal()
        return list(self)

    def cpu(self):
        """Returns a CPU copy of this storage if it's not already on the CPU"""
        _warn_typed_storage_removal()
        return self._new_wrapped_storage(self._untyped_storage.cpu())

    def is_pinned(self, device: Union[str, torch.device] = 'cuda'):
        r"""Determine whether the CPU TypedStorage is already pinned on device.

        Args:
            device (str or torch.device): The device to pin memory on. Default: ``'cuda'``

        Returns:
            A boolean variable.
        """
        _warn_typed_storage_removal()
        return self._untyped_storage.is_pinned(device)

    def pin_memory(self, device: Union[str, torch.device] = 'cuda'):
        r"""Copies the CPU TypedStorage to pinned memory, if it's not already pinned.

        Args:
            device (str or torch.device): The device to pin memory on. Default: ``'cuda'``.

        Returns:
            A pinned CPU storage.
        """
        _warn_typed_storage_removal()
        return self._new_wrapped_storage(self._untyped_storage.pin_memory(device=device))
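
    # Illustrative usage sketch (not part of the module): pinning a CPU storage for
    # faster host-to-device transfers. Assumes a CUDA-enabled build; is_pinned()
    # and pin_memory() both default to the 'cuda' device.
    #
    #   >>> s = torch.TypedStorage(4, dtype=torch.float)
    #   >>> s.is_pinned()
    #   False
    #   >>> s_pinned = s.pin_memory()
    #   >>> s_pinned.is_pinned()
    #   True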
    def share_memory_(self):
        """Moves the storage to shared memory.

        This is a no-op for storages already in shared memory and for CUDA
        storages, which do not need to be moved for sharing across processes.
        Storages in shared memory cannot be resized.

        Returns: self
        """
        _warn_typed_storage_removal()
        return self._share_memory_()
    # For internal use only, to avoid deprecation warning
    def _share_memory_(self):
        self._untyped_storage.share_memory_()
        return self

    def _new_shared(self, size, *, device=None):
        """Creates a new storage in shared memory with the same data type"""
        if device is None:
            device = 'cpu'
        device = torch.device(device)
        untyped_storage = torch.UntypedStorage._new_shared(size * self._element_size(), device=device)
        return TypedStorage(
            wrap_storage=untyped_storage,
            dtype=self.dtype,
            _internal=True)

    @property
    def _cdata(self):
        return self._untyped_storage._cdata

    @property
    def device(self):
        _warn_typed_storage_removal()
        return self._untyped_storage.device

    # For internal use only, to avoid deprecation warning
    def _size(self):
        # NB: don't indirect through __len__, as that requires
        # an int to be returned
        return self._untyped_storage.nbytes() // self._element_size()

    # For internal use only, to avoid deprecation warning
    def _pickle_storage_type(self):
        try:
            return _dtype_to_storage_type_map()[self.dtype]
        except KeyError as e:
            raise KeyError(f'dtype {self.dtype} is not recognized') from e

    def __reduce__(self):
        b = io.BytesIO()
        torch.save(self, b, _use_new_zipfile_serialization=False)
        return (_load_from_bytes, (b.getvalue(),))

    # For internal use only, to avoid deprecation warning
    def _resize_(self, size):
        self._untyped_storage.resize_(size * self._element_size())

    @classmethod
    def _free_weak_ref(cls, *args, **kwargs):
        return UntypedStorage._free_weak_ref(*args, **kwargs)

    def _weak_ref(self, *args, **kwargs):
        return self._untyped_storage._weak_ref(*args, **kwargs)
    @classmethod
    def _from_buffer(cls, *args, dtype=None, device=None, **kwargs):
        if cls == TypedStorage:
            dtype = torch.get_default_dtype() if dtype is None else dtype
            device = torch.device('cpu' if device is None else device)
            if device.type != 'cpu':
                raise RuntimeError(f'TypedStorage.from_buffer: Not available for device {device.type}')
            untyped_storage: torch.UntypedStorage = torch.UntypedStorage.from_buffer(*args, dtype=dtype, **kwargs)

        else:
            if dtype is not None or len(args) == 5:
                raise RuntimeError(
                    "from_buffer: 'dtype' can only be specified in "
                    "UntypedStorage.from_buffer and TypedStorage.from_buffer")
            if device is not None:
                raise RuntimeError(
                    "from_buffer: 'device' can only be specified in "
                    "UntypedStorage.from_buffer and TypedStorage.from_buffer")

            dtype = cls._dtype
            untyped_storage = torch.UntypedStorage.from_buffer(*args, dtype=dtype, **kwargs)

        return TypedStorage(
            wrap_storage=untyped_storage,
            dtype=dtype,
            _internal=True)

    def _to(self, dtype):
        if not isinstance(dtype, torch.dtype):
            raise TypeError(f"Argument 'dtype' must be torch.dtype, not {type(dtype)}")
        storage = torch.tensor([], dtype=self.dtype, device=self.device).set_(self).to(dtype)._typed_storage()
        if storage.data_ptr() == self.data_ptr():
            storage = storage.clone()
        return storage
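
    # Illustrative usage sketch (not part of the module): building a storage from a
    # Python buffer via the public UntypedStorage.from_buffer, which the helper
    # above delegates to. The byte values are made up for demonstration.
    #
    #   >>> buf = bytes([1, 2, 3, 4])
    #   >>> u = torch.UntypedStorage.from_buffer(buf, dtype=torch.uint8)
    #   >>> u.nbytes()
    #   4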
    def double(self):
        """Casts this storage to double type"""
        _warn_typed_storage_removal()
        return self._to(torch.double)

    def float(self):
        """Casts this storage to float type"""
        _warn_typed_storage_removal()
        return self._to(torch.float)

    def half(self):
        """Casts this storage to half type"""
        _warn_typed_storage_removal()
        return self._to(torch.half)

    def long(self):
        """Casts this storage to long type"""
        _warn_typed_storage_removal()
        return self._to(torch.long)

    def int(self):
        """Casts this storage to int type"""
        _warn_typed_storage_removal()
        return self._to(torch.int)

    def short(self):
        """Casts this storage to short type"""
        _warn_typed_storage_removal()
        return self._to(torch.short)

    def char(self):
        """Casts this storage to char type"""
        _warn_typed_storage_removal()
        return self._to(torch.int8)

    def byte(self):
        """Casts this storage to byte type"""
        _warn_typed_storage_removal()
        return self._to(torch.uint8)

    def bool(self):
        """Casts this storage to bool type"""
        _warn_typed_storage_removal()
        return self._to(torch.bool)

    def bfloat16(self):
        """Casts this storage to bfloat16 type"""
        _warn_typed_storage_removal()
        return self._to(torch.bfloat16)

    def complex_double(self):
        """Casts this storage to complex double type"""
        _warn_typed_storage_removal()
        return self._to(torch.cdouble)

    def complex_float(self):
        """Casts this storage to complex float type"""
        _warn_typed_storage_removal()
        return self._to(torch.cfloat)
    @classmethod
    def from_file(cls, filename, shared, size):
        """from_file(filename, shared=False, size=0) -> Storage

        If `shared` is `True`, then memory is shared between all processes.
        All changes are written to the file. If `shared` is `False`, then the changes on
        the storage do not affect the file.

        `size` is the number of elements in the storage. If `shared` is `False`,
        then the file must contain at least `size * sizeof(Type)` bytes
        (`Type` is the type of storage). If `shared` is `True` the file will be created if needed.

        Args:
            filename (str): file name to map
            shared (bool): whether to share memory
            size (int): number of elements in the storage
        """
        _warn_typed_storage_removal()
        if cls == TypedStorage:
            raise RuntimeError('from_file can only be called on derived classes')
        untyped_storage: UntypedStorage = UntypedStorage.from_file(
            filename,
            shared,
            size * torch._utils._element_size(cls.dtype))
        storage = cls(wrap_storage=untyped_storage)
        return storage
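
    # Illustrative usage sketch (not part of the module): memory-mapping a file
    # through a legacy derived class (from_file cannot be called on TypedStorage
    # itself). The file name is hypothetical; with shared=True the file is created
    # if needed, as described in the docstring above.
    #
    #   >>> s = torch.FloatStorage.from_file('example_storage.bin', True, 10)
    #   >>> s.size()
    #   10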
    # For internal use only, to avoid deprecation warning
    def _is_shared(self):
        return self._untyped_storage.is_shared()

    @classmethod
    def _new_shared_cuda(cls, *args, **kwargs):
        return torch.UntypedStorage._new_shared_cuda(*args, **kwargs)

    def _share_filename_cpu_(self, *args, **kwargs):
        manager_handle, storage_handle, size = self._untyped_storage._share_filename_cpu_(*args, **kwargs)
        return manager_handle, storage_handle, size // self._element_size()

    def _shared_decref(self):
        self._untyped_storage._shared_decref()
        return self

    @classmethod
    def _release_ipc_counter(cls, *args, device=None, **kwargs):
        return torch.UntypedStorage._release_ipc_counter_cuda(*args, **kwargs)

    def _shared_incref(self, *args, **kwargs):
        return self._untyped_storage._shared_incref(*args, **kwargs)

    def _share_fd_cpu_(self, *args, **kwargs):
        fd, size = self._untyped_storage._share_fd_cpu_(*args, **kwargs)
        return fd, size // self._element_size()

    def _get_legacy_storage_class(self):
        if self.dtype not in _dtype_to_storage_type_map():
            return None

        storage_name = _dtype_to_storage_type_map()[self.dtype]

        if self.device.type not in ['cpu', 'cuda', torch._C._get_privateuse1_backend_name()]:
            return None

        module = torch if self.device.type == 'cpu' else getattr(torch, self.device.type)

        try:
            return getattr(module, storage_name)
        except AttributeError:
            return None

TypedStorage.type.__doc__ = _type.__doc__
TypedStorage.cuda.__doc__ = _cuda.__doc__
TypedStorage.hpu.__doc__ = _hpu.__doc__


class _LegacyStorageMeta(type):
    dtype: torch.dtype

    def __instancecheck__(cls, instance):
        if type(instance) == TypedStorage:
            cls_device = _get_device_from_module(cls.__module__)
            return (cls_device == instance.device.type) and (cls.dtype == instance.dtype)
        return False


class _LegacyStorage(TypedStorage, metaclass=_LegacyStorageMeta):
    @classmethod
    def _new_shared(cls, size):
        """Creates a new storage in shared memory with the same data type"""
        untyped_storage = torch.UntypedStorage._new_shared(size * cls()._element_size())
        return cls(wrap_storage=untyped_storage)

    @classmethod
    def _release_ipc_counter(cls, *args, **kwargs):
        return torch.UntypedStorage._release_ipc_counter_cuda(*args, **kwargs)

    @classmethod
    def _new_shared_filename(cls, manager, obj, size):
        bytes_size = size * torch._utils._element_size(cls.dtype)
        return cls(wrap_storage=torch.UntypedStorage._new_shared_filename_cpu(manager, obj, bytes_size))


def _get_dtype_from_pickle_storage_type(pickle_storage_type: str):
    try:
        return _storage_type_to_dtype_map()[pickle_storage_type]
    except KeyError as e:
        raise KeyError(
            f'pickle storage type "{pickle_storage_type}" is not recognized') from e
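
# Illustrative usage sketch (not part of the module): _LegacyStorageMeta.__instancecheck__
# makes a plain TypedStorage pass isinstance() checks against the matching legacy
# class, keyed on device and dtype. Values shown are assumptions.
#
#   >>> s = torch.TypedStorage(4, dtype=torch.float)    # CPU, float32
#   >>> isinstance(s, torch.FloatStorage)
#   True
#   >>> isinstance(s, torch.DoubleStorage)
#   False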