# mypy: allow-untyped-defs
from __future__ import annotations

import collections
import copy
import functools
import io
import threading
import warnings
from typing import (
    Any,
    cast,
    Dict as _Dict,
    Optional as _Optional,
    Type,
    TYPE_CHECKING,
    TypeVar,
    Union,
)
from typing_extensions import Self

import torch
from torch._utils import _to, _type
from torch.types import _bool, _int, Storage


if TYPE_CHECKING:
    from torch._prims_common import DeviceLikeType


__all__ = ["TypedStorage", "UntypedStorage"]


try:
    import numpy as np

    HAS_NUMPY = True
except ModuleNotFoundError:
    HAS_NUMPY = False
    np = None  # type: ignore[assignment]


_share_memory_lock = threading.Lock()
_share_memory_map: _Dict[int, threading.RLock] = {}

T = TypeVar("T", bound="Union[_StorageBase, TypedStorage]")


class _StorageBase:
    _cdata: Any
    is_sparse: _bool = False
    is_sparse_csr: _bool = False
    device: torch.device
    # Used when stashing FakeTensor device onto storage in torch.save(metadata_only=True)
    _fake_device: _Optional[torch.device] = None

    def __init__(self, *args, **kwargs):
        pass

    def __len__(self) -> _int:
        raise NotImplementedError

    def __getitem__(self, idx):
        raise NotImplementedError

    def __setitem__(self, *args, **kwargs):
        raise NotImplementedError

    def copy_(self, source: T, non_blocking: _Optional[_bool] = None) -> T:
        raise NotImplementedError

    def new(self) -> Union[_StorageBase, TypedStorage]:
        raise NotImplementedError

    def nbytes(self) -> _int:
        raise NotImplementedError

    def size(self) -> _int:
        return self.nbytes()

    def type(
        self, dtype: _Optional[str] = None, non_blocking: _bool = False
    ) -> Union[_StorageBase, TypedStorage]:
        return _type(self, dtype, non_blocking)

    def cuda(self, device=None, non_blocking=False) -> Union[_StorageBase, TypedStorage]:
        """Returns a copy of this object in CUDA memory.

        If this object is already in CUDA memory and on the correct device, then
        no copy is performed and the original object is returned.

        Args:
            device (int): The destination GPU id. Defaults to the current device.
            non_blocking (bool): If ``True`` and the source is in pinned memory,
                the copy will be asynchronous with respect to the host. Otherwise,
                the argument has no effect.
        """
        device2 = torch.device("cuda", device) if device else torch.device("cuda")
        return self.to(device=device2, non_blocking=non_blocking)

    def hpu(self, device=None, non_blocking=False) -> Union[_StorageBase, TypedStorage]:
        """Returns a copy of this object in HPU memory.

        If this object is already in HPU memory and on the correct device, then
        no copy is performed and the original object is returned.

        Args:
            device (int): The destination HPU id. Defaults to the current device.
            non_blocking (bool): If ``True`` and the source is in pinned memory,
                the copy will be asynchronous with respect to the host. Otherwise,
                the argument has no effect.
        """
        device2 = torch.device("hpu", device) if device else torch.device("hpu")
        return self.to(device=device2, non_blocking=non_blocking)

    def element_size(self) -> _int:
        raise NotImplementedError

    def get_device(self) -> _int:
        return self.device.index

    def data_ptr(self) -> _int:
        raise NotImplementedError

    def resizable(self) -> _bool:
        raise NotImplementedError

    # Defined in torch/csrc/generic/StorageSharing.cpp
    def _share_filename_cpu_(self, *args, **kwargs):
        raise NotImplementedError

    def _share_fd_cpu_(self, *args, **kwargs):
        raise NotImplementedError

    @classmethod
    def _new_using_filename_cpu(cls: Type[T], size: _int) -> T:
        raise NotImplementedError

    @classmethod
    def _new_using_fd_cpu(cls: Type[T], size: _int) -> T:
        raise NotImplementedError

    @classmethod
    def from_buffer(cls: Type[T], *args, **kwargs) -> T:
        raise NotImplementedError

    @classmethod
    def _new_shared_filename_cpu(
        cls: Type[T],
        manager,
        obj,
        size,
        *,
        device=None,
        dtype=None,
    ) -> T:
        raise NotImplementedError

    @classmethod
    def _release_ipc_counter_cuda(cls: Type[T], *args, **kwargs) -> T:
        raise NotImplementedError

    @classmethod
    def _new_with_weak_ptr(cls: Type[T], *args, **kwargs) -> T:
        raise NotImplementedError

    def _shared_decref(self) -> Union[_StorageBase, TypedStorage]:
        raise NotImplementedError

    def _write_file(self, *args, **kwargs):
        raise NotImplementedError

    def resize_(self, size: _int):
        raise NotImplementedError

    def _weak_ref(self, *args, **kwargs) -> Union[_StorageBase, TypedStorage]:
        raise NotImplementedError

    def _set_from_file(self, *args, **kwargs):
        raise NotImplementedError

    def _set_cdata(self, *args, **kwargs):
        raise NotImplementedError

    def _share_cuda_(self, *args, **kwargs):
        raise NotImplementedError

    def is_shared(self) -> _bool:
        raise NotImplementedError

    @classmethod
    def _new_shared_cuda(cls: Type[T], *args, **kwargs) -> T:
        raise NotImplementedError

    def _shared_incref(self, *args, **kwargs):
        raise NotImplementedError

    @classmethod
    def _free_weak_ref(cls, *args, **kwargs):
        raise NotImplementedError

    @property
    def is_cuda(self):
        raise NotImplementedError

    @property
    def is_hpu(self):
        raise NotImplementedError

    @classmethod
    def from_file(cls, filename, shared, nbytes) -> Union[_StorageBase, TypedStorage]:
        raise NotImplementedError

    @classmethod
    def _expired(cls, *args, **kwargs) -> Union[_StorageBase, TypedStorage]:
        raise NotImplementedError

    def _byteswap(self, *args, **kwargs):
        raise NotImplementedError

    def _get_filename(self, *args, **kwargs) -> _Optional[str]:
        raise NotImplementedError

    def __repr__(self):
        info_str = f"[{torch.typename(self)}(device={self.device}) of size {len(self)}]"
        if self.device.type == "meta":
            return "...\n" + info_str
        data_str = " " + "\n ".join(str(self[i]) for i in range(self.size()))
        return data_str + "\n" + info_str

    def __iter__(self):
        return iter(self[i] for i in range(self.size()))

    def __copy__(self):
        return self.clone()

    def __deepcopy__(self, memo):
        memo = memo.setdefault("torch", {})
        if self._cdata in memo:
            return memo[self._cdata]
        new_storage = self.clone()
        memo[self._cdata] = new_storage
        return new_storage

    def __reduce__(self):
        b = io.BytesIO()
        torch.save(self, b, _use_new_zipfile_serialization=False)
        return (_load_from_bytes, (b.getvalue(),))

    def __sizeof__(self):
        return super().__sizeof__() + self.size()

    def clone(self):
        """Return a copy of this storage."""
        return type(self)(self.nbytes(), device=self.device).copy_(self)

    def tolist(self):
        """Return a list containing the elements of this storage."""
        return list(self)

    def cpu(self):
        """Return a CPU copy of this storage if it's not already on the CPU."""
        if self.device.type != "cpu":
            return torch.UntypedStorage(self.size()).copy_(self, False)
        return self

    def mps(self):
        """Return an MPS copy of this storage if it's not already on MPS."""
        if self.device.type != "mps":
            return torch.UntypedStorage(self.size(), device="mps").copy_(self, False)
        return self

    def _to(self, dtype):
        if not isinstance(dtype, torch.dtype):
            raise TypeError(f"Argument 'dtype' must be torch.dtype, not {type(dtype)}")
        storage = (
            torch.tensor([], dtype=torch.uint8, device=self.device)
            .set_(cast(Storage, self))
            .to(dtype)
            ._typed_storage()
        )
        if storage.data_ptr() == self.data_ptr():
            storage = storage.clone()
        return storage

    def to(self, *, device: DeviceLikeType, non_blocking: _bool = False):
        if not isinstance(device, torch.device):
            device = torch.device(device)
        return _to(self, device, non_blocking)

    def double(self):
        """Casts this storage to double type."""
        return self._to(torch.double)

    def float(self):
        """Casts this storage to float type."""
        return self._to(torch.float)

    def half(self):
        """Casts this storage to half type."""
        return self._to(torch.half)

    def long(self):
        """Casts this storage to long type."""
        return self._to(torch.long)

    def int(self):
        """Casts this storage to int type."""
        return self._to(torch.int)

    def short(self):
        """Casts this storage to short type."""
        return self._to(torch.short)

    def char(self):
        """Casts this storage to char type."""
        return self._to(torch.int8)

    def byte(self):
        """Casts this storage to byte type."""
        return self._to(torch.uint8)

    def bool(self):
        """Casts this storage to bool type."""
        return self._to(torch.bool)

    def bfloat16(self):
        """Casts this storage to bfloat16 type."""
        return self._to(torch.bfloat16)

    def complex_double(self):
        """Casts this storage to complex double type."""
        return self._to(torch.cdouble)

    def complex_float(self):
        """Casts this storage to complex float type."""
        return self._to(torch.cfloat)

    def float8_e5m2(self):
        """Casts this storage to float8_e5m2 type"""
        return self._to(torch.float8_e5m2)

    def float8_e4m3fn(self):
        """Casts this storage to float8_e4m3fn type"""
        return self._to(torch.float8_e4m3fn)

    def float8_e5m2fnuz(self):
        """Casts this storage to float8_e5m2fnuz type"""
        return self._to(torch.float8_e5m2fnuz)

    def float8_e4m3fnuz(self):
        """Casts this storage to float8_e4m3fnuz type"""
        return self._to(torch.float8_e4m3fnuz)

    def is_pinned(self, device: Union[str, torch.device] = "cuda"):
        r"""Determine whether the CPU storage is already pinned on device.

        Args:
            device (str or torch.device): The device to pin memory on. Default: ``'cuda'``.

        Returns:
            A boolean variable.
        """
        return (
            torch.tensor([], dtype=torch.uint8, device=self.device)
            .set_(cast(Storage, self))
            .is_pinned(device)
        )

    def pin_memory(self, device: Union[str, torch.device] = "cuda"):
        r"""Copy the CPU storage to pinned memory, if it's not already pinned.

        Args:
            device (str or torch.device): The device to pin memory on. Default: ``'cuda'``.

        Returns:
            A pinned CPU storage.
        """
        if self.device.type != "cpu":
            raise TypeError(f"cannot pin '{self.type()}' only CPU memory can be pinned")

        pinned_tensor = (
            torch.tensor([], dtype=torch.uint8, device=self.device)
            .set_(cast(Storage, self))
            .pin_memory(device)
        )
        return pinned_tensor.untyped_storage()

    def share_memory_(self):
        """See :meth:`torch.UntypedStorage.share_memory_`"""
        from torch.multiprocessing import get_sharing_strategy

        if self.device.type in ["cuda", torch._C._get_privateuse1_backend_name()]:
            pass  # CUDA or PrivateUse1 doesn't use POSIX shared memory
        elif get_sharing_strategy() == "file_system":
            self._share_filename_cpu_()
        else:
            self._share_fd_cpu_()
        return self

    @classmethod
    def _new_shared(cls, size, *, device="cpu"):
        """Create a new storage in shared memory with the same data type."""
        from torch.multiprocessing import get_sharing_strategy

        device = torch.device(device)
        if device.type in ["cuda", torch._C._get_privateuse1_backend_name(), "hpu"]:
            return cls(size, device=device)
        elif get_sharing_strategy() == "file_system":
            return cls._new_using_filename_cpu(size)
        else:
            return cls._new_using_fd_cpu(size)

    def untyped(self):
        return self

    def byteswap(self, dtype):
        """Swap bytes in underlying data."""
        elem_size = torch._utils._element_size(dtype)
        # for complex types, don't swap first and second numbers
        if dtype.is_complex:
            elem_size = max(int(elem_size / 2), 1)
        self._byteswap(elem_size)


def _share_memory_lock_protected(fn):
    @functools.wraps(fn)
    def wrapper(self, *args, **kwargs):
        to_free = None
        to_wait = None
        with _share_memory_lock:
            key = self._cdata
            if key in _share_memory_map:
                to_wait = _share_memory_map[key]
            else:
                _share_memory_map[key] = threading.RLock()
                _share_memory_map[key].acquire()
                to_free = key

        # If we're already in the process of sharing the storage, wait
        # for it to be done.
        if to_wait is not None:
            with to_wait:
                pass

        try:
            return fn(self, *args, **kwargs)
        finally:
            # If we acquired the storage lock here and we're done working on it
            # we can now release it and free the entry.
            if to_free is not None:
                # Ensure that the cdata from the storage didn't change and only
                # the data_ptr did.
                assert self._cdata == to_free
                with _share_memory_lock:
                    _share_memory_map[to_free].release()
                    del _share_memory_map[to_free]

    return wrapper
class UntypedStorage(torch._C.StorageBase, _StorageBase):
    def __getitem__(self, *args, **kwargs):
        if self.device.type == "meta":
            raise NotImplementedError("Not available for 'meta' device type")
        return super().__getitem__(*args, **kwargs)

    @property
    def is_cuda(self):
        return self.device.type == "cuda"

    @property
    def is_hpu(self):
        return self.device.type == "hpu"

    @property
    def filename(self) -> _Optional[str]:
        """Returns the file name associated with this storage.

        The file name will be a string if the storage is on CPU and was created via
        :meth:`~torch.from_file()` with ``shared`` as ``True``. This attribute is ``None`` otherwise.
        """
        return self._get_filename()

    @_share_memory_lock_protected
    def share_memory_(self, *args, **kwargs):
        """
        Moves the storage to shared memory.

        This is a no-op for storages already in shared memory and for CUDA
        storages, which do not need to be moved for sharing across processes.
        Storages in shared memory cannot be resized.

        Note that to mitigate issues like `this <https://github.com/pytorch/pytorch/issues/95606>`_
        it is thread safe to call this function from multiple threads on the same object.
        It is NOT thread safe though to call any other function on self without proper
        synchronization. Please see :doc:`/notes/multiprocessing` for more details.

        .. note::
            When all references to a storage in shared memory are deleted, the associated
            shared memory object will also be deleted. PyTorch has a special cleanup process
            to ensure that this happens even if the current process exits unexpectedly.

        It is worth noting the difference between :meth:`share_memory_` and :meth:`from_file`
        with ``shared = True``

        #. ``share_memory_`` uses `shm_open(3) <https://man7.org/linux/man-pages/man3/shm_open.3.html>`_
           to create a POSIX shared memory object while :meth:`from_file` uses
           `open(2) <https://man7.org/linux/man-pages/man2/open.2.html>`_ to open the filename passed by the user.
        #. Both use an `mmap(2) call <https://man7.org/linux/man-pages/man2/mmap.2.html>`_ with ``MAP_SHARED``
           to map the file/object into the current virtual address space
        #. ``share_memory_`` will call ``shm_unlink(3)`` on the object after mapping it to make sure
           the shared memory object is freed when no process has the object open.
           ``torch.from_file(shared=True)`` does not unlink the file. This file is persistent and
           will remain until it is deleted by the user.

        Returns:
            ``self``
        """
        return super().share_memory_(*args, **kwargs)
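
# Illustrative sketch (not part of the original module): the difference between
# share_memory_() and from_file(shared=True) described in the docstring above.
# `_example_share_memory` is a hypothetical name, and the file path in the
# comment is only an example.
def _example_share_memory():
    s = torch.UntypedStorage(16)
    s.share_memory_()  # backed by an unlinked POSIX shared memory object (CPU only)
    assert s.is_shared()
    # By contrast, a storage mapped from a user-named file persists on disk, e.g.:
    #   f = torch.UntypedStorage.from_file("/tmp/example.bin", shared=True, nbytes=16)
    return s
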
def _load_from_bytes(b):
    return torch.load(io.BytesIO(b), weights_only=False)


@functools.lru_cache(maxsize=None)
def _new_dtypes():
    # These are dtypes serialized as UntypedStorage unlike those in
    # _dtype_to_storage_type_map
    return {
        torch.float8_e5m2,
        torch.float8_e4m3fn,
        torch.float8_e5m2fnuz,
        torch.float8_e4m3fnuz,
        torch.bits8,
        torch.bits16,
        torch.bits1x8,
        torch.bits2x4,
        torch.bits4x2,
        torch.complex32,
        torch.uint16,
        torch.uint32,
        torch.uint64,
    }


@functools.lru_cache(maxsize=None)
def _dtype_to_storage_type_map():
    # NOTE: We should no longer add dtypes to this map. This map
    # is only used for BC/FC with older PyTorch versions. Going forward,
    # new dtypes of TypedStorage should not translate to a legacy
    # <type>Storage class. Instead, new dtypes of TypedStorage should
    # be serialized as an UntypedStorage paired with a torch.dtype
    return {
        torch.double: "DoubleStorage",
        torch.float: "FloatStorage",
        torch.half: "HalfStorage",
        torch.long: "LongStorage",
        torch.int: "IntStorage",
        torch.int16: "ShortStorage",
        torch.int8: "CharStorage",
        torch.uint8: "ByteStorage",
        torch.bool: "BoolStorage",
        torch.bfloat16: "BFloat16Storage",
        torch.cdouble: "ComplexDoubleStorage",
        torch.cfloat: "ComplexFloatStorage",
        torch.qint8: "QInt8Storage",
        torch.qint32: "QInt32Storage",
        torch.quint8: "QUInt8Storage",
        torch.quint4x2: "QUInt4x2Storage",
        torch.quint2x4: "QUInt2x4Storage",
    }


@functools.lru_cache(maxsize=None)
def _storage_type_to_dtype_map():
    dtype_map = {val: key for key, val in _dtype_to_storage_type_map().items()}
    return dtype_map


def _get_storage_from_sequence(sequence, dtype, device):
    if dtype in [
        torch.quint8,
        torch.quint4x2,
        torch.quint2x4,
        torch.qint32,
        torch.qint8,
    ]:
        interpret_dtypes = {
            torch.quint8: torch.uint8,
            torch.quint4x2: torch.uint8,
            torch.quint2x4: torch.uint8,
            torch.qint32: torch.int32,
            torch.qint8: torch.int8,
        }
        tmp_tensor = torch.tensor(
            sequence, dtype=interpret_dtypes[dtype], device=device
        )
    else:
        tmp_tensor = torch.tensor(sequence, dtype=dtype, device=device)

    return tmp_tensor._typed_storage()._untyped_storage


def _isint(x):
    if HAS_NUMPY:
        return isinstance(x, (int, np.integer))
    else:
        return isinstance(x, int)


_always_warn_typed_storage_removal = False


def _get_always_warn_typed_storage_removal():
    return _always_warn_typed_storage_removal


def _set_always_warn_typed_storage_removal(always_warn):
    global _always_warn_typed_storage_removal
    assert isinstance(always_warn, bool)
    _always_warn_typed_storage_removal = always_warn


def _warn_typed_storage_removal(stacklevel=2):
    global _always_warn_typed_storage_removal

    def is_first_time():
        if not hasattr(_warn_typed_storage_removal, "has_warned"):
            return True
        else:
            return not _warn_typed_storage_removal.__dict__["has_warned"]

    if _get_always_warn_typed_storage_removal() or is_first_time():
        message = (
            "TypedStorage is deprecated. It will be removed in the future and "
            "UntypedStorage will be the only storage class. This should only matter "
            "to you if you are using storages directly. To access UntypedStorage "
            "directly, use tensor.untyped_storage() instead of tensor.storage()"
        )
        warnings.warn(message, UserWarning, stacklevel=stacklevel + 1)
        _warn_typed_storage_removal.__dict__["has_warned"] = True


def _reset_warn_typed_storage_removal():
    _warn_typed_storage_removal.__dict__["has_warned"] = False


def _get_device_from_module(module: str):
    last_part = module.rsplit(".", 1)[-1]
    if last_part in ["cuda", torch._C._get_privateuse1_backend_name(), "hpu"]:
        return last_part
    else:
        return "cpu"
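
# Illustrative sketch (not part of the original module): how the legacy
# serialization maps above fit together. `_example_legacy_storage_names` is a
# hypothetical name used only for illustration.
def _example_legacy_storage_names():
    name = _dtype_to_storage_type_map()[torch.float32]  # "FloatStorage"
    dtype = _storage_type_to_dtype_map()[name]  # torch.float32
    # Newer dtypes are not given legacy names; they serialize as UntypedStorage.
    assert torch.uint16 in _new_dtypes()
    return name, dtype
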
class TypedStorage:
    is_sparse: _bool = False
    # Used when stashing FakeTensor device onto storage in torch.save(metadata_only=True)
    _fake_device: _Optional[torch.device] = None

    dtype: torch.dtype

    @property
    def _dtype(self):
        return self.dtype

    @property
    def filename(self) -> _Optional[str]:
        """Returns the file name associated with this storage if the storage was memory
        mapped from a file, or ``None`` if the storage was not created by memory mapping
        a file."""
        return self._untyped_storage.filename

    def __new__(
        cls,
        *args,
        wrap_storage=None,
        dtype=None,
        device=None,
        _internal=False,
    ):
        if not _internal:
            _warn_typed_storage_removal()

        if cls == torch.storage._LegacyStorage:
            raise RuntimeError(
                "Only child classes of _LegacyStorage can be instantiated"
            )

        if cls == TypedStorage:
            return super().__new__(cls)

        else:
            arg_error_msg = (
                f"{cls}.__new__ received an invalid combination "
                f"of arguments. Expected one of:\n"
                " * no arguments\n"
                " * (int size)\n"
                " * (Sequence data)\n"
                " * (*, UntypedStorage wrap_storage)"
            )

            if device is not None:
                raise RuntimeError(
                    arg_error_msg + "\nKeyword argument 'device' cannot be specified"
                )

            if dtype is not None:
                raise RuntimeError(
                    arg_error_msg + "\nKeyword argument 'dtype' cannot be specified"
                )

            if wrap_storage is None:
                if len(args) > 1:
                    raise RuntimeError(arg_error_msg + "\nToo many positional arguments")

                if (
                    len(args) == 1
                    and not _isint(args[0])
                    and not isinstance(args[0], collections.abc.Sequence)
                ):
                    raise TypeError(
                        arg_error_msg
                        + f"\nArgument type not recognized: {type(args[0])}"
                    )

                return TypedStorage(
                    *args,
                    dtype=cls._dtype,
                    device=_get_device_from_module(cls.__module__),
                    _internal=True,
                )

            else:
                if len(args) != 0:
                    raise RuntimeError(
                        arg_error_msg
                        + "\nNo positional arguments should be given when using "
                        "'wrap_storage'"
                    )

                if not isinstance(wrap_storage, torch.UntypedStorage):
                    raise TypeError(
                        arg_error_msg
                        + f"\nArgument 'wrap_storage' must be UntypedStorage, but got {type(wrap_storage)}"
                    )

                cls_device = _get_device_from_module(cls.__module__)

                if wrap_storage.device.type != cls_device:
                    raise RuntimeError(
                        arg_error_msg
                        + f"\nDevice of 'wrap_storage' must be {cls_device}"
                        f", but got {wrap_storage.device.type}"
                    )

                return TypedStorage(
                    *args,
                    wrap_storage=wrap_storage,
                    dtype=cls.dtype,
                    _internal=True,
                )

    def __init__(
        self,
        *args,
        device=None,
        dtype=None,
        wrap_storage=None,
        _internal=False,
    ):
        if not _internal:
            _warn_typed_storage_removal()
        arg_error_msg = (
            "TypedStorage.__init__ received an invalid combination "
            "of arguments. Expected one of:\n"
            " * (*, torch.device device, torch.dtype dtype)\n"
            " * (int size, *, torch.device device, torch.dtype dtype)\n"
            " * (Sequence data, *, torch.device device, torch.dtype dtype)\n"
            " * (*, UntypedStorage wrap_storage, torch.dtype dtype)"
        )

        if wrap_storage is not None:
            if len(args) != 0:
                raise RuntimeError(
                    arg_error_msg
                    + "\nNo positional arguments should be given when using "
                    "'wrap_storage'"
                )

            if dtype is None:
                raise RuntimeError(arg_error_msg + "\nArgument 'dtype' must be specified")

            if not isinstance(dtype, torch.dtype):
                raise TypeError(
                    arg_error_msg
                    + f"\nArgument 'dtype' must be torch.dtype, not {type(dtype)}"
                )

            if device is not None:
                raise RuntimeError(
                    arg_error_msg
                    + "\nArgument 'device' should not be specified when 'wrap_storage' is given"
                )

            self.dtype = dtype

            if not isinstance(wrap_storage, torch.UntypedStorage):
                raise TypeError(
                    arg_error_msg
                    + f"\nArgument 'wrap_storage' must be UntypedStorage, but got {type(wrap_storage)}"
                )

            self._untyped_storage = wrap_storage

        else:
            self.dtype = torch.get_default_dtype() if dtype is None else dtype
            device = torch.device("cpu" if device is None else device)

            if self.dtype in [
                torch.quint8,
                torch.quint4x2,
                torch.quint2x4,
                torch.qint32,
                torch.qint8,
            ]:
                if device.type == "cuda":
                    raise RuntimeError("Cannot create CUDA storage with quantized dtype")

            if len(args) == 0:
                self._untyped_storage = torch.UntypedStorage(device=device)

            elif len(args) == 1:
                if _isint(args[0]):
                    self._untyped_storage = torch.UntypedStorage(
                        int(args[0]) * self._element_size(), device=device
                    )
                elif isinstance(args[0], collections.abc.Sequence):
                    self._untyped_storage = _get_storage_from_sequence(
                        args[0], self.dtype, device
                    )
                else:
                    raise TypeError(
                        arg_error_msg
                        + f"\nArgument type not recognized: {type(args[0])}"
                    )

            else:
                raise RuntimeError(arg_error_msg + "\nToo many positional arguments")

    @property
    def is_cuda(self):
        _warn_typed_storage_removal()
        return self._untyped_storage.device.type == "cuda"

    @property
    def is_hpu(self):
        _warn_typed_storage_removal()
        return self._untyped_storage.device.type == "hpu"

    def untyped(self):
        """Return the internal :class:`torch.UntypedStorage`."""
        _warn_typed_storage_removal()
        return self._untyped_storage

    def _new_wrapped_storage(self, untyped_storage) -> Self:
        assert type(untyped_storage) == torch.UntypedStorage

        if type(self) == TypedStorage:
            return cast(
                Self,
                TypedStorage(
                    wrap_storage=untyped_storage, dtype=self.dtype, _internal=True
                ),
            )
        else:
            return type(self)(wrap_storage=untyped_storage)

    def __len__(self):
        _warn_typed_storage_removal()
        return self._size()

    def _maybe_wrap_index(self, idx, is_stop=False):
        if idx is None:
            if is_stop:
                return self._size()
            else:
                return 0

        else:
            if type(idx) != int:
                raise TypeError(f"can't index a {type(self)} with {type(idx)}")
            if is_stop:
                if (idx > self._size()) or (idx < -self._size()):
                    raise IndexError(
                        f"index {idx} out of range for storage of size {self.size()}"
                    )
                if idx > 0:
                    return idx
                else:
                    return idx % self._size()
            else:
                if (idx >= self._size()) or (idx < -self._size()):
                    raise IndexError(
                        f"index {idx} out of range for storage of size {self.size()}"
                    )
                return idx % self._size()

    def __setitem__(self, idx, value):
        _warn_typed_storage_removal()
        return self._setitem(idx, value)

    def _setitem(self, idx, value):
        if not isinstance(idx, (int, slice)):
            raise RuntimeError(f"can't index a {type(self)} with {type(idx)}")
        if torch.is_storage(value):
            raise RuntimeError(f"cannot set item with value type {type(value)}")
        if self.dtype in [
            torch.quint8,
            torch.quint4x2,
            torch.quint2x4,
            torch.qint32,
            torch.qint8,
        ]:
            interpret_dtypes = {
                torch.quint8: torch.uint8,
                torch.quint4x2: torch.uint8,
                torch.quint2x4: torch.uint8,
                torch.qint32: torch.int32,
                torch.qint8: torch.int8,
            }
            tmp_dtype = interpret_dtypes[self.dtype]
            tmp_tensor = torch.tensor(
                [], dtype=tmp_dtype, device=self._untyped_storage.device
            )
            tmp_tensor.set_(
                TypedStorage(
                    wrap_storage=self._untyped_storage, dtype=tmp_dtype, _internal=True
                )
            )
        else:
            tmp_tensor = torch.tensor(
                [], dtype=self.dtype, device=self._untyped_storage.device
            ).set_(self)

        tmp_tensor[idx] = value

    def __getitem__(self, idx):
        _warn_typed_storage_removal()
        return self._getitem(idx)

    def _getitem(self, idx):
        if self._untyped_storage.device.type == "meta":
            raise NotImplementedError("Not available for 'meta' device type")

        # NOTE: Before TypedStorage existed, indexing with a slice used to be
        # possible for <type>Storage objects. However, it would return
        # a storage view, which would be a hassle to implement in TypedStorage,
        # so it was disabled
        if isinstance(idx, slice):
            raise RuntimeError(
                "slices are only supported in UntypedStorage.__getitem__"
            )
        elif not isinstance(idx, int):
            raise RuntimeError(f"can't index a {type(self)} with {type(idx)}")

        if self.dtype in [
            torch.quint8,
            torch.quint4x2,
            torch.quint2x4,
            torch.qint32,
            torch.qint8,
        ]:
            interpret_dtypes = {
                torch.quint8: torch.uint8,
                torch.quint4x2: torch.uint8,
                torch.quint2x4: torch.uint8,
                torch.qint32: torch.int32,
                torch.qint8: torch.int8,
            }
            return TypedStorage(
                wrap_storage=self._untyped_storage,
                dtype=interpret_dtypes[self.dtype],
                _internal=True,
            )._getitem(idx)

        idx_wrapped = self._maybe_wrap_index(idx)
        from torch._subclasses.fake_tensor import unset_fake_temporarily

        with unset_fake_temporarily():
            tmp_tensor = torch.tensor(
                [], dtype=self.dtype, device=self._untyped_storage.device
            ).set_(self)
            return tmp_tensor[idx_wrapped].item()
    def cuda(self, device=None, non_blocking=False) -> Self:
        _warn_typed_storage_removal()
        if self.dtype in [
            torch.quint8,
            torch.quint4x2,
            torch.quint2x4,
            torch.qint32,
            torch.qint8,
        ]:
            raise RuntimeError("Cannot create CUDA storage with quantized dtype")
        cuda_storage = self._untyped_storage.cuda(device, non_blocking)
        return self._new_wrapped_storage(cuda_storage)

    def hpu(self, device=None, non_blocking=False) -> Self:
        _warn_typed_storage_removal()
        if self.dtype in [
            torch.quint8,
            torch.quint4x2,
            torch.quint2x4,
            torch.qint32,
            torch.qint8,
        ]:
            raise RuntimeError("Cannot create HPU storage with quantized dtype")
        hpu_storage = self._untyped_storage.hpu(device, non_blocking)
        return self._new_wrapped_storage(hpu_storage)

    def to(self, *, device: DeviceLikeType, non_blocking: bool = False) -> Self:
        _warn_typed_storage_removal()
        if not isinstance(device, torch.device):
            device = torch.device(device)
        if self.dtype in [
            torch.quint8,
            torch.quint4x2,
            torch.quint2x4,
            torch.qint32,
            torch.qint8,
        ]:
            raise RuntimeError(
                f"Cannot create {device.type.upper()} storage with quantized dtype"
            )
        to_storage = self._untyped_storage.to(device=device, non_blocking=non_blocking)
        return self._new_wrapped_storage(to_storage)

    def __str__(self):
        _warn_typed_storage_removal()
        info_str = (
            f"[{torch.typename(self)}(dtype={self.dtype}, "
            f"device={self.device}) of size {len(self)}]"
        )
        if self.device.type == "meta":
            return "...\n" + info_str
        else:
            data_str = " " + "\n ".join(str(self[i]) for i in range(self.size()))
            return data_str + "\n" + info_str

    def __repr__(self):
        _warn_typed_storage_removal()
        return str(self)

    def __iter__(self):
        _warn_typed_storage_removal()
        return iter(self[i] for i in range(self.size()))

    def __copy__(self):
        _warn_typed_storage_removal()
        return self._new_wrapped_storage(copy.copy(self._untyped_storage))

    def __deepcopy__(self, memo):
        _warn_typed_storage_removal()
        return self._deepcopy(memo)

    # For internal use only, to avoid deprecation warning
    def _deepcopy(self, memo):
        return self._new_wrapped_storage(copy.deepcopy(self._untyped_storage, memo))

    def __sizeof__(self):
        _warn_typed_storage_removal()
        return super().__sizeof__() + self.nbytes()
    def clone(self):
        """Return a copy of this storage."""
        _warn_typed_storage_removal()
        return self._new_wrapped_storage(self._untyped_storage.clone())

    def tolist(self):
        """Return a list containing the elements of this storage."""
        _warn_typed_storage_removal()
        return list(self)

    def cpu(self):
        """Return a CPU copy of this storage if it's not already on the CPU."""
        _warn_typed_storage_removal()
        return self._new_wrapped_storage(self._untyped_storage.cpu())

    def is_pinned(self, device: Union[str, torch.device] = "cuda"):
        r"""Determine whether the CPU TypedStorage is already pinned on device.

        Args:
            device (str or torch.device): The device to pin memory on. Default: ``'cuda'``

        Returns:
            A boolean variable.
        """
        _warn_typed_storage_removal()
        return self._untyped_storage.is_pinned(device)

    def pin_memory(self, device: Union[str, torch.device] = "cuda"):
        r"""Copy the CPU TypedStorage to pinned memory, if it's not already pinned.

        Args:
            device (str or torch.device): The device to pin memory on. Default: ``'cuda'``.

        Returns:
            A pinned CPU storage.
        """
        _warn_typed_storage_removal()
        return self._new_wrapped_storage(
            self._untyped_storage.pin_memory(device=device)
        )

    # For internal use only, to avoid deprecation warning
    def _share_memory_(self):
        self._untyped_storage.share_memory_()
        return self

    def _new_shared(self, size, *, device=None):
        """Create a new storage in shared memory with the same data type."""
        if device is None:
            device = "cpu"
        device = torch.device(device)
        untyped_storage = torch.UntypedStorage._new_shared(
            size * self._element_size(), device=device
        )
        return TypedStorage(
            wrap_storage=untyped_storage, dtype=self.dtype, _internal=True
        )

    @property
    def _cdata(self):
        return self._untyped_storage._cdata

    @property
    def device(self):
        _warn_typed_storage_removal()
        return self._untyped_storage.device
    # For internal use only, to avoid deprecation warning
    def _size(self):
        # NB: don't indirect through __len__, as that requires
        # an int to be returned
        return self._untyped_storage.nbytes() // self._element_size()

    # For internal use only, to avoid deprecation warning
    def _pickle_storage_type(self):
        try:
            return _dtype_to_storage_type_map()[self.dtype]
        except KeyError as e:
            raise KeyError(f"dtype {self.dtype} is not recognized") from e

    def __reduce__(self):
        b = io.BytesIO()
        torch.save(self, b, _use_new_zipfile_serialization=False)
        return (_load_from_bytes, (b.getvalue(),))

    # For internal use only, to avoid deprecation warning
    def _resize_(self, size):
        self._untyped_storage.resize_(size * self._element_size())

    @classmethod
    def _free_weak_ref(cls, *args, **kwargs):
        return UntypedStorage._free_weak_ref(*args, **kwargs)

    def _weak_ref(self, *args, **kwargs):
        return self._untyped_storage._weak_ref(*args, **kwargs)

    @classmethod
    def _from_buffer(cls, *args, dtype=None, device=None, **kwargs):
        if cls == TypedStorage:
            dtype = torch.get_default_dtype() if dtype is None else dtype
            device = torch.device("cpu" if device is None else device)
            if device.type != "cpu":
                raise RuntimeError(
                    f"TypedStorage.from_buffer: Not available for device {device.type}"
                )
            untyped_storage: torch.UntypedStorage = torch.UntypedStorage.from_buffer(
                *args, dtype=dtype, **kwargs
            )

        else:
            if dtype is not None or len(args) == 5:
                raise RuntimeError(
                    "from_buffer: 'dtype' can only be specified in "
                    "UntypedStorage.from_buffer and TypedStorage.from_buffer"
                )
            if device is not None:
                raise RuntimeError(
                    "from_buffer: 'device' can only be specified in "
                    "UntypedStorage.from_buffer and TypedStorage.from_buffer"
                )

            dtype = cls._dtype
            untyped_storage = torch.UntypedStorage.from_buffer(
                *args, dtype=dtype, **kwargs
            )

        return TypedStorage(wrap_storage=untyped_storage, dtype=dtype, _internal=True)

    def _to(self, dtype):
        if not isinstance(dtype, torch.dtype):
            raise TypeError(f"Argument 'dtype' must be torch.dtype, not {type(dtype)}")
        storage = (
            torch.tensor([], dtype=self.dtype, device=self.device)
            .set_(self)
            .to(dtype)
            ._typed_storage()
        )
        if storage.data_ptr() == self.data_ptr():
            storage = storage.clone()
        return storage
    def double(self):
        """Casts this storage to double type."""
        _warn_typed_storage_removal()
        return self._to(torch.double)

    def float(self):
        """Casts this storage to float type."""
        _warn_typed_storage_removal()
        return self._to(torch.float)

    def half(self):
        """Casts this storage to half type."""
        _warn_typed_storage_removal()
        return self._to(torch.half)

    def long(self):
        """Casts this storage to long type."""
        _warn_typed_storage_removal()
        return self._to(torch.long)

    def int(self):
        """Casts this storage to int type."""
        _warn_typed_storage_removal()
        return self._to(torch.int)

    def short(self):
        """Casts this storage to short type."""
        _warn_typed_storage_removal()
        return self._to(torch.short)

    def char(self):
        """Casts this storage to char type."""
        _warn_typed_storage_removal()
        return self._to(torch.int8)

    def byte(self):
        """Casts this storage to byte type."""
        _warn_typed_storage_removal()
        return self._to(torch.uint8)

    def bool(self):
        """Casts this storage to bool type."""
        _warn_typed_storage_removal()
        return self._to(torch.bool)

    def bfloat16(self):
        """Casts this storage to bfloat16 type."""
        _warn_typed_storage_removal()
        return self._to(torch.bfloat16)

    def complex_double(self):
        """Casts this storage to complex double type."""
        _warn_typed_storage_removal()
        return self._to(torch.cdouble)

    def complex_float(self):
        """Casts this storage to complex float type."""
        _warn_typed_storage_removal()
        return self._to(torch.cfloat)

    def float8_e5m2(self):
        """Casts this storage to float8_e5m2 type"""
        _warn_typed_storage_removal()
        return self._to(torch.float8_e5m2)

    def float8_e4m3fn(self):
        """Casts this storage to float8_e4m3fn type"""
        _warn_typed_storage_removal()
        return self._to(torch.float8_e4m3fn)

    def float8_e5m2fnuz(self):
        """Casts this storage to float8_e5m2fnuz type"""
        _warn_typed_storage_removal()
        return self._to(torch.float8_e5m2fnuz)

    def float8_e4m3fnuz(self):
        """Casts this storage to float8_e4m3fnuz type"""
        _warn_typed_storage_removal()
        return self._to(torch.float8_e4m3fnuz)
    @classmethod
    def from_file(cls, filename, shared, size):
        """from_file(filename, shared=False, size=0) -> Storage

        Creates a CPU storage backed by a memory-mapped file.

        If ``shared`` is ``True``, then memory is shared between all processes.
        All changes are written to the file. If ``shared`` is ``False``, then the changes on
        the storage do not affect the file.

        ``size`` is the number of elements in the storage. If ``shared`` is ``False``,
        then the file must contain at least ``size * sizeof(Type)`` bytes
        (``Type`` is the type of storage). If ``shared`` is ``True`` the file will be
        created if needed.

        Args:
            filename (str): file name to map
            shared (bool): whether to share memory (whether ``MAP_SHARED`` or ``MAP_PRIVATE`` is passed to the
                           underlying `mmap(2) call <https://man7.org/linux/man-pages/man2/mmap.2.html>`_)
            size (int): number of elements in the storage
        """
        _warn_typed_storage_removal()
        if cls == TypedStorage:
            raise RuntimeError("from_file can only be called on derived classes")
        untyped_storage = UntypedStorage.from_file(
            filename, shared, size * torch._utils._element_size(cls.dtype)
        )
        storage = cls(wrap_storage=untyped_storage)
        return storage
    # For internal use only, to avoid deprecation warning
    def _is_shared(self):
        return self._untyped_storage.is_shared()

    @classmethod
    def _new_shared_cuda(cls, *args, **kwargs):
        return torch.UntypedStorage._new_shared_cuda(*args, **kwargs)

    def _share_filename_cpu_(self, *args, **kwargs):
        (
            manager_handle,
            storage_handle,
            size,
        ) = self._untyped_storage._share_filename_cpu_(*args, **kwargs)
        return manager_handle, storage_handle, size // self._element_size()

    def _shared_decref(self):
        self._untyped_storage._shared_decref()
        return self

    @classmethod
    def _release_ipc_counter(cls, *args, device=None, **kwargs):
        return torch.UntypedStorage._release_ipc_counter_cuda(*args, **kwargs)

    def _shared_incref(self, *args, **kwargs):
        return self._untyped_storage._shared_incref(*args, **kwargs)

    def _share_fd_cpu_(self, *args, **kwargs):
        fd, size = self._untyped_storage._share_fd_cpu_(*args, **kwargs)
        return fd, size // self._element_size()

    def _get_legacy_storage_class(self):
        if self.dtype not in _dtype_to_storage_type_map():
            return None

        storage_name = _dtype_to_storage_type_map()[self.dtype]

        if self.device.type not in [
            "cpu",
            "cuda",
            "hpu",
            torch._C._get_privateuse1_backend_name(),
        ]:
            return None

        module = (
            torch if self.device.type == "cpu" else getattr(torch, self.device.type)
        )

        try:
            return getattr(module, storage_name)
        except AttributeError:
            return None
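
# Illustrative sketch (not part of the original module): constructing a
# TypedStorage directly and unwrapping it. `_example_typed_storage` is a
# hypothetical name; `_internal=True` only silences the deprecation warning,
# since tensor.untyped_storage() is the forward-looking API.
def _example_typed_storage():
    ts = TypedStorage([1, 2, 3], dtype=torch.int32, device="cpu", _internal=True)
    first = ts[0]       # read back via a temporary tensor view, see _getitem above
    raw = ts.untyped()  # the wrapped torch.UntypedStorage (3 * 4 = 12 bytes)
    return first, raw.nbytes()
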
TypedStorage.type.__doc__ = _type.__doc__
TypedStorage.cuda.__doc__ = _StorageBase.cuda.__doc__
TypedStorage.hpu.__doc__ = _StorageBase.hpu.__doc__
TypedStorage.to.__doc__ = _to.__doc__


class _LegacyStorageMeta(type):
    dtype: torch.dtype

    def __instancecheck__(cls, instance):
        if type(instance) == TypedStorage:
            cls_device = _get_device_from_module(cls.__module__)
            return (cls_device == instance.device.type) and (
                cls.dtype == instance.dtype
            )
        return False


class _LegacyStorage(TypedStorage, metaclass=_LegacyStorageMeta):
    @classmethod
    def _new_shared(cls, size):
        """Create a new storage in shared memory with the same data type."""
        untyped_storage = torch.UntypedStorage._new_shared(
            size * cls()._element_size()
        )
        return cls(wrap_storage=untyped_storage)

    @classmethod
    def _release_ipc_counter(cls, *args, **kwargs):
        return torch.UntypedStorage._release_ipc_counter_cuda(*args, **kwargs)

    @classmethod
    def _new_shared_filename(cls, manager, obj, size):
        bytes_size = size * torch._utils._element_size(cls.dtype)
        return cls(
            wrap_storage=torch.UntypedStorage._new_shared_filename_cpu(
                manager, obj, bytes_size
            )
        )


def _get_dtype_from_pickle_storage_type(pickle_storage_type: str):
    try:
        return _storage_type_to_dtype_map()[pickle_storage_type]
    except KeyError as e:
        raise KeyError(
            f'pickle storage type "{pickle_storage_type}" is not recognized'
        ) from e
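
# Illustrative sketch (not part of the original module): _LegacyStorageMeta makes
# isinstance() checks on legacy storage classes match plain TypedStorage instances
# by device and dtype. This assumes the legacy class torch.FloatStorage is still
# exposed by the torch package; `_example_legacy_isinstance` is hypothetical.
def _example_legacy_isinstance():
    ts = TypedStorage(4, dtype=torch.float32, device="cpu", _internal=True)
    # type(ts) is TypedStorage, but the metaclass compares the instance's device
    # ("cpu") and dtype (torch.float32) against torch.FloatStorage's attributes.
    return isinstance(ts, torch.FloatStorage)
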