import io

import torch
from ._utils import _type, _cuda
from torch.types import Storage
from typing import Any, TypeVar, Type, Union, cast
import copy
import collections
from functools import lru_cache
try:
    import numpy as np
    HAS_NUMPY = True
except ModuleNotFoundError:
    np = None  # type: ignore[assignment]

T = TypeVar('T', bound='Union[_StorageBase, _TypedStorage]')


class _StorageBase(object):
    _cdata: Any
    is_sparse: bool = False
    is_sparse_csr: bool = False
    device: torch.device

    def __init__(self, *args, **kwargs): ...  # noqa: E704
    def __len__(self) -> int: ...  # noqa: E704
    def __getitem__(self, idx): ...  # noqa: E704
    def copy_(self, source: T, non_blocking: bool = None) -> T: ...  # noqa: E704
    def nbytes(self) -> int: ...  # noqa: E704

    def size(self) -> int:
        return self.nbytes()

    def type(self, dtype: str = None, non_blocking: bool = False) -> T: ...  # noqa: E704
    def cuda(self, device=None, non_blocking=False, **kwargs) -> T: ...  # noqa: E704
    def element_size(self) -> int: ...  # noqa: E704
    def get_device(self) -> int: ...  # noqa: E704
    def data_ptr(self) -> int: ...  # noqa: E704

    # Defined in torch/csrc/generic/StorageSharing.cpp
    def _share_filename_cpu_(self, *args, **kwargs): ...  # noqa: E704
    def _share_fd_cpu_(self, *args, **kwargs): ...  # noqa: E704
    @classmethod
    def _new_using_filename_cpu(cls: Type[T], size: int) -> T: ...  # noqa: E704
    @classmethod
    def _new_using_fd_cpu(cls: Type[T], size: int) -> T: ...  # noqa: E704
    @classmethod
    def from_buffer(cls, *args, **kwargs) -> T: ...  # noqa: E704
    @classmethod
    def _new_shared_filename_cpu(cls, manager, obj, size, *, device=None, dtype=None) -> T: ...  # noqa: E704
    @classmethod
    def _release_ipc_counter_cuda(cls, *args, **kwargs) -> T: ...  # noqa: E704
    @classmethod
    def _new_with_weak_ptr(cls, *args, **kwargs) -> T: ...  # noqa: E704
    def _shared_decref(self) -> T: ...  # noqa: E704
    def _write_file(self, *args, **kwargs): ...  # noqa: E704
    def resize_(self, size: int): ...  # noqa: E704
    def _weak_ref(self, *args, **kwargs) -> T: ...  # noqa: E704
    def is_pinned(self) -> bool: ...  # noqa: E704
    def _set_from_file(self, *args, **kwargs): ...  # noqa: E704
    def _set_cdata(self, *args, **kwargs): ...  # noqa: E704
    def _share_cuda_(self, *args, **kwargs): ...  # noqa: E704
    def is_shared(self) -> bool: ...  # noqa: E704
    @classmethod
    def _new_shared_cuda(cls, *args, **kwargs) -> T: ...  # noqa: E704
    def _shared_incref(self, *args, **kwargs): ...  # noqa: E704
    @classmethod
    def _free_weak_ref(cls, *args, **kwargs): ...  # noqa: E704
    @property
    def is_cuda(self): ...  # noqa: E704

    def __str__(self):
        data_str = ' ' + '\n '.join(str(self[i]) for i in range(self.size()))
        return data_str + (
            f'\n[{torch.typename(self)}(device={self.device}) '
            f'of size {len(self)}]')

    def __repr__(self):
        return str(self)

    def __iter__(self):
        return iter(map(lambda i: self[i], range(self.size())))

    def __copy__(self):
        return self.clone()

    def __deepcopy__(self, memo):
        memo = memo.setdefault('torch', {})
        if self._cdata in memo:
            return memo[self._cdata]
        new_storage = self.clone()
        memo[self._cdata] = new_storage
        return new_storage

    def __reduce__(self):
        b = io.BytesIO()
        torch.save(self, b, _use_new_zipfile_serialization=False)
        return (_load_from_bytes, (b.getvalue(),))

    def __sizeof__(self):
        return super(_StorageBase, self).__sizeof__() + self.size()

    def clone(self):
        """Returns a copy of this storage"""
        return type(self)(self.nbytes(), device=self.device).copy_(self)

    def tolist(self):
        """Returns a list containing the elements of this storage"""
        return list(self)

    def cpu(self):
        """Returns a CPU copy of this storage if it's not already on the CPU"""
        if self.device.type != 'cpu':
            return torch._UntypedStorage(self.size()).copy_(self, False)
        else:
            return self

    def _to(self, dtype):
        if not isinstance(dtype, torch.dtype):
            raise TypeError(f"Argument 'dtype' must be torch.dtype, not {type(dtype)}")
        storage = torch.tensor([], dtype=torch.uint8, device=self.device).set_(cast(Storage, self)).to(dtype).storage()
        if storage.data_ptr() == self.data_ptr():
            storage = storage.clone()
        return storage

    def double(self):
        """Casts this storage to double type"""
        return self._to(torch.double)

    def float(self):
        """Casts this storage to float type"""
        return self._to(torch.float)

    def half(self):
        """Casts this storage to half type"""
        return self._to(torch.half)

    def long(self):
        """Casts this storage to long type"""
        return self._to(torch.long)

    def int(self):
        """Casts this storage to int type"""
        return self._to(torch.int)

    def short(self):
        """Casts this storage to short type"""
        return self._to(torch.short)

    def char(self):
        """Casts this storage to char type"""
        return self._to(torch.int8)

    def byte(self):
        """Casts this storage to byte type"""
        return self._to(torch.uint8)

    def bool(self):
        """Casts this storage to bool type"""
        return self._to(torch.bool)

    def bfloat16(self):
        """Casts this storage to bfloat16 type"""
        return self._to(torch.bfloat16)

    def complex_double(self):
        """Casts this storage to complex double type"""
        return self._to(torch.cdouble)

    def complex_float(self):
        """Casts this storage to complex float type"""
        return self._to(torch.cfloat)

    def pin_memory(self):
        """Copies the storage to pinned memory, if it's not already pinned."""
        if self.is_cuda:
            raise TypeError(f"cannot pin '{self.type()}' only CPU memory can be pinned")
        import torch.cuda
        allocator = torch.cuda.memory._host_allocator()  # type: ignore[attr-defined]
        return type(self)(self.size(), allocator=allocator).copy_(self)

    def share_memory_(self):
        """Moves the storage to shared memory.

        This is a no-op for storages already in shared memory and for CUDA
        storages, which do not need to be moved for sharing across processes.
        Storages in shared memory cannot be resized.

        Returns: self
        """
        from torch.multiprocessing import get_sharing_strategy
        if self.is_cuda:
            pass  # CUDA doesn't use POSIX shared memory
        elif get_sharing_strategy() == 'file_system':
            self._share_filename_cpu_()
        else:
            self._share_fd_cpu_()
        return self

    @classmethod
    def _new_shared(cls, size, *, device='cpu'):
        """Creates a new storage in shared memory with the same data type"""
        from torch.multiprocessing import get_sharing_strategy
        device = torch.device(device)
        if device.type == 'cuda':
            return cls(size, device=device)
        elif get_sharing_strategy() == 'file_system':
            return cls._new_using_filename_cpu(size)
        else:
            return cls._new_using_fd_cpu(size)

    def _untyped(self):
        return self


class _UntypedStorage(torch._C.StorageBase, _StorageBase):
    pass

    @property
    def is_cuda(self):
        return self.device.type == 'cuda'


def _load_from_bytes(b):
    return torch.load(io.BytesIO(b))


_StorageBase.type = _type  # type: ignore[assignment]
_StorageBase.cuda = _cuda  # type: ignore[assignment]


@lru_cache(maxsize=None)
def _dtype_to_storage_type_map():
    # NOTE: We should no longer add dtypes to this map. This map
    # is only used for BC/FC with older PyTorch versions. Going forward,
    # new dtypes of _TypedStorage should not translate to a legacy
    # <type>Storage class. Instead, new dtypes of _TypedStorage should
    # be serialized as an _UntypedStorage paired with a torch.dtype
    return {
        torch.double: 'DoubleStorage',
        torch.float: 'FloatStorage',
        torch.half: 'HalfStorage',
        torch.long: 'LongStorage',
        torch.int: 'IntStorage',
        torch.int16: 'ShortStorage',
        torch.int8: 'CharStorage',
        torch.uint8: 'ByteStorage',
        torch.bool: 'BoolStorage',
        torch.bfloat16: 'BFloat16Storage',
        torch.cdouble: 'ComplexDoubleStorage',
        torch.cfloat: 'ComplexFloatStorage',
        torch.qint8: 'QInt8Storage',
        torch.qint32: 'QInt32Storage',
        torch.quint8: 'QUInt8Storage',
        torch.quint4x2: 'QUInt4x2Storage',
        torch.quint2x4: 'QUInt2x4Storage',
    }


@lru_cache(maxsize=None)
def _storage_type_to_dtype_map():
    dtype_map = {val: key for key, val in _dtype_to_storage_type_map().items()}
    return dtype_map


def _get_storage_from_sequence(sequence, dtype, device):
    if dtype in [torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8]:
        interpret_dtypes = {
            torch.quint8: torch.uint8,
            torch.quint4x2: torch.uint8,
            torch.quint2x4: torch.uint8,
            torch.qint32: torch.int32,
            torch.qint8: torch.int8,
        }
        tmp_tensor = torch.tensor(
            sequence,
            dtype=interpret_dtypes[dtype],
            device=device)
    else:
        tmp_tensor = torch.tensor(
            sequence,
            dtype=dtype,
            device=device)

    return tmp_tensor.storage()._untyped()


def _isint(x):
    if HAS_NUMPY:
        return isinstance(x, (int, np.integer))
    else:
        return isinstance(x, int)
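# Illustrative sketch (not part of the original module): quantized dtypes cannot
# be constructed directly from a Python sequence, so _get_storage_from_sequence
# materializes the data through the matching integer dtype before taking the
# untyped byte storage. Both calls below return a torch._UntypedStorage.
#
#     >>> _get_storage_from_sequence([1.0, 2.0], torch.float, 'cpu')  # backed by float32 bytes
#     >>> _get_storage_from_sequence([1, 2, 3], torch.qint8, 'cpu')   # reinterpreted as int8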
class _TypedStorage:
    def __new__(cls, *args, wrap_storage=None, dtype=None, device=None):
        if cls == torch.storage._LegacyStorage:
            raise RuntimeError("Only child classes of _LegacyStorage can be instantiated")

        if cls == _TypedStorage:
            return super().__new__(cls)

        else:
            arg_error_msg = (
                f'{cls}.__new__ received an invalid combination '
                f'of arguments. Expected one of:\n'
                ' * no arguments\n'
                ' * (int size)\n'
                ' * (Sequence data)\n'
                ' * (*, _UntypedStorage wrap_storage)')

            if device is not None:
                raise RuntimeError(
                    arg_error_msg +
                    "\nKeyword argument 'device' cannot be specified")

            if dtype is not None:
                raise RuntimeError(
                    arg_error_msg +
                    "\nKeyword argument 'dtype' cannot be specified")

            if wrap_storage is None:
                if len(args) > 1:
                    raise RuntimeError(
                        arg_error_msg +
                        "\nToo many positional arguments")

                if len(args) == 1 and not _isint(args[0]) and not isinstance(args[0], collections.abc.Sequence):
                    raise TypeError(
                        arg_error_msg +
                        f"\nArgument type not recognized: {type(args[0])}")

                return _TypedStorage(
                    *args,
                    dtype=cls.dtype,
                    device='cuda' if eval(cls.__module__) is torch.cuda else 'cpu')

            else:
                if len(args) != 0:
                    raise RuntimeError(
                        arg_error_msg +
                        "\nNo positional arguments should be given when using "
                        "'wrap_storage'")

                if not isinstance(wrap_storage, torch._UntypedStorage):
                    raise TypeError(
                        arg_error_msg +
                        f"\nArgument 'wrap_storage' must be _UntypedStorage, but got {type(wrap_storage)}")

                cls_device = 'cuda' if cls.__module__ == 'torch.cuda' else 'cpu'

                if wrap_storage.device.type != cls_device:
                    raise RuntimeError(
                        arg_error_msg +
                        f"\nDevice of 'wrap_storage' must be {cls_device}"
                        f", but got {wrap_storage.device.type}")

                return _TypedStorage(
                    *args,
                    wrap_storage=wrap_storage,
                    dtype=cls.dtype)

    def __init__(self, *args, device=None, dtype=None, wrap_storage=None):
        arg_error_msg = (
            '_TypedStorage.__init__ received an invalid combination '
            'of arguments. Expected one of:\n'
            ' * (*, torch.device device, torch.dtype dtype)\n'
            ' * (int size, *, torch.device device, torch.dtype dtype)\n'
            ' * (Sequence data, *, torch.device device, torch.dtype dtype)\n'
            ' * (*, _UntypedStorage wrap_storage, torch.dtype dtype)')

        if wrap_storage is not None:
            if len(args) != 0:
                raise RuntimeError(
                    arg_error_msg +
                    "\nNo positional arguments should be given when using "
                    "'wrap_storage'")

            if dtype is None:
                raise RuntimeError(
                    arg_error_msg +
                    "\nArgument 'dtype' must be specified")

            if not isinstance(dtype, torch.dtype):
                raise TypeError(
                    arg_error_msg +
                    f"\nArgument 'dtype' must be torch.dtype, not {type(dtype)}")

            if device is not None:
                raise RuntimeError(
                    arg_error_msg +
                    "\nArgument 'device' should not be specified when 'wrap_storage' is given")

            self.dtype = dtype

            if not isinstance(wrap_storage, torch._UntypedStorage):
                raise TypeError(
                    arg_error_msg +
                    f"\nArgument 'wrap_storage' must be _UntypedStorage, but got {type(wrap_storage)}")

            self._storage = wrap_storage

        else:
            self.dtype = torch.get_default_dtype() if dtype is None else dtype
            device = torch.device('cpu' if device is None else device)

            if self.dtype in [torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8]:
                if device.type == 'cuda':
                    raise RuntimeError("Cannot create CUDA storage with quantized dtype")

            if len(args) == 0:
                self._storage = torch._UntypedStorage(device=device)

            elif len(args) == 1:
                if _isint(args[0]):
                    self._storage = torch._UntypedStorage(int(args[0]) * self.element_size(), device=device)
                elif isinstance(args[0], collections.abc.Sequence):
                    self._storage = _get_storage_from_sequence(args[0], self.dtype, device)
                else:
                    raise TypeError(
                        arg_error_msg +
                        f"\nArgument type not recognized: {type(args[0])}")

            else:
                raise RuntimeError(
                    arg_error_msg +
                    "\nToo many positional arguments")

    @property
    def is_cuda(self):
        return self.device.type == 'cuda'

    def _untyped(self):
        return self._storage

    def _new_wrapped_storage(self, untyped_storage):
        assert type(untyped_storage) == torch._UntypedStorage

        if type(self) == _TypedStorage:
            return _TypedStorage(wrap_storage=untyped_storage, dtype=self.dtype)
        else:
            return type(self)(wrap_storage=untyped_storage)

    def __len__(self):
        return self._storage.nbytes() // self.element_size()

    def _maybe_wrap_index(self, idx, is_stop=False):
        if idx is None:
            if is_stop:
                return self.size()
            else:
                return 0

        else:
            if type(idx) != int:
                raise TypeError(
                    f"can't index a {type(self)} with {type(idx)}")
            if is_stop:
                if (idx > self.size()) or (idx < -self.size()):
                    raise IndexError(
                        f'index {idx} out of range for storage of size {self.size()}')
                if idx > 0:
                    return idx
                else:
                    return idx % self.size()
            else:
                if (idx >= self.size()) or (idx < -self.size()):
                    raise IndexError(
                        f'index {idx} out of range for storage of size {self.size()}')
                return idx % self.size()

    def __setitem__(self, idx, value):
        if not isinstance(idx, (int, slice)):
            raise RuntimeError(f"can't index a {type(self)} with {type(idx)}")
        if self.dtype in [torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8]:
            interpret_dtypes = {
                torch.quint8: torch.uint8,
                torch.quint4x2: torch.uint8,
                torch.quint2x4: torch.uint8,
                torch.qint32: torch.int32,
                torch.qint8: torch.int8,
            }
            tmp_dtype = interpret_dtypes[self.dtype]
            tmp_tensor = torch.tensor([], dtype=tmp_dtype, device=self.device).set_(_TypedStorage(
                wrap_storage=self._storage,
                dtype=tmp_dtype))
        else:
            tmp_tensor = torch.tensor([], dtype=self.dtype, device=self.device).set_(self)

        tmp_tensor[idx] = value

    def __getitem__(self, idx):
        # NOTE: Before _TypedStorage existed, indexing with a slice used to be
        # possible for <type>Storage objects. However, it would return
        # a storage view, which would be a hassle to implement in _TypedStorage,
        # so it was disabled
        if isinstance(idx, slice):
            raise RuntimeError('slices are only supported in _UntypedStorage.__getitem__')
        elif not isinstance(idx, int):
            raise RuntimeError(f"can't index a {type(self)} with {type(idx)}")

        if self.dtype in [torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8]:
            interpret_dtypes = {
                torch.quint8: torch.uint8,
                torch.quint4x2: torch.uint8,
                torch.quint2x4: torch.uint8,
                torch.qint32: torch.int32,
                torch.qint8: torch.int8,
            }
            return _TypedStorage(
                wrap_storage=self._storage,
                dtype=interpret_dtypes[self.dtype])[idx]

        idx_wrapped = self._maybe_wrap_index(idx)
        tmp_tensor = torch.tensor([], dtype=self.dtype, device=self.device).set_(self)
        return tmp_tensor[idx_wrapped].item()
    def cuda(self, device=None, non_blocking=False, **kwargs) -> T:
        if self.dtype in [torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8]:
            raise RuntimeError("Cannot create CUDA storage with quantized dtype")
        cuda_storage: torch._UntypedStorage = self._storage.cuda(device, non_blocking, **kwargs)
        return self._new_wrapped_storage(cuda_storage)

    def __str__(self):
        data_str = ' ' + '\n '.join(str(self[i]) for i in range(self.size()))
        return data_str + (
            f'\n[{torch.typename(self)}(dtype={self.dtype}, '
            f'device={self.device}) of size {len(self)}]')

    def __repr__(self):
        return str(self)

    def __iter__(self):
        return iter(map(lambda i: self[i], range(self.size())))

    def __copy__(self):
        return self._new_wrapped_storage(copy.copy(self._storage))

    def __deepcopy__(self, memo):
        return self._new_wrapped_storage(copy.deepcopy(self._storage, memo))

    def __sizeof__(self):
        return super(_TypedStorage, self).__sizeof__() + self.nbytes()

    def clone(self):
        """Returns a copy of this storage"""
        return self._new_wrapped_storage(self._storage.clone())

    def tolist(self):
        """Returns a list containing the elements of this storage"""
        return list(self)

    def cpu(self):
        """Returns a CPU copy of this storage if it's not already on the CPU"""
        return self._new_wrapped_storage(self._storage.cpu())
    def pin_memory(self):
        """Copies the storage to pinned memory, if it's not already pinned."""
        return self._new_wrapped_storage(self._storage.pin_memory())
    def share_memory_(self):
        """Moves the storage to shared memory.

        This is a no-op for storages already in shared memory and for CUDA
        storages, which do not need to be moved for sharing across processes.
        Storages in shared memory cannot be resized.

        Returns: self
        """
        self._storage.share_memory_()
        return self
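    # Illustrative usage sketch (not part of the original source), assuming the
    # public torch.FloatStorage wrapper built on top of _TypedStorage:
    #
    #     >>> s = torch.FloatStorage(3)   # small CPU storage
    #     >>> s.share_memory_() is s      # data now lives in shared memory; returns self
    #     True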
    def _new_shared(self, size, *, device=None):
        """Creates a new storage in shared memory with the same data type"""
        if device is None:
            device = 'cpu'
        device = torch.device(device)
        untyped_storage = torch._UntypedStorage._new_shared(size * self.element_size(), device=device)
        return _TypedStorage(
            wrap_storage=untyped_storage,
            dtype=self.dtype)

    @property
    def _cdata(self):
        return self._storage._cdata

    @property
    def device(self):
        return self._storage.device

    def pickle_storage_type(self):
        try:
            return _dtype_to_storage_type_map()[self.dtype]
        except KeyError:
            raise KeyError(f'dtype {self.dtype} is not recognized')

    @classmethod
    def from_buffer(cls, *args, dtype=None, device=None, **kwargs):
        if cls == _TypedStorage:
            dtype = torch.get_default_dtype() if dtype is None else dtype
            device = torch.device('cpu' if device is None else device)
            if device.type != 'cpu':
                raise RuntimeError(f'_TypedStorage.from_buffer: Not available for device {device.type}')
            untyped_storage: torch._UntypedStorage = torch._UntypedStorage.from_buffer(*args, dtype=dtype, **kwargs)

        else:
            if dtype is not None or len(args) == 5:
                raise RuntimeError((
                    "from_buffer: 'dtype' can only be specified in "
                    "_UntypedStorage.from_buffer and _TypedStorage.from_buffer"))
            if device is not None:
                raise RuntimeError((
                    "from_buffer: 'device' can only be specified in "
                    "_UntypedStorage.from_buffer and _TypedStorage.from_buffer"))

            dtype = cls.dtype
            untyped_storage = torch._UntypedStorage.from_buffer(*args, dtype=dtype, **kwargs)

        return _TypedStorage(wrap_storage=untyped_storage, dtype=dtype)

    def _to(self, dtype):
        if not isinstance(dtype, torch.dtype):
            raise TypeError(f"Argument 'dtype' must be torch.dtype, not {type(dtype)}")
        storage = torch.tensor([], dtype=self.dtype, device=self.device).set_(self).to(dtype).storage()
        if storage.data_ptr() == self.data_ptr():
            storage = storage.clone()
        return storage
    def double(self):
        """Casts this storage to double type"""
        return self._to(torch.double)

    def float(self):
        """Casts this storage to float type"""
        return self._to(torch.float)

    def half(self):
        """Casts this storage to half type"""
        return self._to(torch.half)

    def long(self):
        """Casts this storage to long type"""
        return self._to(torch.long)

    def int(self):
        """Casts this storage to int type"""
        return self._to(torch.int)

    def short(self):
        """Casts this storage to short type"""
        return self._to(torch.short)

    def char(self):
        """Casts this storage to char type"""
        return self._to(torch.int8)

    def byte(self):
        """Casts this storage to byte type"""
        return self._to(torch.uint8)

    def bool(self):
        """Casts this storage to bool type"""
        return self._to(torch.bool)

    def bfloat16(self):
        """Casts this storage to bfloat16 type"""
        return self._to(torch.bfloat16)

    def complex_double(self):
        """Casts this storage to complex double type"""
        return self._to(torch.cdouble)

    def complex_float(self):
        """Casts this storage to complex float type"""
        return self._to(torch.cfloat)
    @classmethod
    def from_file(cls, filename, shared, size):
        """
        from_file(filename, shared=False, size=0) -> Storage

        If `shared` is `True`, then memory is shared between all processes.
        All changes are written to the file. If `shared` is `False`, then the
        changes on the storage do not affect the file.

        `size` is the number of elements in the storage. If `shared` is `False`,
        then the file must contain at least `size * sizeof(Type)` bytes
        (`Type` is the type of storage). If `shared` is `True` the file will be
        created if needed.

        Args:
            filename (str): file name to map
            shared (bool): whether to share memory
            size (int): number of elements in the storage
        """
        if cls == _TypedStorage:
            raise RuntimeError('from_file can only be called on derived classes')
        untyped_storage = eval(cls.__module__)._UntypedStorage.from_file(
            filename,
            shared,
            size * torch._utils._element_size(cls.dtype))
        storage = cls(wrap_storage=untyped_storage)
        return storage
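    # Illustrative usage sketch (not part of the original source). The file name
    # is hypothetical; with shared=False the file must already hold at least
    # size * sizeof(int32) = 400 bytes:
    #
    #     >>> s = torch.IntStorage.from_file('tensors.bin', shared=False, size=100)
    #     >>> len(s)
    #     100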
_TypedStorage.type.__doc__ = _type.__doc__
_TypedStorage.cuda.__doc__ = _cuda.__doc__


class _LegacyStorageMeta(type):
    dtype: torch.dtype

    def __instancecheck__(cls, instance):
        if type(instance) == _TypedStorage:
            cls_device = 'cuda' if cls.__module__ == 'torch.cuda' else 'cpu'
            return (cls_device == instance.device.type) and (cls.dtype == instance.dtype)
        return False


class _LegacyStorage(_TypedStorage, metaclass=_LegacyStorageMeta):
    @classmethod
    def _new_shared(cls, size):
        """Creates a new storage in shared memory with the same data type"""
        untyped_storage = torch._UntypedStorage._new_shared(size * cls().element_size())
        return cls(wrap_storage=untyped_storage)

    @classmethod
    def _release_ipc_counter(cls, *args, **kwargs):
        return torch._UntypedStorage._release_ipc_counter_cuda(*args, **kwargs)

    @classmethod
    def _new_shared_filename(cls, manager, obj, size):
        bytes_size = size * torch._utils._element_size(cls.dtype)
        return cls(wrap_storage=torch._UntypedStorage._new_shared_filename_cpu(manager, obj, bytes_size))


def _get_dtype_from_pickle_storage_type(pickle_storage_type: str):
    try:
        return _storage_type_to_dtype_map()[pickle_storage_type]
    except KeyError:
        raise KeyError(
            f'pickle storage type "{pickle_storage_type}" is not recognized')
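# Minimal, illustrative sketch of how the classes above fit together. It assumes
# this module is the torch.storage of a build where torch.DoubleStorage is one
# of the _LegacyStorage subclasses; it is not part of the original module.
if __name__ == "__main__":
    s = torch.DoubleStorage(4)            # __new__ returns a _TypedStorage with dtype=torch.double
    for i in range(len(s)):
        s[i] = float(i)                   # element access goes through __setitem__/__getitem__
    print(s.tolist())                     # [0.0, 1.0, 2.0, 3.0]

    # _LegacyStorageMeta.__instancecheck__ makes any _TypedStorage with the
    # matching dtype and device count as an instance of the legacy class.
    t = _TypedStorage([0.0, 1.0], dtype=torch.double, device='cpu')
    print(isinstance(t, torch.DoubleStorage))   # True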