import torch
from ._utils import _type, _cuda, _range


class _StorageBase(object):
    is_cuda = False
    is_sparse = False

    def __str__(self):
        content = ' ' + '\n '.join(str(self[i]) for i in _range(len(self)))
        return content + '\n[{} of size {}]'.format(torch.typename(self), len(self))

    def __repr__(self):
        return str(self)

    def __iter__(self):
        return iter(map(lambda i: self[i], _range(self.size())))

    def __copy__(self):
        return self.clone()

    def __deepcopy__(self, memo):
        memo = memo.setdefault('torch', {})
        if self._cdata in memo:
            return memo[self._cdata]
        new_storage = self.clone()
        memo[self._cdata] = new_storage
        return new_storage

    def __reduce__(self):
        return type(self), (self.tolist(),)

    def clone(self):
        """Returns a copy of this storage"""
        return type(self)(self.size()).copy_(self)

    def tolist(self):
        """Returns a list containing the elements of this storage"""
        return [v for v in self]

    def cpu(self):
        """Returns a CPU copy of this storage if it's not already on the CPU"""
        return self.type(getattr(torch, self.__class__.__name__))

    def double(self):
        """Casts this storage to double type"""
        return self.type(type(self).__module__ + '.DoubleStorage')

    def float(self):
        """Casts this storage to float type"""
        return self.type(type(self).__module__ + '.FloatStorage')

    def half(self):
        """Casts this storage to half type"""
        return self.type(type(self).__module__ + '.HalfStorage')

    def long(self):
        """Casts this storage to long type"""
        return self.type(type(self).__module__ + '.LongStorage')

    def int(self):
        """Casts this storage to int type"""
        return self.type(type(self).__module__ + '.IntStorage')

    def short(self):
        """Casts this storage to short type"""
        return self.type(type(self).__module__ + '.ShortStorage')

    def char(self):
        """Casts this storage to char type"""
        return self.type(type(self).__module__ + '.CharStorage')

    def byte(self):
        """Casts this storage to byte type"""
        return self.type(type(self).__module__ + '.ByteStorage')

    def pin_memory(self):
        """Copies the storage to pinned memory, if it's not already pinned."""
        if self.is_cuda:
            raise TypeError("cannot pin '{0}' only CPU memory can be pinned"
                            .format(self.type()))
        import torch.cuda
        allocator = torch.cuda._host_allocator()
        return type(self)(self.size(), allocator=allocator).copy_(self)

    def share_memory_(self):
        """Moves the storage to shared memory.

        This is a no-op for storages already in shared memory and for CUDA
        storages, which do not need to be moved for sharing across processes.
        Storages in shared memory cannot be resized.

        Returns: self
        """
        from torch.multiprocessing import get_sharing_strategy
        if self.is_cuda:
            pass  # CUDA doesn't use POSIX shared memory
        elif get_sharing_strategy() == 'file_system':
            self._share_filename_()
        else:
            self._share_fd_()
        return self

    @classmethod
    def _new_shared(cls, size):
        """Creates a new storage in shared memory with the same data type"""
        from torch.multiprocessing import get_sharing_strategy
        if cls.is_cuda:
            return cls(size)
        elif get_sharing_strategy() == 'file_system':
            return cls._new_using_filename(size)
        else:
            return cls._new_using_fd(size)


_StorageBase.type = _type
_StorageBase.cuda = _cuda
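
# A minimal usage sketch (illustrative only, not part of this module; assumes
# a built torch install where concrete subclasses such as torch.FloatStorage
# expose the interface defined above):
#
#   >>> import torch
#   >>> s = torch.FloatStorage([1.0, 2.0, 3.0])
#   >>> s.tolist()
#   [1.0, 2.0, 3.0]
#   >>> s.double()            # returns a torch.DoubleStorage copy
#   >>> s.clone()             # independent copy with the same size and contents
#   >>> s.share_memory_()     # moves this CPU storage into shared memory in place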