import math
import torch
from torch._six import inf
from typing import Optional


class __PrinterOptions(object):
    precision: int = 4
    threshold: float = 1000
    edgeitems: int = 3
    linewidth: int = 80
    sci_mode: Optional[bool] = None


PRINT_OPTS = __PrinterOptions()


# We could use **kwargs, but this will give better docs
def set_printoptions(
        precision=None,
        threshold=None,
        edgeitems=None,
        linewidth=None,
        profile=None,
        sci_mode=None
):
    r"""Set options for printing. Items shamelessly taken from NumPy

    Args:
        precision: Number of digits of precision for floating point output
            (default = 4).
        threshold: Total number of array elements which trigger summarization
            rather than full `repr` (default = 1000).
        edgeitems: Number of array items in summary at beginning and end of
            each dimension (default = 3).
        linewidth: The number of characters per line for the purpose of
            inserting line breaks (default = 80). Thresholded matrices will
            ignore this parameter.
        profile: Sane defaults for pretty printing. Can override with any of
            the above options. (any one of `default`, `short`, `full`)
        sci_mode: Enable (True) or disable (False) scientific notation. If
            None (default) is specified, the value is defined by
            `torch._tensor_str._Formatter`. This value is automatically chosen
            by the framework.
    """
    if profile is not None:
        if profile == "default":
            PRINT_OPTS.precision = 4
            PRINT_OPTS.threshold = 1000
            PRINT_OPTS.edgeitems = 3
            PRINT_OPTS.linewidth = 80
        elif profile == "short":
            PRINT_OPTS.precision = 2
            PRINT_OPTS.threshold = 1000
            PRINT_OPTS.edgeitems = 2
            PRINT_OPTS.linewidth = 80
        elif profile == "full":
            PRINT_OPTS.precision = 4
            PRINT_OPTS.threshold = inf
            PRINT_OPTS.edgeitems = 3
            PRINT_OPTS.linewidth = 80

    if precision is not None:
        PRINT_OPTS.precision = precision
    if threshold is not None:
        PRINT_OPTS.threshold = threshold
    if edgeitems is not None:
        PRINT_OPTS.edgeitems = edgeitems
    if linewidth is not None:
        PRINT_OPTS.linewidth = linewidth
    PRINT_OPTS.sci_mode = sci_mode
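# Usage sketch for the public API above (comment only, not executed at import;
# exact output spacing may vary slightly between versions):
#
#     >>> torch.set_printoptions(precision=2)
#     >>> torch.tensor([1.23456789])
#     tensor([1.23])
#     >>> torch.set_printoptions(threshold=5)  # summarize tensors with more than 5 elements
#     >>> torch.arange(10)
#     tensor([0, 1, 2,  ..., 7, 8, 9])
#     >>> torch.set_printoptions(profile="default")  # restore the defaults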
class _Formatter(object):
    def __init__(self, tensor):
        self.floating_dtype = tensor.dtype.is_floating_point
        self.int_mode = True
        self.sci_mode = False
        self.max_width = 1

        with torch.no_grad():
            tensor_view = tensor.reshape(-1)

        if not self.floating_dtype:
            for value in tensor_view:
                value_str = '{}'.format(value)
                self.max_width = max(self.max_width, len(value_str))

        else:
            nonzero_finite_vals = torch.masked_select(tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0))

            if nonzero_finite_vals.numel() == 0:
                # no valid number, do nothing
                return

            # Convert to double for easy calculation. HalfTensor overflows with 1e8, and there's no div() on CPU.
            nonzero_finite_abs = nonzero_finite_vals.abs().double()
            nonzero_finite_min = nonzero_finite_abs.min().double()
            nonzero_finite_max = nonzero_finite_abs.max().double()

            for value in nonzero_finite_vals:
                if value != torch.ceil(value):
                    self.int_mode = False
                    break

            if self.int_mode:
                # in int_mode for floats, all numbers are integers, and we append a decimal to nonfinites
                # to indicate that the tensor is of floating type. add 1 to the len to account for this.
                if nonzero_finite_max / nonzero_finite_min > 1000. or nonzero_finite_max > 1.e8:
                    self.sci_mode = True
                    for value in nonzero_finite_vals:
                        value_str = ('{{:.{}e}}').format(PRINT_OPTS.precision).format(value)
                        self.max_width = max(self.max_width, len(value_str))
                else:
                    for value in nonzero_finite_vals:
                        value_str = ('{:.0f}').format(value)
                        self.max_width = max(self.max_width, len(value_str) + 1)
            else:
                # Check if scientific representation should be used.
                if nonzero_finite_max / nonzero_finite_min > 1000.\
                        or nonzero_finite_max > 1.e8\
                        or nonzero_finite_min < 1.e-4:
                    self.sci_mode = True
                    for value in nonzero_finite_vals:
                        value_str = ('{{:.{}e}}').format(PRINT_OPTS.precision).format(value)
                        self.max_width = max(self.max_width, len(value_str))
                else:
                    for value in nonzero_finite_vals:
                        value_str = ('{{:.{}f}}').format(PRINT_OPTS.precision).format(value)
                        self.max_width = max(self.max_width, len(value_str))

        if PRINT_OPTS.sci_mode is not None:
            self.sci_mode = PRINT_OPTS.sci_mode

    def width(self):
        return self.max_width

    def format(self, value):
        if self.floating_dtype:
            if self.sci_mode:
                ret = ('{{:{}.{}e}}').format(self.max_width, PRINT_OPTS.precision).format(value)
            elif self.int_mode:
                ret = '{:.0f}'.format(value)
                if not (math.isinf(value) or math.isnan(value)):
                    ret += '.'
            else:
                ret = ('{{:.{}f}}').format(PRINT_OPTS.precision).format(value)
        else:
            ret = '{}'.format(value)
        return (self.max_width - len(ret)) * ' ' + ret


def _scalar_str(self, formatter1, formatter2=None):
    if formatter2 is not None:
        real_str = _scalar_str(self.real, formatter1)
        imag_str = (_scalar_str(self.imag, formatter2) + "j").lstrip()
        # handles negative numbers, +0.0, -0.0
        if imag_str[0] == '+' or imag_str[0] == '-':
            return real_str + imag_str
        else:
            return real_str + "+" + imag_str
    else:
        return formatter1.format(self.item())


def _vector_str(self, indent, summarize, formatter1, formatter2=None):
    # length includes spaces and comma between elements
    element_length = formatter1.width() + 2
    if formatter2 is not None:
        # width for imag_formatter + an extra j for complex
        element_length += formatter2.width() + 1

    elements_per_line = max(1, int(math.floor((PRINT_OPTS.linewidth - indent) / (element_length))))
    char_per_line = element_length * elements_per_line

    def _val_formatter(val, formatter1=formatter1, formatter2=formatter2):
        if formatter2 is not None:
            real_str = formatter1.format(val.real)
            imag_str = (formatter2.format(val.imag) + "j").lstrip()
            # handles negative numbers, +0.0, -0.0
            if imag_str[0] == '+' or imag_str[0] == '-':
                return real_str + imag_str
            else:
                return real_str + "+" + imag_str
        else:
            return formatter1.format(val)

    if summarize and self.size(0) > 2 * PRINT_OPTS.edgeitems:
        data = ([_val_formatter(val) for val in self[:PRINT_OPTS.edgeitems].tolist()] +
                [' ...'] +
                [_val_formatter(val) for val in self[-PRINT_OPTS.edgeitems:].tolist()])
    else:
        data = [_val_formatter(val) for val in self.tolist()]

    data_lines = [data[i:i + elements_per_line] for i in range(0, len(data), elements_per_line)]
    lines = [', '.join(line) for line in data_lines]
    return '[' + (',' + '\n' + ' ' * (indent + 1)).join(lines) + ']'
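# Illustrative sketch of _Formatter's mode selection (comment only, since this
# is an internal class; results are representative, not guaranteed):
#
#     >>> _Formatter(torch.tensor([1., 2., 3.])).int_mode   # all integral floats
#     True
#     >>> _Formatter(torch.tensor([1e-9])).sci_mode         # min < 1e-4 -> scientific
#     True
#     >>> _Formatter(torch.tensor([1.5, 2.5])).sci_mode     # small dynamic range
#     False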
# formatter2 is only used for printing complex tensors.
# For complex tensors, formatter1 and formatter2 are the formatters for tensor.real
# and tensor.imag respectively
def _tensor_str_with_formatter(self, indent, summarize, formatter1, formatter2=None):
    dim = self.dim()

    if dim == 0:
        return _scalar_str(self, formatter1, formatter2)

    if dim == 1:
        return _vector_str(self, indent, summarize, formatter1, formatter2)

    if summarize and self.size(0) > 2 * PRINT_OPTS.edgeitems:
        slices = ([_tensor_str_with_formatter(self[i], indent + 1, summarize, formatter1, formatter2)
                   for i in range(0, PRINT_OPTS.edgeitems)] +
                  ['...'] +
                  [_tensor_str_with_formatter(self[i], indent + 1, summarize, formatter1, formatter2)
                   for i in range(len(self) - PRINT_OPTS.edgeitems, len(self))])
    else:
        slices = [_tensor_str_with_formatter(self[i], indent + 1, summarize, formatter1, formatter2)
                  for i in range(0, self.size(0))]

    tensor_str = (',' + '\n' * (dim - 1) + ' ' * (indent + 1)).join(slices)
    return '[' + tensor_str + ']'


def _tensor_str(self, indent):
    if self.numel() == 0:
        return '[]'

    if self.has_names():
        # There are two main codepaths (possibly more) that tensor printing goes through:
        # - tensor data can fit comfortably on screen
        # - tensor data needs to be summarized
        # Some of the codepaths don't fully support named tensors, so we send in
        # an unnamed tensor to the formatting code as a workaround.
        self = self.rename(None)

    summarize = self.numel() > PRINT_OPTS.threshold

    # handle the negative bit
    if self.is_neg():
        self = self.resolve_neg()

    if self.dtype is torch.float16 or self.dtype is torch.bfloat16:
        self = self.float()

    if self.dtype.is_complex:
        # handle the conjugate bit
        self = self.resolve_conj()
        real_formatter = _Formatter(get_summarized_data(self.real) if summarize else self.real)
        imag_formatter = _Formatter(get_summarized_data(self.imag) if summarize else self.imag)
        return _tensor_str_with_formatter(self, indent, summarize, real_formatter, imag_formatter)
    else:
        formatter = _Formatter(get_summarized_data(self) if summarize else self)
        return _tensor_str_with_formatter(self, indent, summarize, formatter)


def _add_suffixes(tensor_str, suffixes, indent, force_newline):
    tensor_strs = [tensor_str]
    last_line_len = len(tensor_str) - tensor_str.rfind('\n') + 1
    for suffix in suffixes:
        suffix_len = len(suffix)
        if force_newline or last_line_len + suffix_len + 2 > PRINT_OPTS.linewidth:
            tensor_strs.append(',\n' + ' ' * indent + suffix)
            last_line_len = indent + suffix_len
            force_newline = False
        else:
            tensor_strs.append(', ' + suffix)
            last_line_len += suffix_len + 2
    tensor_strs.append(')')
    return ''.join(tensor_strs)


def get_summarized_data(self):
    dim = self.dim()
    if dim == 0:
        return self
    if dim == 1:
        if self.size(0) > 2 * PRINT_OPTS.edgeitems:
            return torch.cat((self[:PRINT_OPTS.edgeitems], self[-PRINT_OPTS.edgeitems:]))
        else:
            return self
    if self.size(0) > 2 * PRINT_OPTS.edgeitems:
        start = [self[i] for i in range(0, PRINT_OPTS.edgeitems)]
        end = ([self[i] for i in range(len(self) - PRINT_OPTS.edgeitems, len(self))])
        return torch.stack([get_summarized_data(x) for x in (start + end)])
    else:
        return torch.stack([get_summarized_data(x) for x in self])
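# Sketch of how summarization trims a large tensor before formatting (comment
# only; get_summarized_data is internal). With the default edgeitems=3, a 1-D
# tensor keeps only its first and last three elements:
#
#     >>> get_summarized_data(torch.arange(100))
#     tensor([ 0,  1,  2, 97, 98, 99])
#
# Higher-dimensional tensors are trimmed recursively along each dimension.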
def _str_intern(inp):
    prefix = 'tensor('
    indent = len(prefix)
    suffixes = []

    # This is used to extract the primal value and thus disable the forward AD
    # within this function.
    # TODO(albanD) This needs to be updated when more than one level is supported
    self, tangent = torch.autograd.forward_ad.unpack_dual(inp)

    # Note [Print tensor device]:
    # A general logic here is we only print device when it doesn't match
    # the device specified in default tensor type.
    # Currently torch.set_default_tensor_type() only supports CPU/CUDA, thus
    # torch._C._get_default_device() only returns either cpu or cuda.
    # In other cases, we don't have a way to set them as default yet,
    # and we should always print out device for them.
    if self.device.type != torch._C._get_default_device()\
            or (self.device.type == 'cuda' and torch.cuda.current_device() != self.device.index):
        suffixes.append('device=\'' + str(self.device) + '\'')

    # TODO: add an API to map real -> complex dtypes
    _default_complex_dtype = torch.cdouble if torch.get_default_dtype() == torch.double else torch.cfloat
    has_default_dtype = self.dtype in (torch.get_default_dtype(), _default_complex_dtype, torch.int64, torch.bool)

    if self.is_sparse:
        suffixes.append('size=' + str(tuple(self.shape)))
        suffixes.append('nnz=' + str(self._nnz()))
        if not has_default_dtype:
            suffixes.append('dtype=' + str(self.dtype))
        indices_prefix = 'indices=tensor('
        indices = self._indices().detach()
        indices_str = _tensor_str(indices, indent + len(indices_prefix))
        if indices.numel() == 0:
            indices_str += ', size=' + str(tuple(indices.shape))
        values_prefix = 'values=tensor('
        values = self._values().detach()
        values_str = _tensor_str(values, indent + len(values_prefix))
        if values.numel() == 0:
            values_str += ', size=' + str(tuple(values.shape))
        tensor_str = indices_prefix + indices_str + '),\n' + ' ' * indent + values_prefix + values_str + ')'
    elif self.is_sparse_csr:
        suffixes.append('size=' + str(tuple(self.shape)))
        suffixes.append('nnz=' + str(self._nnz()))
        if not has_default_dtype:
            suffixes.append('dtype=' + str(self.dtype))
        crow_indices_prefix = 'crow_indices=tensor('
        crow_indices = self.crow_indices().detach()
        crow_indices_str = _tensor_str(crow_indices, indent + len(crow_indices_prefix))
        if crow_indices.numel() == 0:
            crow_indices_str += ', size=' + str(tuple(crow_indices.shape))
        col_indices_prefix = 'col_indices=tensor('
        col_indices = self.col_indices().detach()
        col_indices_str = _tensor_str(col_indices, indent + len(col_indices_prefix))
        if col_indices.numel() == 0:
            col_indices_str += ', size=' + str(tuple(col_indices.shape))
        values_prefix = 'values=tensor('
        values = self.values().detach()
        values_str = _tensor_str(values, indent + len(values_prefix))
        if values.numel() == 0:
            values_str += ', size=' + str(tuple(values.shape))
        tensor_str = crow_indices_prefix + crow_indices_str + '),\n' + ' ' * indent +\
            col_indices_prefix + col_indices_str + '),\n' + ' ' * indent +\
            values_prefix + values_str + ')'
    elif self.is_quantized:
        suffixes.append('size=' + str(tuple(self.shape)))
        if not has_default_dtype:
            suffixes.append('dtype=' + str(self.dtype))
        suffixes.append('quantization_scheme=' + str(self.qscheme()))
        if self.qscheme() == torch.per_tensor_affine or self.qscheme() == torch.per_tensor_symmetric:
            suffixes.append('scale=' + str(self.q_scale()))
            suffixes.append('zero_point=' + str(self.q_zero_point()))
        elif self.qscheme() == torch.per_channel_affine or self.qscheme() == torch.per_channel_symmetric \
                or self.qscheme() == torch.per_channel_affine_float_qparams:
            suffixes.append('scale=' + str(self.q_per_channel_scales()))
            suffixes.append('zero_point=' + str(self.q_per_channel_zero_points()))
            suffixes.append('axis=' + str(self.q_per_channel_axis()))
        tensor_str = _tensor_str(self.dequantize(), indent)
    else:
        if self.is_meta:
            suffixes.append('size=' + str(tuple(self.shape)))
            if self.dtype != torch.get_default_dtype():
                suffixes.append('dtype=' + str(self.dtype))
            # TODO: This implies that ellipses is valid syntax for allocating
            # a meta tensor, which it could be, but it isn't right now
            tensor_str = '...'
        else:
            if self.numel() == 0 and not self.is_sparse:
                # Explicitly print the shape if it is not (0,), to match NumPy behavior
                if self.dim() != 1:
                    suffixes.append('size=' + str(tuple(self.shape)))

                # In an empty tensor, there are no elements to infer if the dtype
                # should be int64, so it must be shown explicitly.
                if self.dtype != torch.get_default_dtype():
                    suffixes.append('dtype=' + str(self.dtype))
                tensor_str = '[]'
            else:
                if not has_default_dtype:
                    suffixes.append('dtype=' + str(self.dtype))

                if self.layout != torch.strided:
                    tensor_str = _tensor_str(self.to_dense(), indent)
                else:
                    tensor_str = _tensor_str(self, indent)

    if self.layout != torch.strided:
        suffixes.append('layout=' + str(self.layout))

    # Use inp here to get the original grad_fn and not the one generated by the forward grad
    # unpacking.
    if inp.grad_fn is not None:
        name = type(inp.grad_fn).__name__
        if name == 'CppFunction':
            name = inp.grad_fn.name().rsplit('::', 1)[-1]
        suffixes.append('grad_fn=<{}>'.format(name))
    elif inp.requires_grad:
        suffixes.append('requires_grad=True')

    if self.has_names():
        suffixes.append('names={}'.format(self.names))

    if tangent is not None:
        suffixes.append('tangent={}'.format(tangent))

    return _add_suffixes(prefix + tensor_str, suffixes, indent, force_newline=self.is_sparse)


def _str(self):
    with torch.no_grad():
        return _str_intern(self)
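# Example of the resulting repr (comment-only sketch; the exact suffixes depend
# on the tensor's dtype, device, layout, and autograd state):
#
#     >>> torch.zeros(2, 2, dtype=torch.float64, requires_grad=True)
#     tensor([[0., 0.],
#             [0., 0.]], dtype=torch.float64, requires_grad=True)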