# mypy: allow-untyped-defs
r"""This package implements abstractions found in ``torch.cuda``
to facilitate writing device-agnostic code."""

from contextlib import AbstractContextManager
from typing import Any, Optional, Union

import torch

from .. import device as _device
from . import amp


__all__ = [
    "is_available",
    "synchronize",
    "current_device",
    "current_stream",
    "stream",
    "set_device",
    "device_count",
    "Stream",
    "StreamContext",
    "Event",
]

# Anything accepted as a device spec by the device-agnostic APIs below.
_device_t = Union[_device, str, int, None]


def _is_avx2_supported() -> bool:
    r"""Return True if the CPU supports the AVX2 instruction set."""
    return torch._C._cpu._is_avx2_supported()


def _is_avx512_supported() -> bool:
    r"""Return True if the CPU supports the AVX512 instruction set."""
    return torch._C._cpu._is_avx512_supported()


def _is_avx512_bf16_supported() -> bool:
    r"""Return True if the CPU supports AVX512_BF16."""
    return torch._C._cpu._is_avx512_bf16_supported()


def _is_vnni_supported() -> bool:
    r"""Return True if the CPU supports VNNI."""
    # Note: Currently, it only checks avx512_vnni, will add the support of avx2_vnni later.
    return torch._C._cpu._is_avx512_vnni_supported()


def _is_amx_tile_supported() -> bool:
    r"""Return True if the CPU supports AMX_TILE."""
    return torch._C._cpu._is_amx_tile_supported()


def _is_amx_fp16_supported() -> bool:
    r"""Return True if the CPU supports AMX FP16."""
    return torch._C._cpu._is_amx_fp16_supported()


def _init_amx() -> bool:
    r"""Initialize AMX instructions; returns whether initialization succeeded."""
    return torch._C._cpu._init_amx()


def _is_arm_sve_supported() -> bool:
    r"""Return True if the CPU supports Arm SVE."""
    return torch._C._cpu._is_arm_sve_supported()
def is_available() -> bool:
    r"""Return a bool indicating if CPU is currently available.

    N.B. This function only exists to facilitate device-agnostic code
    """
    # The CPU backend is always present.
    return True
def synchronize(device: _device_t = None) -> None:
    r"""Wait for all kernels in all streams on the CPU device to complete.

    Args:
        device (torch.device or int, optional): ignored, there's only one CPU device.

    N.B. This function only exists to facilitate device-agnostic code.
    """
    # Intentionally a no-op: CPU work is synchronous from the caller's view.
class Stream:
    """A no-op stand-in for a device stream.

    N.B. This class only exists to facilitate device-agnostic code
    """

    def __init__(self, priority: int = -1) -> None:
        # Priority is accepted for API parity with CUDA streams but ignored.
        pass

    def wait_stream(self, stream) -> None:
        # Nothing to wait on: CPU execution is already ordered.
        pass
def current_stream(device: _device_t = None) -> Stream:
    r"""Return the currently selected :class:`Stream` for a given device.

    Args:
        device (torch.device or int, optional): Ignored.

    N.B. This function only exists to facilitate device-agnostic code
    """
    # A single module-level stream plays the role of the "current" stream.
    return _current_stream
class StreamContext(AbstractContextManager):
    r"""Context-manager that selects a given stream.

    N.B. This class only exists to facilitate device-agnostic code
    """

    cur_stream: Optional[Stream]

    def __init__(self, stream):
        self.stream = stream
        # Seed prev_stream so __exit__ is safe even if __enter__ bailed early.
        self.prev_stream = _default_cpu_stream

    def __enter__(self):
        selected = self.stream
        if selected is None:
            # A None stream means "leave the current stream alone".
            return
        global _current_stream
        # Remember what was current so __exit__ can restore it.
        self.prev_stream = _current_stream
        _current_stream = selected

    def __exit__(self, type: Any, value: Any, traceback: Any) -> None:
        selected = self.stream
        if selected is None:
            return
        global _current_stream
        # Restore the stream that was current on entry.
        _current_stream = self.prev_stream
def stream(stream: Stream) -> AbstractContextManager:
    r"""Wrapper around the Context-manager StreamContext that selects a given stream.

    N.B. This function only exists to facilitate device-agnostic code
    """
    return StreamContext(stream)
def device_count() -> int:
    r"""Return the number of CPU devices (not cores). Always 1.

    N.B. This function only exists to facilitate device-agnostic code
    """
    # The CPU is modeled as exactly one device regardless of core count.
    return 1
def set_device(device: _device_t) -> None:
    r"""Set the current device; on CPU this is a no-op.

    N.B. This function only exists to facilitate device-agnostic code
    """
    # There is only one CPU device, so there is nothing to select.
def current_device() -> str:
    r"""Return the current device for cpu. Always 'cpu'.

    N.B. This function only exists to facilitate device-agnostic code
    """
    return "cpu"
Docs
Access comprehensive developer documentation for PyTorch
To analyze traffic and optimize your experience, we serve cookies on this site. By clicking or navigating, you agree to allow our usage of cookies. As the current maintainers of this site, Facebook’s Cookies Policy applies. Learn more, including about available controls: Cookies Policy.