# Stub file for: ANSLibs/OpenVINO/python/openvino/_pyopenvino/properties/__init__.pyi
# (web-viewer chrome "Files / 219 lines / 6.5 KiB / Raw Normal View History"
#  removed so the file parses as Python)

# type: ignore
from __future__ import annotations

import typing

import openvino._pyopenvino

from . import device
from . import hint
from . import intel_auto
from . import intel_cpu
from . import intel_gpu
from . import intel_npu
from . import log
from . import streams
"""
openvino.properties submodule
"""
# Public API of this stub module; mirrors the names re-exported by the
# compiled pybind11 extension (enums, property helpers, and submodules).
__all__ = ['CacheMode', 'WorkloadType', 'auto_batch_timeout', 'available_devices', 'cache_dir', 'cache_encryption_callbacks', 'cache_mode', 'compilation_num_threads', 'device', 'enable_mmap', 'enable_profiling', 'execution_devices', 'force_tbb_terminate', 'hint', 'inference_num_threads', 'intel_auto', 'intel_cpu', 'intel_gpu', 'intel_npu', 'key_cache_group_size', 'key_cache_precision', 'loaded_from_cache', 'log', 'max_batch_size', 'model_name', 'num_streams', 'optimal_batch_size', 'optimal_number_of_infer_requests', 'range_for_async_infer_requests', 'range_for_streams', 'streams', 'supported_properties', 'value_cache_group_size', 'value_cache_precision', 'weights_path', 'workload_type']
class CacheMode:
    """
    pybind11-generated int-backed enum.

    Members:

      OPTIMIZE_SIZE  (value 0)

      OPTIMIZE_SPEED (value 1)
    """
    # Enum member singletons exposed as class attributes.
    OPTIMIZE_SIZE: typing.ClassVar[CacheMode]  # value = <CacheMode.OPTIMIZE_SIZE: 0>
    OPTIMIZE_SPEED: typing.ClassVar[CacheMode]  # value = <CacheMode.OPTIMIZE_SPEED: 1>
    # name -> member mapping, as pybind11 enums provide.
    __members__: typing.ClassVar[dict[str, CacheMode]]  # value = {'OPTIMIZE_SIZE': <CacheMode.OPTIMIZE_SIZE: 0>, 'OPTIMIZE_SPEED': <CacheMode.OPTIMIZE_SPEED: 1>}
    def __eq__(self, other: typing.Any) -> bool:
        ...
    def __ge__(self, other: typing.Any) -> bool:
        ...
    def __getstate__(self) -> int:
        """Pickle support: reduce the member to its integer value."""
        ...
    def __gt__(self, other: typing.Any) -> bool:
        ...
    def __hash__(self) -> int:
        ...
    def __index__(self) -> int:
        """Allow use of a member anywhere an integer index is required."""
        ...
    def __init__(self, value: typing.SupportsInt) -> None:
        """Construct a member from its integer value."""
        ...
    def __int__(self) -> int:
        ...
    def __le__(self, other: typing.Any) -> bool:
        ...
    def __lt__(self, other: typing.Any) -> bool:
        ...
    def __ne__(self, other: typing.Any) -> bool:
        ...
    def __repr__(self) -> str:
        ...
    def __setstate__(self, state: typing.SupportsInt) -> None:
        """Pickle support: restore the member from its integer value."""
        ...
    def __str__(self) -> str:
        ...
    @property
    def name(self) -> str:
        """The member's name, e.g. ``'OPTIMIZE_SIZE'``."""
        ...
    @property
    def value(self) -> int:
        """The member's underlying integer value."""
        ...
class WorkloadType:
    """
    pybind11-generated int-backed enum.

    Members:

      DEFAULT   (value 0)

      EFFICIENT (value 1)
    """
    # Enum member singletons exposed as class attributes.
    DEFAULT: typing.ClassVar[WorkloadType]  # value = <WorkloadType.DEFAULT: 0>
    EFFICIENT: typing.ClassVar[WorkloadType]  # value = <WorkloadType.EFFICIENT: 1>
    # name -> member mapping, as pybind11 enums provide.
    __members__: typing.ClassVar[dict[str, WorkloadType]]  # value = {'DEFAULT': <WorkloadType.DEFAULT: 0>, 'EFFICIENT': <WorkloadType.EFFICIENT: 1>}
    def __eq__(self, other: typing.Any) -> bool:
        ...
    def __ge__(self, other: typing.Any) -> bool:
        ...
    def __getstate__(self) -> int:
        """Pickle support: reduce the member to its integer value."""
        ...
    def __gt__(self, other: typing.Any) -> bool:
        ...
    def __hash__(self) -> int:
        ...
    def __index__(self) -> int:
        """Allow use of a member anywhere an integer index is required."""
        ...
    def __init__(self, value: typing.SupportsInt) -> None:
        """Construct a member from its integer value."""
        ...
    def __int__(self) -> int:
        ...
    def __le__(self, other: typing.Any) -> bool:
        ...
    def __lt__(self, other: typing.Any) -> bool:
        ...
    def __ne__(self, other: typing.Any) -> bool:
        ...
    def __repr__(self) -> str:
        ...
    def __setstate__(self, state: typing.SupportsInt) -> None:
        """Pickle support: restore the member from its integer value."""
        ...
    def __str__(self) -> str:
        ...
    @property
    def name(self) -> str:
        """The member's name, e.g. ``'DEFAULT'``."""
        ...
    @property
    def value(self) -> int:
        """The member's underlying integer value."""
        ...
@typing.overload
def auto_batch_timeout() -> str:
    """No-argument overload: return the property's key string."""
    ...
@typing.overload
def auto_batch_timeout(arg0: typing.SupportsInt) -> tuple[str, openvino._pyopenvino.OVAny]:
    """Value overload: return a ``(key, OVAny-wrapped value)`` pair for an int-like timeout."""
    ...
def available_devices() -> str:
    """Return the key string of this read-only property."""
    ...
@typing.overload
def cache_dir() -> str:
    """No-argument overload: return the property's key string."""
    ...
@typing.overload
def cache_dir(arg0: str) -> tuple[str, openvino._pyopenvino.OVAny]:
    """Value overload: return a ``(key, OVAny-wrapped value)`` pair for a directory path string."""
    ...
def cache_encryption_callbacks(arg0: typing.Any) -> tuple[str, openvino._pyopenvino.OVAny]:
    """Return a ``(key, OVAny-wrapped value)`` pair; ``arg0`` is untyped in the binding (presumably callables — verify against the C++ API)."""
    ...
@typing.overload
def cache_mode() -> str:
    """No-argument overload: return the property's key string."""
    ...
@typing.overload
def cache_mode(arg0: CacheMode) -> tuple[str, openvino._pyopenvino.OVAny]:
    """Value overload: return a ``(key, OVAny-wrapped value)`` pair for a :class:`CacheMode` member."""
    ...
@typing.overload
def compilation_num_threads() -> str:
    """No-argument overload: return the property's key string."""
    ...
@typing.overload
def compilation_num_threads(arg0: typing.SupportsInt) -> tuple[str, openvino._pyopenvino.OVAny]:
    """Value overload: return a ``(key, OVAny-wrapped value)`` pair for an int-like thread count."""
    ...
@typing.overload
def enable_mmap() -> str:
    """No-argument overload: return the property's key string."""
    ...
@typing.overload
def enable_mmap(arg0: bool) -> tuple[str, openvino._pyopenvino.OVAny]:
    """Value overload: return a ``(key, OVAny-wrapped value)`` pair for a boolean flag."""
    ...
@typing.overload
def enable_profiling() -> str:
    """No-argument overload: return the property's key string."""
    ...
@typing.overload
def enable_profiling(arg0: bool) -> tuple[str, openvino._pyopenvino.OVAny]:
    """Value overload: return a ``(key, OVAny-wrapped value)`` pair for a boolean flag."""
    ...
def execution_devices() -> str:
    """Return the key string of this read-only property."""
    ...
@typing.overload
def force_tbb_terminate() -> str:
    """No-argument overload: return the property's key string."""
    ...
@typing.overload
def force_tbb_terminate(arg0: bool) -> tuple[str, openvino._pyopenvino.OVAny]:
    """Value overload: return a ``(key, OVAny-wrapped value)`` pair for a boolean flag."""
    ...
@typing.overload
def inference_num_threads() -> str:
    """No-argument overload: return the property's key string."""
    ...
@typing.overload
def inference_num_threads(arg0: typing.SupportsInt) -> tuple[str, openvino._pyopenvino.OVAny]:
    """Value overload: return a ``(key, OVAny-wrapped value)`` pair for an int-like thread count."""
    ...
@typing.overload
def key_cache_group_size() -> str:
    """No-argument overload: return the property's key string."""
    ...
@typing.overload
def key_cache_group_size(arg0: typing.SupportsInt) -> tuple[str, openvino._pyopenvino.OVAny]:
    """Value overload: return a ``(key, OVAny-wrapped value)`` pair for an int-like group size."""
    ...
@typing.overload
def key_cache_precision() -> str:
    """No-argument overload: return the property's key string."""
    ...
@typing.overload
def key_cache_precision(arg0: openvino._pyopenvino.Type) -> tuple[str, openvino._pyopenvino.OVAny]:
    """Value overload: return a ``(key, OVAny-wrapped value)`` pair for an element :class:`~openvino.Type`."""
    ...
def loaded_from_cache() -> str:
    """Return the key string of this read-only property."""
    ...
def max_batch_size() -> str:
    """Return the key string of this read-only property."""
    ...
def model_name() -> str:
    """Return the key string of this read-only property."""
    ...
@typing.overload
def num_streams() -> str:
    """No-argument overload: return the property's key string."""
    ...
@typing.overload
def num_streams(arg0: typing.Any) -> tuple[str, openvino._pyopenvino.OVAny]:
    """Value overload: return a ``(key, OVAny-wrapped value)`` pair; ``arg0`` is untyped in the binding (presumably an int or a ``streams.Num`` — verify against the C++ API)."""
    ...
def optimal_batch_size() -> str:
    """Return the key string of this read-only property."""
    ...
def optimal_number_of_infer_requests() -> str:
    """Return the key string of this read-only property."""
    ...
def range_for_async_infer_requests() -> str:
    """Return the key string of this read-only property."""
    ...
def range_for_streams() -> str:
    """Return the key string of this read-only property."""
    ...
def supported_properties() -> str:
    """Return the key string of this read-only property."""
    ...
@typing.overload
def value_cache_group_size() -> str:
    """No-argument overload: return the property's key string."""
    ...
@typing.overload
def value_cache_group_size(arg0: typing.SupportsInt) -> tuple[str, openvino._pyopenvino.OVAny]:
    """Value overload: return a ``(key, OVAny-wrapped value)`` pair for an int-like group size."""
    ...
@typing.overload
def value_cache_precision() -> str:
    """No-argument overload: return the property's key string."""
    ...
@typing.overload
def value_cache_precision(arg0: openvino._pyopenvino.Type) -> tuple[str, openvino._pyopenvino.OVAny]:
    """Value overload: return a ``(key, OVAny-wrapped value)`` pair for an element :class:`~openvino.Type`."""
    ...
@typing.overload
def weights_path() -> str:
    """No-argument overload: return the property's key string."""
    ...
@typing.overload
def weights_path(arg0: str) -> tuple[str, openvino._pyopenvino.OVAny]:
    """Value overload: return a ``(key, OVAny-wrapped value)`` pair for a path string."""
    ...
@typing.overload
def workload_type() -> str:
    """No-argument overload: return the property's key string."""
    ...
@typing.overload
def workload_type(arg0: WorkloadType) -> tuple[str, openvino._pyopenvino.OVAny]:
    """Value overload: return a ``(key, OVAny-wrapped value)`` pair for a :class:`WorkloadType` member."""
    ...