Documentation ¶
Index ¶
- Constants
- Variables
- type BenchmarkError
- func (*BenchmarkError) Descriptor() ([]byte, []int) (deprecated)
- func (x *BenchmarkError) GetErrorCode() []*ErrorCode
- func (x *BenchmarkError) GetExitCode() int32
- func (x *BenchmarkError) GetMiniBenchmarkErrorCode() int32
- func (x *BenchmarkError) GetSignal() int32
- func (x *BenchmarkError) GetStage() BenchmarkStage
- func (*BenchmarkError) ProtoMessage()
- func (x *BenchmarkError) ProtoReflect() protoreflect.Message
- func (x *BenchmarkError) Reset()
- func (x *BenchmarkError) String() string
- type BenchmarkEvent
- func (*BenchmarkEvent) Descriptor() ([]byte, []int) (deprecated)
- func (x *BenchmarkEvent) GetBoottimeUs() int64
- func (x *BenchmarkEvent) GetError() *BenchmarkError
- func (x *BenchmarkEvent) GetEventType() BenchmarkEventType
- func (x *BenchmarkEvent) GetResult() *BenchmarkResult
- func (x *BenchmarkEvent) GetTfliteSettings() *TFLiteSettings
- func (x *BenchmarkEvent) GetWallclockUs() int64
- func (*BenchmarkEvent) ProtoMessage()
- func (x *BenchmarkEvent) ProtoReflect() protoreflect.Message
- func (x *BenchmarkEvent) Reset()
- func (x *BenchmarkEvent) String() string
- type BenchmarkEventStorage
- func (*BenchmarkEventStorage) Descriptor() ([]byte, []int) (deprecated)
- func (x *BenchmarkEventStorage) GetBenchmarkEvent() *BenchmarkEvent
- func (x *BenchmarkEventStorage) GetModelIdGroup() *ModelIdGroup
- func (*BenchmarkEventStorage) ProtoMessage()
- func (x *BenchmarkEventStorage) ProtoReflect() protoreflect.Message
- func (x *BenchmarkEventStorage) Reset()
- func (x *BenchmarkEventStorage) String() string
- type BenchmarkEventType
- func (BenchmarkEventType) Descriptor() protoreflect.EnumDescriptor
- func (x BenchmarkEventType) Enum() *BenchmarkEventType
- func (BenchmarkEventType) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x BenchmarkEventType) Number() protoreflect.EnumNumber
- func (x BenchmarkEventType) String() string
- func (BenchmarkEventType) Type() protoreflect.EnumType
- func (x *BenchmarkEventType) UnmarshalJSON(b []byte) error (deprecated)
- type BenchmarkInitializationFailure
- func (*BenchmarkInitializationFailure) Descriptor() ([]byte, []int) (deprecated)
- func (x *BenchmarkInitializationFailure) GetInitializationStatus() int32
- func (*BenchmarkInitializationFailure) ProtoMessage()
- func (x *BenchmarkInitializationFailure) ProtoReflect() protoreflect.Message
- func (x *BenchmarkInitializationFailure) Reset()
- func (x *BenchmarkInitializationFailure) String() string
- type BenchmarkMetric
- func (*BenchmarkMetric) Descriptor() ([]byte, []int) (deprecated)
- func (x *BenchmarkMetric) GetName() string
- func (x *BenchmarkMetric) GetValues() []float32
- func (*BenchmarkMetric) ProtoMessage()
- func (x *BenchmarkMetric) ProtoReflect() protoreflect.Message
- func (x *BenchmarkMetric) Reset()
- func (x *BenchmarkMetric) String() string
- type BenchmarkResult
- func (*BenchmarkResult) Descriptor() ([]byte, []int) (deprecated)
- func (x *BenchmarkResult) GetActualOutput() []*BenchmarkResult_InferenceOutput
- func (x *BenchmarkResult) GetInferenceTimeUs() []int64
- func (x *BenchmarkResult) GetInitializationTimeUs() []int64
- func (x *BenchmarkResult) GetMaxMemoryKb() int32
- func (x *BenchmarkResult) GetMetrics() []*BenchmarkMetric
- func (x *BenchmarkResult) GetOk() bool
- func (*BenchmarkResult) ProtoMessage()
- func (x *BenchmarkResult) ProtoReflect() protoreflect.Message
- func (x *BenchmarkResult) Reset()
- func (x *BenchmarkResult) String() string
- type BenchmarkResult_InferenceOutput
- func (*BenchmarkResult_InferenceOutput) Descriptor() ([]byte, []int) (deprecated)
- func (x *BenchmarkResult_InferenceOutput) GetValue() []byte
- func (*BenchmarkResult_InferenceOutput) ProtoMessage()
- func (x *BenchmarkResult_InferenceOutput) ProtoReflect() protoreflect.Message
- func (x *BenchmarkResult_InferenceOutput) Reset()
- func (x *BenchmarkResult_InferenceOutput) String() string
- type BenchmarkStage
- func (BenchmarkStage) Descriptor() protoreflect.EnumDescriptor
- func (x BenchmarkStage) Enum() *BenchmarkStage
- func (BenchmarkStage) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x BenchmarkStage) Number() protoreflect.EnumNumber
- func (x BenchmarkStage) String() string
- func (BenchmarkStage) Type() protoreflect.EnumType
- func (x *BenchmarkStage) UnmarshalJSON(b []byte) error (deprecated)
- type BenchmarkStoragePaths
- func (*BenchmarkStoragePaths) Descriptor() ([]byte, []int) (deprecated)
- func (x *BenchmarkStoragePaths) GetDataDirectoryPath() string
- func (x *BenchmarkStoragePaths) GetStorageFilePath() string
- func (*BenchmarkStoragePaths) ProtoMessage()
- func (x *BenchmarkStoragePaths) ProtoReflect() protoreflect.Message
- func (x *BenchmarkStoragePaths) Reset()
- func (x *BenchmarkStoragePaths) String() string
- type BestAccelerationDecision
- func (*BestAccelerationDecision) Descriptor() ([]byte, []int) (deprecated)
- func (x *BestAccelerationDecision) GetMinInferenceTimeUs() int64
- func (x *BestAccelerationDecision) GetMinLatencyEvent() *BenchmarkEvent
- func (x *BestAccelerationDecision) GetNumberOfSourceEvents() int32
- func (*BestAccelerationDecision) ProtoMessage()
- func (x *BestAccelerationDecision) ProtoReflect() protoreflect.Message
- func (x *BestAccelerationDecision) Reset()
- func (x *BestAccelerationDecision) String() string
- type CPUSettings
- type ComputeSettings
- func (*ComputeSettings) Descriptor() ([]byte, []int) (deprecated)
- func (x *ComputeSettings) GetModelIdentifierForStatistics() string
- func (x *ComputeSettings) GetModelNamespaceForStatistics() string
- func (x *ComputeSettings) GetPreference() ExecutionPreference
- func (x *ComputeSettings) GetSettingsToTestLocally() *MinibenchmarkSettings
- func (x *ComputeSettings) GetTfliteSettings() *TFLiteSettings
- func (*ComputeSettings) ProtoMessage()
- func (x *ComputeSettings) ProtoReflect() protoreflect.Message
- func (x *ComputeSettings) Reset()
- func (x *ComputeSettings) String() string
- type CoralSettings
- func (*CoralSettings) Descriptor() ([]byte, []int) (deprecated)
- func (x *CoralSettings) GetDevice() string
- func (x *CoralSettings) GetPerformance() CoralSettings_Performance
- func (x *CoralSettings) GetUsbAlwaysDfu() bool
- func (x *CoralSettings) GetUsbMaxBulkInQueueLength() int32
- func (*CoralSettings) ProtoMessage()
- func (x *CoralSettings) ProtoReflect() protoreflect.Message
- func (x *CoralSettings) Reset()
- func (x *CoralSettings) String() string
- type CoralSettings_Performance
- func (CoralSettings_Performance) Descriptor() protoreflect.EnumDescriptor
- func (x CoralSettings_Performance) Enum() *CoralSettings_Performance
- func (CoralSettings_Performance) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x CoralSettings_Performance) Number() protoreflect.EnumNumber
- func (x CoralSettings_Performance) String() string
- func (CoralSettings_Performance) Type() protoreflect.EnumType
- func (x *CoralSettings_Performance) UnmarshalJSON(b []byte) error (deprecated)
- type CoreMLSettings
- func (*CoreMLSettings) Descriptor() ([]byte, []int) (deprecated)
- func (x *CoreMLSettings) GetCoremlVersion() int32
- func (x *CoreMLSettings) GetEnabledDevices() CoreMLSettings_EnabledDevices
- func (x *CoreMLSettings) GetMaxDelegatedPartitions() int32
- func (x *CoreMLSettings) GetMinNodesPerPartition() int32
- func (*CoreMLSettings) ProtoMessage()
- func (x *CoreMLSettings) ProtoReflect() protoreflect.Message
- func (x *CoreMLSettings) Reset()
- func (x *CoreMLSettings) String() string
- type CoreMLSettings_EnabledDevices
- func (CoreMLSettings_EnabledDevices) Descriptor() protoreflect.EnumDescriptor
- func (x CoreMLSettings_EnabledDevices) Enum() *CoreMLSettings_EnabledDevices
- func (CoreMLSettings_EnabledDevices) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x CoreMLSettings_EnabledDevices) Number() protoreflect.EnumNumber
- func (x CoreMLSettings_EnabledDevices) String() string
- func (CoreMLSettings_EnabledDevices) Type() protoreflect.EnumType
- func (x *CoreMLSettings_EnabledDevices) UnmarshalJSON(b []byte) error (deprecated)
- type Delegate
- func (Delegate) Descriptor() protoreflect.EnumDescriptor
- func (x Delegate) Enum() *Delegate
- func (Delegate) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x Delegate) Number() protoreflect.EnumNumber
- func (x Delegate) String() string
- func (Delegate) Type() protoreflect.EnumType
- func (x *Delegate) UnmarshalJSON(b []byte) error (deprecated)
- type EdgeTpuDeviceSpec
- func (*EdgeTpuDeviceSpec) Descriptor() ([]byte, []int) (deprecated)
- func (x *EdgeTpuDeviceSpec) GetChipFamily() int32
- func (x *EdgeTpuDeviceSpec) GetDevicePaths() []string
- func (x *EdgeTpuDeviceSpec) GetNumChips() int32
- func (x *EdgeTpuDeviceSpec) GetPlatformType() EdgeTpuDeviceSpec_PlatformType
- func (*EdgeTpuDeviceSpec) ProtoMessage()
- func (x *EdgeTpuDeviceSpec) ProtoReflect() protoreflect.Message
- func (x *EdgeTpuDeviceSpec) Reset()
- func (x *EdgeTpuDeviceSpec) String() string
- type EdgeTpuDeviceSpec_PlatformType
- func (EdgeTpuDeviceSpec_PlatformType) Descriptor() protoreflect.EnumDescriptor
- func (x EdgeTpuDeviceSpec_PlatformType) Enum() *EdgeTpuDeviceSpec_PlatformType
- func (EdgeTpuDeviceSpec_PlatformType) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x EdgeTpuDeviceSpec_PlatformType) Number() protoreflect.EnumNumber
- func (x EdgeTpuDeviceSpec_PlatformType) String() string
- func (EdgeTpuDeviceSpec_PlatformType) Type() protoreflect.EnumType
- func (x *EdgeTpuDeviceSpec_PlatformType) UnmarshalJSON(b []byte) error (deprecated)
- type EdgeTpuInactivePowerConfig
- func (*EdgeTpuInactivePowerConfig) Descriptor() ([]byte, []int) (deprecated)
- func (x *EdgeTpuInactivePowerConfig) GetInactivePowerState() EdgeTpuPowerState
- func (x *EdgeTpuInactivePowerConfig) GetInactiveTimeoutUs() int64
- func (*EdgeTpuInactivePowerConfig) ProtoMessage()
- func (x *EdgeTpuInactivePowerConfig) ProtoReflect() protoreflect.Message
- func (x *EdgeTpuInactivePowerConfig) Reset()
- func (x *EdgeTpuInactivePowerConfig) String() string
- type EdgeTpuPowerState
- func (EdgeTpuPowerState) Descriptor() protoreflect.EnumDescriptor
- func (x EdgeTpuPowerState) Enum() *EdgeTpuPowerState
- func (EdgeTpuPowerState) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x EdgeTpuPowerState) Number() protoreflect.EnumNumber
- func (x EdgeTpuPowerState) String() string
- func (EdgeTpuPowerState) Type() protoreflect.EnumType
- func (x *EdgeTpuPowerState) UnmarshalJSON(b []byte) error (deprecated)
- type EdgeTpuSettings
- func (*EdgeTpuSettings) Descriptor() ([]byte, []int) (deprecated)
- func (x *EdgeTpuSettings) GetEdgetpuDeviceSpec() *EdgeTpuDeviceSpec
- func (x *EdgeTpuSettings) GetFloatTruncationType() EdgeTpuSettings_FloatTruncationType
- func (x *EdgeTpuSettings) GetInactivePowerConfigs() []*EdgeTpuInactivePowerConfig
- func (x *EdgeTpuSettings) GetInferencePowerState() EdgeTpuPowerState
- func (x *EdgeTpuSettings) GetInferencePriority() int32
- func (x *EdgeTpuSettings) GetModelToken() string
- func (x *EdgeTpuSettings) GetQosClass() EdgeTpuSettings_QosClass
- func (*EdgeTpuSettings) ProtoMessage()
- func (x *EdgeTpuSettings) ProtoReflect() protoreflect.Message
- func (x *EdgeTpuSettings) Reset()
- func (x *EdgeTpuSettings) String() string
- type EdgeTpuSettings_FloatTruncationType
- func (EdgeTpuSettings_FloatTruncationType) Descriptor() protoreflect.EnumDescriptor
- func (x EdgeTpuSettings_FloatTruncationType) Enum() *EdgeTpuSettings_FloatTruncationType
- func (EdgeTpuSettings_FloatTruncationType) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x EdgeTpuSettings_FloatTruncationType) Number() protoreflect.EnumNumber
- func (x EdgeTpuSettings_FloatTruncationType) String() string
- func (EdgeTpuSettings_FloatTruncationType) Type() protoreflect.EnumType
- func (x *EdgeTpuSettings_FloatTruncationType) UnmarshalJSON(b []byte) error (deprecated)
- type EdgeTpuSettings_QosClass
- func (EdgeTpuSettings_QosClass) Descriptor() protoreflect.EnumDescriptor
- func (x EdgeTpuSettings_QosClass) Enum() *EdgeTpuSettings_QosClass
- func (EdgeTpuSettings_QosClass) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x EdgeTpuSettings_QosClass) Number() protoreflect.EnumNumber
- func (x EdgeTpuSettings_QosClass) String() string
- func (EdgeTpuSettings_QosClass) Type() protoreflect.EnumType
- func (x *EdgeTpuSettings_QosClass) UnmarshalJSON(b []byte) error (deprecated)
- type ErrorCode
- func (*ErrorCode) Descriptor() ([]byte, []int) (deprecated)
- func (x *ErrorCode) GetSource() Delegate
- func (x *ErrorCode) GetTfliteError() int32
- func (x *ErrorCode) GetUnderlyingApiError() int64
- func (*ErrorCode) ProtoMessage()
- func (x *ErrorCode) ProtoReflect() protoreflect.Message
- func (x *ErrorCode) Reset()
- func (x *ErrorCode) String() string
- type ExecutionPreference
- func (ExecutionPreference) Descriptor() protoreflect.EnumDescriptor
- func (x ExecutionPreference) Enum() *ExecutionPreference
- func (ExecutionPreference) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x ExecutionPreference) Number() protoreflect.EnumNumber
- func (x ExecutionPreference) String() string
- func (ExecutionPreference) Type() protoreflect.EnumType
- func (x *ExecutionPreference) UnmarshalJSON(b []byte) error (deprecated)
- type FallbackSettings
- func (*FallbackSettings) Descriptor() ([]byte, []int) (deprecated)
- func (x *FallbackSettings) GetAllowAutomaticFallbackOnCompilationError() bool
- func (x *FallbackSettings) GetAllowAutomaticFallbackOnExecutionError() bool
- func (*FallbackSettings) ProtoMessage()
- func (x *FallbackSettings) ProtoReflect() protoreflect.Message
- func (x *FallbackSettings) Reset()
- func (x *FallbackSettings) String() string
- type GPUBackend
- func (GPUBackend) Descriptor() protoreflect.EnumDescriptor
- func (x GPUBackend) Enum() *GPUBackend
- func (GPUBackend) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x GPUBackend) Number() protoreflect.EnumNumber
- func (x GPUBackend) String() string
- func (GPUBackend) Type() protoreflect.EnumType
- func (x *GPUBackend) UnmarshalJSON(b []byte) error (deprecated)
- type GPUInferencePriority
- func (GPUInferencePriority) Descriptor() protoreflect.EnumDescriptor
- func (x GPUInferencePriority) Enum() *GPUInferencePriority
- func (GPUInferencePriority) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x GPUInferencePriority) Number() protoreflect.EnumNumber
- func (x GPUInferencePriority) String() string
- func (GPUInferencePriority) Type() protoreflect.EnumType
- func (x *GPUInferencePriority) UnmarshalJSON(b []byte) error (deprecated)
- type GPUInferenceUsage
- func (GPUInferenceUsage) Descriptor() protoreflect.EnumDescriptor
- func (x GPUInferenceUsage) Enum() *GPUInferenceUsage
- func (GPUInferenceUsage) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x GPUInferenceUsage) Number() protoreflect.EnumNumber
- func (x GPUInferenceUsage) String() string
- func (GPUInferenceUsage) Type() protoreflect.EnumType
- func (x *GPUInferenceUsage) UnmarshalJSON(b []byte) error (deprecated)
- type GPUSettings
- func (*GPUSettings) Descriptor() ([]byte, []int) (deprecated)
- func (x *GPUSettings) GetCacheDirectory() string
- func (x *GPUSettings) GetEnableQuantizedInference() bool
- func (x *GPUSettings) GetForceBackend() GPUBackend
- func (x *GPUSettings) GetInferencePreference() GPUInferenceUsage
- func (x *GPUSettings) GetInferencePriority1() GPUInferencePriority
- func (x *GPUSettings) GetInferencePriority2() GPUInferencePriority
- func (x *GPUSettings) GetInferencePriority3() GPUInferencePriority
- func (x *GPUSettings) GetIsPrecisionLossAllowed() bool
- func (x *GPUSettings) GetModelToken() string
- func (*GPUSettings) ProtoMessage()
- func (x *GPUSettings) ProtoReflect() protoreflect.Message
- func (x *GPUSettings) Reset()
- func (x *GPUSettings) String() string
- type HexagonSettings
- func (*HexagonSettings) Descriptor() ([]byte, []int) (deprecated)
- func (x *HexagonSettings) GetDebugLevel() int32
- func (x *HexagonSettings) GetPowersaveLevel() int32
- func (x *HexagonSettings) GetPrintGraphDebug() bool
- func (x *HexagonSettings) GetPrintGraphProfile() bool
- func (*HexagonSettings) ProtoMessage()
- func (x *HexagonSettings) ProtoReflect() protoreflect.Message
- func (x *HexagonSettings) Reset()
- func (x *HexagonSettings) String() string
- type MiniBenchmarkEvent
- func (*MiniBenchmarkEvent) Descriptor() ([]byte, []int) (deprecated)
- func (x *MiniBenchmarkEvent) GetBenchmarkEvent() *BenchmarkEvent
- func (x *MiniBenchmarkEvent) GetBestAccelerationDecision() *BestAccelerationDecision
- func (x *MiniBenchmarkEvent) GetInitializationFailure() *BenchmarkInitializationFailure
- func (x *MiniBenchmarkEvent) GetIsLogFlushingEvent() bool
- func (*MiniBenchmarkEvent) ProtoMessage()
- func (x *MiniBenchmarkEvent) ProtoReflect() protoreflect.Message
- func (x *MiniBenchmarkEvent) Reset()
- func (x *MiniBenchmarkEvent) String() string
- type MinibenchmarkSettings
- func (*MinibenchmarkSettings) Descriptor() ([]byte, []int) (deprecated)
- func (x *MinibenchmarkSettings) GetModelFile() *ModelFile
- func (x *MinibenchmarkSettings) GetSettingsToTest() []*TFLiteSettings
- func (x *MinibenchmarkSettings) GetStoragePaths() *BenchmarkStoragePaths
- func (x *MinibenchmarkSettings) GetValidationSettings() *ValidationSettings
- func (*MinibenchmarkSettings) ProtoMessage()
- func (x *MinibenchmarkSettings) ProtoReflect() protoreflect.Message
- func (x *MinibenchmarkSettings) Reset()
- func (x *MinibenchmarkSettings) String() string
- type ModelFile
- func (*ModelFile) Descriptor() ([]byte, []int) (deprecated)
- func (x *ModelFile) GetFd() int64
- func (x *ModelFile) GetFilename() string
- func (x *ModelFile) GetLength() int64
- func (x *ModelFile) GetModelIdGroup() *ModelIdGroup
- func (x *ModelFile) GetOffset() int64
- func (*ModelFile) ProtoMessage()
- func (x *ModelFile) ProtoReflect() protoreflect.Message
- func (x *ModelFile) Reset()
- func (x *ModelFile) String() string
- type ModelIdGroup
- func (*ModelIdGroup) Descriptor() ([]byte, []int) (deprecated)
- func (x *ModelIdGroup) GetModelId() string
- func (x *ModelIdGroup) GetModelNamespace() string
- func (*ModelIdGroup) ProtoMessage()
- func (x *ModelIdGroup) ProtoReflect() protoreflect.Message
- func (x *ModelIdGroup) Reset()
- func (x *ModelIdGroup) String() string
- type NNAPIExecutionPreference
- func (NNAPIExecutionPreference) Descriptor() protoreflect.EnumDescriptor
- func (x NNAPIExecutionPreference) Enum() *NNAPIExecutionPreference
- func (NNAPIExecutionPreference) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x NNAPIExecutionPreference) Number() protoreflect.EnumNumber
- func (x NNAPIExecutionPreference) String() string
- func (NNAPIExecutionPreference) Type() protoreflect.EnumType
- func (x *NNAPIExecutionPreference) UnmarshalJSON(b []byte) error (deprecated)
- type NNAPIExecutionPriority
- func (NNAPIExecutionPriority) Descriptor() protoreflect.EnumDescriptor
- func (x NNAPIExecutionPriority) Enum() *NNAPIExecutionPriority
- func (NNAPIExecutionPriority) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x NNAPIExecutionPriority) Number() protoreflect.EnumNumber
- func (x NNAPIExecutionPriority) String() string
- func (NNAPIExecutionPriority) Type() protoreflect.EnumType
- func (x *NNAPIExecutionPriority) UnmarshalJSON(b []byte) error (deprecated)
- type NNAPISettings
- func (*NNAPISettings) Descriptor() ([]byte, []int) (deprecated)
- func (x *NNAPISettings) GetAcceleratorName() string
- func (x *NNAPISettings) GetAllowDynamicDimensions() bool
- func (x *NNAPISettings) GetAllowFp16PrecisionForFp32() bool
- func (x *NNAPISettings) GetAllowNnapiCpuOnAndroid_10Plus() bool
- func (x *NNAPISettings) GetCacheDirectory() string
- func (x *NNAPISettings) GetExecutionPreference() NNAPIExecutionPreference
- func (x *NNAPISettings) GetExecutionPriority() NNAPIExecutionPriority
- func (x *NNAPISettings) GetFallbackSettings() *FallbackSettings (deprecated)
- func (x *NNAPISettings) GetModelToken() string
- func (x *NNAPISettings) GetNoOfNnapiInstancesToCache() int32
- func (x *NNAPISettings) GetSupportLibraryHandle() int64
- func (x *NNAPISettings) GetUseBurstComputation() bool
- func (*NNAPISettings) ProtoMessage()
- func (x *NNAPISettings) ProtoReflect() protoreflect.Message
- func (x *NNAPISettings) Reset()
- func (x *NNAPISettings) String() string
- type StableDelegateLoaderSettings
- func (*StableDelegateLoaderSettings) Descriptor() ([]byte, []int) (deprecated)
- func (x *StableDelegateLoaderSettings) GetDelegatePath() string
- func (*StableDelegateLoaderSettings) ProtoMessage()
- func (x *StableDelegateLoaderSettings) ProtoReflect() protoreflect.Message
- func (x *StableDelegateLoaderSettings) Reset()
- func (x *StableDelegateLoaderSettings) String() string
- type TFLiteSettings
- func (*TFLiteSettings) Descriptor() ([]byte, []int) (deprecated)
- func (x *TFLiteSettings) GetCoralSettings() *CoralSettings
- func (x *TFLiteSettings) GetCoremlSettings() *CoreMLSettings
- func (x *TFLiteSettings) GetCpuSettings() *CPUSettings
- func (x *TFLiteSettings) GetDelegate() Delegate
- func (x *TFLiteSettings) GetDisableDefaultDelegates() bool
- func (x *TFLiteSettings) GetEdgetpuSettings() *EdgeTpuSettings
- func (x *TFLiteSettings) GetFallbackSettings() *FallbackSettings
- func (x *TFLiteSettings) GetGpuSettings() *GPUSettings
- func (x *TFLiteSettings) GetHexagonSettings() *HexagonSettings
- func (x *TFLiteSettings) GetMaxDelegatedPartitions() int32
- func (x *TFLiteSettings) GetNnapiSettings() *NNAPISettings
- func (x *TFLiteSettings) GetStableDelegateLoaderSettings() *StableDelegateLoaderSettings
- func (x *TFLiteSettings) GetXnnpackSettings() *XNNPackSettings
- func (*TFLiteSettings) ProtoMessage()
- func (x *TFLiteSettings) ProtoReflect() protoreflect.Message
- func (x *TFLiteSettings) Reset()
- func (x *TFLiteSettings) String() string
- type ValidationSettings
- func (*ValidationSettings) Descriptor() ([]byte, []int) (deprecated)
- func (x *ValidationSettings) GetPerTestTimeoutMs() int64
- func (*ValidationSettings) ProtoMessage()
- func (x *ValidationSettings) ProtoReflect() protoreflect.Message
- func (x *ValidationSettings) Reset()
- func (x *ValidationSettings) String() string
- type XNNPackFlags
- func (XNNPackFlags) Descriptor() protoreflect.EnumDescriptor
- func (x XNNPackFlags) Enum() *XNNPackFlags
- func (XNNPackFlags) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x XNNPackFlags) Number() protoreflect.EnumNumber
- func (x XNNPackFlags) String() string
- func (XNNPackFlags) Type() protoreflect.EnumType
- func (x *XNNPackFlags) UnmarshalJSON(b []byte) error (deprecated)
- type XNNPackSettings
- func (*XNNPackSettings) Descriptor() ([]byte, []int) (deprecated)
- func (x *XNNPackSettings) GetFlags() XNNPackFlags
- func (x *XNNPackSettings) GetNumThreads() int32
- func (*XNNPackSettings) ProtoMessage()
- func (x *XNNPackSettings) ProtoReflect() protoreflect.Message
- func (x *XNNPackSettings) Reset()
- func (x *XNNPackSettings) String() string
Constants ¶
const ( Default_GPUSettings_EnableQuantizedInference = bool(true) Default_GPUSettings_InferencePriority1 = GPUInferencePriority_GPU_PRIORITY_AUTO Default_GPUSettings_InferencePriority2 = GPUInferencePriority_GPU_PRIORITY_AUTO Default_GPUSettings_InferencePriority3 = GPUInferencePriority_GPU_PRIORITY_AUTO )
Default values for GPUSettings fields.
const ( Default_CoreMLSettings_MaxDelegatedPartitions = int32(0) Default_CoreMLSettings_MinNodesPerPartition = int32(2) )
Default values for CoreMLSettings fields.
const ( Default_EdgeTpuSettings_InferencePriority = int32(-1) Default_EdgeTpuSettings_QosClass = EdgeTpuSettings_QOS_UNDEFINED )
Default values for EdgeTpuSettings fields.
const (
Default_CPUSettings_NumThreads = int32(-1)
)
Default values for CPUSettings fields.
const (
Default_CoralSettings_Performance = CoralSettings_MAXIMUM
)
Default values for CoralSettings fields.
const (
Default_XNNPackSettings_Flags = XNNPackFlags_TFLITE_XNNPACK_DELEGATE_NO_FLAGS
)
Default values for XNNPackSettings fields.
Variables ¶
var ( ExecutionPreference_name = map[int32]string{ 0: "ANY", 1: "LOW_LATENCY", 2: "LOW_POWER", 3: "FORCE_CPU", } ExecutionPreference_value = map[string]int32{ "ANY": 0, "LOW_LATENCY": 1, "LOW_POWER": 2, "FORCE_CPU": 3, } )
Enum value maps for ExecutionPreference.
var ( Delegate_name = map[int32]string{ 0: "NONE", 1: "NNAPI", 2: "GPU", 3: "HEXAGON", 4: "XNNPACK", 5: "EDGETPU", 6: "EDGETPU_CORAL", 7: "CORE_ML", } Delegate_value = map[string]int32{ "NONE": 0, "NNAPI": 1, "GPU": 2, "HEXAGON": 3, "XNNPACK": 4, "EDGETPU": 5, "EDGETPU_CORAL": 6, "CORE_ML": 7, } )
Enum value maps for Delegate.
var ( NNAPIExecutionPreference_name = map[int32]string{ 0: "UNDEFINED", 1: "NNAPI_LOW_POWER", 2: "NNAPI_FAST_SINGLE_ANSWER", 3: "NNAPI_SUSTAINED_SPEED", } NNAPIExecutionPreference_value = map[string]int32{ "UNDEFINED": 0, "NNAPI_LOW_POWER": 1, "NNAPI_FAST_SINGLE_ANSWER": 2, "NNAPI_SUSTAINED_SPEED": 3, } )
Enum value maps for NNAPIExecutionPreference.
var ( NNAPIExecutionPriority_name = map[int32]string{ 0: "NNAPI_PRIORITY_UNDEFINED", 1: "NNAPI_PRIORITY_LOW", 2: "NNAPI_PRIORITY_MEDIUM", 3: "NNAPI_PRIORITY_HIGH", } NNAPIExecutionPriority_value = map[string]int32{ "NNAPI_PRIORITY_UNDEFINED": 0, "NNAPI_PRIORITY_LOW": 1, "NNAPI_PRIORITY_MEDIUM": 2, "NNAPI_PRIORITY_HIGH": 3, } )
Enum value maps for NNAPIExecutionPriority.
var ( GPUBackend_name = map[int32]string{ 0: "UNSET", 1: "OPENCL", 2: "OPENGL", } GPUBackend_value = map[string]int32{ "UNSET": 0, "OPENCL": 1, "OPENGL": 2, } )
Enum value maps for GPUBackend.
var ( GPUInferencePriority_name = map[int32]string{ 0: "GPU_PRIORITY_AUTO", 1: "GPU_PRIORITY_MAX_PRECISION", 2: "GPU_PRIORITY_MIN_LATENCY", 3: "GPU_PRIORITY_MIN_MEMORY_USAGE", } GPUInferencePriority_value = map[string]int32{ "GPU_PRIORITY_AUTO": 0, "GPU_PRIORITY_MAX_PRECISION": 1, "GPU_PRIORITY_MIN_LATENCY": 2, "GPU_PRIORITY_MIN_MEMORY_USAGE": 3, } )
Enum value maps for GPUInferencePriority.
var ( GPUInferenceUsage_name = map[int32]string{ 0: "GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER", 1: "GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED", } GPUInferenceUsage_value = map[string]int32{ "GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER": 0, "GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED": 1, } )
Enum value maps for GPUInferenceUsage.
var ( XNNPackFlags_name = map[int32]string{ 0: "TFLITE_XNNPACK_DELEGATE_NO_FLAGS", 1: "TFLITE_XNNPACK_DELEGATE_FLAG_QS8", 2: "TFLITE_XNNPACK_DELEGATE_FLAG_QU8", 3: "TFLITE_XNNPACK_DELEGATE_FLAG_QS8_QU8", 4: "TFLITE_XNNPACK_DELEGATE_FLAG_FORCE_FP16", } XNNPackFlags_value = map[string]int32{ "TFLITE_XNNPACK_DELEGATE_NO_FLAGS": 0, "TFLITE_XNNPACK_DELEGATE_FLAG_QS8": 1, "TFLITE_XNNPACK_DELEGATE_FLAG_QU8": 2, "TFLITE_XNNPACK_DELEGATE_FLAG_QS8_QU8": 3, "TFLITE_XNNPACK_DELEGATE_FLAG_FORCE_FP16": 4, } )
Enum value maps for XNNPackFlags.
var ( EdgeTpuPowerState_name = map[int32]string{ 0: "UNDEFINED_POWERSTATE", 1: "TPU_CORE_OFF", 2: "READY", 3: "ACTIVE_MIN_POWER", 4: "ACTIVE_VERY_LOW_POWER", 5: "ACTIVE_LOW_POWER", 6: "ACTIVE", 7: "OVER_DRIVE", } EdgeTpuPowerState_value = map[string]int32{ "UNDEFINED_POWERSTATE": 0, "TPU_CORE_OFF": 1, "READY": 2, "ACTIVE_MIN_POWER": 3, "ACTIVE_VERY_LOW_POWER": 4, "ACTIVE_LOW_POWER": 5, "ACTIVE": 6, "OVER_DRIVE": 7, } )
Enum value maps for EdgeTpuPowerState.
var ( BenchmarkEventType_name = map[int32]string{ 0: "UNDEFINED_BENCHMARK_EVENT_TYPE", 1: "START", 2: "END", 3: "ERROR", 4: "LOGGED", 5: "RECOVERED_ERROR", } BenchmarkEventType_value = map[string]int32{ "UNDEFINED_BENCHMARK_EVENT_TYPE": 0, "START": 1, "END": 2, "ERROR": 3, "LOGGED": 4, "RECOVERED_ERROR": 5, } )
Enum value maps for BenchmarkEventType.
var ( BenchmarkStage_name = map[int32]string{ 0: "UNKNOWN", 1: "INITIALIZATION", 2: "INFERENCE", } BenchmarkStage_value = map[string]int32{ "UNKNOWN": 0, "INITIALIZATION": 1, "INFERENCE": 2, } )
Enum value maps for BenchmarkStage.
var ( CoreMLSettings_EnabledDevices_name = map[int32]string{ 0: "DEVICES_ALL", 1: "DEVICES_WITH_NEURAL_ENGINE", } CoreMLSettings_EnabledDevices_value = map[string]int32{ "DEVICES_ALL": 0, "DEVICES_WITH_NEURAL_ENGINE": 1, } )
Enum value maps for CoreMLSettings_EnabledDevices.
var ( EdgeTpuDeviceSpec_PlatformType_name = map[int32]string{ 0: "MMIO", 1: "REFERENCE", 2: "SIMULATOR", 3: "REMOTE_SIMULATOR", } EdgeTpuDeviceSpec_PlatformType_value = map[string]int32{ "MMIO": 0, "REFERENCE": 1, "SIMULATOR": 2, "REMOTE_SIMULATOR": 3, } )
Enum value maps for EdgeTpuDeviceSpec_PlatformType.
var ( EdgeTpuSettings_FloatTruncationType_name = map[int32]string{ 0: "UNSPECIFIED", 1: "NO_TRUNCATION", 2: "BFLOAT16", 3: "HALF", } EdgeTpuSettings_FloatTruncationType_value = map[string]int32{ "UNSPECIFIED": 0, "NO_TRUNCATION": 1, "BFLOAT16": 2, "HALF": 3, } )
Enum value maps for EdgeTpuSettings_FloatTruncationType.
var ( EdgeTpuSettings_QosClass_name = map[int32]string{ 0: "QOS_UNDEFINED", 1: "BEST_EFFORT", 2: "REALTIME", } EdgeTpuSettings_QosClass_value = map[string]int32{ "QOS_UNDEFINED": 0, "BEST_EFFORT": 1, "REALTIME": 2, } )
Enum value maps for EdgeTpuSettings_QosClass.
var ( CoralSettings_Performance_name = map[int32]string{ 0: "UNDEFINED", 1: "MAXIMUM", 2: "HIGH", 3: "MEDIUM", 4: "LOW", } CoralSettings_Performance_value = map[string]int32{ "UNDEFINED": 0, "MAXIMUM": 1, "HIGH": 2, "MEDIUM": 3, "LOW": 4, } )
Enum value maps for CoralSettings_Performance.
var File_tensorflow_lite_experimental_acceleration_configuration_configuration_proto protoreflect.FileDescriptor
Functions ¶
This section is empty.
Types ¶
type BenchmarkError ¶
type BenchmarkError struct { // How far benchmarking got. Stage *BenchmarkStage `protobuf:"varint,1,opt,name=stage,enum=tflite.proto.BenchmarkStage" json:"stage,omitempty"` // Process exit code. ExitCode *int32 `protobuf:"varint,2,opt,name=exit_code,json=exitCode" json:"exit_code,omitempty"` // Signal the process received. Signal *int32 `protobuf:"varint,3,opt,name=signal" json:"signal,omitempty"` // Handled tflite error. ErrorCode []*ErrorCode `protobuf:"bytes,4,rep,name=error_code,json=errorCode" json:"error_code,omitempty"` // Mini-benchmark error code. MiniBenchmarkErrorCode *int32 `protobuf:"varint,5,opt,name=mini_benchmark_error_code,json=miniBenchmarkErrorCode" json:"mini_benchmark_error_code,omitempty"` // contains filtered or unexported fields }
An error that occurred during benchmarking.
Used with event type ERROR.
func (*BenchmarkError) Descriptor ¶ (deprecated)
func (*BenchmarkError) Descriptor() ([]byte, []int)
Deprecated: Use BenchmarkError.ProtoReflect.Descriptor instead.
func (*BenchmarkError) GetErrorCode ¶
func (x *BenchmarkError) GetErrorCode() []*ErrorCode
func (*BenchmarkError) GetExitCode ¶
func (x *BenchmarkError) GetExitCode() int32
func (*BenchmarkError) GetMiniBenchmarkErrorCode ¶
func (x *BenchmarkError) GetMiniBenchmarkErrorCode() int32
func (*BenchmarkError) GetSignal ¶
func (x *BenchmarkError) GetSignal() int32
func (*BenchmarkError) GetStage ¶
func (x *BenchmarkError) GetStage() BenchmarkStage
func (*BenchmarkError) ProtoMessage ¶
func (*BenchmarkError) ProtoMessage()
func (*BenchmarkError) ProtoReflect ¶
func (x *BenchmarkError) ProtoReflect() protoreflect.Message
func (*BenchmarkError) Reset ¶
func (x *BenchmarkError) Reset()
func (*BenchmarkError) String ¶
func (x *BenchmarkError) String() string
type BenchmarkEvent ¶
type BenchmarkEvent struct { // Which settings were used for benchmarking. TfliteSettings *TFLiteSettings `protobuf:"bytes,1,opt,name=tflite_settings,json=tfliteSettings" json:"tflite_settings,omitempty"` // Type of the event. EventType *BenchmarkEventType `protobuf:"varint,2,opt,name=event_type,json=eventType,enum=tflite.proto.BenchmarkEventType" json:"event_type,omitempty"` // Result of benchmark, used when type is END. Result *BenchmarkResult `protobuf:"bytes,3,opt,name=result" json:"result,omitempty"` // Error during benchmark, used when type is ERROR. Error *BenchmarkError `protobuf:"bytes,4,opt,name=error" json:"error,omitempty"` // Start timestamps. These are used for // 1. Checking whether a test was started but not completed within a given // deadline. // 2. Optionally, telemetry timestamps. BoottimeUs *int64 `protobuf:"varint,5,opt,name=boottime_us,json=boottimeUs" json:"boottime_us,omitempty"` WallclockUs *int64 `protobuf:"varint,6,opt,name=wallclock_us,json=wallclockUs" json:"wallclock_us,omitempty"` // contains filtered or unexported fields }
Top-level benchmarking event stored on-device. All events for a model are parsed to detect the status.
func (*BenchmarkEvent) Descriptor
deprecated
func (*BenchmarkEvent) Descriptor() ([]byte, []int)
Deprecated: Use BenchmarkEvent.ProtoReflect.Descriptor instead.
func (*BenchmarkEvent) GetBoottimeUs ¶
func (x *BenchmarkEvent) GetBoottimeUs() int64
func (*BenchmarkEvent) GetError ¶
func (x *BenchmarkEvent) GetError() *BenchmarkError
func (*BenchmarkEvent) GetEventType ¶
func (x *BenchmarkEvent) GetEventType() BenchmarkEventType
func (*BenchmarkEvent) GetResult ¶
func (x *BenchmarkEvent) GetResult() *BenchmarkResult
func (*BenchmarkEvent) GetTfliteSettings ¶
func (x *BenchmarkEvent) GetTfliteSettings() *TFLiteSettings
func (*BenchmarkEvent) GetWallclockUs ¶
func (x *BenchmarkEvent) GetWallclockUs() int64
func (*BenchmarkEvent) ProtoMessage ¶
func (*BenchmarkEvent) ProtoMessage()
func (*BenchmarkEvent) ProtoReflect ¶
func (x *BenchmarkEvent) ProtoReflect() protoreflect.Message
func (*BenchmarkEvent) Reset ¶
func (x *BenchmarkEvent) Reset()
func (*BenchmarkEvent) String ¶
func (x *BenchmarkEvent) String() string
type BenchmarkEventStorage ¶ added in v2.12.0
type BenchmarkEventStorage struct { ModelIdGroup *ModelIdGroup `protobuf:"bytes,1,opt,name=model_id_group,json=modelIdGroup" json:"model_id_group,omitempty"` BenchmarkEvent *BenchmarkEvent `protobuf:"bytes,2,opt,name=benchmark_event,json=benchmarkEvent" json:"benchmark_event,omitempty"` // contains filtered or unexported fields }
Schema used for caching benchmark results.
func (*BenchmarkEventStorage) Descriptor
deprecated
added in
v2.12.0
func (*BenchmarkEventStorage) Descriptor() ([]byte, []int)
Deprecated: Use BenchmarkEventStorage.ProtoReflect.Descriptor instead.
func (*BenchmarkEventStorage) GetBenchmarkEvent ¶ added in v2.12.0
func (x *BenchmarkEventStorage) GetBenchmarkEvent() *BenchmarkEvent
func (*BenchmarkEventStorage) GetModelIdGroup ¶ added in v2.12.0
func (x *BenchmarkEventStorage) GetModelIdGroup() *ModelIdGroup
func (*BenchmarkEventStorage) ProtoMessage ¶ added in v2.12.0
func (*BenchmarkEventStorage) ProtoMessage()
func (*BenchmarkEventStorage) ProtoReflect ¶ added in v2.12.0
func (x *BenchmarkEventStorage) ProtoReflect() protoreflect.Message
func (*BenchmarkEventStorage) Reset ¶ added in v2.12.0
func (x *BenchmarkEventStorage) Reset()
func (*BenchmarkEventStorage) String ¶ added in v2.12.0
func (x *BenchmarkEventStorage) String() string
type BenchmarkEventType ¶
type BenchmarkEventType int32
Which stage of benchmarking the event is for. There might be multiple events with the same type, if a benchmark is run multiple times.
const ( BenchmarkEventType_UNDEFINED_BENCHMARK_EVENT_TYPE BenchmarkEventType = 0 // Benchmark start. A start without an end can be interpreted as a test that // has crashed or hung. BenchmarkEventType_START BenchmarkEventType = 1 // Benchmarking completion. A model was successfully loaded, acceleration // configured and inference run without errors. There may still be an issue // with correctness of results, or with performance. BenchmarkEventType_END BenchmarkEventType = 2 // Benchmark was not completed due to an error. The error may be a handled // error (e.g., failure in a delegate), or a crash. BenchmarkEventType_ERROR BenchmarkEventType = 3 // Benchmark data has been sent for logging. BenchmarkEventType_LOGGED BenchmarkEventType = 4 // Benchmark encountered an error but was able to continue. The error is not // related to the model execution but to the mini-benchmark logic. An example // of error is a failure when trying to set the CPU affinity of the benchmark // runner process. BenchmarkEventType_RECOVERED_ERROR BenchmarkEventType = 5 )
func (BenchmarkEventType) Descriptor ¶
func (BenchmarkEventType) Descriptor() protoreflect.EnumDescriptor
func (BenchmarkEventType) Enum ¶
func (x BenchmarkEventType) Enum() *BenchmarkEventType
func (BenchmarkEventType) EnumDescriptor
deprecated
func (BenchmarkEventType) EnumDescriptor() ([]byte, []int)
Deprecated: Use BenchmarkEventType.Descriptor instead.
func (BenchmarkEventType) Number ¶
func (x BenchmarkEventType) Number() protoreflect.EnumNumber
func (BenchmarkEventType) String ¶
func (x BenchmarkEventType) String() string
func (BenchmarkEventType) Type ¶
func (BenchmarkEventType) Type() protoreflect.EnumType
func (*BenchmarkEventType) UnmarshalJSON
deprecated
func (x *BenchmarkEventType) UnmarshalJSON(b []byte) error
Deprecated: Do not use.
type BenchmarkInitializationFailure ¶
type BenchmarkInitializationFailure struct { // Status code returned by the mini-benchmark initialization function. InitializationStatus *int32 `protobuf:"varint,1,opt,name=initialization_status,json=initializationStatus" json:"initialization_status,omitempty"` // contains filtered or unexported fields }
Represents a failure during the initialization of the mini-benchmark.
func (*BenchmarkInitializationFailure) Descriptor
deprecated
func (*BenchmarkInitializationFailure) Descriptor() ([]byte, []int)
Deprecated: Use BenchmarkInitializationFailure.ProtoReflect.Descriptor instead.
func (*BenchmarkInitializationFailure) GetInitializationStatus ¶
func (x *BenchmarkInitializationFailure) GetInitializationStatus() int32
func (*BenchmarkInitializationFailure) ProtoMessage ¶
func (*BenchmarkInitializationFailure) ProtoMessage()
func (*BenchmarkInitializationFailure) ProtoReflect ¶
func (x *BenchmarkInitializationFailure) ProtoReflect() protoreflect.Message
func (*BenchmarkInitializationFailure) Reset ¶
func (x *BenchmarkInitializationFailure) Reset()
func (*BenchmarkInitializationFailure) String ¶
func (x *BenchmarkInitializationFailure) String() string
type BenchmarkMetric ¶
type BenchmarkMetric struct { Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` Values []float32 `protobuf:"fixed32,2,rep,packed,name=values" json:"values,omitempty"` // contains filtered or unexported fields }
A correctness metric from a benchmark, for example KL-divergence between known-good CPU output and on-device output. These are primarily used for telemetry and monitored server-side.
func (*BenchmarkMetric) Descriptor
deprecated
func (*BenchmarkMetric) Descriptor() ([]byte, []int)
Deprecated: Use BenchmarkMetric.ProtoReflect.Descriptor instead.
func (*BenchmarkMetric) GetName ¶
func (x *BenchmarkMetric) GetName() string
func (*BenchmarkMetric) GetValues ¶
func (x *BenchmarkMetric) GetValues() []float32
func (*BenchmarkMetric) ProtoMessage ¶
func (*BenchmarkMetric) ProtoMessage()
func (*BenchmarkMetric) ProtoReflect ¶
func (x *BenchmarkMetric) ProtoReflect() protoreflect.Message
func (*BenchmarkMetric) Reset ¶
func (x *BenchmarkMetric) Reset()
func (*BenchmarkMetric) String ¶
func (x *BenchmarkMetric) String() string
type BenchmarkResult ¶
type BenchmarkResult struct { // Time to load model and apply acceleration. Initialization may get run // multiple times to get information on variance. InitializationTimeUs []int64 `protobuf:"varint,1,rep,packed,name=initialization_time_us,json=initializationTimeUs" json:"initialization_time_us,omitempty"` // Time to run inference (call Invoke()). Inference may get run multiple times // to get information on variance. InferenceTimeUs []int64 `protobuf:"varint,2,rep,packed,name=inference_time_us,json=inferenceTimeUs" json:"inference_time_us,omitempty"` // Maximum memory used. Measures size of application heap (does not // necessarily take into account driver-side allocation). MaxMemoryKb *int32 `protobuf:"varint,3,opt,name=max_memory_kb,json=maxMemoryKb" json:"max_memory_kb,omitempty"` // Whether the inference produced correct results (validation graph output // 'ok' for all test inputs). Used on-device to disallow configurations that // produce incorrect results (e.g., due to OpenCL driver bugs). Ok *bool `protobuf:"varint,4,opt,name=ok" json:"ok,omitempty"` // Metrics that were used to determine the 'ok' status. Metrics []*BenchmarkMetric `protobuf:"bytes,5,rep,name=metrics" json:"metrics,omitempty"` // Model output in byte format. Each InferenceOutput comes from one output // tensor. It is ordered the same as tflite::Interpreter::output_tensor(), // i.e. the value of output_tensor(i) is stored in actual_output[i]. Only // populated in custom validation case. ActualOutput []*BenchmarkResult_InferenceOutput `protobuf:"bytes,6,rep,name=actual_output,json=actualOutput" json:"actual_output,omitempty"` // contains filtered or unexported fields }
Outcome of a successfully completed benchmark run. This information is intended to both be used on-device to select the best compute configuration as well as sent to a server for monitoring.
Used with event type END. Next ID: 7
func (*BenchmarkResult) Descriptor
deprecated
func (*BenchmarkResult) Descriptor() ([]byte, []int)
Deprecated: Use BenchmarkResult.ProtoReflect.Descriptor instead.
func (*BenchmarkResult) GetActualOutput ¶ added in v2.12.0
func (x *BenchmarkResult) GetActualOutput() []*BenchmarkResult_InferenceOutput
func (*BenchmarkResult) GetInferenceTimeUs ¶
func (x *BenchmarkResult) GetInferenceTimeUs() []int64
func (*BenchmarkResult) GetInitializationTimeUs ¶
func (x *BenchmarkResult) GetInitializationTimeUs() []int64
func (*BenchmarkResult) GetMaxMemoryKb ¶
func (x *BenchmarkResult) GetMaxMemoryKb() int32
func (*BenchmarkResult) GetMetrics ¶
func (x *BenchmarkResult) GetMetrics() []*BenchmarkMetric
func (*BenchmarkResult) GetOk ¶
func (x *BenchmarkResult) GetOk() bool
func (*BenchmarkResult) ProtoMessage ¶
func (*BenchmarkResult) ProtoMessage()
func (*BenchmarkResult) ProtoReflect ¶
func (x *BenchmarkResult) ProtoReflect() protoreflect.Message
func (*BenchmarkResult) Reset ¶
func (x *BenchmarkResult) Reset()
func (*BenchmarkResult) String ¶
func (x *BenchmarkResult) String() string
type BenchmarkResult_InferenceOutput ¶ added in v2.12.0
type BenchmarkResult_InferenceOutput struct { // The matching Flatbuffer type is ubyte. Value []byte `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"` // contains filtered or unexported fields }
func (*BenchmarkResult_InferenceOutput) Descriptor
deprecated
added in
v2.12.0
func (*BenchmarkResult_InferenceOutput) Descriptor() ([]byte, []int)
Deprecated: Use BenchmarkResult_InferenceOutput.ProtoReflect.Descriptor instead.
func (*BenchmarkResult_InferenceOutput) GetValue ¶ added in v2.12.0
func (x *BenchmarkResult_InferenceOutput) GetValue() []byte
func (*BenchmarkResult_InferenceOutput) ProtoMessage ¶ added in v2.12.0
func (*BenchmarkResult_InferenceOutput) ProtoMessage()
func (*BenchmarkResult_InferenceOutput) ProtoReflect ¶ added in v2.12.0
func (x *BenchmarkResult_InferenceOutput) ProtoReflect() protoreflect.Message
func (*BenchmarkResult_InferenceOutput) Reset ¶ added in v2.12.0
func (x *BenchmarkResult_InferenceOutput) Reset()
func (*BenchmarkResult_InferenceOutput) String ¶ added in v2.12.0
func (x *BenchmarkResult_InferenceOutput) String() string
type BenchmarkStage ¶
type BenchmarkStage int32
When during benchmark execution an error occurred.
const ( BenchmarkStage_UNKNOWN BenchmarkStage = 0 // During model loading or delegation. BenchmarkStage_INITIALIZATION BenchmarkStage = 1 // During inference. BenchmarkStage_INFERENCE BenchmarkStage = 2 )
func (BenchmarkStage) Descriptor ¶
func (BenchmarkStage) Descriptor() protoreflect.EnumDescriptor
func (BenchmarkStage) Enum ¶
func (x BenchmarkStage) Enum() *BenchmarkStage
func (BenchmarkStage) EnumDescriptor
deprecated
func (BenchmarkStage) EnumDescriptor() ([]byte, []int)
Deprecated: Use BenchmarkStage.Descriptor instead.
func (BenchmarkStage) Number ¶
func (x BenchmarkStage) Number() protoreflect.EnumNumber
func (BenchmarkStage) String ¶
func (x BenchmarkStage) String() string
func (BenchmarkStage) Type ¶
func (BenchmarkStage) Type() protoreflect.EnumType
func (*BenchmarkStage) UnmarshalJSON
deprecated
func (x *BenchmarkStage) UnmarshalJSON(b []byte) error
Deprecated: Do not use.
type BenchmarkStoragePaths ¶
type BenchmarkStoragePaths struct { // Base path to the files used to store benchmark results in. Two files // will be generated: one with the given path and an extra file to store // events related to best acceleration results at path storage_file_path + // ".extra.fb". Must be specific to the model. // Note on Android, this should be the code cache directory. StorageFilePath *string `protobuf:"bytes,1,opt,name=storage_file_path,json=storageFilePath" json:"storage_file_path,omitempty"` // Path to a directory for intermediate files (lock files, extracted // binaries). // Note on Android, this typically is the data cache directory (i.e. the one // returned by `getCacheDir()`). DataDirectoryPath *string `protobuf:"bytes,2,opt,name=data_directory_path,json=dataDirectoryPath" json:"data_directory_path,omitempty"` // contains filtered or unexported fields }
Where to store mini-benchmark state.
func (*BenchmarkStoragePaths) Descriptor
deprecated
func (*BenchmarkStoragePaths) Descriptor() ([]byte, []int)
Deprecated: Use BenchmarkStoragePaths.ProtoReflect.Descriptor instead.
func (*BenchmarkStoragePaths) GetDataDirectoryPath ¶
func (x *BenchmarkStoragePaths) GetDataDirectoryPath() string
func (*BenchmarkStoragePaths) GetStorageFilePath ¶
func (x *BenchmarkStoragePaths) GetStorageFilePath() string
func (*BenchmarkStoragePaths) ProtoMessage ¶
func (*BenchmarkStoragePaths) ProtoMessage()
func (*BenchmarkStoragePaths) ProtoReflect ¶
func (x *BenchmarkStoragePaths) ProtoReflect() protoreflect.Message
func (*BenchmarkStoragePaths) Reset ¶
func (x *BenchmarkStoragePaths) Reset()
func (*BenchmarkStoragePaths) String ¶
func (x *BenchmarkStoragePaths) String() string
type BestAccelerationDecision ¶
type BestAccelerationDecision struct { // Number of events used to take the decision. // Using just the size instead of the full list of events to save space. NumberOfSourceEvents *int32 `protobuf:"varint,1,opt,name=number_of_source_events,json=numberOfSourceEvents" json:"number_of_source_events,omitempty"` // Event with min latency in the source ones. MinLatencyEvent *BenchmarkEvent `protobuf:"bytes,2,opt,name=min_latency_event,json=minLatencyEvent" json:"min_latency_event,omitempty"` // Min latency as read from min_latency_event. MinInferenceTimeUs *int64 `protobuf:"varint,3,opt,name=min_inference_time_us,json=minInferenceTimeUs" json:"min_inference_time_us,omitempty"` // contains filtered or unexported fields }
Represents the decision on the best acceleration from the mini-benchmark.
func (*BestAccelerationDecision) Descriptor
deprecated
func (*BestAccelerationDecision) Descriptor() ([]byte, []int)
Deprecated: Use BestAccelerationDecision.ProtoReflect.Descriptor instead.
func (*BestAccelerationDecision) GetMinInferenceTimeUs ¶
func (x *BestAccelerationDecision) GetMinInferenceTimeUs() int64
func (*BestAccelerationDecision) GetMinLatencyEvent ¶
func (x *BestAccelerationDecision) GetMinLatencyEvent() *BenchmarkEvent
func (*BestAccelerationDecision) GetNumberOfSourceEvents ¶
func (x *BestAccelerationDecision) GetNumberOfSourceEvents() int32
func (*BestAccelerationDecision) ProtoMessage ¶
func (*BestAccelerationDecision) ProtoMessage()
func (*BestAccelerationDecision) ProtoReflect ¶
func (x *BestAccelerationDecision) ProtoReflect() protoreflect.Message
func (*BestAccelerationDecision) Reset ¶
func (x *BestAccelerationDecision) Reset()
func (*BestAccelerationDecision) String ¶
func (x *BestAccelerationDecision) String() string
type CPUSettings ¶
type CPUSettings struct { // Set to -1 to let the interpreter choose. Otherwise, must be > 0. NumThreads *int32 `protobuf:"varint,1,opt,name=num_threads,json=numThreads,def=-1" json:"num_threads,omitempty"` // contains filtered or unexported fields }
func (*CPUSettings) Descriptor
deprecated
func (*CPUSettings) Descriptor() ([]byte, []int)
Deprecated: Use CPUSettings.ProtoReflect.Descriptor instead.
func (*CPUSettings) GetNumThreads ¶
func (x *CPUSettings) GetNumThreads() int32
func (*CPUSettings) ProtoMessage ¶
func (*CPUSettings) ProtoMessage()
func (*CPUSettings) ProtoReflect ¶
func (x *CPUSettings) ProtoReflect() protoreflect.Message
func (*CPUSettings) Reset ¶
func (x *CPUSettings) Reset()
func (*CPUSettings) String ¶
func (x *CPUSettings) String() string
type ComputeSettings ¶
type ComputeSettings struct { // Which preference to use this accelerator for. Preference *ExecutionPreference `protobuf:"varint,1,opt,name=preference,enum=tflite.proto.ExecutionPreference" json:"preference,omitempty"` // How to configure TFLite TfliteSettings *TFLiteSettings `protobuf:"bytes,2,opt,name=tflite_settings,json=tfliteSettings" json:"tflite_settings,omitempty"` // Identifiers to use for instrumentation and telemetry. ModelNamespaceForStatistics *string `` /* 139-byte string literal not displayed */ ModelIdentifierForStatistics *string `` /* 142-byte string literal not displayed */ // 'Maybe' acceleration: use mini-benchmark to select settings. SettingsToTestLocally *MinibenchmarkSettings `protobuf:"bytes,5,opt,name=settings_to_test_locally,json=settingsToTestLocally" json:"settings_to_test_locally,omitempty"` // contains filtered or unexported fields }
One possible acceleration configuration.
func (*ComputeSettings) Descriptor
deprecated
func (*ComputeSettings) Descriptor() ([]byte, []int)
Deprecated: Use ComputeSettings.ProtoReflect.Descriptor instead.
func (*ComputeSettings) GetModelIdentifierForStatistics ¶
func (x *ComputeSettings) GetModelIdentifierForStatistics() string
func (*ComputeSettings) GetModelNamespaceForStatistics ¶
func (x *ComputeSettings) GetModelNamespaceForStatistics() string
func (*ComputeSettings) GetPreference ¶
func (x *ComputeSettings) GetPreference() ExecutionPreference
func (*ComputeSettings) GetSettingsToTestLocally ¶
func (x *ComputeSettings) GetSettingsToTestLocally() *MinibenchmarkSettings
func (*ComputeSettings) GetTfliteSettings ¶
func (x *ComputeSettings) GetTfliteSettings() *TFLiteSettings
func (*ComputeSettings) ProtoMessage ¶
func (*ComputeSettings) ProtoMessage()
func (*ComputeSettings) ProtoReflect ¶
func (x *ComputeSettings) ProtoReflect() protoreflect.Message
func (*ComputeSettings) Reset ¶
func (x *ComputeSettings) Reset()
func (*ComputeSettings) String ¶
func (x *ComputeSettings) String() string
type CoralSettings ¶
type CoralSettings struct { // The Edge Tpu device to be used. See // https://github.com/google-coral/libcoral/blob/982426546dfa10128376d0c24fd8a8b161daac97/coral/tflite_utils.h#L131-L137 Device *string `protobuf:"bytes,1,opt,name=device" json:"device,omitempty"` // The desired performance level. This setting adjusts the internal clock // rate to achieve different performance / power balance. Higher performance // values improve speed, but increase power usage. Performance *CoralSettings_Performance `protobuf:"varint,2,opt,name=performance,enum=tflite.proto.CoralSettings_Performance,def=1" json:"performance,omitempty"` // If true, always perform device firmware update (DFU) after reset. DFU is // usually only necessary after power cycle. UsbAlwaysDfu *bool `protobuf:"varint,3,opt,name=usb_always_dfu,json=usbAlwaysDfu" json:"usb_always_dfu,omitempty"` // The maximum bulk in queue length. Larger queue length may improve USB // performance on the direction from device to host. When not specified (or // zero), `usb_max_bulk_in_queue_length` will default to 32 according to the // current EdgeTpu Coral implementation. UsbMaxBulkInQueueLength *int32 `` /* 132-byte string literal not displayed */ // contains filtered or unexported fields }
Coral Dev Board / USB accelerator delegate settings.
See https://github.com/google-coral/edgetpu/blob/master/libedgetpu/edgetpu_c.h
func (*CoralSettings) Descriptor
deprecated
func (*CoralSettings) Descriptor() ([]byte, []int)
Deprecated: Use CoralSettings.ProtoReflect.Descriptor instead.
func (*CoralSettings) GetDevice ¶
func (x *CoralSettings) GetDevice() string
func (*CoralSettings) GetPerformance ¶
func (x *CoralSettings) GetPerformance() CoralSettings_Performance
func (*CoralSettings) GetUsbAlwaysDfu ¶
func (x *CoralSettings) GetUsbAlwaysDfu() bool
func (*CoralSettings) GetUsbMaxBulkInQueueLength ¶
func (x *CoralSettings) GetUsbMaxBulkInQueueLength() int32
func (*CoralSettings) ProtoMessage ¶
func (*CoralSettings) ProtoMessage()
func (*CoralSettings) ProtoReflect ¶
func (x *CoralSettings) ProtoReflect() protoreflect.Message
func (*CoralSettings) Reset ¶
func (x *CoralSettings) Reset()
func (*CoralSettings) String ¶
func (x *CoralSettings) String() string
type CoralSettings_Performance ¶
type CoralSettings_Performance int32
const ( CoralSettings_UNDEFINED CoralSettings_Performance = 0 CoralSettings_MAXIMUM CoralSettings_Performance = 1 CoralSettings_HIGH CoralSettings_Performance = 2 CoralSettings_MEDIUM CoralSettings_Performance = 3 CoralSettings_LOW CoralSettings_Performance = 4 )
func (CoralSettings_Performance) Descriptor ¶
func (CoralSettings_Performance) Descriptor() protoreflect.EnumDescriptor
func (CoralSettings_Performance) Enum ¶
func (x CoralSettings_Performance) Enum() *CoralSettings_Performance
func (CoralSettings_Performance) EnumDescriptor
deprecated
func (CoralSettings_Performance) EnumDescriptor() ([]byte, []int)
Deprecated: Use CoralSettings_Performance.Descriptor instead.
func (CoralSettings_Performance) Number ¶
func (x CoralSettings_Performance) Number() protoreflect.EnumNumber
func (CoralSettings_Performance) String ¶
func (x CoralSettings_Performance) String() string
func (CoralSettings_Performance) Type ¶
func (CoralSettings_Performance) Type() protoreflect.EnumType
func (*CoralSettings_Performance) UnmarshalJSON
deprecated
func (x *CoralSettings_Performance) UnmarshalJSON(b []byte) error
Deprecated: Do not use.
type CoreMLSettings ¶
type CoreMLSettings struct { // Only create delegate when Neural Engine is available on the device. EnabledDevices *CoreMLSettings_EnabledDevices `` /* 145-byte string literal not displayed */ // Specifies target Core ML version for model conversion. // Core ML 3 come with a lot more ops, but some ops (e.g. reshape) is not // delegated due to input rank constraint. // if not set to one of the valid versions, the delegate will use highest // version possible in the platform. // Valid versions: (2, 3) CoremlVersion *int32 `protobuf:"varint,2,opt,name=coreml_version,json=coremlVersion" json:"coreml_version,omitempty"` // This sets the maximum number of Core ML delegates created. // Each graph corresponds to one delegated node subset in the // TFLite model. Set this to 0 to delegate all possible partitions. MaxDelegatedPartitions *int32 `` /* 129-byte string literal not displayed */ // This sets the minimum number of nodes per partition delegated with // Core ML delegate. Defaults to 2. MinNodesPerPartition *int32 `protobuf:"varint,4,opt,name=min_nodes_per_partition,json=minNodesPerPartition,def=2" json:"min_nodes_per_partition,omitempty"` // contains filtered or unexported fields }
CoreML Delegate settings.
func (*CoreMLSettings) Descriptor
deprecated
func (*CoreMLSettings) Descriptor() ([]byte, []int)
Deprecated: Use CoreMLSettings.ProtoReflect.Descriptor instead.
func (*CoreMLSettings) GetCoremlVersion ¶
func (x *CoreMLSettings) GetCoremlVersion() int32
func (*CoreMLSettings) GetEnabledDevices ¶
func (x *CoreMLSettings) GetEnabledDevices() CoreMLSettings_EnabledDevices
func (*CoreMLSettings) GetMaxDelegatedPartitions ¶
func (x *CoreMLSettings) GetMaxDelegatedPartitions() int32
func (*CoreMLSettings) GetMinNodesPerPartition ¶
func (x *CoreMLSettings) GetMinNodesPerPartition() int32
func (*CoreMLSettings) ProtoMessage ¶
func (*CoreMLSettings) ProtoMessage()
func (*CoreMLSettings) ProtoReflect ¶
func (x *CoreMLSettings) ProtoReflect() protoreflect.Message
func (*CoreMLSettings) Reset ¶
func (x *CoreMLSettings) Reset()
func (*CoreMLSettings) String ¶
func (x *CoreMLSettings) String() string
type CoreMLSettings_EnabledDevices ¶
type CoreMLSettings_EnabledDevices int32
Note the enum order change from the above header for better proto practice.
const ( // Always create Core ML delegate. CoreMLSettings_DEVICES_ALL CoreMLSettings_EnabledDevices = 0 // Create Core ML delegate only on devices with Apple Neural Engine. CoreMLSettings_DEVICES_WITH_NEURAL_ENGINE CoreMLSettings_EnabledDevices = 1 )
func (CoreMLSettings_EnabledDevices) Descriptor ¶
func (CoreMLSettings_EnabledDevices) Descriptor() protoreflect.EnumDescriptor
func (CoreMLSettings_EnabledDevices) Enum ¶
func (x CoreMLSettings_EnabledDevices) Enum() *CoreMLSettings_EnabledDevices
func (CoreMLSettings_EnabledDevices) EnumDescriptor
deprecated
func (CoreMLSettings_EnabledDevices) EnumDescriptor() ([]byte, []int)
Deprecated: Use CoreMLSettings_EnabledDevices.Descriptor instead.
func (CoreMLSettings_EnabledDevices) Number ¶
func (x CoreMLSettings_EnabledDevices) Number() protoreflect.EnumNumber
func (CoreMLSettings_EnabledDevices) String ¶
func (x CoreMLSettings_EnabledDevices) String() string
func (CoreMLSettings_EnabledDevices) Type ¶
func (CoreMLSettings_EnabledDevices) Type() protoreflect.EnumType
func (*CoreMLSettings_EnabledDevices) UnmarshalJSON
deprecated
func (x *CoreMLSettings_EnabledDevices) UnmarshalJSON(b []byte) error
Deprecated: Do not use.
type Delegate ¶
type Delegate int32
TFLite accelerator to use.
STATUS: support library and the stable delegate loader settings are agnostic to the actual accelerator.
const ( Delegate_NONE Delegate = 0 Delegate_NNAPI Delegate = 1 Delegate_GPU Delegate = 2 Delegate_HEXAGON Delegate = 3 Delegate_XNNPACK Delegate = 4 // The EdgeTpu in Pixel devices. Delegate_EDGETPU Delegate = 5 // The Coral EdgeTpu Dev Board / USB accelerator. Delegate_EDGETPU_CORAL Delegate = 6 // Apple CoreML. Delegate_CORE_ML Delegate = 7 )
func (Delegate) Descriptor ¶
func (Delegate) Descriptor() protoreflect.EnumDescriptor
func (Delegate) EnumDescriptor
deprecated
func (Delegate) Number ¶
func (x Delegate) Number() protoreflect.EnumNumber
func (Delegate) Type ¶
func (Delegate) Type() protoreflect.EnumType
func (*Delegate) UnmarshalJSON
deprecated
type EdgeTpuDeviceSpec ¶
type EdgeTpuDeviceSpec struct { // Execution platform for the EdgeTPU device. PlatformType *EdgeTpuDeviceSpec_PlatformType `` /* 140-byte string literal not displayed */ // Number of chips to use for the EdgeTPU device. NumChips *int32 `protobuf:"varint,2,opt,name=num_chips,json=numChips" json:"num_chips,omitempty"` // Paths to the EdgeTPU devices. DevicePaths []string `protobuf:"bytes,3,rep,name=device_paths,json=devicePaths" json:"device_paths,omitempty"` // Chip family used by the EdgeTpu device. ChipFamily *int32 `protobuf:"varint,4,opt,name=chip_family,json=chipFamily" json:"chip_family,omitempty"` // contains filtered or unexported fields }
EdgeTPU device spec.
func (*EdgeTpuDeviceSpec) Descriptor
deprecated
func (*EdgeTpuDeviceSpec) Descriptor() ([]byte, []int)
Deprecated: Use EdgeTpuDeviceSpec.ProtoReflect.Descriptor instead.
func (*EdgeTpuDeviceSpec) GetChipFamily ¶
func (x *EdgeTpuDeviceSpec) GetChipFamily() int32
func (*EdgeTpuDeviceSpec) GetDevicePaths ¶
func (x *EdgeTpuDeviceSpec) GetDevicePaths() []string
func (*EdgeTpuDeviceSpec) GetNumChips ¶
func (x *EdgeTpuDeviceSpec) GetNumChips() int32
func (*EdgeTpuDeviceSpec) GetPlatformType ¶
func (x *EdgeTpuDeviceSpec) GetPlatformType() EdgeTpuDeviceSpec_PlatformType
func (*EdgeTpuDeviceSpec) ProtoMessage ¶
func (*EdgeTpuDeviceSpec) ProtoMessage()
func (*EdgeTpuDeviceSpec) ProtoReflect ¶
func (x *EdgeTpuDeviceSpec) ProtoReflect() protoreflect.Message
func (*EdgeTpuDeviceSpec) Reset ¶
func (x *EdgeTpuDeviceSpec) Reset()
func (*EdgeTpuDeviceSpec) String ¶
func (x *EdgeTpuDeviceSpec) String() string
type EdgeTpuDeviceSpec_PlatformType ¶
type EdgeTpuDeviceSpec_PlatformType int32
EdgeTPU platform types.
const ( EdgeTpuDeviceSpec_MMIO EdgeTpuDeviceSpec_PlatformType = 0 EdgeTpuDeviceSpec_REFERENCE EdgeTpuDeviceSpec_PlatformType = 1 EdgeTpuDeviceSpec_SIMULATOR EdgeTpuDeviceSpec_PlatformType = 2 EdgeTpuDeviceSpec_REMOTE_SIMULATOR EdgeTpuDeviceSpec_PlatformType = 3 )
func (EdgeTpuDeviceSpec_PlatformType) Descriptor ¶
func (EdgeTpuDeviceSpec_PlatformType) Descriptor() protoreflect.EnumDescriptor
func (EdgeTpuDeviceSpec_PlatformType) Enum ¶
func (x EdgeTpuDeviceSpec_PlatformType) Enum() *EdgeTpuDeviceSpec_PlatformType
func (EdgeTpuDeviceSpec_PlatformType) EnumDescriptor
deprecated
func (EdgeTpuDeviceSpec_PlatformType) EnumDescriptor() ([]byte, []int)
Deprecated: Use EdgeTpuDeviceSpec_PlatformType.Descriptor instead.
func (EdgeTpuDeviceSpec_PlatformType) Number ¶
func (x EdgeTpuDeviceSpec_PlatformType) Number() protoreflect.EnumNumber
func (EdgeTpuDeviceSpec_PlatformType) String ¶
func (x EdgeTpuDeviceSpec_PlatformType) String() string
func (EdgeTpuDeviceSpec_PlatformType) Type ¶
func (EdgeTpuDeviceSpec_PlatformType) Type() protoreflect.EnumType
func (*EdgeTpuDeviceSpec_PlatformType) UnmarshalJSON
deprecated
func (x *EdgeTpuDeviceSpec_PlatformType) UnmarshalJSON(b []byte) error
Deprecated: Do not use.
type EdgeTpuInactivePowerConfig ¶
type EdgeTpuInactivePowerConfig struct { // Inactive power states between inferences. InactivePowerState *EdgeTpuPowerState `` /* 147-byte string literal not displayed */ // Inactive timeout in microseconds between inferences. InactiveTimeoutUs *int64 `protobuf:"varint,2,opt,name=inactive_timeout_us,json=inactiveTimeoutUs" json:"inactive_timeout_us,omitempty"` // contains filtered or unexported fields }
func (*EdgeTpuInactivePowerConfig) Descriptor
deprecated
func (*EdgeTpuInactivePowerConfig) Descriptor() ([]byte, []int)
Deprecated: Use EdgeTpuInactivePowerConfig.ProtoReflect.Descriptor instead.
func (*EdgeTpuInactivePowerConfig) GetInactivePowerState ¶
func (x *EdgeTpuInactivePowerConfig) GetInactivePowerState() EdgeTpuPowerState
func (*EdgeTpuInactivePowerConfig) GetInactiveTimeoutUs ¶
func (x *EdgeTpuInactivePowerConfig) GetInactiveTimeoutUs() int64
func (*EdgeTpuInactivePowerConfig) ProtoMessage ¶
func (*EdgeTpuInactivePowerConfig) ProtoMessage()
func (*EdgeTpuInactivePowerConfig) ProtoReflect ¶
func (x *EdgeTpuInactivePowerConfig) ProtoReflect() protoreflect.Message
func (*EdgeTpuInactivePowerConfig) Reset ¶
func (x *EdgeTpuInactivePowerConfig) Reset()
func (*EdgeTpuInactivePowerConfig) String ¶
func (x *EdgeTpuInactivePowerConfig) String() string
type EdgeTpuPowerState ¶
type EdgeTpuPowerState int32
Generic definitions of EdgeTPU power states.
const ( // Undefined power state. EdgeTpuPowerState_UNDEFINED_POWERSTATE EdgeTpuPowerState = 0 // TPU core is off but control cluster is on. EdgeTpuPowerState_TPU_CORE_OFF EdgeTpuPowerState = 1 // A non-active low-power state that has much smaller transition time to // active compared to off. EdgeTpuPowerState_READY EdgeTpuPowerState = 2 // Minimum power active state. EdgeTpuPowerState_ACTIVE_MIN_POWER EdgeTpuPowerState = 3 // Very low performance, very low power. EdgeTpuPowerState_ACTIVE_VERY_LOW_POWER EdgeTpuPowerState = 4 // Low performance, low power. EdgeTpuPowerState_ACTIVE_LOW_POWER EdgeTpuPowerState = 5 // The normal performance and power. This setting usually provides the // optimal perf/power trade-off for the average use-case. EdgeTpuPowerState_ACTIVE EdgeTpuPowerState = 6 // Maximum performance level. Potentially higher power and thermal. This // setting may not be allowed in production depending on the system. EdgeTpuPowerState_OVER_DRIVE EdgeTpuPowerState = 7 )
func (EdgeTpuPowerState) Descriptor ¶
func (EdgeTpuPowerState) Descriptor() protoreflect.EnumDescriptor
func (EdgeTpuPowerState) Enum ¶
func (x EdgeTpuPowerState) Enum() *EdgeTpuPowerState
func (EdgeTpuPowerState) EnumDescriptor
deprecated
func (EdgeTpuPowerState) EnumDescriptor() ([]byte, []int)
Deprecated: Use EdgeTpuPowerState.Descriptor instead.
func (EdgeTpuPowerState) Number ¶
func (x EdgeTpuPowerState) Number() protoreflect.EnumNumber
func (EdgeTpuPowerState) String ¶
func (x EdgeTpuPowerState) String() string
func (EdgeTpuPowerState) Type ¶
func (EdgeTpuPowerState) Type() protoreflect.EnumType
func (*EdgeTpuPowerState) UnmarshalJSON
deprecated
func (x *EdgeTpuPowerState) UnmarshalJSON(b []byte) error
Deprecated: Do not use.
type EdgeTpuSettings ¶
type EdgeTpuSettings struct { // Target inference power state for running the model. InferencePowerState *EdgeTpuPowerState `` /* 150-byte string literal not displayed */ // Inactive power states between inferences. InactivePowerConfigs []*EdgeTpuInactivePowerConfig `protobuf:"bytes,2,rep,name=inactive_power_configs,json=inactivePowerConfigs" json:"inactive_power_configs,omitempty"` // Priority for the inference request. InferencePriority *int32 `protobuf:"varint,3,opt,name=inference_priority,json=inferencePriority,def=-1" json:"inference_priority,omitempty"` // Device spec for creating the EdgeTpu device. EdgetpuDeviceSpec *EdgeTpuDeviceSpec `protobuf:"bytes,4,opt,name=edgetpu_device_spec,json=edgetpuDeviceSpec" json:"edgetpu_device_spec,omitempty"` // A unique identifier of the input TfLite model. ModelToken *string `protobuf:"bytes,5,opt,name=model_token,json=modelToken" json:"model_token,omitempty"` // Float truncation type for EdgeTPU. FloatTruncationType *EdgeTpuSettings_FloatTruncationType `` /* 168-byte string literal not displayed */ // QoS class to determine chunking size for PRO onward. QosClass *EdgeTpuSettings_QosClass `` /* 128-byte string literal not displayed */ // contains filtered or unexported fields }
EdgeTPU Delegate settings.
func (*EdgeTpuSettings) Descriptor
deprecated
func (*EdgeTpuSettings) Descriptor() ([]byte, []int)
Deprecated: Use EdgeTpuSettings.ProtoReflect.Descriptor instead.
func (*EdgeTpuSettings) GetEdgetpuDeviceSpec ¶
func (x *EdgeTpuSettings) GetEdgetpuDeviceSpec() *EdgeTpuDeviceSpec
func (*EdgeTpuSettings) GetFloatTruncationType ¶
func (x *EdgeTpuSettings) GetFloatTruncationType() EdgeTpuSettings_FloatTruncationType
func (*EdgeTpuSettings) GetInactivePowerConfigs ¶
func (x *EdgeTpuSettings) GetInactivePowerConfigs() []*EdgeTpuInactivePowerConfig
func (*EdgeTpuSettings) GetInferencePowerState ¶
func (x *EdgeTpuSettings) GetInferencePowerState() EdgeTpuPowerState
func (*EdgeTpuSettings) GetInferencePriority ¶
func (x *EdgeTpuSettings) GetInferencePriority() int32
func (*EdgeTpuSettings) GetModelToken ¶
func (x *EdgeTpuSettings) GetModelToken() string
func (*EdgeTpuSettings) GetQosClass ¶
func (x *EdgeTpuSettings) GetQosClass() EdgeTpuSettings_QosClass
func (*EdgeTpuSettings) ProtoMessage ¶
func (*EdgeTpuSettings) ProtoMessage()
func (*EdgeTpuSettings) ProtoReflect ¶
func (x *EdgeTpuSettings) ProtoReflect() protoreflect.Message
func (*EdgeTpuSettings) Reset ¶
func (x *EdgeTpuSettings) Reset()
func (*EdgeTpuSettings) String ¶
func (x *EdgeTpuSettings) String() string
type EdgeTpuSettings_FloatTruncationType ¶
type EdgeTpuSettings_FloatTruncationType int32
Float truncation types for EdgeTPU.
const ( EdgeTpuSettings_UNSPECIFIED EdgeTpuSettings_FloatTruncationType = 0 EdgeTpuSettings_NO_TRUNCATION EdgeTpuSettings_FloatTruncationType = 1 EdgeTpuSettings_BFLOAT16 EdgeTpuSettings_FloatTruncationType = 2 EdgeTpuSettings_HALF EdgeTpuSettings_FloatTruncationType = 3 )
func (EdgeTpuSettings_FloatTruncationType) Descriptor ¶
func (EdgeTpuSettings_FloatTruncationType) Descriptor() protoreflect.EnumDescriptor
func (EdgeTpuSettings_FloatTruncationType) Enum ¶
func (x EdgeTpuSettings_FloatTruncationType) Enum() *EdgeTpuSettings_FloatTruncationType
func (EdgeTpuSettings_FloatTruncationType) EnumDescriptor
deprecated
func (EdgeTpuSettings_FloatTruncationType) EnumDescriptor() ([]byte, []int)
Deprecated: Use EdgeTpuSettings_FloatTruncationType.Descriptor instead.
func (EdgeTpuSettings_FloatTruncationType) Number ¶
func (x EdgeTpuSettings_FloatTruncationType) Number() protoreflect.EnumNumber
func (EdgeTpuSettings_FloatTruncationType) String ¶
func (x EdgeTpuSettings_FloatTruncationType) String() string
func (EdgeTpuSettings_FloatTruncationType) Type ¶
func (EdgeTpuSettings_FloatTruncationType) Type() protoreflect.EnumType
func (*EdgeTpuSettings_FloatTruncationType) UnmarshalJSON
deprecated
func (x *EdgeTpuSettings_FloatTruncationType) UnmarshalJSON(b []byte) error
Deprecated: Do not use.
type EdgeTpuSettings_QosClass ¶
type EdgeTpuSettings_QosClass int32
const ( EdgeTpuSettings_QOS_UNDEFINED EdgeTpuSettings_QosClass = 0 EdgeTpuSettings_BEST_EFFORT EdgeTpuSettings_QosClass = 1 EdgeTpuSettings_REALTIME EdgeTpuSettings_QosClass = 2 )
func (EdgeTpuSettings_QosClass) Descriptor ¶
func (EdgeTpuSettings_QosClass) Descriptor() protoreflect.EnumDescriptor
func (EdgeTpuSettings_QosClass) Enum ¶
func (x EdgeTpuSettings_QosClass) Enum() *EdgeTpuSettings_QosClass
func (EdgeTpuSettings_QosClass) EnumDescriptor
deprecated
func (EdgeTpuSettings_QosClass) EnumDescriptor() ([]byte, []int)
Deprecated: Use EdgeTpuSettings_QosClass.Descriptor instead.
func (EdgeTpuSettings_QosClass) Number ¶
func (x EdgeTpuSettings_QosClass) Number() protoreflect.EnumNumber
func (EdgeTpuSettings_QosClass) String ¶
func (x EdgeTpuSettings_QosClass) String() string
func (EdgeTpuSettings_QosClass) Type ¶
func (EdgeTpuSettings_QosClass) Type() protoreflect.EnumType
func (*EdgeTpuSettings_QosClass) UnmarshalJSON
deprecated
func (x *EdgeTpuSettings_QosClass) UnmarshalJSON(b []byte) error
Deprecated: Do not use.
type ErrorCode ¶
type ErrorCode struct { // Which delegate the error comes from (or NONE, if it comes from the tflite // framework). Source *Delegate `protobuf:"varint,1,opt,name=source,enum=tflite.proto.Delegate" json:"source,omitempty"` // What the tflite level error is. TfliteError *int32 `protobuf:"varint,2,opt,name=tflite_error,json=tfliteError" json:"tflite_error,omitempty"` // What the underlying error is (e.g., NNAPI or OpenGL error). UnderlyingApiError *int64 `protobuf:"varint,3,opt,name=underlying_api_error,json=underlyingApiError" json:"underlying_api_error,omitempty"` // contains filtered or unexported fields }
A handled error.
func (*ErrorCode) Descriptor
deprecated
func (*ErrorCode) GetTfliteError ¶
func (*ErrorCode) GetUnderlyingApiError ¶
func (*ErrorCode) ProtoMessage ¶
func (*ErrorCode) ProtoMessage()
func (*ErrorCode) ProtoReflect ¶
func (x *ErrorCode) ProtoReflect() protoreflect.Message
type ExecutionPreference ¶
type ExecutionPreference int32
ExecutionPreference is used to match accelerators against the preferences of the current application or usecase. Some of the values here can appear both in the compatibility list and as input, some only as input.
These are separate from NNAPIExecutionPreference - the compatibility list design doesn't assume a one-to-one mapping between which usecases compatibility list entries have been developed for and what settings are used for NNAPI.
const ( // Match any selected preference. Allowlist (semantically - value is same as // on input). ExecutionPreference_ANY ExecutionPreference = 0 // Match low latency preference. Both compatibility list and input. ExecutionPreference_LOW_LATENCY ExecutionPreference = 1 // Match low power preference. Both compatibility list and input. ExecutionPreference_LOW_POWER ExecutionPreference = 2 // Never accelerate. Can be used for input to compatibility list or for // standalone Acceleration configuration. ExecutionPreference_FORCE_CPU ExecutionPreference = 3 )
func (ExecutionPreference) Descriptor ¶
func (ExecutionPreference) Descriptor() protoreflect.EnumDescriptor
func (ExecutionPreference) Enum ¶
func (x ExecutionPreference) Enum() *ExecutionPreference
func (ExecutionPreference) EnumDescriptor
deprecated
func (ExecutionPreference) EnumDescriptor() ([]byte, []int)
Deprecated: Use ExecutionPreference.Descriptor instead.
func (ExecutionPreference) Number ¶
func (x ExecutionPreference) Number() protoreflect.EnumNumber
func (ExecutionPreference) String ¶
func (x ExecutionPreference) String() string
func (ExecutionPreference) Type ¶
func (ExecutionPreference) Type() protoreflect.EnumType
func (*ExecutionPreference) UnmarshalJSON
deprecated
func (x *ExecutionPreference) UnmarshalJSON(b []byte) error
Deprecated: Do not use.
type FallbackSettings ¶
type FallbackSettings struct { // Whether to allow automatically falling back to TfLite CPU path on // compilation failure. Default is not allowing automatic fallback. // // This is useful in naive production usecases where the caller would prefer // for the model to run even if it's not accelerated. More advanced users will // implement fallback themselves; e.g., by using a different model on CPU. // // Note that compilation errors may occur either at initial // ModifyGraphWithDelegate() time, or when calling AllocateTensors() after // resizing. AllowAutomaticFallbackOnCompilationError *bool `` /* 183-byte string literal not displayed */ // Whether to allow automatically falling back to TfLite CPU path on // execution error. Default is not allowing automatic fallback. // // Experimental, use with care (only when you have complete control over the // client code). // // The caveat above for compilation error holds. Additionally, execution-time // errors are harder to handle automatically as they require invalidating the // TfLite interpreter which most client code has not been designed to deal // with. AllowAutomaticFallbackOnExecutionError *bool `` /* 177-byte string literal not displayed */ // contains filtered or unexported fields }
Whether to automatically fallback to TFLite CPU path on delegation errors.
Typically fallback is enabled in production use but disabled in tests and benchmarks to ensure they test the intended path.
func (*FallbackSettings) Descriptor
deprecated
func (*FallbackSettings) Descriptor() ([]byte, []int)
Deprecated: Use FallbackSettings.ProtoReflect.Descriptor instead.
func (*FallbackSettings) GetAllowAutomaticFallbackOnCompilationError ¶
func (x *FallbackSettings) GetAllowAutomaticFallbackOnCompilationError() bool
func (*FallbackSettings) GetAllowAutomaticFallbackOnExecutionError ¶
func (x *FallbackSettings) GetAllowAutomaticFallbackOnExecutionError() bool
func (*FallbackSettings) ProtoMessage ¶
func (*FallbackSettings) ProtoMessage()
func (*FallbackSettings) ProtoReflect ¶
func (x *FallbackSettings) ProtoReflect() protoreflect.Message
func (*FallbackSettings) Reset ¶
func (x *FallbackSettings) Reset()
func (*FallbackSettings) String ¶
func (x *FallbackSettings) String() string
type GPUBackend ¶
type GPUBackend int32
LINT.IfChange Which GPU backend to select. Default behaviour on Android is to try OpenCL and if it's not available fall back to OpenGL.
const ( GPUBackend_UNSET GPUBackend = 0 GPUBackend_OPENCL GPUBackend = 1 GPUBackend_OPENGL GPUBackend = 2 )
func (GPUBackend) Descriptor ¶
func (GPUBackend) Descriptor() protoreflect.EnumDescriptor
func (GPUBackend) Enum ¶
func (x GPUBackend) Enum() *GPUBackend
func (GPUBackend) EnumDescriptor
deprecated
func (GPUBackend) EnumDescriptor() ([]byte, []int)
Deprecated: Use GPUBackend.Descriptor instead.
func (GPUBackend) Number ¶
func (x GPUBackend) Number() protoreflect.EnumNumber
func (GPUBackend) String ¶
func (x GPUBackend) String() string
func (GPUBackend) Type ¶
func (GPUBackend) Type() protoreflect.EnumType
func (*GPUBackend) UnmarshalJSON
deprecated
func (x *GPUBackend) UnmarshalJSON(b []byte) error
Deprecated: Do not use.
type GPUInferencePriority ¶
type GPUInferencePriority int32
GPU inference priorities define relative priorities given by the GPU delegate to different client needs. Corresponds to TfLiteGpuInferencePriority.
const ( GPUInferencePriority_GPU_PRIORITY_AUTO GPUInferencePriority = 0 GPUInferencePriority_GPU_PRIORITY_MAX_PRECISION GPUInferencePriority = 1 GPUInferencePriority_GPU_PRIORITY_MIN_LATENCY GPUInferencePriority = 2 GPUInferencePriority_GPU_PRIORITY_MIN_MEMORY_USAGE GPUInferencePriority = 3 )
func (GPUInferencePriority) Descriptor ¶
func (GPUInferencePriority) Descriptor() protoreflect.EnumDescriptor
func (GPUInferencePriority) Enum ¶
func (x GPUInferencePriority) Enum() *GPUInferencePriority
func (GPUInferencePriority) EnumDescriptor
deprecated
func (GPUInferencePriority) EnumDescriptor() ([]byte, []int)
Deprecated: Use GPUInferencePriority.Descriptor instead.
func (GPUInferencePriority) Number ¶
func (x GPUInferencePriority) Number() protoreflect.EnumNumber
func (GPUInferencePriority) String ¶
func (x GPUInferencePriority) String() string
func (GPUInferencePriority) Type ¶
func (GPUInferencePriority) Type() protoreflect.EnumType
func (*GPUInferencePriority) UnmarshalJSON
deprecated
func (x *GPUInferencePriority) UnmarshalJSON(b []byte) error
Deprecated: Do not use.
type GPUInferenceUsage ¶
type GPUInferenceUsage int32
GPU inference preference for initialization time vs. inference time. Corresponds to TfLiteGpuInferenceUsage.
const ( // Delegate will be used only once, therefore, bootstrap/init time should // be taken into account. GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER GPUInferenceUsage = 0 // Prefer maximizing the throughput. Same delegate will be used repeatedly on // multiple inputs. GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED GPUInferenceUsage = 1 )
func (GPUInferenceUsage) Descriptor ¶
func (GPUInferenceUsage) Descriptor() protoreflect.EnumDescriptor
func (GPUInferenceUsage) Enum ¶
func (x GPUInferenceUsage) Enum() *GPUInferenceUsage
func (GPUInferenceUsage) EnumDescriptor
deprecated
func (GPUInferenceUsage) EnumDescriptor() ([]byte, []int)
Deprecated: Use GPUInferenceUsage.Descriptor instead.
func (GPUInferenceUsage) Number ¶
func (x GPUInferenceUsage) Number() protoreflect.EnumNumber
func (GPUInferenceUsage) String ¶
func (x GPUInferenceUsage) String() string
func (GPUInferenceUsage) Type ¶
func (GPUInferenceUsage) Type() protoreflect.EnumType
func (*GPUInferenceUsage) UnmarshalJSON
deprecated
func (x *GPUInferenceUsage) UnmarshalJSON(b []byte) error
Deprecated: Do not use.
type GPUSettings ¶
type GPUSettings struct { // Obsolete: Ignored if inference_priority1/2/3 are set. IsPrecisionLossAllowed *bool `protobuf:"varint,1,opt,name=is_precision_loss_allowed,json=isPrecisionLossAllowed" json:"is_precision_loss_allowed,omitempty"` EnableQuantizedInference *bool `` /* 135-byte string literal not displayed */ ForceBackend *GPUBackend `protobuf:"varint,3,opt,name=force_backend,json=forceBackend,enum=tflite.proto.GPUBackend" json:"force_backend,omitempty"` // Ordered priorities provide better control over desired semantics, // where priority(n) is more important than priority(n+1). Therefore, // each time inference engine needs to make a decision, it uses // ordered priorities to do so. // // Default values correspond to GPU_PRIORITY_AUTO. // AUTO priority can only be used when higher priorities are fully specified. // For example: // // VALID: priority1 = MIN_LATENCY, priority2 = AUTO, priority3 = AUTO // VALID: priority1 = MIN_LATENCY, priority2 = MAX_PRECISION, // priority3 = AUTO // INVALID: priority1 = AUTO, priority2 = MIN_LATENCY, priority3 = AUTO // INVALID: priority1 = MIN_LATENCY, priority2 = AUTO, // priority3 = MAX_PRECISION // // Invalid priorities will result in error. // // For more information, see TfLiteGpuDelegateOptionsV2. InferencePriority1 *GPUInferencePriority `` /* 154-byte string literal not displayed */ InferencePriority2 *GPUInferencePriority `` /* 154-byte string literal not displayed */ InferencePriority3 *GPUInferencePriority `` /* 154-byte string literal not displayed */ // Whether to optimize for compilation+execution time or execution time only. InferencePreference *GPUInferenceUsage `` /* 148-byte string literal not displayed */ // Model serialization. Setting both of these fields will also set the // TFLITE_GPU_EXPERIMENTAL_FLAGS_ENABLE_SERIALIZATION flag on the delegate. // // GPU model serialization directory passed in TfLiteGpuDelegateOptionsV2. 
// This should be set to the application's code cache directory so that it // cannot be accessed by other apps and is correctly deleted on app updates. // tflite::StatefulNnApiDelegate CacheDirectory *string `protobuf:"bytes,8,opt,name=cache_directory,json=cacheDirectory" json:"cache_directory,omitempty"` // Normally, the model name with version number should be provided here, since // each model needs a unique ID to avoid cache collision. ModelToken *string `protobuf:"bytes,9,opt,name=model_token,json=modelToken" json:"model_token,omitempty"` // contains filtered or unexported fields }
GPU Delegate settings.
See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/delegates/gpu/delegate.h
func (*GPUSettings) Descriptor
deprecated
func (*GPUSettings) Descriptor() ([]byte, []int)
Deprecated: Use GPUSettings.ProtoReflect.Descriptor instead.
func (*GPUSettings) GetCacheDirectory ¶
func (x *GPUSettings) GetCacheDirectory() string
func (*GPUSettings) GetEnableQuantizedInference ¶
func (x *GPUSettings) GetEnableQuantizedInference() bool
func (*GPUSettings) GetForceBackend ¶
func (x *GPUSettings) GetForceBackend() GPUBackend
func (*GPUSettings) GetInferencePreference ¶
func (x *GPUSettings) GetInferencePreference() GPUInferenceUsage
func (*GPUSettings) GetInferencePriority1 ¶
func (x *GPUSettings) GetInferencePriority1() GPUInferencePriority
func (*GPUSettings) GetInferencePriority2 ¶
func (x *GPUSettings) GetInferencePriority2() GPUInferencePriority
func (*GPUSettings) GetInferencePriority3 ¶
func (x *GPUSettings) GetInferencePriority3() GPUInferencePriority
func (*GPUSettings) GetIsPrecisionLossAllowed ¶
func (x *GPUSettings) GetIsPrecisionLossAllowed() bool
func (*GPUSettings) GetModelToken ¶
func (x *GPUSettings) GetModelToken() string
func (*GPUSettings) ProtoMessage ¶
func (*GPUSettings) ProtoMessage()
func (*GPUSettings) ProtoReflect ¶
func (x *GPUSettings) ProtoReflect() protoreflect.Message
func (*GPUSettings) Reset ¶
func (x *GPUSettings) Reset()
func (*GPUSettings) String ¶
func (x *GPUSettings) String() string
type HexagonSettings ¶
type HexagonSettings struct { DebugLevel *int32 `protobuf:"varint,1,opt,name=debug_level,json=debugLevel" json:"debug_level,omitempty"` PowersaveLevel *int32 `protobuf:"varint,2,opt,name=powersave_level,json=powersaveLevel" json:"powersave_level,omitempty"` PrintGraphProfile *bool `protobuf:"varint,3,opt,name=print_graph_profile,json=printGraphProfile" json:"print_graph_profile,omitempty"` PrintGraphDebug *bool `protobuf:"varint,4,opt,name=print_graph_debug,json=printGraphDebug" json:"print_graph_debug,omitempty"` // contains filtered or unexported fields }
Hexagon Delegate settings.
func (*HexagonSettings) Descriptor
deprecated
func (*HexagonSettings) Descriptor() ([]byte, []int)
Deprecated: Use HexagonSettings.ProtoReflect.Descriptor instead.
func (*HexagonSettings) GetDebugLevel ¶
func (x *HexagonSettings) GetDebugLevel() int32
func (*HexagonSettings) GetPowersaveLevel ¶
func (x *HexagonSettings) GetPowersaveLevel() int32
func (*HexagonSettings) GetPrintGraphDebug ¶
func (x *HexagonSettings) GetPrintGraphDebug() bool
func (*HexagonSettings) GetPrintGraphProfile ¶
func (x *HexagonSettings) GetPrintGraphProfile() bool
func (*HexagonSettings) ProtoMessage ¶
func (*HexagonSettings) ProtoMessage()
func (*HexagonSettings) ProtoReflect ¶
func (x *HexagonSettings) ProtoReflect() protoreflect.Message
func (*HexagonSettings) Reset ¶
func (x *HexagonSettings) Reset()
func (*HexagonSettings) String ¶
func (x *HexagonSettings) String() string
type MiniBenchmarkEvent ¶
type MiniBenchmarkEvent struct { // If set to true, this event is used to mark all previous events in the // mini-benchmark internal storage as read and one of the other fields // in this message will have a value. IsLogFlushingEvent *bool `protobuf:"varint,1,opt,name=is_log_flushing_event,json=isLogFlushingEvent" json:"is_log_flushing_event,omitempty"` // Event generated when a best acceleration decision is taken. BestAccelerationDecision *BestAccelerationDecision `` /* 128-byte string literal not displayed */ // Reports a failure during mini-benchmark initialization. InitializationFailure *BenchmarkInitializationFailure `protobuf:"bytes,3,opt,name=initialization_failure,json=initializationFailure" json:"initialization_failure,omitempty"` // Event generated while benchmarking the different settings to test locally. BenchmarkEvent *BenchmarkEvent `protobuf:"bytes,4,opt,name=benchmark_event,json=benchmarkEvent" json:"benchmark_event,omitempty"` // contains filtered or unexported fields }
Events generated by the mini-benchmark before and after triggering the different configuration-specific benchmarks.
func (*MiniBenchmarkEvent) Descriptor
deprecated
func (*MiniBenchmarkEvent) Descriptor() ([]byte, []int)
Deprecated: Use MiniBenchmarkEvent.ProtoReflect.Descriptor instead.
func (*MiniBenchmarkEvent) GetBenchmarkEvent ¶
func (x *MiniBenchmarkEvent) GetBenchmarkEvent() *BenchmarkEvent
func (*MiniBenchmarkEvent) GetBestAccelerationDecision ¶
func (x *MiniBenchmarkEvent) GetBestAccelerationDecision() *BestAccelerationDecision
func (*MiniBenchmarkEvent) GetInitializationFailure ¶
func (x *MiniBenchmarkEvent) GetInitializationFailure() *BenchmarkInitializationFailure
func (*MiniBenchmarkEvent) GetIsLogFlushingEvent ¶
func (x *MiniBenchmarkEvent) GetIsLogFlushingEvent() bool
func (*MiniBenchmarkEvent) ProtoMessage ¶
func (*MiniBenchmarkEvent) ProtoMessage()
func (*MiniBenchmarkEvent) ProtoReflect ¶
func (x *MiniBenchmarkEvent) ProtoReflect() protoreflect.Message
func (*MiniBenchmarkEvent) Reset ¶
func (x *MiniBenchmarkEvent) Reset()
func (*MiniBenchmarkEvent) String ¶
func (x *MiniBenchmarkEvent) String() string
type MinibenchmarkSettings ¶
type MinibenchmarkSettings struct { // Which settings to test. This would typically be filled in from an // allowlist. SettingsToTest []*TFLiteSettings `protobuf:"bytes,1,rep,name=settings_to_test,json=settingsToTest" json:"settings_to_test,omitempty"` // How to access the model. This would typically be set dynamically, as it // depends on the application folder and/or runtime state. ModelFile *ModelFile `protobuf:"bytes,2,opt,name=model_file,json=modelFile" json:"model_file,omitempty"` // Where to store state. This would typically be set dynamically, as it // depends on the application folder. StoragePaths *BenchmarkStoragePaths `protobuf:"bytes,3,opt,name=storage_paths,json=storagePaths" json:"storage_paths,omitempty"` // Validation test related settings. ValidationSettings *ValidationSettings `protobuf:"bytes,4,opt,name=validation_settings,json=validationSettings" json:"validation_settings,omitempty"` // contains filtered or unexported fields }
How to run a minibenchmark. Next ID: 5
func (*MinibenchmarkSettings) Descriptor
deprecated
func (*MinibenchmarkSettings) Descriptor() ([]byte, []int)
Deprecated: Use MinibenchmarkSettings.ProtoReflect.Descriptor instead.
func (*MinibenchmarkSettings) GetModelFile ¶
func (x *MinibenchmarkSettings) GetModelFile() *ModelFile
func (*MinibenchmarkSettings) GetSettingsToTest ¶
func (x *MinibenchmarkSettings) GetSettingsToTest() []*TFLiteSettings
func (*MinibenchmarkSettings) GetStoragePaths ¶
func (x *MinibenchmarkSettings) GetStoragePaths() *BenchmarkStoragePaths
func (*MinibenchmarkSettings) GetValidationSettings ¶ added in v2.11.0
func (x *MinibenchmarkSettings) GetValidationSettings() *ValidationSettings
func (*MinibenchmarkSettings) ProtoMessage ¶
func (*MinibenchmarkSettings) ProtoMessage()
func (*MinibenchmarkSettings) ProtoReflect ¶
func (x *MinibenchmarkSettings) ProtoReflect() protoreflect.Message
func (*MinibenchmarkSettings) Reset ¶
func (x *MinibenchmarkSettings) Reset()
func (*MinibenchmarkSettings) String ¶
func (x *MinibenchmarkSettings) String() string
type ModelFile ¶
type ModelFile struct { // Filename for reading model from. Filename *string `protobuf:"bytes,1,opt,name=filename" json:"filename,omitempty"` // File descriptor to read model from. Fd *int64 `protobuf:"varint,2,opt,name=fd" json:"fd,omitempty"` // Offset for model in file descriptor. Offset *int64 `protobuf:"varint,3,opt,name=offset" json:"offset,omitempty"` // Length of model in file descriptor. Length *int64 `protobuf:"varint,4,opt,name=length" json:"length,omitempty"` ModelIdGroup *ModelIdGroup `protobuf:"bytes,5,opt,name=model_id_group,json=modelIdGroup" json:"model_id_group,omitempty"` // contains filtered or unexported fields }
How to access the model for mini-benchmark. Since mini-benchmark runs in a separate process, it can not access an in-memory model. It can read the model either from a file or from a file descriptor. The file descriptor typically comes from the Android asset manager.
Users should set either filename, or all of fd, offset and length.
func (*ModelFile) Descriptor
deprecated
func (*ModelFile) GetFilename ¶
func (*ModelFile) GetModelIdGroup ¶ added in v2.12.0
func (x *ModelFile) GetModelIdGroup() *ModelIdGroup
func (*ModelFile) ProtoMessage ¶
func (*ModelFile) ProtoMessage()
func (*ModelFile) ProtoReflect ¶
func (x *ModelFile) ProtoReflect() protoreflect.Message
type ModelIdGroup ¶ added in v2.12.0
type ModelIdGroup struct { ModelNamespace *string `protobuf:"bytes,1,opt,name=model_namespace,json=modelNamespace" json:"model_namespace,omitempty"` ModelId *string `protobuf:"bytes,2,opt,name=model_id,json=modelId" json:"model_id,omitempty"` // contains filtered or unexported fields }
func (*ModelIdGroup) Descriptor
deprecated
added in
v2.12.0
func (*ModelIdGroup) Descriptor() ([]byte, []int)
Deprecated: Use ModelIdGroup.ProtoReflect.Descriptor instead.
func (*ModelIdGroup) GetModelId ¶ added in v2.12.0
func (x *ModelIdGroup) GetModelId() string
func (*ModelIdGroup) GetModelNamespace ¶ added in v2.12.0
func (x *ModelIdGroup) GetModelNamespace() string
func (*ModelIdGroup) ProtoMessage ¶ added in v2.12.0
func (*ModelIdGroup) ProtoMessage()
func (*ModelIdGroup) ProtoReflect ¶ added in v2.12.0
func (x *ModelIdGroup) ProtoReflect() protoreflect.Message
func (*ModelIdGroup) Reset ¶ added in v2.12.0
func (x *ModelIdGroup) Reset()
func (*ModelIdGroup) String ¶ added in v2.12.0
func (x *ModelIdGroup) String() string
type NNAPIExecutionPreference ¶
type NNAPIExecutionPreference int32
const ( // Undefined. NNAPIExecutionPreference_UNDEFINED NNAPIExecutionPreference = 0 // Prefer executing in a way that minimizes battery drain. NNAPIExecutionPreference_NNAPI_LOW_POWER NNAPIExecutionPreference = 1 // Prefer returning a single answer as fast as possible, even if this causes // more power consumption. NNAPIExecutionPreference_NNAPI_FAST_SINGLE_ANSWER NNAPIExecutionPreference = 2 // Prefer maximizing the throughput of successive frames, for example when // processing successive frames coming from the camera. NNAPIExecutionPreference_NNAPI_SUSTAINED_SPEED NNAPIExecutionPreference = 3 )
func (NNAPIExecutionPreference) Descriptor ¶
func (NNAPIExecutionPreference) Descriptor() protoreflect.EnumDescriptor
func (NNAPIExecutionPreference) Enum ¶
func (x NNAPIExecutionPreference) Enum() *NNAPIExecutionPreference
func (NNAPIExecutionPreference) EnumDescriptor
deprecated
func (NNAPIExecutionPreference) EnumDescriptor() ([]byte, []int)
Deprecated: Use NNAPIExecutionPreference.Descriptor instead.
func (NNAPIExecutionPreference) Number ¶
func (x NNAPIExecutionPreference) Number() protoreflect.EnumNumber
func (NNAPIExecutionPreference) String ¶
func (x NNAPIExecutionPreference) String() string
func (NNAPIExecutionPreference) Type ¶
func (NNAPIExecutionPreference) Type() protoreflect.EnumType
func (*NNAPIExecutionPreference) UnmarshalJSON
deprecated
func (x *NNAPIExecutionPreference) UnmarshalJSON(b []byte) error
Deprecated: Do not use.
type NNAPIExecutionPriority ¶
type NNAPIExecutionPriority int32
const ( NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED NNAPIExecutionPriority = 0 NNAPIExecutionPriority_NNAPI_PRIORITY_LOW NNAPIExecutionPriority = 1 NNAPIExecutionPriority_NNAPI_PRIORITY_MEDIUM NNAPIExecutionPriority = 2 NNAPIExecutionPriority_NNAPI_PRIORITY_HIGH NNAPIExecutionPriority = 3 )
func (NNAPIExecutionPriority) Descriptor ¶
func (NNAPIExecutionPriority) Descriptor() protoreflect.EnumDescriptor
func (NNAPIExecutionPriority) Enum ¶
func (x NNAPIExecutionPriority) Enum() *NNAPIExecutionPriority
func (NNAPIExecutionPriority) EnumDescriptor
deprecated
func (NNAPIExecutionPriority) EnumDescriptor() ([]byte, []int)
Deprecated: Use NNAPIExecutionPriority.Descriptor instead.
func (NNAPIExecutionPriority) Number ¶
func (x NNAPIExecutionPriority) Number() protoreflect.EnumNumber
func (NNAPIExecutionPriority) String ¶
func (x NNAPIExecutionPriority) String() string
func (NNAPIExecutionPriority) Type ¶
func (NNAPIExecutionPriority) Type() protoreflect.EnumType
func (*NNAPIExecutionPriority) UnmarshalJSON
deprecated
func (x *NNAPIExecutionPriority) UnmarshalJSON(b []byte) error
Deprecated: Do not use.
type NNAPISettings ¶
type NNAPISettings struct { // Which instance (NNAPI accelerator) to use. One driver may provide several // accelerators (though a driver may also hide several back-ends behind one // name, at the choice of the driver vendor). // Note that driver introspection is only available in Android Q and later. AcceleratorName *string `protobuf:"bytes,1,opt,name=accelerator_name,json=acceleratorName" json:"accelerator_name,omitempty"` // NNAPI model compilation caching settings to be passed to // tflite::StatefulNnApiDelegate CacheDirectory *string `protobuf:"bytes,2,opt,name=cache_directory,json=cacheDirectory" json:"cache_directory,omitempty"` ModelToken *string `protobuf:"bytes,3,opt,name=model_token,json=modelToken" json:"model_token,omitempty"` // NNAPI execution preference to pass. See // https://developer.android.com/ndk/reference/group/neural-networks.html ExecutionPreference *NNAPIExecutionPreference `` /* 155-byte string literal not displayed */ // Number of instances to cache for the same model (for input size // changes). This is mandatory for getting reasonable performance in that // case. NoOfNnapiInstancesToCache *int32 `` /* 138-byte string literal not displayed */ // Deprecated; use the fallback_settings in TFLiteSettings. // // Whether to automatically fall back to TFLite CPU path. // // Deprecated: Marked as deprecated in tensorflow/lite/experimental/acceleration/configuration/configuration.proto. FallbackSettings *FallbackSettings `protobuf:"bytes,6,opt,name=fallback_settings,json=fallbackSettings" json:"fallback_settings,omitempty"` // Whether to allow use of NNAPI CPU (nnapi-reference accelerator) on Android // 10+ when an accelerator name is not specified. The NNAPI CPU typically // performs less well than the TfLite built-in kernels; but allowing it allows a // model to be partially accelerated which may be a win. 
AllowNnapiCpuOnAndroid_10Plus *bool `` /* 149-byte string literal not displayed */ ExecutionPriority *NNAPIExecutionPriority `` /* 147-byte string literal not displayed */ // Whether to allow dynamic dimension sizes without re-compilation. // A tensor with a dynamic dimension must have a valid dims_signature // defined. // Only supported in NNAPI 1.1 and newer versions. // WARNING: Setting this flag to true may result in model being rejected by // accelerator. This should only be enabled if the target device supports // dynamic dimensions of the model. // By default this is set to false. AllowDynamicDimensions *bool `protobuf:"varint,9,opt,name=allow_dynamic_dimensions,json=allowDynamicDimensions" json:"allow_dynamic_dimensions,omitempty"` // Whether to allow the NNAPI accelerator to optionally use lower-precision // float16 (16-bit floating point) arithmetic when doing calculations on // float32 (32-bit floating point). AllowFp16PrecisionForFp32 *bool `` /* 137-byte string literal not displayed */ // Whether to use NNAPI Burst mode. // Burst mode allows accelerators to efficiently manage resources, which // would significantly reduce overhead especially if the same delegate // instance is to be used for multiple inferences. UseBurstComputation *bool `protobuf:"varint,11,opt,name=use_burst_computation,json=useBurstComputation" json:"use_burst_computation,omitempty"` // Optional NNAPI Support Library-provided pointer to // NnApiSLDriverImplFL5 which can be used to construct the // NNAPI delegate. SupportLibraryHandle *int64 `protobuf:"varint,12,opt,name=support_library_handle,json=supportLibraryHandle" json:"support_library_handle,omitempty"` // contains filtered or unexported fields }
NNAPI delegate settings.
func (*NNAPISettings) Descriptor
deprecated
func (*NNAPISettings) Descriptor() ([]byte, []int)
Deprecated: Use NNAPISettings.ProtoReflect.Descriptor instead.
func (*NNAPISettings) GetAcceleratorName ¶
func (x *NNAPISettings) GetAcceleratorName() string
func (*NNAPISettings) GetAllowDynamicDimensions ¶
func (x *NNAPISettings) GetAllowDynamicDimensions() bool
func (*NNAPISettings) GetAllowFp16PrecisionForFp32 ¶
func (x *NNAPISettings) GetAllowFp16PrecisionForFp32() bool
func (*NNAPISettings) GetAllowNnapiCpuOnAndroid_10Plus ¶
func (x *NNAPISettings) GetAllowNnapiCpuOnAndroid_10Plus() bool
func (*NNAPISettings) GetCacheDirectory ¶
func (x *NNAPISettings) GetCacheDirectory() string
func (*NNAPISettings) GetExecutionPreference ¶
func (x *NNAPISettings) GetExecutionPreference() NNAPIExecutionPreference
func (*NNAPISettings) GetExecutionPriority ¶
func (x *NNAPISettings) GetExecutionPriority() NNAPIExecutionPriority
func (*NNAPISettings) GetFallbackSettings
deprecated
func (x *NNAPISettings) GetFallbackSettings() *FallbackSettings
Deprecated: Marked as deprecated in tensorflow/lite/experimental/acceleration/configuration/configuration.proto.
func (*NNAPISettings) GetModelToken ¶
func (x *NNAPISettings) GetModelToken() string
func (*NNAPISettings) GetNoOfNnapiInstancesToCache ¶
func (x *NNAPISettings) GetNoOfNnapiInstancesToCache() int32
func (*NNAPISettings) GetSupportLibraryHandle ¶
func (x *NNAPISettings) GetSupportLibraryHandle() int64
func (*NNAPISettings) GetUseBurstComputation ¶
func (x *NNAPISettings) GetUseBurstComputation() bool
func (*NNAPISettings) ProtoMessage ¶
func (*NNAPISettings) ProtoMessage()
func (*NNAPISettings) ProtoReflect ¶
func (x *NNAPISettings) ProtoReflect() protoreflect.Message
func (*NNAPISettings) Reset ¶
func (x *NNAPISettings) Reset()
func (*NNAPISettings) String ¶
func (x *NNAPISettings) String() string
type StableDelegateLoaderSettings ¶ added in v2.12.0
type StableDelegateLoaderSettings struct { // The path of the stable delegate shared object file. Then the stable // delegate provider can dynamically load the shared object file. DelegatePath *string `protobuf:"bytes,1,opt,name=delegate_path,json=delegatePath" json:"delegate_path,omitempty"` // contains filtered or unexported fields }
Stable delegate loader settings.
See tensorflow/lite/core/experimental/acceleration/configuration/c/stable_delegate.h. An example stable delegate: tensorflow/lite/delegates/utils/experimental/sample_stable_delegate
func (*StableDelegateLoaderSettings) Descriptor
deprecated
added in
v2.12.0
func (*StableDelegateLoaderSettings) Descriptor() ([]byte, []int)
Deprecated: Use StableDelegateLoaderSettings.ProtoReflect.Descriptor instead.
func (*StableDelegateLoaderSettings) GetDelegatePath ¶ added in v2.12.0
func (x *StableDelegateLoaderSettings) GetDelegatePath() string
func (*StableDelegateLoaderSettings) ProtoMessage ¶ added in v2.12.0
func (*StableDelegateLoaderSettings) ProtoMessage()
func (*StableDelegateLoaderSettings) ProtoReflect ¶ added in v2.12.0
func (x *StableDelegateLoaderSettings) ProtoReflect() protoreflect.Message
func (*StableDelegateLoaderSettings) Reset ¶ added in v2.12.0
func (x *StableDelegateLoaderSettings) Reset()
func (*StableDelegateLoaderSettings) String ¶ added in v2.12.0
func (x *StableDelegateLoaderSettings) String() string
type TFLiteSettings ¶
type TFLiteSettings struct { // Which delegate to use. Delegate *Delegate `protobuf:"varint,1,opt,name=delegate,enum=tflite.proto.Delegate" json:"delegate,omitempty"` // How to configure the chosen delegate. // (In principle we would like to use 'oneof', but flatc turns that into an // nested anonymous table rather than a union. See // https://github.com/google/flatbuffers/issues/4628). NnapiSettings *NNAPISettings `protobuf:"bytes,2,opt,name=nnapi_settings,json=nnapiSettings" json:"nnapi_settings,omitempty"` GpuSettings *GPUSettings `protobuf:"bytes,3,opt,name=gpu_settings,json=gpuSettings" json:"gpu_settings,omitempty"` HexagonSettings *HexagonSettings `protobuf:"bytes,4,opt,name=hexagon_settings,json=hexagonSettings" json:"hexagon_settings,omitempty"` XnnpackSettings *XNNPackSettings `protobuf:"bytes,5,opt,name=xnnpack_settings,json=xnnpackSettings" json:"xnnpack_settings,omitempty"` CoremlSettings *CoreMLSettings `protobuf:"bytes,11,opt,name=coreml_settings,json=coremlSettings" json:"coreml_settings,omitempty"` // How to configure CPU execution. CpuSettings *CPUSettings `protobuf:"bytes,6,opt,name=cpu_settings,json=cpuSettings" json:"cpu_settings,omitempty"` // Shared delegation settings. MaxDelegatedPartitions *int32 `protobuf:"varint,7,opt,name=max_delegated_partitions,json=maxDelegatedPartitions" json:"max_delegated_partitions,omitempty"` // For configuring the EdgeTpuDelegate. EdgetpuSettings *EdgeTpuSettings `protobuf:"bytes,8,opt,name=edgetpu_settings,json=edgetpuSettings" json:"edgetpu_settings,omitempty"` // For configuring the Coral EdgeTpu Delegate. CoralSettings *CoralSettings `protobuf:"bytes,10,opt,name=coral_settings,json=coralSettings" json:"coral_settings,omitempty"` // Whether to automatically fall back to TFLite CPU path. FallbackSettings *FallbackSettings `protobuf:"bytes,9,opt,name=fallback_settings,json=fallbackSettings" json:"fallback_settings,omitempty"` // Whether to disable default delegates (XNNPack). 
// TODO(b/260405596): Update the comment to clarify the interaction between // `disable_default_delegates` and `fallback_settings`. DisableDefaultDelegates *bool `` /* 127-byte string literal not displayed */ // For loading a stable delegate. If an app supplies a delegate shared library // (e.g. packaged with the app, or downloaded separately), the app can use // this field for passing the path to the delegate shared library. // // The stable delegate loader settings field works together with the settings // of other concrete stable delegates; the stable delegate loader is not a // concrete delegate type but a mechanism for initializing the TF Lite stable // delegates. // // See // tensorflow/lite/delegates/utils/experimental/sample_stable_delegate StableDelegateLoaderSettings *StableDelegateLoaderSettings `` /* 143-byte string literal not displayed */ // contains filtered or unexported fields }
How to configure TFLite.
func (*TFLiteSettings) Descriptor
deprecated
func (*TFLiteSettings) Descriptor() ([]byte, []int)
Deprecated: Use TFLiteSettings.ProtoReflect.Descriptor instead.
func (*TFLiteSettings) GetCoralSettings ¶
func (x *TFLiteSettings) GetCoralSettings() *CoralSettings
func (*TFLiteSettings) GetCoremlSettings ¶
func (x *TFLiteSettings) GetCoremlSettings() *CoreMLSettings
func (*TFLiteSettings) GetCpuSettings ¶
func (x *TFLiteSettings) GetCpuSettings() *CPUSettings
func (*TFLiteSettings) GetDelegate ¶
func (x *TFLiteSettings) GetDelegate() Delegate
func (*TFLiteSettings) GetDisableDefaultDelegates ¶
func (x *TFLiteSettings) GetDisableDefaultDelegates() bool
func (*TFLiteSettings) GetEdgetpuSettings ¶
func (x *TFLiteSettings) GetEdgetpuSettings() *EdgeTpuSettings
func (*TFLiteSettings) GetFallbackSettings ¶
func (x *TFLiteSettings) GetFallbackSettings() *FallbackSettings
func (*TFLiteSettings) GetGpuSettings ¶
func (x *TFLiteSettings) GetGpuSettings() *GPUSettings
func (*TFLiteSettings) GetHexagonSettings ¶
func (x *TFLiteSettings) GetHexagonSettings() *HexagonSettings
func (*TFLiteSettings) GetMaxDelegatedPartitions ¶
func (x *TFLiteSettings) GetMaxDelegatedPartitions() int32
func (*TFLiteSettings) GetNnapiSettings ¶
func (x *TFLiteSettings) GetNnapiSettings() *NNAPISettings
func (*TFLiteSettings) GetStableDelegateLoaderSettings ¶ added in v2.12.0
func (x *TFLiteSettings) GetStableDelegateLoaderSettings() *StableDelegateLoaderSettings
func (*TFLiteSettings) GetXnnpackSettings ¶
func (x *TFLiteSettings) GetXnnpackSettings() *XNNPackSettings
func (*TFLiteSettings) ProtoMessage ¶
func (*TFLiteSettings) ProtoMessage()
func (*TFLiteSettings) ProtoReflect ¶
func (x *TFLiteSettings) ProtoReflect() protoreflect.Message
func (*TFLiteSettings) Reset ¶
func (x *TFLiteSettings) Reset()
func (*TFLiteSettings) String ¶
func (x *TFLiteSettings) String() string
type ValidationSettings ¶ added in v2.11.0
type ValidationSettings struct { // Timeout for one setting under test. If the test didn't finish within this // timeout, this setting is considered hanging. PerTestTimeoutMs *int64 `protobuf:"varint,1,opt,name=per_test_timeout_ms,json=perTestTimeoutMs" json:"per_test_timeout_ms,omitempty"` // contains filtered or unexported fields }
Validation related settings. Next ID: 2
func (*ValidationSettings) Descriptor
deprecated
added in
v2.11.0
func (*ValidationSettings) Descriptor() ([]byte, []int)
Deprecated: Use ValidationSettings.ProtoReflect.Descriptor instead.
func (*ValidationSettings) GetPerTestTimeoutMs ¶ added in v2.11.0
func (x *ValidationSettings) GetPerTestTimeoutMs() int64
func (*ValidationSettings) ProtoMessage ¶ added in v2.11.0
func (*ValidationSettings) ProtoMessage()
func (*ValidationSettings) ProtoReflect ¶ added in v2.11.0
func (x *ValidationSettings) ProtoReflect() protoreflect.Message
func (*ValidationSettings) Reset ¶ added in v2.11.0
func (x *ValidationSettings) Reset()
func (*ValidationSettings) String ¶ added in v2.11.0
func (x *ValidationSettings) String() string
type XNNPackFlags ¶
type XNNPackFlags int32
XNNPack Delegate settings.
const ( // These flags match the flags in xnnpack_delegate.h. XNNPackFlags_TFLITE_XNNPACK_DELEGATE_NO_FLAGS XNNPackFlags = 0 // Enable fast signed integer XNNpack kernels. XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QS8 XNNPackFlags = 1 // Enable fast unsigned integer XNNpack kernels. XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QU8 XNNPackFlags = 2 // Enable both signed and unsigned integer XNNpack kernels. XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QS8_QU8 XNNPackFlags = 3 // Force 16-bit floating point inference. XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_FORCE_FP16 XNNPackFlags = 4 )
func (XNNPackFlags) Descriptor ¶
func (XNNPackFlags) Descriptor() protoreflect.EnumDescriptor
func (XNNPackFlags) Enum ¶
func (x XNNPackFlags) Enum() *XNNPackFlags
func (XNNPackFlags) EnumDescriptor
deprecated
func (XNNPackFlags) EnumDescriptor() ([]byte, []int)
Deprecated: Use XNNPackFlags.Descriptor instead.
func (XNNPackFlags) Number ¶
func (x XNNPackFlags) Number() protoreflect.EnumNumber
func (XNNPackFlags) String ¶
func (x XNNPackFlags) String() string
func (XNNPackFlags) Type ¶
func (XNNPackFlags) Type() protoreflect.EnumType
func (*XNNPackFlags) UnmarshalJSON
deprecated
func (x *XNNPackFlags) UnmarshalJSON(b []byte) error
Deprecated: Do not use.
type XNNPackSettings ¶
type XNNPackSettings struct { NumThreads *int32 `protobuf:"varint,1,opt,name=num_threads,json=numThreads" json:"num_threads,omitempty"` Flags *XNNPackFlags `protobuf:"varint,2,opt,name=flags,enum=tflite.proto.XNNPackFlags,def=0" json:"flags,omitempty"` // contains filtered or unexported fields }
func (*XNNPackSettings) Descriptor
deprecated
func (*XNNPackSettings) Descriptor() ([]byte, []int)
Deprecated: Use XNNPackSettings.ProtoReflect.Descriptor instead.
func (*XNNPackSettings) GetFlags ¶
func (x *XNNPackSettings) GetFlags() XNNPackFlags
func (*XNNPackSettings) GetNumThreads ¶
func (x *XNNPackSettings) GetNumThreads() int32
func (*XNNPackSettings) ProtoMessage ¶
func (*XNNPackSettings) ProtoMessage()
func (*XNNPackSettings) ProtoReflect ¶
func (x *XNNPackSettings) ProtoReflect() protoreflect.Message
func (*XNNPackSettings) Reset ¶
func (x *XNNPackSettings) Reset()
func (*XNNPackSettings) String ¶
func (x *XNNPackSettings) String() string