Documentation ¶
Index ¶
- Variables
- type BatchingParameters
- func (*BatchingParameters) Descriptor() ([]byte, []int) (deprecated)
- func (x *BatchingParameters) GetAllowedBatchSizes() []int64
- func (x *BatchingParameters) GetBatchTimeoutMicros() *wrapperspb.Int64Value
- func (x *BatchingParameters) GetEnableLargeBatchSplitting() *wrapperspb.BoolValue
- func (x *BatchingParameters) GetMaxBatchSize() *wrapperspb.Int64Value
- func (x *BatchingParameters) GetMaxEnqueuedBatches() *wrapperspb.Int64Value
- func (x *BatchingParameters) GetMaxExecutionBatchSize() *wrapperspb.Int64Value
- func (x *BatchingParameters) GetNumBatchThreads() *wrapperspb.Int64Value
- func (x *BatchingParameters) GetPadVariableLengthInputs() bool
- func (x *BatchingParameters) GetThreadPoolName() *wrapperspb.StringValue
- func (*BatchingParameters) ProtoMessage()
- func (x *BatchingParameters) ProtoReflect() protoreflect.Message
- func (x *BatchingParameters) Reset()
- func (x *BatchingParameters) String() string
- type ModelWarmupOptions
- func (*ModelWarmupOptions) Descriptor() ([]byte, []int) (deprecated)
- func (x *ModelWarmupOptions) GetNumModelWarmupThreads() *wrapperspb.Int32Value
- func (x *ModelWarmupOptions) GetNumRequestIterations() *wrapperspb.Int32Value
- func (*ModelWarmupOptions) ProtoMessage()
- func (x *ModelWarmupOptions) ProtoReflect() protoreflect.Message
- func (x *ModelWarmupOptions) Reset()
- func (x *ModelWarmupOptions) String() string
- type SavedModelBundleSourceAdapterConfig
- func (*SavedModelBundleSourceAdapterConfig) Descriptor() ([]byte, []int) (deprecated)
- func (x *SavedModelBundleSourceAdapterConfig) GetLegacyConfig() *SessionBundleConfig
- func (*SavedModelBundleSourceAdapterConfig) ProtoMessage()
- func (x *SavedModelBundleSourceAdapterConfig) ProtoReflect() protoreflect.Message
- func (x *SavedModelBundleSourceAdapterConfig) Reset()
- func (x *SavedModelBundleSourceAdapterConfig) String() string
- type SessionBundleConfig
- func (*SessionBundleConfig) Descriptor() ([]byte, []int) (deprecated)
- func (x *SessionBundleConfig) GetBatchingParameters() *BatchingParameters
- func (x *SessionBundleConfig) GetEnableModelWarmup() bool
- func (x *SessionBundleConfig) GetEnableSessionMetadata() bool
- func (x *SessionBundleConfig) GetExperimentalFixedInputTensors() []*protobuf.NamedTensorProto
- func (x *SessionBundleConfig) GetExperimentalTransientRamBytesDuringLoad() uint64
- func (x *SessionBundleConfig) GetModelWarmupOptions() *ModelWarmupOptions
- func (x *SessionBundleConfig) GetNumTfliteInterpreters() int32 (deprecated)
- func (x *SessionBundleConfig) GetNumTfliteInterpretersPerPool() int32
- func (x *SessionBundleConfig) GetNumTflitePools() int32
- func (x *SessionBundleConfig) GetPreferTfliteModel() bool
- func (x *SessionBundleConfig) GetRemoveUnusedFieldsFromBundleMetagraph() bool
- func (x *SessionBundleConfig) GetResourceEstimationUsesValidationResult() bool
- func (x *SessionBundleConfig) GetSavedModelTags() []string
- func (x *SessionBundleConfig) GetSessionConfig() *protobuf.ConfigProto
- func (x *SessionBundleConfig) GetSessionRunLoadThreadpoolIndex() *wrapperspb.Int32Value
- func (x *SessionBundleConfig) GetSessionTarget() string
- func (x *SessionBundleConfig) GetWrapSessionWithNoThreadingParams() bool
- func (*SessionBundleConfig) ProtoMessage()
- func (x *SessionBundleConfig) ProtoReflect() protoreflect.Message
- func (x *SessionBundleConfig) Reset()
- func (x *SessionBundleConfig) String() string
- type ThreadPoolFactoryConfig
- func (*ThreadPoolFactoryConfig) Descriptor() ([]byte, []int) (deprecated)
- func (x *ThreadPoolFactoryConfig) GetThreadPoolFactoryConfig() *anypb.Any
- func (*ThreadPoolFactoryConfig) ProtoMessage()
- func (x *ThreadPoolFactoryConfig) ProtoReflect() protoreflect.Message
- func (x *ThreadPoolFactoryConfig) Reset()
- func (x *ThreadPoolFactoryConfig) String() string
Constants ¶
This section is empty.
Variables ¶
var File_tensorflow_serving_servables_tensorflow_saved_model_bundle_source_adapter_proto protoreflect.FileDescriptor
var File_tensorflow_serving_servables_tensorflow_session_bundle_config_proto protoreflect.FileDescriptor
var File_tensorflow_serving_servables_tensorflow_thread_pool_factory_config_proto protoreflect.FileDescriptor
Functions ¶
This section is empty.
Types ¶
type BatchingParameters ¶
type BatchingParameters struct { // The maximum size of each input batch. // // IMPORTANT: As discussed above, use 'max_batch_size * 2' client threads to // achieve high throughput with batching. MaxBatchSize *wrapperspb.Int64Value `protobuf:"bytes,1,opt,name=max_batch_size,json=maxBatchSize,proto3" json:"max_batch_size,omitempty"` // If a task has been enqueued for this amount of time (in microseconds), and // a thread is available, the scheduler will immediately form a batch from // enqueued tasks and assign the batch to the thread for processing, even if // the batch's size is below 'max_batch_size'. BatchTimeoutMicros *wrapperspb.Int64Value `protobuf:"bytes,2,opt,name=batch_timeout_micros,json=batchTimeoutMicros,proto3" json:"batch_timeout_micros,omitempty"` // The maximum length of the queue, in terms of the number of batches. (A // batch that has been scheduled on a thread is considered to have been // removed from the queue.) MaxEnqueuedBatches *wrapperspb.Int64Value `protobuf:"bytes,3,opt,name=max_enqueued_batches,json=maxEnqueuedBatches,proto3" json:"max_enqueued_batches,omitempty"` // The number of threads to use to process batches. // Must be >= 1, and should be tuned carefully. NumBatchThreads *wrapperspb.Int64Value `protobuf:"bytes,4,opt,name=num_batch_threads,json=numBatchThreads,proto3" json:"num_batch_threads,omitempty"` // The name to use for the pool of batch threads. ThreadPoolName *wrapperspb.StringValue `protobuf:"bytes,5,opt,name=thread_pool_name,json=threadPoolName,proto3" json:"thread_pool_name,omitempty"` // If true, queue implementation would split one input batch task into // subtasks (as specified by `split_input_task_func` below) and fit subtasks // into different batches. EnableLargeBatchSplitting *wrapperspb.BoolValue `` /* 140-byte string literal not displayed */ // The maximum size of each enqueued batch to be processed (i.e., in // `batches_`). Relevant iff enable_large_batch_splitting is true. 
And when // relevant, 'max_batch_size' should be greater or equal than // `max_execution_batch_size` // // The scheduler may form batches of any size between 1 and this number // (inclusive). MaxExecutionBatchSize *wrapperspb.Int64Value `` /* 128-byte string literal not displayed */ // The allowed batch sizes. (Ignored if left empty.) // Requirements: // - The entries must be in increasing order. // - The final entry must equal 'max_batch_size'. AllowedBatchSizes []int64 `protobuf:"varint,6,rep,packed,name=allowed_batch_sizes,json=allowedBatchSizes,proto3" json:"allowed_batch_sizes,omitempty"` // Whether to pad variable-length inputs when a batch is formed. PadVariableLengthInputs bool `` /* 135-byte string literal not displayed */ // contains filtered or unexported fields }
Batching parameters. Each individual parameter is optional. If omitted, the default value from the relevant batching config struct (SharedBatchScheduler::Options or BatchSchedulerRetrier::Options) is used.
func (*BatchingParameters) Descriptor
deprecated
func (*BatchingParameters) Descriptor() ([]byte, []int)
Deprecated: Use BatchingParameters.ProtoReflect.Descriptor instead.
func (*BatchingParameters) GetAllowedBatchSizes ¶
func (x *BatchingParameters) GetAllowedBatchSizes() []int64
func (*BatchingParameters) GetBatchTimeoutMicros ¶
func (x *BatchingParameters) GetBatchTimeoutMicros() *wrapperspb.Int64Value
func (*BatchingParameters) GetEnableLargeBatchSplitting ¶
func (x *BatchingParameters) GetEnableLargeBatchSplitting() *wrapperspb.BoolValue
func (*BatchingParameters) GetMaxBatchSize ¶
func (x *BatchingParameters) GetMaxBatchSize() *wrapperspb.Int64Value
func (*BatchingParameters) GetMaxEnqueuedBatches ¶
func (x *BatchingParameters) GetMaxEnqueuedBatches() *wrapperspb.Int64Value
func (*BatchingParameters) GetMaxExecutionBatchSize ¶
func (x *BatchingParameters) GetMaxExecutionBatchSize() *wrapperspb.Int64Value
func (*BatchingParameters) GetNumBatchThreads ¶
func (x *BatchingParameters) GetNumBatchThreads() *wrapperspb.Int64Value
func (*BatchingParameters) GetPadVariableLengthInputs ¶
func (x *BatchingParameters) GetPadVariableLengthInputs() bool
func (*BatchingParameters) GetThreadPoolName ¶
func (x *BatchingParameters) GetThreadPoolName() *wrapperspb.StringValue
func (*BatchingParameters) ProtoMessage ¶
func (*BatchingParameters) ProtoMessage()
func (*BatchingParameters) ProtoReflect ¶
func (x *BatchingParameters) ProtoReflect() protoreflect.Message
func (*BatchingParameters) Reset ¶
func (x *BatchingParameters) Reset()
func (*BatchingParameters) String ¶
func (x *BatchingParameters) String() string
type ModelWarmupOptions ¶
type ModelWarmupOptions struct { // Number of times a request is iterated during warmup replay. By default 1. NumRequestIterations *wrapperspb.Int32Value `protobuf:"bytes,1,opt,name=num_request_iterations,json=numRequestIterations,proto3" json:"num_request_iterations,omitempty"` // The number of threads to parallel execute warm up queries. By default 1. NumModelWarmupThreads *wrapperspb.Int32Value `` /* 128-byte string literal not displayed */ // contains filtered or unexported fields }
Options related to model-warmup.
func (*ModelWarmupOptions) Descriptor
deprecated
func (*ModelWarmupOptions) Descriptor() ([]byte, []int)
Deprecated: Use ModelWarmupOptions.ProtoReflect.Descriptor instead.
func (*ModelWarmupOptions) GetNumModelWarmupThreads ¶
func (x *ModelWarmupOptions) GetNumModelWarmupThreads() *wrapperspb.Int32Value
func (*ModelWarmupOptions) GetNumRequestIterations ¶
func (x *ModelWarmupOptions) GetNumRequestIterations() *wrapperspb.Int32Value
func (*ModelWarmupOptions) ProtoMessage ¶
func (*ModelWarmupOptions) ProtoMessage()
func (*ModelWarmupOptions) ProtoReflect ¶
func (x *ModelWarmupOptions) ProtoReflect() protoreflect.Message
func (*ModelWarmupOptions) Reset ¶
func (x *ModelWarmupOptions) Reset()
func (*ModelWarmupOptions) String ¶
func (x *ModelWarmupOptions) String() string
type SavedModelBundleSourceAdapterConfig ¶
type SavedModelBundleSourceAdapterConfig struct { // A SessionBundleConfig. // FOR INTERNAL USE ONLY DURING TRANSITION TO SAVED_MODEL. WILL BE DEPRECATED. // TODO(b/32248363): Replace this field with the "real" field(s). LegacyConfig *SessionBundleConfig `protobuf:"bytes,1000,opt,name=legacy_config,json=legacyConfig,proto3" json:"legacy_config,omitempty"` // contains filtered or unexported fields }
Config proto for SavedModelBundleSourceAdapter.
func (*SavedModelBundleSourceAdapterConfig) Descriptor
deprecated
func (*SavedModelBundleSourceAdapterConfig) Descriptor() ([]byte, []int)
Deprecated: Use SavedModelBundleSourceAdapterConfig.ProtoReflect.Descriptor instead.
func (*SavedModelBundleSourceAdapterConfig) GetLegacyConfig ¶
func (x *SavedModelBundleSourceAdapterConfig) GetLegacyConfig() *SessionBundleConfig
func (*SavedModelBundleSourceAdapterConfig) ProtoMessage ¶
func (*SavedModelBundleSourceAdapterConfig) ProtoMessage()
func (*SavedModelBundleSourceAdapterConfig) ProtoReflect ¶
func (x *SavedModelBundleSourceAdapterConfig) ProtoReflect() protoreflect.Message
func (*SavedModelBundleSourceAdapterConfig) Reset ¶
func (x *SavedModelBundleSourceAdapterConfig) Reset()
func (*SavedModelBundleSourceAdapterConfig) String ¶
func (x *SavedModelBundleSourceAdapterConfig) String() string
type SessionBundleConfig ¶
type SessionBundleConfig struct { // The TensorFlow runtime to connect to. // See full documentation in tensorflow/core/public/session_options.h. // // For single machine serving, we recommend using the empty string "", which // will configure the local TensorFlow runtime implementation. This provides // the best isolation currently available across multiple Session servables. SessionTarget string `protobuf:"bytes,1,opt,name=session_target,json=sessionTarget,proto3" json:"session_target,omitempty"` // TensorFlow Session configuration options. // See details at tensorflow/core/protobuf/config.proto. SessionConfig *protobuf.ConfigProto `protobuf:"bytes,2,opt,name=session_config,json=sessionConfig,proto3" json:"session_config,omitempty"` // If set, each emitted session is wrapped with a layer that schedules Run() // calls in batches. The batching layer is transparent to the client // (implements the tensorflow::Session API). // // IMPORTANT: With batching enabled, client threads will spend most of their // time blocked on Session::Run() calls, waiting for enough peer threads to // also call Session::Run() such that a large batch can be formed. For good // throughput, we recommend setting the number of client threads equal to // roughly twice the maximum batch size ('max_batch_size' below). // // The batching layer uses a SharedBatchScheduler to coordinate batching // across multiple session servables emitted by this source adapter. A // BatchSchedulerRetrier is added on top of each batching session. BatchingParameters *BatchingParameters `protobuf:"bytes,3,opt,name=batching_parameters,json=batchingParameters,proto3" json:"batching_parameters,omitempty"` // If set, session run calls use a separate threadpool for restore and init // ops as part of loading the session-bundle. The value of this field should // correspond to the index of the tensorflow::ThreadPoolOptionProto defined as // part of `session_config.session_inter_op_thread_pool`. 
SessionRunLoadThreadpoolIndex *wrapperspb.Int32Value `` /* 154-byte string literal not displayed */ // EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION. // // Transient memory used while loading a model, which is released once the // loading phase has completed. (This is on top of the memory used in steady- // state while the model is in memory after it has finished loading.) // // TODO(b/38376838): This is a temporary hack, and it applies to all models. // Remove it once resource estimates are moved inside SavedModel. ExperimentalTransientRamBytesDuringLoad uint64 `` /* 187-byte string literal not displayed */ // Set of SavedModel tags identifying the specific meta graph def to be // loaded. SavedModelTags []string `protobuf:"bytes,6,rep,name=saved_model_tags,json=savedModelTags,proto3" json:"saved_model_tags,omitempty"` // EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION. // // Input tensors to append to every Session::Run() call. ExperimentalFixedInputTensors []*protobuf.NamedTensorProto `` /* 154-byte string literal not displayed */ // Enables model warmup. EnableModelWarmup bool `protobuf:"varint,779,opt,name=enable_model_warmup,json=enableModelWarmup,proto3" json:"enable_model_warmup,omitempty"` ModelWarmupOptions *ModelWarmupOptions `protobuf:"bytes,780,opt,name=model_warmup_options,json=modelWarmupOptions,proto3" json:"model_warmup_options,omitempty"` // EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION. // // Enables passing in the SessionMetadata to the Session. The SessionMetadata // consists of information like the model name, version, which can then be // used by the TensorFlow runtime appropriately (for debugging, logging, etc). EnableSessionMetadata bool `` /* 129-byte string literal not displayed */ // EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION. // // Remove unused fields from metagraph proto message in SavedModelBundle. // This message is stored alongside the `Session` object. 
Removing unwanted // fields helps reduce memory footprint. RemoveUnusedFieldsFromBundleMetagraph bool `` /* 183-byte string literal not displayed */ // EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION. // // Prefer TensorFlow Lite model from `model.tflite` file in SavedModel // directory, instead of the TensorFlow model from `saved_model.pb` file. // If no TensorFlow Lite model found, fallback to TensorFlow model. PreferTfliteModel bool `protobuf:"varint,783,opt,name=prefer_tflite_model,json=preferTfliteModel,proto3" json:"prefer_tflite_model,omitempty"` // Tries to use infra validation result to estimate resource usage. ResourceEstimationUsesValidationResult bool `` /* 184-byte string literal not displayed */ // Deprecated: Marked as deprecated in tensorflow_serving/servables/tensorflow/session_bundle_config.proto. NumTfliteInterpreters int32 `` /* 129-byte string literal not displayed */ // EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION. // // Number of TFLite interpreters in an interpreter pool of TfLiteSession. NumTfliteInterpretersPerPool int32 `` /* 154-byte string literal not displayed */ // EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION. // // Number of TFLite interpreter pools in a TfLiteSession. NumTflitePools int32 `protobuf:"varint,787,opt,name=num_tflite_pools,json=numTflitePools,proto3" json:"num_tflite_pools,omitempty"` // EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION. // // Use SessionWrapperIgnoreThreadPoolOptions instead. WrapSessionWithNoThreadingParams bool `` /* 168-byte string literal not displayed */ // contains filtered or unexported fields }
Configuration parameters for a SessionBundle, with optional batching.
func (*SessionBundleConfig) Descriptor
deprecated
func (*SessionBundleConfig) Descriptor() ([]byte, []int)
Deprecated: Use SessionBundleConfig.ProtoReflect.Descriptor instead.
func (*SessionBundleConfig) GetBatchingParameters ¶
func (x *SessionBundleConfig) GetBatchingParameters() *BatchingParameters
func (*SessionBundleConfig) GetEnableModelWarmup ¶
func (x *SessionBundleConfig) GetEnableModelWarmup() bool
func (*SessionBundleConfig) GetEnableSessionMetadata ¶
func (x *SessionBundleConfig) GetEnableSessionMetadata() bool
func (*SessionBundleConfig) GetExperimentalFixedInputTensors ¶
func (x *SessionBundleConfig) GetExperimentalFixedInputTensors() []*protobuf.NamedTensorProto
func (*SessionBundleConfig) GetExperimentalTransientRamBytesDuringLoad ¶
func (x *SessionBundleConfig) GetExperimentalTransientRamBytesDuringLoad() uint64
func (*SessionBundleConfig) GetModelWarmupOptions ¶
func (x *SessionBundleConfig) GetModelWarmupOptions() *ModelWarmupOptions
func (*SessionBundleConfig) GetNumTfliteInterpreters
deprecated
func (x *SessionBundleConfig) GetNumTfliteInterpreters() int32
Deprecated: Marked as deprecated in tensorflow_serving/servables/tensorflow/session_bundle_config.proto.
func (*SessionBundleConfig) GetNumTfliteInterpretersPerPool ¶
func (x *SessionBundleConfig) GetNumTfliteInterpretersPerPool() int32
func (*SessionBundleConfig) GetNumTflitePools ¶
func (x *SessionBundleConfig) GetNumTflitePools() int32
func (*SessionBundleConfig) GetPreferTfliteModel ¶
func (x *SessionBundleConfig) GetPreferTfliteModel() bool
func (*SessionBundleConfig) GetRemoveUnusedFieldsFromBundleMetagraph ¶
func (x *SessionBundleConfig) GetRemoveUnusedFieldsFromBundleMetagraph() bool
func (*SessionBundleConfig) GetResourceEstimationUsesValidationResult ¶
func (x *SessionBundleConfig) GetResourceEstimationUsesValidationResult() bool
func (*SessionBundleConfig) GetSavedModelTags ¶
func (x *SessionBundleConfig) GetSavedModelTags() []string
func (*SessionBundleConfig) GetSessionConfig ¶
func (x *SessionBundleConfig) GetSessionConfig() *protobuf.ConfigProto
func (*SessionBundleConfig) GetSessionRunLoadThreadpoolIndex ¶
func (x *SessionBundleConfig) GetSessionRunLoadThreadpoolIndex() *wrapperspb.Int32Value
func (*SessionBundleConfig) GetSessionTarget ¶
func (x *SessionBundleConfig) GetSessionTarget() string
func (*SessionBundleConfig) GetWrapSessionWithNoThreadingParams ¶
func (x *SessionBundleConfig) GetWrapSessionWithNoThreadingParams() bool
func (*SessionBundleConfig) ProtoMessage ¶
func (*SessionBundleConfig) ProtoMessage()
func (*SessionBundleConfig) ProtoReflect ¶
func (x *SessionBundleConfig) ProtoReflect() protoreflect.Message
func (*SessionBundleConfig) Reset ¶
func (x *SessionBundleConfig) Reset()
func (*SessionBundleConfig) String ¶
func (x *SessionBundleConfig) String() string
type ThreadPoolFactoryConfig ¶
type ThreadPoolFactoryConfig struct { // The config proto for a ThreadPoolFactory in the ThreadPoolFactory registry. ThreadPoolFactoryConfig *anypb.Any `` /* 134-byte string literal not displayed */ // contains filtered or unexported fields }
Configuration for a thread pool factory.
func (*ThreadPoolFactoryConfig) Descriptor
deprecated
func (*ThreadPoolFactoryConfig) Descriptor() ([]byte, []int)
Deprecated: Use ThreadPoolFactoryConfig.ProtoReflect.Descriptor instead.
func (*ThreadPoolFactoryConfig) GetThreadPoolFactoryConfig ¶
func (x *ThreadPoolFactoryConfig) GetThreadPoolFactoryConfig() *anypb.Any
func (*ThreadPoolFactoryConfig) ProtoMessage ¶
func (*ThreadPoolFactoryConfig) ProtoMessage()
func (*ThreadPoolFactoryConfig) ProtoReflect ¶
func (x *ThreadPoolFactoryConfig) ProtoReflect() protoreflect.Message
func (*ThreadPoolFactoryConfig) Reset ¶
func (x *ThreadPoolFactoryConfig) Reset()
func (*ThreadPoolFactoryConfig) String ¶
func (x *ThreadPoolFactoryConfig) String() string