tensorflow_go_proto

package
v0.9.0
Published: Jul 25, 2024 License: Apache-2.0 Imports: 9 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

var (
	TfrtSavedModelConfig_TpuUnpaddedBatchMode_name = map[int32]string{
		0: "UNPADDED_BATCH_DISABLED",
		1: "UNPADDED_BATCH_AUTO",
		2: "UNPADDED_BATCH_ENFORCED",
	}
	TfrtSavedModelConfig_TpuUnpaddedBatchMode_value = map[string]int32{
		"UNPADDED_BATCH_DISABLED": 0,
		"UNPADDED_BATCH_AUTO":     1,
		"UNPADDED_BATCH_ENFORCED": 2,
	}
)

Enum value maps for TfrtSavedModelConfig_TpuUnpaddedBatchMode.

var (
	TfrtSavedModelConfig_PredictResponseTensorSerializationOption_name = map[int32]string{
		0: "AS_PROTO_DEFAULT",
		1: "AS_PROTO_CONTENT",
	}
	TfrtSavedModelConfig_PredictResponseTensorSerializationOption_value = map[string]int32{
		"AS_PROTO_DEFAULT": 0,
		"AS_PROTO_FIELD":   0,
		"AS_PROTO_CONTENT": 1,
	}
)

Enum value maps for TfrtSavedModelConfig_PredictResponseTensorSerializationOption.
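
These generated maps convert between enum numbers and names, for example when accepting a mode as a string from a config file. A minimal sketch; the `tfs` import alias and path are placeholders for wherever this package lives:

package main

import (
	"fmt"

	tfs "example.com/tensorflow_go_proto" // hypothetical import path for this package
)

func main() {
	// Number -> name via the generated _name map.
	fmt.Println(tfs.TfrtSavedModelConfig_TpuUnpaddedBatchMode_name[1]) // UNPADDED_BATCH_AUTO

	// Name -> number via the generated _value map; the ok flag guards
	// against unknown names.
	if v, ok := tfs.TfrtSavedModelConfig_TpuUnpaddedBatchMode_value["UNPADDED_BATCH_ENFORCED"]; ok {
		fmt.Println(tfs.TfrtSavedModelConfig_TpuUnpaddedBatchMode(v)) // UNPADDED_BATCH_ENFORCED
	}
}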

var File_tensorflow_serving_servables_tensorflow_remote_op_config_rewriter_proto protoreflect.FileDescriptor
var File_tensorflow_serving_servables_tensorflow_saved_model_bundle_source_adapter_proto protoreflect.FileDescriptor
var File_tensorflow_serving_servables_tensorflow_saved_model_config_proto protoreflect.FileDescriptor
var File_tensorflow_serving_servables_tensorflow_session_bundle_config_proto protoreflect.FileDescriptor
var File_tensorflow_serving_servables_tensorflow_tfrt_saved_model_source_adapter_proto protoreflect.FileDescriptor
var File_tensorflow_serving_servables_tensorflow_thread_pool_factory_config_proto protoreflect.FileDescriptor

Functions

This section is empty.

Types

type BatchingParameters

type BatchingParameters struct {

	// The maximum size of each input batch.
	//
	// IMPORTANT: As discussed above, use 'max_batch_size * 2' client threads to
	// achieve high throughput with batching.
	MaxBatchSize *wrapperspb.Int64Value `protobuf:"bytes,1,opt,name=max_batch_size,json=maxBatchSize,proto3" json:"max_batch_size,omitempty"`
	// If a task has been enqueued for this amount of time (in microseconds), and
	// a thread is available, the scheduler will immediately form a batch from
	// enqueued tasks and assign the batch to the thread for processing, even if
	// the batch's size is below 'max_batch_size'.
	BatchTimeoutMicros *wrapperspb.Int64Value `protobuf:"bytes,2,opt,name=batch_timeout_micros,json=batchTimeoutMicros,proto3" json:"batch_timeout_micros,omitempty"`
	// The maximum length of the queue, in terms of the number of batches. (A
	// batch that has been scheduled on a thread is considered to have been
	// removed from the queue.)
	MaxEnqueuedBatches *wrapperspb.Int64Value `protobuf:"bytes,3,opt,name=max_enqueued_batches,json=maxEnqueuedBatches,proto3" json:"max_enqueued_batches,omitempty"`
	// The number of threads to use to process batches.
	// Must be >= 1, and should be tuned carefully.
	NumBatchThreads *wrapperspb.Int64Value `protobuf:"bytes,4,opt,name=num_batch_threads,json=numBatchThreads,proto3" json:"num_batch_threads,omitempty"`
	// The name to use for the pool of batch threads.
	ThreadPoolName *wrapperspb.StringValue `protobuf:"bytes,5,opt,name=thread_pool_name,json=threadPoolName,proto3" json:"thread_pool_name,omitempty"`
	// If true, the queue implementation will split one input batch task into
	// subtasks (as specified by `split_input_task_func` below) and fit the
	// subtasks into different batches.
	EnableLargeBatchSplitting *wrapperspb.BoolValue `` /* 140-byte string literal not displayed */
	// The maximum size of each enqueued batch to be processed (i.e., in
	// `batches_`). Relevant only if enable_large_batch_splitting is true. When
	// relevant, 'max_batch_size' should be greater than or equal to
	// `max_execution_batch_size`.
	//
	// The scheduler may form batches of any size between 1 and this number
	// (inclusive).
	MaxExecutionBatchSize *wrapperspb.Int64Value `` /* 128-byte string literal not displayed */
	// The allowed batch sizes. (Ignored if left empty.)
	// Requirements:
	//   - The entries must be in increasing order.
	//   - The final entry must equal 'max_batch_size'.
	AllowedBatchSizes []int64 `protobuf:"varint,6,rep,packed,name=allowed_batch_sizes,json=allowedBatchSizes,proto3" json:"allowed_batch_sizes,omitempty"`
	// Whether to pad variable-length inputs when a batch is formed.
	PadVariableLengthInputs bool `` /* 135-byte string literal not displayed */
	// contains filtered or unexported fields
}

Batching parameters. Each individual parameter is optional. If omitted, the default value from the relevant batching config struct (SharedBatchScheduler::Options or BatchSchedulerRetrier::Options) is used.
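
Every field here is a wrapper type, so an unset field is distinguishable from an explicit zero and falls back to the scheduler default. A minimal sketch of populating the message with the `wrapperspb` helpers; the values and the `tfs` import alias are illustrative:

package main

import (
	"google.golang.org/protobuf/types/known/wrapperspb"

	tfs "example.com/tensorflow_go_proto" // hypothetical import path
)

// newBatchingParameters shows how the optional wrapper fields are populated;
// fields left nil fall back to the batching config struct defaults. Values
// are illustrative, not tuning advice.
func newBatchingParameters() *tfs.BatchingParameters {
	return &tfs.BatchingParameters{
		MaxBatchSize:       wrapperspb.Int64(32),
		BatchTimeoutMicros: wrapperspb.Int64(5000), // 5 ms
		MaxEnqueuedBatches: wrapperspb.Int64(100),
		NumBatchThreads:    wrapperspb.Int64(4),
		// Entries must be increasing and the last must equal max_batch_size.
		AllowedBatchSizes: []int64{8, 16, 32},
	}
}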

func (*BatchingParameters) Descriptor deprecated

func (*BatchingParameters) Descriptor() ([]byte, []int)

Deprecated: Use BatchingParameters.ProtoReflect.Descriptor instead.

func (*BatchingParameters) GetAllowedBatchSizes

func (x *BatchingParameters) GetAllowedBatchSizes() []int64

func (*BatchingParameters) GetBatchTimeoutMicros

func (x *BatchingParameters) GetBatchTimeoutMicros() *wrapperspb.Int64Value

func (*BatchingParameters) GetEnableLargeBatchSplitting

func (x *BatchingParameters) GetEnableLargeBatchSplitting() *wrapperspb.BoolValue

func (*BatchingParameters) GetMaxBatchSize

func (x *BatchingParameters) GetMaxBatchSize() *wrapperspb.Int64Value

func (*BatchingParameters) GetMaxEnqueuedBatches

func (x *BatchingParameters) GetMaxEnqueuedBatches() *wrapperspb.Int64Value

func (*BatchingParameters) GetMaxExecutionBatchSize

func (x *BatchingParameters) GetMaxExecutionBatchSize() *wrapperspb.Int64Value

func (*BatchingParameters) GetNumBatchThreads

func (x *BatchingParameters) GetNumBatchThreads() *wrapperspb.Int64Value

func (*BatchingParameters) GetPadVariableLengthInputs

func (x *BatchingParameters) GetPadVariableLengthInputs() bool

func (*BatchingParameters) GetThreadPoolName

func (x *BatchingParameters) GetThreadPoolName() *wrapperspb.StringValue

func (*BatchingParameters) ProtoMessage

func (*BatchingParameters) ProtoMessage()

func (*BatchingParameters) ProtoReflect

func (x *BatchingParameters) ProtoReflect() protoreflect.Message

func (*BatchingParameters) Reset

func (x *BatchingParameters) Reset()

func (*BatchingParameters) String

func (x *BatchingParameters) String() string

type ModelWarmupOptions

type ModelWarmupOptions struct {

	// Number of times a request is iterated during warmup replay. By default 1.
	NumRequestIterations *wrapperspb.Int32Value `protobuf:"bytes,1,opt,name=num_request_iterations,json=numRequestIterations,proto3" json:"num_request_iterations,omitempty"`
	// The number of threads used to execute warmup queries in parallel. By default 1.
	NumModelWarmupThreads *wrapperspb.Int32Value `` /* 128-byte string literal not displayed */
	// Model name.
	ModelName string `protobuf:"bytes,3,opt,name=model_name,json=modelName,proto3" json:"model_name,omitempty"`
	// Model version.
	ModelVersion int64 `protobuf:"varint,4,opt,name=model_version,json=modelVersion,proto3" json:"model_version,omitempty"`
	// If true, warmup queries initiate parallel (dummy) warmup queries for
	// each of the `allowed_batch_sizes` of supported batch ops.
	// The extra queries' outputs are not returned.
	EnableAllBatchSizesWarmup bool `` /* 143-byte string literal not displayed */
	// contains filtered or unexported fields
}

Options related to model-warmup.
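
A minimal sketch of a warmup configuration, with illustrative values and the same hypothetical `tfs` import alias as above:

package main

import (
	"google.golang.org/protobuf/types/known/wrapperspb"

	tfs "example.com/tensorflow_go_proto" // hypothetical import path
)

// A hypothetical warmup setup: replay each warmup request twice on two
// threads, and also issue dummy queries for every allowed batch size.
var warmupOptions = &tfs.ModelWarmupOptions{
	NumRequestIterations:      wrapperspb.Int32(2),
	NumModelWarmupThreads:     wrapperspb.Int32(2),
	EnableAllBatchSizesWarmup: true,
}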

func (*ModelWarmupOptions) Descriptor deprecated

func (*ModelWarmupOptions) Descriptor() ([]byte, []int)

Deprecated: Use ModelWarmupOptions.ProtoReflect.Descriptor instead.

func (*ModelWarmupOptions) GetEnableAllBatchSizesWarmup

func (x *ModelWarmupOptions) GetEnableAllBatchSizesWarmup() bool

func (*ModelWarmupOptions) GetModelName

func (x *ModelWarmupOptions) GetModelName() string

func (*ModelWarmupOptions) GetModelVersion

func (x *ModelWarmupOptions) GetModelVersion() int64

func (*ModelWarmupOptions) GetNumModelWarmupThreads

func (x *ModelWarmupOptions) GetNumModelWarmupThreads() *wrapperspb.Int32Value

func (*ModelWarmupOptions) GetNumRequestIterations

func (x *ModelWarmupOptions) GetNumRequestIterations() *wrapperspb.Int32Value

func (*ModelWarmupOptions) ProtoMessage

func (*ModelWarmupOptions) ProtoMessage()

func (*ModelWarmupOptions) ProtoReflect

func (x *ModelWarmupOptions) ProtoReflect() protoreflect.Message

func (*ModelWarmupOptions) Reset

func (x *ModelWarmupOptions) Reset()

func (*ModelWarmupOptions) String

func (x *ModelWarmupOptions) String() string

type RemoteOpRemapConfig

type RemoteOpRemapConfig struct {

	// Map from placeholder target model_name to actual target model_name.
	ModelNameRemap map[string]string `` /* 193-byte string literal not displayed */
	// Map from placeholder target_address to actual target_address.
	TargetAddressRemap map[string]string `` /* 205-byte string literal not displayed */
	// Map from model name to actual target_address. This is only used when
	// target_address_remap is not set. When model_name_remap is set, the
	// model name here is the name BEFORE the rewrite; it is the backend
	// model name.
	BackendModelNameToTargetAddressRemap map[string]string `` /* 267-byte string literal not displayed */
	// If true, version will be propagated from SessionMetadata.version.
	// See: http://shortn/_XDBisC2j9k
	// Requires enable_tf_session_metadata = true.
	PropagateVersion bool `protobuf:"varint,4,opt,name=propagate_version,json=propagateVersion,proto3" json:"propagate_version,omitempty"`
	// If true, model name will be propagated from SessionMetadata.name.
	// See: http://shortn/_XDBisC2j9k
	// Requires enable_tf_session_metadata = true. This takes precedence
	// over remapping from model_name_remap.
	PropagateModelName bool `protobuf:"varint,5,opt,name=propagate_model_name,json=propagateModelName,proto3" json:"propagate_model_name,omitempty"`
	// contains filtered or unexported fields
}

Config for the remote op rewriter. This should be serialized/encoded and set as a param in RewriterConfig with key kRemoteOpRewriteConfigParamKey.
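
Since consumers read this config as a serialized param, it would be marshaled with `proto.Marshal` before being attached to the RewriterConfig. A sketch under that assumption, with illustrative placeholder names:

package main

import (
	"log"

	"google.golang.org/protobuf/proto"

	tfs "example.com/tensorflow_go_proto" // hypothetical import path
)

// serializedRemapConfig marshals an illustrative remap config. The resulting
// bytes would be set as the RewriterConfig param under
// kRemoteOpRewriteConfigParamKey (on the serving side, not in this package).
func serializedRemapConfig() []byte {
	cfg := &tfs.RemoteOpRemapConfig{
		// Placeholder -> actual names; all values here are made up.
		ModelNameRemap:     map[string]string{"placeholder_model": "prod_model"},
		TargetAddressRemap: map[string]string{"placeholder_addr": "10.0.0.1:8500"},
	}
	b, err := proto.Marshal(cfg)
	if err != nil {
		log.Fatalf("marshal RemoteOpRemapConfig: %v", err)
	}
	return b
}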

func (*RemoteOpRemapConfig) Descriptor deprecated

func (*RemoteOpRemapConfig) Descriptor() ([]byte, []int)

Deprecated: Use RemoteOpRemapConfig.ProtoReflect.Descriptor instead.

func (*RemoteOpRemapConfig) GetBackendModelNameToTargetAddressRemap

func (x *RemoteOpRemapConfig) GetBackendModelNameToTargetAddressRemap() map[string]string

func (*RemoteOpRemapConfig) GetModelNameRemap

func (x *RemoteOpRemapConfig) GetModelNameRemap() map[string]string

func (*RemoteOpRemapConfig) GetPropagateModelName

func (x *RemoteOpRemapConfig) GetPropagateModelName() bool

func (*RemoteOpRemapConfig) GetPropagateVersion

func (x *RemoteOpRemapConfig) GetPropagateVersion() bool

func (*RemoteOpRemapConfig) GetTargetAddressRemap

func (x *RemoteOpRemapConfig) GetTargetAddressRemap() map[string]string

func (*RemoteOpRemapConfig) ProtoMessage

func (*RemoteOpRemapConfig) ProtoMessage()

func (*RemoteOpRemapConfig) ProtoReflect

func (x *RemoteOpRemapConfig) ProtoReflect() protoreflect.Message

func (*RemoteOpRemapConfig) Reset

func (x *RemoteOpRemapConfig) Reset()

func (*RemoteOpRemapConfig) String

func (x *RemoteOpRemapConfig) String() string

type SavedModelBundleSourceAdapterConfig

type SavedModelBundleSourceAdapterConfig struct {

	// A SessionBundleConfig.
	// FOR INTERNAL USE ONLY DURING TRANSITION TO SAVED_MODEL. WILL BE DEPRECATED.
	// TODO(b/32248363): Replace this field with the "real" field(s).
	LegacyConfig *SessionBundleConfig `protobuf:"bytes,1000,opt,name=legacy_config,json=legacyConfig,proto3" json:"legacy_config,omitempty"`
	// contains filtered or unexported fields
}

Config proto for SavedModelBundleSourceAdapter.

func (*SavedModelBundleSourceAdapterConfig) Descriptor deprecated

func (*SavedModelBundleSourceAdapterConfig) Descriptor() ([]byte, []int)

Deprecated: Use SavedModelBundleSourceAdapterConfig.ProtoReflect.Descriptor instead.

func (*SavedModelBundleSourceAdapterConfig) GetLegacyConfig

func (*SavedModelBundleSourceAdapterConfig) ProtoMessage

func (*SavedModelBundleSourceAdapterConfig) ProtoMessage()

func (*SavedModelBundleSourceAdapterConfig) ProtoReflect

func (*SavedModelBundleSourceAdapterConfig) Reset

func (*SavedModelBundleSourceAdapterConfig) String

type SavedModelConfig

type SavedModelConfig struct {

	// A select set of fields from SessionOptions which, at the model level, can
	// be used to override the SessionOptions set for the entire process.
	SessionOverrides *SessionOverrides `protobuf:"bytes,1,opt,name=session_overrides,json=sessionOverrides,proto3,oneof" json:"session_overrides,omitempty"`
	// Runtime-specific configuration proto, which can be used to specify the
	// environment for the runtime.
	TfrtRuntimeConfig *graph_executor_go_proto.RuntimeConfigProto `protobuf:"bytes,2,opt,name=tfrt_runtime_config,json=tfrtRuntimeConfig,proto3,oneof" json:"tfrt_runtime_config,omitempty"`
	// A boolean field that indicates whether the model is critical, i.e.,
	// whether the entire server cannot serve requests until this model has
	// been loaded.
	Critical bool `protobuf:"varint,3,opt,name=critical,proto3" json:"critical,omitempty"`
	// contains filtered or unexported fields
}
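
A minimal sketch of a per-model override config, marking the model critical and remapping one remote-op target address (values illustrative, `tfs` is a hypothetical import alias):

package main

import (
	tfs "example.com/tensorflow_go_proto" // hypothetical import path
)

// A hypothetical per-model config: mark the model critical and override one
// remote-op target address for this model only.
var perModelConfig = &tfs.SavedModelConfig{
	Critical: true,
	SessionOverrides: &tfs.SessionOverrides{
		RemoteOpRemapConfig: &tfs.RemoteOpRemapConfig{
			TargetAddressRemap: map[string]string{"placeholder_addr": "10.0.0.2:8500"},
		},
	},
}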

func (*SavedModelConfig) Descriptor deprecated

func (*SavedModelConfig) Descriptor() ([]byte, []int)

Deprecated: Use SavedModelConfig.ProtoReflect.Descriptor instead.

func (*SavedModelConfig) GetCritical

func (x *SavedModelConfig) GetCritical() bool

func (*SavedModelConfig) GetSessionOverrides

func (x *SavedModelConfig) GetSessionOverrides() *SessionOverrides

func (*SavedModelConfig) GetTfrtRuntimeConfig

func (x *SavedModelConfig) GetTfrtRuntimeConfig() *graph_executor_go_proto.RuntimeConfigProto

func (*SavedModelConfig) ProtoMessage

func (*SavedModelConfig) ProtoMessage()

func (*SavedModelConfig) ProtoReflect

func (x *SavedModelConfig) ProtoReflect() protoreflect.Message

func (*SavedModelConfig) Reset

func (x *SavedModelConfig) Reset()

func (*SavedModelConfig) String

func (x *SavedModelConfig) String() string

type SessionBundleConfig

type SessionBundleConfig struct {

	// The TensorFlow runtime to connect to.
	// See full documentation in tensorflow/core/public/session_options.h.
	//
	// For single machine serving, we recommend using the empty string "", which
	// will configure the local TensorFlow runtime implementation. This provides
	// the best isolation currently available across multiple Session servables.
	SessionTarget string `protobuf:"bytes,1,opt,name=session_target,json=sessionTarget,proto3" json:"session_target,omitempty"`
	// TensorFlow Session configuration options.
	// See details at tensorflow/core/protobuf/config.proto.
	SessionConfig *for_core_protos_go_proto.ConfigProto `protobuf:"bytes,2,opt,name=session_config,json=sessionConfig,proto3" json:"session_config,omitempty"`
	// If set, each emitted session is wrapped with a layer that schedules Run()
	// calls in batches. The batching layer is transparent to the client
	// (implements the tensorflow::Session API).
	//
	// IMPORTANT: With batching enabled, client threads will spend most of their
	// time blocked on Session::Run() calls, waiting for enough peer threads to
	// also call Session::Run() such that a large batch can be formed. For good
	// throughput, we recommend setting the number of client threads equal to
	// roughly twice the maximum batch size ('max_batch_size' below).
	//
	// The batching layer uses a SharedBatchScheduler to coordinate batching
	// across multiple session servables emitted by this source adapter. A
	// BatchSchedulerRetrier is added on top of each batching session.
	BatchingParameters *BatchingParameters `protobuf:"bytes,3,opt,name=batching_parameters,json=batchingParameters,proto3" json:"batching_parameters,omitempty"`
	// If set, session run calls use a separate threadpool for restore and init
	// ops as part of loading the session-bundle. The value of this field should
	// correspond to the index of the tensorflow::ThreadPoolOptionProto defined as
	// part of `session_config.session_inter_op_thread_pool`.
	SessionRunLoadThreadpoolIndex *wrapperspb.Int32Value `` /* 154-byte string literal not displayed */
	// EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
	//
	// Transient memory used while loading a model, which is released once the
	// loading phase has completed. (This is on top of the memory used in steady-
	// state while the model is in memory after it has finished loading.)
	//
	// TODO(b/38376838): This is a temporary hack, and it applies to all models.
	// Remove it once resource estimates are moved inside SavedModel.
	ExperimentalTransientRamBytesDuringLoad uint64 `` /* 187-byte string literal not displayed */
	// Set of SavedModel tags identifying the specific meta graph def to be
	// loaded.
	SavedModelTags []string `protobuf:"bytes,6,rep,name=saved_model_tags,json=savedModelTags,proto3" json:"saved_model_tags,omitempty"`
	// EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
	//
	// Input tensors to append to every Session::Run() call.
	ExperimentalFixedInputTensors []*for_core_protos_go_proto.NamedTensorProto `` /* 154-byte string literal not displayed */
	// Enables model warmup.
	EnableModelWarmup  bool                `protobuf:"varint,779,opt,name=enable_model_warmup,json=enableModelWarmup,proto3" json:"enable_model_warmup,omitempty"`
	ModelWarmupOptions *ModelWarmupOptions `protobuf:"bytes,780,opt,name=model_warmup_options,json=modelWarmupOptions,proto3" json:"model_warmup_options,omitempty"`
	// EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
	//
	// Enables passing in the SessionMetadata to the Session. The SessionMetadata
	// consists of information such as the model name and version, which can
	// then be used by the TensorFlow runtime appropriately (for debugging,
	// logging, etc.).
	EnableSessionMetadata bool `` /* 129-byte string literal not displayed */
	// EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
	//
	// Remove unused fields from metagraph proto message in SavedModelBundle.
	// This message is stored alongside the `Session` object. Removing unwanted
	// fields helps reduce memory footprint.
	RemoveUnusedFieldsFromBundleMetagraph bool `` /* 183-byte string literal not displayed */
	// EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
	//
	// Prefer the TensorFlow Lite model from the `model.tflite` file in the
	// SavedModel directory, instead of the TensorFlow model from the
	// `saved_model.pb` file. If no TensorFlow Lite model is found, fall back
	// to the TensorFlow model.
	PreferTfliteModel bool `protobuf:"varint,783,opt,name=prefer_tflite_model,json=preferTfliteModel,proto3" json:"prefer_tflite_model,omitempty"`
	// Tries to use infra validation result to estimate resource usage.
	ResourceEstimationUsesValidationResult bool `` /* 184-byte string literal not displayed */
	// Deprecated: Marked as deprecated in tensorflow/serving/servables/tensorflow/session_bundle_config.proto.
	NumTfliteInterpreters int32 `` /* 129-byte string literal not displayed */
	// EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
	//
	// Number of TFLite interpreters in an interpreter pool of TfLiteSession.
	NumTfliteInterpretersPerPool int32 `` /* 154-byte string literal not displayed */
	// EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
	//
	// Number of TFLite interpreter pools in a TfLiteSession.
	NumTflitePools int32 `protobuf:"varint,787,opt,name=num_tflite_pools,json=numTflitePools,proto3" json:"num_tflite_pools,omitempty"`
	// EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
	//
	// Use SessionWrapperIgnoreThreadPoolOptions instead.
	WrapSessionWithNoThreadingParams bool `` /* 168-byte string literal not displayed */
	// EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
	//
	// Enable per-model batching parameters (present in SavedModel). If this
	// option is enabled, model-specific batching params (e.g. timeout, batch
	// sizes, etc.) from the `batching_parameters` field above are *ignored*
	// and the ones in the SavedModel directory are used instead. This field
	// is only used if batching is enabled (i.e. the `batching_parameters`
	// message above is set).
	EnablePerModelBatchingParams bool `` /* 154-byte string literal not displayed */
	// EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
	//
	// Enable overriding model configs via assets.extra/saved_model_config.pb.
	EnableSavedModelConfig bool `` /* 134-byte string literal not displayed */
	// The bf16 mixed_precision option.
	MixedPrecision string `protobuf:"bytes,791,opt,name=mixed_precision,json=mixedPrecision,proto3" json:"mixed_precision,omitempty"`
	// contains filtered or unexported fields
}

Configuration parameters for a SessionBundle, with optional batching.
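
A minimal sketch tying the pieces together: the empty session_target selects the local runtime, batching is enabled, and warmup is turned on. Field values are illustrative, not recommendations, and `tfs` remains a hypothetical import alias:

package main

import (
	"google.golang.org/protobuf/types/known/wrapperspb"

	tfs "example.com/tensorflow_go_proto" // hypothetical import path
)

// A hypothetical bundle config. With max_batch_size = 16, clients would run
// roughly 32 threads per the guidance in the field comments above.
var bundleConfig = &tfs.SessionBundleConfig{
	SessionTarget: "", // local TensorFlow runtime
	BatchingParameters: &tfs.BatchingParameters{
		MaxBatchSize:       wrapperspb.Int64(16),
		BatchTimeoutMicros: wrapperspb.Int64(2000),
	},
	SavedModelTags:    []string{"serve"},
	EnableModelWarmup: true,
}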

func (*SessionBundleConfig) Descriptor deprecated

func (*SessionBundleConfig) Descriptor() ([]byte, []int)

Deprecated: Use SessionBundleConfig.ProtoReflect.Descriptor instead.

func (*SessionBundleConfig) GetBatchingParameters

func (x *SessionBundleConfig) GetBatchingParameters() *BatchingParameters

func (*SessionBundleConfig) GetEnableModelWarmup

func (x *SessionBundleConfig) GetEnableModelWarmup() bool

func (*SessionBundleConfig) GetEnablePerModelBatchingParams

func (x *SessionBundleConfig) GetEnablePerModelBatchingParams() bool

func (*SessionBundleConfig) GetEnableSavedModelConfig

func (x *SessionBundleConfig) GetEnableSavedModelConfig() bool

func (*SessionBundleConfig) GetEnableSessionMetadata

func (x *SessionBundleConfig) GetEnableSessionMetadata() bool

func (*SessionBundleConfig) GetExperimentalFixedInputTensors

func (x *SessionBundleConfig) GetExperimentalFixedInputTensors() []*for_core_protos_go_proto.NamedTensorProto

func (*SessionBundleConfig) GetExperimentalTransientRamBytesDuringLoad

func (x *SessionBundleConfig) GetExperimentalTransientRamBytesDuringLoad() uint64

func (*SessionBundleConfig) GetMixedPrecision

func (x *SessionBundleConfig) GetMixedPrecision() string

func (*SessionBundleConfig) GetModelWarmupOptions

func (x *SessionBundleConfig) GetModelWarmupOptions() *ModelWarmupOptions

func (*SessionBundleConfig) GetNumTfliteInterpreters deprecated

func (x *SessionBundleConfig) GetNumTfliteInterpreters() int32

Deprecated: Marked as deprecated in tensorflow/serving/servables/tensorflow/session_bundle_config.proto.

func (*SessionBundleConfig) GetNumTfliteInterpretersPerPool

func (x *SessionBundleConfig) GetNumTfliteInterpretersPerPool() int32

func (*SessionBundleConfig) GetNumTflitePools

func (x *SessionBundleConfig) GetNumTflitePools() int32

func (*SessionBundleConfig) GetPreferTfliteModel

func (x *SessionBundleConfig) GetPreferTfliteModel() bool

func (*SessionBundleConfig) GetRemoveUnusedFieldsFromBundleMetagraph

func (x *SessionBundleConfig) GetRemoveUnusedFieldsFromBundleMetagraph() bool

func (*SessionBundleConfig) GetResourceEstimationUsesValidationResult

func (x *SessionBundleConfig) GetResourceEstimationUsesValidationResult() bool

func (*SessionBundleConfig) GetSavedModelTags

func (x *SessionBundleConfig) GetSavedModelTags() []string

func (*SessionBundleConfig) GetSessionConfig

func (*SessionBundleConfig) GetSessionRunLoadThreadpoolIndex

func (x *SessionBundleConfig) GetSessionRunLoadThreadpoolIndex() *wrapperspb.Int32Value

func (*SessionBundleConfig) GetSessionTarget

func (x *SessionBundleConfig) GetSessionTarget() string

func (*SessionBundleConfig) GetWrapSessionWithNoThreadingParams

func (x *SessionBundleConfig) GetWrapSessionWithNoThreadingParams() bool

func (*SessionBundleConfig) ProtoMessage

func (*SessionBundleConfig) ProtoMessage()

func (*SessionBundleConfig) ProtoReflect

func (x *SessionBundleConfig) ProtoReflect() protoreflect.Message

func (*SessionBundleConfig) Reset

func (x *SessionBundleConfig) Reset()

func (*SessionBundleConfig) String

func (x *SessionBundleConfig) String() string

type SessionOverrides

type SessionOverrides struct {
	RemoteOpRemapConfig   *RemoteOpRemapConfig                     `` /* 128-byte string literal not displayed */
	BatchOpRewriterConfig *inference_go_proto.BatchOpRewriteConfig `` /* 134-byte string literal not displayed */
	// contains filtered or unexported fields
}

func (*SessionOverrides) Descriptor deprecated

func (*SessionOverrides) Descriptor() ([]byte, []int)

Deprecated: Use SessionOverrides.ProtoReflect.Descriptor instead.

func (*SessionOverrides) GetBatchOpRewriterConfig

func (x *SessionOverrides) GetBatchOpRewriterConfig() *inference_go_proto.BatchOpRewriteConfig

func (*SessionOverrides) GetRemoteOpRemapConfig

func (x *SessionOverrides) GetRemoteOpRemapConfig() *RemoteOpRemapConfig

func (*SessionOverrides) ProtoMessage

func (*SessionOverrides) ProtoMessage()

func (*SessionOverrides) ProtoReflect

func (x *SessionOverrides) ProtoReflect() protoreflect.Message

func (*SessionOverrides) Reset

func (x *SessionOverrides) Reset()

func (*SessionOverrides) String

func (x *SessionOverrides) String() string

type TfrtSavedModelConfig

type TfrtSavedModelConfig struct {

	// A SessionBundleConfig.
	// Deprecated.
	LegacyConfig *SessionBundleConfig `protobuf:"bytes,1000,opt,name=legacy_config,json=legacyConfig,proto3" json:"legacy_config,omitempty"`
	// If true, native ops will be used if they are implemented in TFRT. If
	// false, all ops use the fallback.
	//
	// Deprecated: Marked as deprecated in tensorflow/serving/servables/tensorflow/tfrt_saved_model_source_adapter.proto.
	EnableNativeOps bool `protobuf:"varint,1001,opt,name=enable_native_ops,json=enableNativeOps,proto3" json:"enable_native_ops,omitempty"`
	// If true, the lite version of the MLIR importer will be used.
	UseLiteMlirImporter bool `` /* 126-byte string literal not displayed */
	// If true, enable grappler for TFRT.
	EnableGrappler bool `protobuf:"varint,1003,opt,name=enable_grappler,json=enableGrappler,proto3" json:"enable_grappler,omitempty"`
	// If true, when creating an optimized subgraph, Placer and Grappler will
	// also run on the functions.
	RunPlacerGrapplerOnFunctions bool `` /* 155-byte string literal not displayed */
	// If true, the function optimizer in the grappler will be enabled, and
	// optimizations like function inlining will be applied.
	EnableGrapplerFunctionOptimizer bool `` /* 162-byte string literal not displayed */
	// If true, for each client graph, the op costs of the first request will be
	// recorded and used to re-compile the client graph.
	EnableOnlineCostAnalysis bool `` /* 141-byte string literal not displayed */
	// If true, the TFRT runtime will use the new interpreter for host execution.
	EnableMlrt bool `protobuf:"varint,1030,opt,name=enable_mlrt,json=enableMlrt,proto3" json:"enable_mlrt,omitempty"`
	// If true, TFRT will perform GPU related compiler passes.
	EnableTfrtGpu bool `protobuf:"varint,1024,opt,name=enable_tfrt_gpu,json=enableTfrtGpu,proto3" json:"enable_tfrt_gpu,omitempty"`
	// If true, TFRT will use TPU specific compiler passes and perform TPU
	// specific initialization.
	TargetTpu bool `protobuf:"varint,1004,opt,name=target_tpu,json=targetTpu,proto3" json:"target_tpu,omitempty"`
	// If true, resource loading will happen during model loading instead of
	// inference time.
	HoistInvariantOps bool `protobuf:"varint,1005,opt,name=hoist_invariant_ops,json=hoistInvariantOps,proto3" json:"hoist_invariant_ops,omitempty"`
	// If true, sink the resource handle into the nested function to facilitate
	// invariant ops hoisting.
	SinkInInvariantOps bool `protobuf:"varint,1028,opt,name=sink_in_invariant_ops,json=sinkInInvariantOps,proto3" json:"sink_in_invariant_ops,omitempty"`
	// The threshold for merging small streams in TFRT. Streams with cost
	// smaller than the threshold will be merged.
	StreamMergeThreshold int64 `` /* 127-byte string literal not displayed */
	// The upper cost bound for merging dependent streams.
	StreamMergeUpperCostThreshold int64 `` /* 158-byte string literal not displayed */
	// If true, streams with inter-data dependencies will be merged.
	MergeInterDependentStreams bool `` /* 147-byte string literal not displayed */
	// Options that apply to all graphs.
	GraphOptions *for_core_protos_go_proto.GraphOptions `protobuf:"bytes,1006,opt,name=graph_options,json=graphOptions,proto3" json:"graph_options,omitempty"`
	// If set, each emitted saved model is wrapped with a layer that schedules
	// Run() calls in batches. The batching layer is transparent to the client
	// (implements the tfrt::SavedModel API).
	//
	// IMPORTANT: With batching enabled, client threads will spend most of their
	// time blocked on SavedModel::Run() calls, waiting for enough peer threads to
	// also call SavedModel::Run() such that a large batch can be formed. For good
	// throughput, we recommend setting the number of client threads equal to
	// roughly twice the maximum batch size ('max_batch_size' below).
	//
	// The batching layer uses a SharedBatchScheduler to coordinate batching
	// across multiple saved model servables emitted by this source adapter. A
	// BatchSchedulerRetrier is added on top of each batching saved model.
	BatchingParameters *BatchingParameters `protobuf:"bytes,1007,opt,name=batching_parameters,json=batchingParameters,proto3" json:"batching_parameters,omitempty"`
	// Set of SavedModel tags identifying the specific meta graph def to be
	// loaded.
	SavedModelTags []string `protobuf:"bytes,1008,rep,name=saved_model_tags,json=savedModelTags,proto3" json:"saved_model_tags,omitempty"`
	// Enables model warmup.
	EnableModelWarmup  bool                `protobuf:"varint,1009,opt,name=enable_model_warmup,json=enableModelWarmup,proto3" json:"enable_model_warmup,omitempty"`
	ModelWarmupOptions *ModelWarmupOptions `protobuf:"bytes,1010,opt,name=model_warmup_options,json=modelWarmupOptions,proto3" json:"model_warmup_options,omitempty"`
	// Tries to use infra validation result to estimate resource usage.
	ResourceEstimationUsesValidationResult bool `` /* 185-byte string literal not displayed */
	// If the model contains more than lazy_init_threshold signature defs, use
	// lazy initialization, as it is likely that not all signatures are used
	// for serving.
	LazyInitThreshold int32 `protobuf:"varint,1013,opt,name=lazy_init_threshold,json=lazyInitThreshold,proto3" json:"lazy_init_threshold,omitempty"`
	// If true, we'll attempt to find an MLArchive within the model source
	// path. If no MLArchive is found, the path is used as a normal SavedModel
	// directory.
	MaybeLoadFromMla bool `protobuf:"varint,1025,opt,name=maybe_load_from_mla,json=maybeLoadFromMla,proto3" json:"maybe_load_from_mla,omitempty"`
	// If true, the MIRA implementation of the SavedModel API is used for
	// loading and running the model.
	UseMira bool `protobuf:"varint,1027,opt,name=use_mira,json=useMira,proto3" json:"use_mira,omitempty"`
	// If true, skip the warmup requests if the signature def is initialized.
	SkipWarmupRequestsIfInitialized bool `` /* 164-byte string literal not displayed */
	// If true, move resource gather op to host side.
	TpuMoveResourceGatherToHost bool `` /* 154-byte string literal not displayed */
	// If tpu_move_resource_gather_to_host is true and the width of the gather
	// table is less than the threshold, it will be placed on the host.
	TpuGatherTableWidthThresholdBytes int32 `` /* 172-byte string literal not displayed */
	// If true, use fused ops for TPU compile, execute and data transfer.
	UseFusedTpuOp bool `protobuf:"varint,1018,opt,name=use_fused_tpu_op,json=useFusedTpuOp,proto3" json:"use_fused_tpu_op,omitempty"`
	// If true, tf.While's iterations will be parallelized.
	EnableWhileParallelIterations bool `` /* 156-byte string literal not displayed */
	// If true, fallback ExecuteOps that produce inputs to the TPU program
	// will use the TPU host allocator.
	UseTpuHostAllocatorForInputs bool `` /* 157-byte string literal not displayed */
	// Enables model specific runtime params described in saved model's
	// assets.extra/saved_model_config.pb file.
	EnableSavedModelConfig bool `` /* 135-byte string literal not displayed */
	// These are no longer used.
	//
	// Deprecated: Marked as deprecated in tensorflow/serving/servables/tensorflow/tfrt_saved_model_source_adapter.proto.
	AutoFusionOp []string `protobuf:"bytes,2006,rep,name=auto_fusion_op,json=autoFusionOp,proto3" json:"auto_fusion_op,omitempty"`
	// Deprecated: Marked as deprecated in tensorflow/serving/servables/tensorflow/tfrt_saved_model_source_adapter.proto.
	AutoFusionMinClusterSize int32 `` /* 143-byte string literal not displayed */
	// EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
	//
	// Enable per-model batching parameters (present in SavedModel). If this
	// option is enabled, model-specific batching params (e.g. timeout, batch
	// sizes, etc.) from the `batching_parameters` field above are *ignored*
	// and the ones in the SavedModel directory are used instead. This field
	// is only used if batching is enabled (i.e. the `batching_parameters`
	// message above is set).
	EnablePerModelBatchingParams bool `` /* 155-byte string literal not displayed */
	// If true, the lazy loading path will use tfrt_stub::GraphExecutor.
	LazyLoadingUseGraphExecutor bool `` /* 152-byte string literal not displayed */
	// Mode for TPU Unpadded batch.
	TpuUnpaddedBatchMode TfrtSavedModelConfig_TpuUnpaddedBatchMode `` /* 195-byte string literal not displayed */
	// The config file path of the ThreadPoolFactory used to create inter-op
	// ThreadPools. If unspecified, the default TensorFlow threadpools will be
	// used.
	ThreadPoolFactoryConfigFilepath string `` /* 163-byte string literal not displayed */
	// If true, validate input tensor specs before executing the request.
	ValidateInputSpecs bool `protobuf:"varint,2012,opt,name=validate_input_specs,json=validateInputSpecs,proto3" json:"validate_input_specs,omitempty"`
	// TODO(b/279197040) Remove after b/279197040 is fixed.
	// If true, validate (by logging mismatches) input tensor specs before
	// executing the request.
	ValidateInputSpecsDryRun                 bool                                                          `` /* 143-byte string literal not displayed */
	PredictResponseTensorSerializationOption TfrtSavedModelConfig_PredictResponseTensorSerializationOption `` /* 277-byte string literal not displayed */
	// The size of the reserved memory space for the GPU system.
	GpuSystemMemorySizeInMb int32 `` /* 142-byte string literal not displayed */
	// Disables compilations after model initialization is complete
	// (ignored if enable_model_warmup is false).
	FreezeAfterInit bool `protobuf:"varint,2016,opt,name=freeze_after_init,json=freezeAfterInit,proto3" json:"freeze_after_init,omitempty"`
	// The number of virtual GPUs to create on a physical GPU.
	TfrtGpuParallelism int32 `protobuf:"varint,2017,opt,name=tfrt_gpu_parallelism,json=tfrtGpuParallelism,proto3" json:"tfrt_gpu_parallelism,omitempty"`
	// Whether to use fused op for GPU compile, execute and data transfer.
	TfrtUseFusedGpuOp bool `protobuf:"varint,2018,opt,name=tfrt_use_fused_gpu_op,json=tfrtUseFusedGpuOp,proto3" json:"tfrt_use_fused_gpu_op,omitempty"`
	// The minimum number of batch threads.
	TfrtMinNumBatchThreads int64 `` /* 137-byte string literal not displayed */
	// Whether to use IFRT as device driver for device execution.
	TfrtUseIfrt bool `protobuf:"varint,2020,opt,name=tfrt_use_ifrt,json=tfrtUseIfrt,proto3" json:"tfrt_use_ifrt,omitempty"`
	// Whether to enable core selector for TFRT+IFRT. It is only effective when
	// tfrt_use_ifrt is true.
	EnableTfrtUseIfrtCoreSelector bool `` /* 160-byte string literal not displayed */
	// Whether to enable paging. This should only be true when using Pathways
	// backend.
	EnablePaging bool `protobuf:"varint,2022,opt,name=enable_paging,json=enablePaging,proto3" json:"enable_paging,omitempty"`
	// contains filtered or unexported fields
}

func (*TfrtSavedModelConfig) Descriptor deprecated

func (*TfrtSavedModelConfig) Descriptor() ([]byte, []int)

Deprecated: Use TfrtSavedModelConfig.ProtoReflect.Descriptor instead.

func (*TfrtSavedModelConfig) GetAutoFusionMinClusterSize deprecated

func (x *TfrtSavedModelConfig) GetAutoFusionMinClusterSize() int32

Deprecated: Marked as deprecated in tensorflow/serving/servables/tensorflow/tfrt_saved_model_source_adapter.proto.

func (*TfrtSavedModelConfig) GetAutoFusionOp deprecated

func (x *TfrtSavedModelConfig) GetAutoFusionOp() []string

Deprecated: Marked as deprecated in tensorflow/serving/servables/tensorflow/tfrt_saved_model_source_adapter.proto.

func (*TfrtSavedModelConfig) GetBatchingParameters

func (x *TfrtSavedModelConfig) GetBatchingParameters() *BatchingParameters

func (*TfrtSavedModelConfig) GetEnableGrappler

func (x *TfrtSavedModelConfig) GetEnableGrappler() bool

func (*TfrtSavedModelConfig) GetEnableGrapplerFunctionOptimizer

func (x *TfrtSavedModelConfig) GetEnableGrapplerFunctionOptimizer() bool

func (*TfrtSavedModelConfig) GetEnableMlrt

func (x *TfrtSavedModelConfig) GetEnableMlrt() bool

func (*TfrtSavedModelConfig) GetEnableModelWarmup

func (x *TfrtSavedModelConfig) GetEnableModelWarmup() bool

func (*TfrtSavedModelConfig) GetEnableNativeOps deprecated

func (x *TfrtSavedModelConfig) GetEnableNativeOps() bool

Deprecated: Marked as deprecated in tensorflow/serving/servables/tensorflow/tfrt_saved_model_source_adapter.proto.

func (*TfrtSavedModelConfig) GetEnableOnlineCostAnalysis

func (x *TfrtSavedModelConfig) GetEnableOnlineCostAnalysis() bool

func (*TfrtSavedModelConfig) GetEnablePaging

func (x *TfrtSavedModelConfig) GetEnablePaging() bool

func (*TfrtSavedModelConfig) GetEnablePerModelBatchingParams

func (x *TfrtSavedModelConfig) GetEnablePerModelBatchingParams() bool

func (*TfrtSavedModelConfig) GetEnableSavedModelConfig

func (x *TfrtSavedModelConfig) GetEnableSavedModelConfig() bool

func (*TfrtSavedModelConfig) GetEnableTfrtGpu

func (x *TfrtSavedModelConfig) GetEnableTfrtGpu() bool

func (*TfrtSavedModelConfig) GetEnableTfrtUseIfrtCoreSelector

func (x *TfrtSavedModelConfig) GetEnableTfrtUseIfrtCoreSelector() bool

func (*TfrtSavedModelConfig) GetEnableWhileParallelIterations

func (x *TfrtSavedModelConfig) GetEnableWhileParallelIterations() bool

func (*TfrtSavedModelConfig) GetFreezeAfterInit

func (x *TfrtSavedModelConfig) GetFreezeAfterInit() bool

func (*TfrtSavedModelConfig) GetGpuSystemMemorySizeInMb

func (x *TfrtSavedModelConfig) GetGpuSystemMemorySizeInMb() int32

func (*TfrtSavedModelConfig) GetGraphOptions

func (*TfrtSavedModelConfig) GetHoistInvariantOps

func (x *TfrtSavedModelConfig) GetHoistInvariantOps() bool

func (*TfrtSavedModelConfig) GetLazyInitThreshold

func (x *TfrtSavedModelConfig) GetLazyInitThreshold() int32

func (*TfrtSavedModelConfig) GetLazyLoadingUseGraphExecutor

func (x *TfrtSavedModelConfig) GetLazyLoadingUseGraphExecutor() bool

func (*TfrtSavedModelConfig) GetLegacyConfig

func (x *TfrtSavedModelConfig) GetLegacyConfig() *SessionBundleConfig

func (*TfrtSavedModelConfig) GetMaybeLoadFromMla

func (x *TfrtSavedModelConfig) GetMaybeLoadFromMla() bool

func (*TfrtSavedModelConfig) GetMergeInterDependentStreams

func (x *TfrtSavedModelConfig) GetMergeInterDependentStreams() bool

func (*TfrtSavedModelConfig) GetModelWarmupOptions

func (x *TfrtSavedModelConfig) GetModelWarmupOptions() *ModelWarmupOptions

func (*TfrtSavedModelConfig) GetPredictResponseTensorSerializationOption

func (x *TfrtSavedModelConfig) GetPredictResponseTensorSerializationOption() TfrtSavedModelConfig_PredictResponseTensorSerializationOption

func (*TfrtSavedModelConfig) GetResourceEstimationUsesValidationResult

func (x *TfrtSavedModelConfig) GetResourceEstimationUsesValidationResult() bool

func (*TfrtSavedModelConfig) GetRunPlacerGrapplerOnFunctions

func (x *TfrtSavedModelConfig) GetRunPlacerGrapplerOnFunctions() bool

func (*TfrtSavedModelConfig) GetSavedModelTags

func (x *TfrtSavedModelConfig) GetSavedModelTags() []string

func (*TfrtSavedModelConfig) GetSinkInInvariantOps

func (x *TfrtSavedModelConfig) GetSinkInInvariantOps() bool

func (*TfrtSavedModelConfig) GetSkipWarmupRequestsIfInitialized

func (x *TfrtSavedModelConfig) GetSkipWarmupRequestsIfInitialized() bool

func (*TfrtSavedModelConfig) GetStreamMergeThreshold

func (x *TfrtSavedModelConfig) GetStreamMergeThreshold() int64

func (*TfrtSavedModelConfig) GetStreamMergeUpperCostThreshold

func (x *TfrtSavedModelConfig) GetStreamMergeUpperCostThreshold() int64

func (*TfrtSavedModelConfig) GetTargetTpu

func (x *TfrtSavedModelConfig) GetTargetTpu() bool

func (*TfrtSavedModelConfig) GetTfrtGpuParallelism

func (x *TfrtSavedModelConfig) GetTfrtGpuParallelism() int32

func (*TfrtSavedModelConfig) GetTfrtMinNumBatchThreads

func (x *TfrtSavedModelConfig) GetTfrtMinNumBatchThreads() int64

func (*TfrtSavedModelConfig) GetTfrtUseFusedGpuOp

func (x *TfrtSavedModelConfig) GetTfrtUseFusedGpuOp() bool

func (*TfrtSavedModelConfig) GetTfrtUseIfrt

func (x *TfrtSavedModelConfig) GetTfrtUseIfrt() bool

func (*TfrtSavedModelConfig) GetThreadPoolFactoryConfigFilepath

func (x *TfrtSavedModelConfig) GetThreadPoolFactoryConfigFilepath() string

func (*TfrtSavedModelConfig) GetTpuGatherTableWidthThresholdBytes

func (x *TfrtSavedModelConfig) GetTpuGatherTableWidthThresholdBytes() int32

func (*TfrtSavedModelConfig) GetTpuMoveResourceGatherToHost

func (x *TfrtSavedModelConfig) GetTpuMoveResourceGatherToHost() bool

func (*TfrtSavedModelConfig) GetTpuUnpaddedBatchMode

func (*TfrtSavedModelConfig) GetUseFusedTpuOp

func (x *TfrtSavedModelConfig) GetUseFusedTpuOp() bool

func (*TfrtSavedModelConfig) GetUseLiteMlirImporter

func (x *TfrtSavedModelConfig) GetUseLiteMlirImporter() bool

func (*TfrtSavedModelConfig) GetUseMira

func (x *TfrtSavedModelConfig) GetUseMira() bool

func (*TfrtSavedModelConfig) GetUseTpuHostAllocatorForInputs

func (x *TfrtSavedModelConfig) GetUseTpuHostAllocatorForInputs() bool

func (*TfrtSavedModelConfig) GetValidateInputSpecs

func (x *TfrtSavedModelConfig) GetValidateInputSpecs() bool

func (*TfrtSavedModelConfig) GetValidateInputSpecsDryRun

func (x *TfrtSavedModelConfig) GetValidateInputSpecsDryRun() bool

func (*TfrtSavedModelConfig) ProtoMessage

func (*TfrtSavedModelConfig) ProtoMessage()

func (*TfrtSavedModelConfig) ProtoReflect

func (x *TfrtSavedModelConfig) ProtoReflect() protoreflect.Message

func (*TfrtSavedModelConfig) Reset

func (x *TfrtSavedModelConfig) Reset()

func (*TfrtSavedModelConfig) String

func (x *TfrtSavedModelConfig) String() string

type TfrtSavedModelConfig_PredictResponseTensorSerializationOption

type TfrtSavedModelConfig_PredictResponseTensorSerializationOption int32

In a predict handler, this option specifies how to serialize tensors (e.g., as proto fields or as proto content). Tensors are serialized as proto fields by default, for backward compatibility.

const (
	TfrtSavedModelConfig_AS_PROTO_DEFAULT TfrtSavedModelConfig_PredictResponseTensorSerializationOption = 0
	TfrtSavedModelConfig_AS_PROTO_FIELD   TfrtSavedModelConfig_PredictResponseTensorSerializationOption = 0
	TfrtSavedModelConfig_AS_PROTO_CONTENT TfrtSavedModelConfig_PredictResponseTensorSerializationOption = 1
)
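
Note that AS_PROTO_DEFAULT and AS_PROTO_FIELD are aliases for the same number, so they compare equal. A short sketch (hypothetical `tfs` import alias):

package main

import (
	"fmt"

	tfs "example.com/tensorflow_go_proto" // hypothetical import path
)

func main() {
	// AS_PROTO_FIELD is an alias of AS_PROTO_DEFAULT: both are 0.
	fmt.Println(tfs.TfrtSavedModelConfig_AS_PROTO_DEFAULT ==
		tfs.TfrtSavedModelConfig_AS_PROTO_FIELD) // true

	cfg := &tfs.TfrtSavedModelConfig{
		PredictResponseTensorSerializationOption: tfs.TfrtSavedModelConfig_AS_PROTO_CONTENT,
	}
	fmt.Println(cfg.GetPredictResponseTensorSerializationOption()) // AS_PROTO_CONTENT
}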

func (TfrtSavedModelConfig_PredictResponseTensorSerializationOption) Descriptor

func (TfrtSavedModelConfig_PredictResponseTensorSerializationOption) Enum

func (TfrtSavedModelConfig_PredictResponseTensorSerializationOption) EnumDescriptor deprecated

Deprecated: Use TfrtSavedModelConfig_PredictResponseTensorSerializationOption.Descriptor instead.

func (TfrtSavedModelConfig_PredictResponseTensorSerializationOption) Number

func (TfrtSavedModelConfig_PredictResponseTensorSerializationOption) String

func (TfrtSavedModelConfig_PredictResponseTensorSerializationOption) Type

type TfrtSavedModelConfig_TpuUnpaddedBatchMode

type TfrtSavedModelConfig_TpuUnpaddedBatchMode int32

const (
	// Disable this feature.
	TfrtSavedModelConfig_UNPADDED_BATCH_DISABLED TfrtSavedModelConfig_TpuUnpaddedBatchMode = 0
	// Enable this feature when in-graph batching is detected.
	TfrtSavedModelConfig_UNPADDED_BATCH_AUTO TfrtSavedModelConfig_TpuUnpaddedBatchMode = 1
	// Always enable this feature.
	TfrtSavedModelConfig_UNPADDED_BATCH_ENFORCED TfrtSavedModelConfig_TpuUnpaddedBatchMode = 2
)

func (TfrtSavedModelConfig_TpuUnpaddedBatchMode) Descriptor

func (TfrtSavedModelConfig_TpuUnpaddedBatchMode) Enum

func (TfrtSavedModelConfig_TpuUnpaddedBatchMode) EnumDescriptor deprecated

func (TfrtSavedModelConfig_TpuUnpaddedBatchMode) EnumDescriptor() ([]byte, []int)

Deprecated: Use TfrtSavedModelConfig_TpuUnpaddedBatchMode.Descriptor instead.

func (TfrtSavedModelConfig_TpuUnpaddedBatchMode) Number

func (TfrtSavedModelConfig_TpuUnpaddedBatchMode) String

func (TfrtSavedModelConfig_TpuUnpaddedBatchMode) Type

type TfrtSavedModelSourceAdapterConfig

type TfrtSavedModelSourceAdapterConfig struct {
	SavedModelConfig *TfrtSavedModelConfig `protobuf:"bytes,1,opt,name=saved_model_config,json=savedModelConfig,proto3" json:"saved_model_config,omitempty"`
	// contains filtered or unexported fields
}

Config proto for TfrtSavedModelSourceAdapter.
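
A minimal sketch of wrapping a TFRT saved-model config in the adapter config (illustrative values, hypothetical `tfs` import alias):

package main

import (
	tfs "example.com/tensorflow_go_proto" // hypothetical import path
)

// A hypothetical adapter config wrapping a TFRT saved-model config, with
// TPU unpadded batching in auto mode. Field choices are illustrative.
var adapterConfig = &tfs.TfrtSavedModelSourceAdapterConfig{
	SavedModelConfig: &tfs.TfrtSavedModelConfig{
		SavedModelTags:       []string{"serve"},
		EnableModelWarmup:    true,
		TpuUnpaddedBatchMode: tfs.TfrtSavedModelConfig_UNPADDED_BATCH_AUTO,
	},
}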

func (*TfrtSavedModelSourceAdapterConfig) Descriptor deprecated

func (*TfrtSavedModelSourceAdapterConfig) Descriptor() ([]byte, []int)

Deprecated: Use TfrtSavedModelSourceAdapterConfig.ProtoReflect.Descriptor instead.

func (*TfrtSavedModelSourceAdapterConfig) GetSavedModelConfig

func (x *TfrtSavedModelSourceAdapterConfig) GetSavedModelConfig() *TfrtSavedModelConfig

func (*TfrtSavedModelSourceAdapterConfig) ProtoMessage

func (*TfrtSavedModelSourceAdapterConfig) ProtoMessage()

func (*TfrtSavedModelSourceAdapterConfig) ProtoReflect

func (*TfrtSavedModelSourceAdapterConfig) Reset

func (*TfrtSavedModelSourceAdapterConfig) String

type ThreadPoolFactoryConfig

type ThreadPoolFactoryConfig struct {

	// The config proto for a ThreadPoolFactory in the ThreadPoolFactory registry.
	ThreadPoolFactoryConfig *anypb.Any `` /* 134-byte string literal not displayed */
	// contains filtered or unexported fields
}

Configuration for a thread pool factory.
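
The factory-specific config travels as an `anypb.Any`, so a concrete message would be packed with `anypb.New`. A sketch under that assumption; the `inner` message is a stand-in, since the registry's concrete config types are not part of this package:

package main

import (
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/anypb"

	tfs "example.com/tensorflow_go_proto" // hypothetical import path
)

// packThreadPoolFactoryConfig wraps a concrete factory config message in an
// Any. The concrete message type depends on the ThreadPoolFactory registry,
// so `inner` is a stand-in for whatever message that factory expects.
func packThreadPoolFactoryConfig(inner proto.Message) (*tfs.ThreadPoolFactoryConfig, error) {
	a, err := anypb.New(inner)
	if err != nil {
		return nil, err
	}
	return &tfs.ThreadPoolFactoryConfig{ThreadPoolFactoryConfig: a}, nil
}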

func (*ThreadPoolFactoryConfig) Descriptor deprecated

func (*ThreadPoolFactoryConfig) Descriptor() ([]byte, []int)

Deprecated: Use ThreadPoolFactoryConfig.ProtoReflect.Descriptor instead.

func (*ThreadPoolFactoryConfig) GetThreadPoolFactoryConfig

func (x *ThreadPoolFactoryConfig) GetThreadPoolFactoryConfig() *anypb.Any

func (*ThreadPoolFactoryConfig) ProtoMessage

func (*ThreadPoolFactoryConfig) ProtoMessage()

func (*ThreadPoolFactoryConfig) ProtoReflect

func (x *ThreadPoolFactoryConfig) ProtoReflect() protoreflect.Message

func (*ThreadPoolFactoryConfig) Reset

func (x *ThreadPoolFactoryConfig) Reset()

func (*ThreadPoolFactoryConfig) String

func (x *ThreadPoolFactoryConfig) String() string
