Documentation ¶
Index ¶
- type ConfigProto
- func (*ConfigProto) Descriptor() ([]byte, []int)
- func (m *ConfigProto) GetDeviceCount() map[string]int32
- func (m *ConfigProto) GetGpuOptions() *GPUOptions
- func (*ConfigProto) ProtoMessage()
- func (m *ConfigProto) Reset()
- func (m *ConfigProto) String() string
- func (m *ConfigProto) XXX_DiscardUnknown()
- func (m *ConfigProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *ConfigProto) XXX_Merge(src proto.Message)
- func (m *ConfigProto) XXX_Size() int
- func (m *ConfigProto) XXX_Unmarshal(b []byte) error
- type GPUOptions
- func (*GPUOptions) Descriptor() ([]byte, []int)
- func (m *GPUOptions) GetAllocatorType() string
- func (m *GPUOptions) GetAllowGrowth() bool
- func (m *GPUOptions) GetDeferredDeletionBytes() int64
- func (m *GPUOptions) GetExperimental() *anypb.Any
- func (m *GPUOptions) GetForceGpuCompatible() bool
- func (m *GPUOptions) GetPerProcessGpuMemoryFraction() float64
- func (m *GPUOptions) GetPollingActiveDelayUsecs() int32
- func (m *GPUOptions) GetPollingInactiveDelayMsecs() int32
- func (m *GPUOptions) GetVisibleDeviceList() string
- func (*GPUOptions) ProtoMessage()
- func (m *GPUOptions) Reset()
- func (m *GPUOptions) String() string
- func (m *GPUOptions) XXX_DiscardUnknown()
- func (m *GPUOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *GPUOptions) XXX_Merge(src proto.Message)
- func (m *GPUOptions) XXX_Size() int
- func (m *GPUOptions) XXX_Unmarshal(b []byte) error
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type ConfigProto ¶
type ConfigProto struct {
	// Map from device type name (e.g., "CPU" or "GPU") to maximum
	// number of devices of that type to use. If a particular device
	// type is not found in the map, the system picks an appropriate
	// number.
	DeviceCount map[string]int32 `` /* 183-byte string literal not displayed */
	// Options that apply to all GPUs.
	GpuOptions           *GPUOptions `protobuf:"bytes,6,opt,name=gpu_options,json=gpuOptions,proto3" json:"gpu_options,omitempty"`
	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
	XXX_unrecognized     []byte      `json:"-"`
	XXX_sizecache        int32       `json:"-"`
}
Session configuration parameters. The system picks appropriate values for fields that are not set.
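A minimal sketch of constructing this message; the configpb import path is hypothetical and stands in for wherever this generated package lives in your module:

package main

import (
	"fmt"

	configpb "example.com/tensorflow/core/protobuf" // hypothetical import path for this package
)

func main() {
	cfg := &configpb.ConfigProto{
		// Use at most 4 CPU devices; unset device types get a system-chosen count.
		DeviceCount: map[string]int32{"CPU": 4},
		// Options that apply to all GPUs.
		GpuOptions: &configpb.GPUOptions{AllowGrowth: true},
	}
	fmt.Println(cfg.String())
}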
func (*ConfigProto) Descriptor ¶
func (*ConfigProto) Descriptor() ([]byte, []int)
func (*ConfigProto) GetDeviceCount ¶
func (m *ConfigProto) GetDeviceCount() map[string]int32
func (*ConfigProto) GetGpuOptions ¶
func (m *ConfigProto) GetGpuOptions() *GPUOptions
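Like other messages generated by protoc-gen-go in this style, the getters check for a nil receiver and return the zero value, so chained access through optional submessages is safe. A small illustration (configpb is the hypothetical import alias from the example above):

var cfg *configpb.ConfigProto // nil: no configuration was supplied

// Neither call panics even though cfg and its GpuOptions are unset.
growth := cfg.GetGpuOptions().GetAllowGrowth() // false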
func (*ConfigProto) ProtoMessage ¶
func (*ConfigProto) ProtoMessage()
func (*ConfigProto) Reset ¶
func (m *ConfigProto) Reset()
func (*ConfigProto) String ¶
func (m *ConfigProto) String() string
func (*ConfigProto) XXX_DiscardUnknown ¶
func (m *ConfigProto) XXX_DiscardUnknown()
func (*ConfigProto) XXX_Marshal ¶
func (m *ConfigProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (*ConfigProto) XXX_Merge ¶
func (m *ConfigProto) XXX_Merge(src proto.Message)
func (*ConfigProto) XXX_Size ¶
func (m *ConfigProto) XXX_Size() int
func (*ConfigProto) XXX_Unmarshal ¶
func (m *ConfigProto) XXX_Unmarshal(b []byte) error
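The XXX_* methods are plumbing for the proto runtime; application code should serialize through the proto package instead. A hedged round-trip sketch, again using the hypothetical configpb import path:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"

	configpb "example.com/tensorflow/core/protobuf" // hypothetical import path
)

func main() {
	in := &configpb.ConfigProto{DeviceCount: map[string]int32{"GPU": 1}}

	// proto.Marshal and proto.Unmarshal drive the XXX_* methods internally.
	b, err := proto.Marshal(in)
	if err != nil {
		log.Fatal(err)
	}
	out := &configpb.ConfigProto{}
	if err := proto.Unmarshal(b, out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.GetDeviceCount()["GPU"]) // prints: 1
}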
type GPUOptions ¶
type GPUOptions struct {
	// Fraction of the available GPU memory to allocate for each process.
	// 1 means to allocate all of the GPU memory, 0.5 means the process
	// allocates up to ~50% of the available GPU memory.
	//
	// GPU memory is pre-allocated unless the allow_growth option is enabled.
	//
	// If greater than 1.0, uses CUDA unified memory to potentially oversubscribe
	// the amount of memory available on the GPU device by using host memory as a
	// swap space. Accessing memory not available on the device will be
	// significantly slower, as that requires memory transfers between the host
	// and the device. Consider options to reduce the memory requirement before
	// enabling this option, as it may come with a negative performance impact.
	// Oversubscription using unified memory requires Pascal-class or newer GPUs
	// and is currently only supported on Linux. See
	// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#um-requirements
	// for the detailed requirements.
	PerProcessGpuMemoryFraction float64 `` /* 150-byte string literal not displayed */
	// If true, the allocator does not pre-allocate the entire specified
	// GPU memory region, instead starting small and growing as needed.
	AllowGrowth bool `protobuf:"varint,4,opt,name=allow_growth,json=allowGrowth,proto3" json:"allow_growth,omitempty"`
	// The type of GPU allocation strategy to use.
	//
	// Allowed values:
	//   "": The empty string (default) uses a system-chosen default
	//       which may change over time.
	//   "BFC": A "Best-fit with coalescing" algorithm, simplified from a
	//       version of dlmalloc.
	AllocatorType string `protobuf:"bytes,2,opt,name=allocator_type,json=allocatorType,proto3" json:"allocator_type,omitempty"`
	// Delay deletion of up to this many bytes to reduce the number of
	// interactions with the GPU driver code. If 0, the system chooses
	// a reasonable default (several MBs).
	DeferredDeletionBytes int64 `` /* 127-byte string literal not displayed */
	// A comma-separated list of GPU ids that determines the 'visible'
	// to 'virtual' mapping of GPU devices. For example, if TensorFlow
	// can see 8 GPU devices in the process and one wanted to map
	// visible GPU devices 5 and 3 as "/device:GPU:0" and "/device:GPU:1",
	// then one would specify this field as "5,3". This field is similar in
	// spirit to the CUDA_VISIBLE_DEVICES environment variable, except
	// it applies to the visible GPU devices in the process.
	//
	// NOTE:
	// 1. The GPU driver provides the process with the visible GPUs
	//    in an order which is not guaranteed to have any correlation to
	//    the *physical* GPU id in the machine. This field is used for
	//    remapping "visible" to "virtual", which means this operates only
	//    after the process starts. Users are required to use vendor
	//    specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
	//    physical-to-visible device mapping prior to invoking TensorFlow.
	// 2. In the code, the ids in this list are also called "platform GPU id"s,
	//    and the 'virtual' ids of GPU devices (i.e. the ids in the device
	//    name "/device:GPU:<id>") are also called "TF GPU id"s. Please
	//    refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
	//    for more information.
	VisibleDeviceList string `protobuf:"bytes,5,opt,name=visible_device_list,json=visibleDeviceList,proto3" json:"visible_device_list,omitempty"`
	// In the event polling loop, sleep this many microseconds between
	// PollEvents calls when the queue is not empty. If the value is not
	// set or is set to 0, it gets set to a non-zero default.
	PollingActiveDelayUsecs int32 `` /* 135-byte string literal not displayed */
	// This field is deprecated and ignored.
	PollingInactiveDelayMsecs int32 `` /* 141-byte string literal not displayed */
	// Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow,
	// enabling this option forces all CPU tensors to be allocated with CUDA
	// pinned memory. Normally, TensorFlow infers which tensors should be
	// allocated as pinned memory. But in cases where the inference is
	// incomplete, this option can significantly speed up cross-device memory
	// copy performance, as long as the data fits in memory.
	// Note that this option should not be enabled by default for unknown or
	// very large models, since all CUDA pinned memory is unpageable; having
	// too much pinned memory might negatively impact overall host system
	// performance.
	ForceGpuCompatible bool `protobuf:"varint,8,opt,name=force_gpu_compatible,json=forceGpuCompatible,proto3" json:"force_gpu_compatible,omitempty"`
	// Unused.
	Experimental         *anypb.Any `protobuf:"bytes,9,opt,name=experimental,proto3" json:"experimental,omitempty"`
	XXX_NoUnkeyedLiteral struct{}   `json:"-"`
	XXX_unrecognized     []byte     `json:"-"`
	XXX_sizecache        int32      `json:"-"`
}
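Tying the field comments together, a hedged sketch of a common memory setup: grow allocations on demand, cap the process at roughly 40% of each GPU's memory, and expose visible devices 5 and 3 as /device:GPU:0 and /device:GPU:1. The configpb import path is hypothetical, as in the earlier examples:

package main

import (
	"fmt"

	configpb "example.com/tensorflow/core/protobuf" // hypothetical import path
)

func main() {
	opts := &configpb.GPUOptions{
		AllowGrowth:                 true,  // start small instead of pre-allocating
		PerProcessGpuMemoryFraction: 0.4,   // cap at ~40% of available GPU memory
		VisibleDeviceList:           "5,3", // visible GPU 5 -> /device:GPU:0, 3 -> /device:GPU:1
	}
	fmt.Println(opts.String())
}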
func (*GPUOptions) Descriptor ¶
func (*GPUOptions) Descriptor() ([]byte, []int)
func (*GPUOptions) GetAllocatorType ¶
func (m *GPUOptions) GetAllocatorType() string
func (*GPUOptions) GetAllowGrowth ¶
func (m *GPUOptions) GetAllowGrowth() bool
func (*GPUOptions) GetDeferredDeletionBytes ¶
func (m *GPUOptions) GetDeferredDeletionBytes() int64
func (*GPUOptions) GetExperimental ¶
func (m *GPUOptions) GetExperimental() *anypb.Any
func (*GPUOptions) GetForceGpuCompatible ¶
func (m *GPUOptions) GetForceGpuCompatible() bool
func (*GPUOptions) GetPerProcessGpuMemoryFraction ¶
func (m *GPUOptions) GetPerProcessGpuMemoryFraction() float64
func (*GPUOptions) GetPollingActiveDelayUsecs ¶
func (m *GPUOptions) GetPollingActiveDelayUsecs() int32
func (*GPUOptions) GetPollingInactiveDelayMsecs ¶
func (m *GPUOptions) GetPollingInactiveDelayMsecs() int32
func (*GPUOptions) GetVisibleDeviceList ¶
func (m *GPUOptions) GetVisibleDeviceList() string
func (*GPUOptions) ProtoMessage ¶
func (*GPUOptions) ProtoMessage()
func (*GPUOptions) Reset ¶
func (m *GPUOptions) Reset()
func (*GPUOptions) String ¶
func (m *GPUOptions) String() string
func (*GPUOptions) XXX_DiscardUnknown ¶
func (m *GPUOptions) XXX_DiscardUnknown()
func (*GPUOptions) XXX_Marshal ¶
func (m *GPUOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (*GPUOptions) XXX_Merge ¶
func (m *GPUOptions) XXX_Merge(src proto.Message)
func (*GPUOptions) XXX_Size ¶
func (m *GPUOptions) XXX_Size() int
func (*GPUOptions) XXX_Unmarshal ¶
func (m *GPUOptions) XXX_Unmarshal(b []byte) error
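If you are using the TensorFlow Go bindings (github.com/tensorflow/tensorflow/tensorflow/go), their SessionOptions.Config field accepts the binary-serialized form of this message. A sketch under that assumption, with the configpb import path again hypothetical:

package main

import (
	"log"

	"github.com/golang/protobuf/proto"
	tf "github.com/tensorflow/tensorflow/tensorflow/go"

	configpb "example.com/tensorflow/core/protobuf" // hypothetical import path
)

func main() {
	cfg := &configpb.ConfigProto{
		GpuOptions: &configpb.GPUOptions{AllowGrowth: true},
	}
	b, err := proto.Marshal(cfg)
	if err != nil {
		log.Fatal(err)
	}

	graph := tf.NewGraph()
	// SessionOptions.Config takes the serialized ConfigProto bytes.
	sess, err := tf.NewSession(graph, &tf.SessionOptions{Config: b})
	if err != nil {
		log.Fatal(err)
	}
	defer sess.Close()
}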