Documentation
Overview
Package op defines functions for adding TensorFlow operations to a Graph.
Functions for adding an operation to a graph take a Scope object as the first argument. The Scope object encapsulates a graph and a set of properties (such as a name prefix) for all operations being added to the graph.
WARNING: The API in this package has not been finalized and can change without notice.
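As an illustration of the name-prefix property described above, here is a minimal sketch that builds a few operations under a sub-scope. The import paths and the "layer1" sub-scope name are illustrative assumptions, not taken from this page.

package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	root := op.NewScope()

	// Operations added through a sub-scope share its name prefix, so the
	// nodes below appear in the graph as "layer1/Const", "layer1/Placeholder",
	// and "layer1/MatMul".
	layer := root.SubScope("layer1")
	w := op.Const(layer, [][]float32{{1, 2}, {3, 4}}) // 2x2 constant matrix
	x := op.Placeholder(layer, tf.Float)              // fed at run time
	y := op.MatMul(layer, w, x)

	// Errors accumulate on the scope instead of being returned per call;
	// check once after building.
	if err := root.Err(); err != nil {
		panic(err)
	}
	fmt.Println(y.Op.Name()) // e.g. "layer1/MatMul"
}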
Example
// This example creates a Graph that multiplies a constant matrix with
// a matrix to be provided during graph execution (via
// tensorflow.Session).
s := NewScope()
input := Placeholder(s, tf.Float) // Matrix to be provided to Session.Run
output := MatMul(s,
	Const(s, [][]float32{{10}, {20}}), // Constant 2x1 matrix
	input,
	MatMulTransposeB(true))
if s.Err() != nil {
	panic(s.Err())
}
// Shape of the product: The number of rows is fixed by m1, but the
// number of columns will depend on m2, which is unknown.
fmt.Println(output.Shape())
Output:

[2, ?]
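The example above only constructs the graph. To show the execution step it alludes to, here is a hedged sketch that feeds the placeholder and evaluates the product through a tensorflow.Session; the feed value and the import paths are assumptions added for illustration.

package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	input := op.Placeholder(s, tf.Float)
	output := op.MatMul(s,
		op.Const(s, [][]float32{{10}, {20}}), // constant 2x1 matrix
		input,
		op.MatMulTransposeB(true))

	graph, err := s.Finalize() // no further operations may be added after this
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	// Feed a 3x1 matrix; with transpose_b=true the product is 2x3,
	// which resolves the "?" in the statically known shape [2, ?].
	feed, err := tf.NewTensor([][]float32{{1}, {2}, {3}})
	if err != nil {
		panic(err)
	}
	results, err := sess.Run(
		map[tf.Output]*tf.Tensor{input: feed},
		[]tf.Output{output},
		nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(results[0].Value()) // [[10 20 30] [20 40 60]]
}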
Index
- func Abort(scope *Scope, optional ...AbortAttr) (o *tf.Operation)
- func Abs(scope *Scope, x tf.Output) (y tf.Output)
- func AccumulateNV2(scope *Scope, inputs []tf.Output, shape tf.Shape) (sum tf.Output)
- func Acos(scope *Scope, x tf.Output) (y tf.Output)
- func Acosh(scope *Scope, x tf.Output) (y tf.Output)
- func Add(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func AddManySparseToTensorsMap(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, ...) (sparse_handles tf.Output)
- func AddN(scope *Scope, inputs []tf.Output) (sum tf.Output)
- func AddSparseToTensorsMap(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, ...) (sparse_handle tf.Output)
- func AddV2(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func AdjustContrast(scope *Scope, images tf.Output, contrast_factor tf.Output, min_value tf.Output, ...) (output tf.Output)
- func AdjustContrastv2(scope *Scope, images tf.Output, contrast_factor tf.Output) (output tf.Output)
- func AdjustHue(scope *Scope, images tf.Output, delta tf.Output) (output tf.Output)
- func AdjustSaturation(scope *Scope, images tf.Output, scale tf.Output) (output tf.Output)
- func All(scope *Scope, input tf.Output, axis tf.Output, optional ...AllAttr) (output tf.Output)
- func AllCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, ...) (sampled_candidates tf.Output, true_expected_count tf.Output, ...)
- func AllToAll(scope *Scope, input tf.Output, group_assignment tf.Output, ...) (output tf.Output)
- func Angle(scope *Scope, input tf.Output, optional ...AngleAttr) (output tf.Output)
- func AnonymousHashTable(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType) (table_handle tf.Output)
- func AnonymousIterator(scope *Scope, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
- func AnonymousIteratorV2(scope *Scope, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output, deleter tf.Output)
- func AnonymousIteratorV3(scope *Scope, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
- func AnonymousMultiDeviceIterator(scope *Scope, devices []string, output_types []tf.DataType, ...) (handle tf.Output, deleter tf.Output)
- func AnonymousMultiDeviceIteratorV3(scope *Scope, devices []string, output_types []tf.DataType, ...) (handle tf.Output)
- func AnonymousMutableDenseHashTable(scope *Scope, empty_key tf.Output, deleted_key tf.Output, ...) (table_handle tf.Output)
- func AnonymousMutableHashTable(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType) (table_handle tf.Output)
- func AnonymousMutableHashTableOfTensors(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, ...) (table_handle tf.Output)
- func Any(scope *Scope, input tf.Output, axis tf.Output, optional ...AnyAttr) (output tf.Output)
- func ApproxTopK(scope *Scope, input tf.Output, k int64, optional ...ApproxTopKAttr) (values tf.Output, indices tf.Output)
- func ApproximateEqual(scope *Scope, x tf.Output, y tf.Output, optional ...ApproximateEqualAttr) (z tf.Output)
- func ArgMax(scope *Scope, input tf.Output, dimension tf.Output, optional ...ArgMaxAttr) (output tf.Output)
- func ArgMin(scope *Scope, input tf.Output, dimension tf.Output, optional ...ArgMinAttr) (output tf.Output)
- func AsString(scope *Scope, input tf.Output, optional ...AsStringAttr) (output tf.Output)
- func Asin(scope *Scope, x tf.Output) (y tf.Output)
- func Asinh(scope *Scope, x tf.Output) (y tf.Output)
- func Assert(scope *Scope, condition tf.Output, data []tf.Output, optional ...AssertAttr) (o *tf.Operation)
- func AssertNextDataset(scope *Scope, input_dataset tf.Output, transformations tf.Output, ...) (handle tf.Output)
- func AssertPrevDataset(scope *Scope, input_dataset tf.Output, transformations tf.Output, ...) (handle tf.Output)
- func AssignAddVariableOp(scope *Scope, resource tf.Output, value tf.Output) (o *tf.Operation)
- func AssignSubVariableOp(scope *Scope, resource tf.Output, value tf.Output) (o *tf.Operation)
- func AssignVariableOp(scope *Scope, resource tf.Output, value tf.Output, ...) (o *tf.Operation)
- func AssignVariableXlaConcatND(scope *Scope, resource tf.Output, inputs []tf.Output, num_concats []int64, ...) (o *tf.Operation)
- func Atan(scope *Scope, x tf.Output) (y tf.Output)
- func Atan2(scope *Scope, y tf.Output, x tf.Output) (z tf.Output)
- func Atanh(scope *Scope, x tf.Output) (y tf.Output)
- func AudioSpectrogram(scope *Scope, input tf.Output, window_size int64, stride int64, ...) (spectrogram tf.Output)
- func AudioSummary(scope *Scope, tag tf.Output, tensor tf.Output, sample_rate float32, ...) (summary tf.Output)
- func AudioSummaryV2(scope *Scope, tag tf.Output, tensor tf.Output, sample_rate tf.Output, ...) (summary tf.Output)
- func AutoShardDataset(scope *Scope, input_dataset tf.Output, num_workers tf.Output, index tf.Output, ...) (handle tf.Output)
- func AvgPool(scope *Scope, value tf.Output, ksize []int64, strides []int64, padding string, ...) (output tf.Output)
- func AvgPool3D(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, ...) (output tf.Output)
- func AvgPool3DGrad(scope *Scope, orig_input_shape tf.Output, grad tf.Output, ksize []int64, ...) (output tf.Output)
- func AvgPoolGrad(scope *Scope, orig_input_shape tf.Output, grad tf.Output, ksize []int64, ...) (output tf.Output)
- func Batch(scope *Scope, in_tensors []tf.Output, num_batch_threads int64, ...) (batched_tensors []tf.Output, batch_index tf.Output, id tf.Output)
- func BatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, ...) (handle tf.Output)
- func BatchDatasetV2(scope *Scope, input_dataset tf.Output, batch_size tf.Output, ...) (handle tf.Output)
- func BatchMatMul(scope *Scope, x tf.Output, y tf.Output, optional ...BatchMatMulAttr) (output tf.Output)
- func BatchMatMulV2(scope *Scope, x tf.Output, y tf.Output, optional ...BatchMatMulV2Attr) (output tf.Output)
- func BatchMatMulV3(scope *Scope, x tf.Output, y tf.Output, Tout tf.DataType, ...) (output tf.Output)
- func BatchNormWithGlobalNormalization(scope *Scope, t tf.Output, m tf.Output, v tf.Output, beta tf.Output, ...) (result tf.Output)
- func BatchNormWithGlobalNormalizationGrad(scope *Scope, t tf.Output, m tf.Output, v tf.Output, gamma tf.Output, ...) (dx tf.Output, dm tf.Output, dv tf.Output, db tf.Output, dg tf.Output)
- func BatchToSpace(scope *Scope, input tf.Output, crops tf.Output, block_size int64) (output tf.Output)
- func BatchToSpaceND(scope *Scope, input tf.Output, block_shape tf.Output, crops tf.Output) (output tf.Output)
- func Betainc(scope *Scope, a tf.Output, b tf.Output, x tf.Output) (z tf.Output)
- func BiasAdd(scope *Scope, value tf.Output, bias tf.Output, optional ...BiasAddAttr) (output tf.Output)
- func BiasAddGrad(scope *Scope, out_backprop tf.Output, optional ...BiasAddGradAttr) (output tf.Output)
- func BiasAddV1(scope *Scope, value tf.Output, bias tf.Output) (output tf.Output)
- func Bincount(scope *Scope, arr tf.Output, size tf.Output, weights tf.Output) (bins tf.Output)
- func Bitcast(scope *Scope, input tf.Output, type_ tf.DataType) (output tf.Output)
- func BitwiseAnd(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func BitwiseOr(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func BitwiseXor(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func BlockLSTM(scope *Scope, seq_len_max tf.Output, x tf.Output, cs_prev tf.Output, ...) (i tf.Output, cs tf.Output, f tf.Output, o tf.Output, ci tf.Output, ...)
- func BlockLSTMGrad(scope *Scope, seq_len_max tf.Output, x tf.Output, cs_prev tf.Output, ...) (x_grad tf.Output, cs_prev_grad tf.Output, h_prev_grad tf.Output, ...)
- func BlockLSTMGradV2(scope *Scope, seq_len_max tf.Output, x tf.Output, cs_prev tf.Output, ...) (x_grad tf.Output, cs_prev_grad tf.Output, h_prev_grad tf.Output, ...)
- func BlockLSTMV2(scope *Scope, seq_len_max tf.Output, x tf.Output, cs_prev tf.Output, ...) (i tf.Output, cs tf.Output, f tf.Output, o tf.Output, ci tf.Output, ...)
- func BoostedTreesAggregateStats(scope *Scope, node_ids tf.Output, gradients tf.Output, hessians tf.Output, ...) (stats_summary tf.Output)
- func BoostedTreesBucketize(scope *Scope, float_values []tf.Output, bucket_boundaries []tf.Output) (buckets []tf.Output)
- func BoostedTreesCalculateBestFeatureSplit(scope *Scope, node_id_range tf.Output, stats_summary tf.Output, l1 tf.Output, ...) (node_ids tf.Output, gains tf.Output, feature_dimensions tf.Output, ...)
- func BoostedTreesCalculateBestFeatureSplitV2(scope *Scope, node_id_range tf.Output, stats_summaries_list []tf.Output, ...) (node_ids tf.Output, gains tf.Output, feature_ids tf.Output, ...)
- func BoostedTreesCalculateBestGainsPerFeature(scope *Scope, node_id_range tf.Output, stats_summary_list []tf.Output, ...) (node_ids_list []tf.Output, gains_list []tf.Output, thresholds_list []tf.Output, ...)
- func BoostedTreesCenterBias(scope *Scope, tree_ensemble_handle tf.Output, mean_gradients tf.Output, ...) (continue_centering tf.Output)
- func BoostedTreesCreateEnsemble(scope *Scope, tree_ensemble_handle tf.Output, stamp_token tf.Output, ...) (o *tf.Operation)
- func BoostedTreesCreateQuantileStreamResource(scope *Scope, quantile_stream_resource_handle tf.Output, epsilon tf.Output, ...) (o *tf.Operation)
- func BoostedTreesDeserializeEnsemble(scope *Scope, tree_ensemble_handle tf.Output, stamp_token tf.Output, ...) (o *tf.Operation)
- func BoostedTreesEnsembleResourceHandleOp(scope *Scope, optional ...BoostedTreesEnsembleResourceHandleOpAttr) (resource tf.Output)
- func BoostedTreesExampleDebugOutputs(scope *Scope, tree_ensemble_handle tf.Output, bucketized_features []tf.Output, ...) (examples_debug_outputs_serialized tf.Output)
- func BoostedTreesFlushQuantileSummaries(scope *Scope, quantile_stream_resource_handle tf.Output, num_features int64) (summaries []tf.Output)
- func BoostedTreesGetEnsembleStates(scope *Scope, tree_ensemble_handle tf.Output) (stamp_token tf.Output, num_trees tf.Output, num_finalized_trees tf.Output, ...)
- func BoostedTreesMakeQuantileSummaries(scope *Scope, float_values []tf.Output, example_weights tf.Output, ...) (summaries []tf.Output)
- func BoostedTreesMakeStatsSummary(scope *Scope, node_ids tf.Output, gradients tf.Output, hessians tf.Output, ...) (stats_summary tf.Output)
- func BoostedTreesPredict(scope *Scope, tree_ensemble_handle tf.Output, bucketized_features []tf.Output, ...) (logits tf.Output)
- func BoostedTreesQuantileStreamResourceAddSummaries(scope *Scope, quantile_stream_resource_handle tf.Output, summaries []tf.Output) (o *tf.Operation)
- func BoostedTreesQuantileStreamResourceDeserialize(scope *Scope, quantile_stream_resource_handle tf.Output, ...) (o *tf.Operation)
- func BoostedTreesQuantileStreamResourceFlush(scope *Scope, quantile_stream_resource_handle tf.Output, num_buckets tf.Output, ...) (o *tf.Operation)
- func BoostedTreesQuantileStreamResourceGetBucketBoundaries(scope *Scope, quantile_stream_resource_handle tf.Output, num_features int64) (bucket_boundaries []tf.Output)
- func BoostedTreesQuantileStreamResourceHandleOp(scope *Scope, optional ...BoostedTreesQuantileStreamResourceHandleOpAttr) (resource tf.Output)
- func BoostedTreesSerializeEnsemble(scope *Scope, tree_ensemble_handle tf.Output) (stamp_token tf.Output, tree_ensemble_serialized tf.Output)
- func BoostedTreesSparseAggregateStats(scope *Scope, node_ids tf.Output, gradients tf.Output, hessians tf.Output, ...) (stats_summary_indices tf.Output, stats_summary_values tf.Output, ...)
- func BoostedTreesSparseCalculateBestFeatureSplit(scope *Scope, node_id_range tf.Output, stats_summary_indices tf.Output, ...) (node_ids tf.Output, gains tf.Output, feature_dimensions tf.Output, ...)
- func BoostedTreesTrainingPredict(scope *Scope, tree_ensemble_handle tf.Output, cached_tree_ids tf.Output, ...) (partial_logits tf.Output, tree_ids tf.Output, node_ids tf.Output)
- func BoostedTreesUpdateEnsemble(scope *Scope, tree_ensemble_handle tf.Output, feature_ids tf.Output, ...) (o *tf.Operation)
- func BoostedTreesUpdateEnsembleV2(scope *Scope, tree_ensemble_handle tf.Output, feature_ids []tf.Output, ...) (o *tf.Operation)
- func BroadcastArgs(scope *Scope, s0 tf.Output, s1 tf.Output) (r0 tf.Output)
- func BroadcastGradientArgs(scope *Scope, s0 tf.Output, s1 tf.Output) (r0 tf.Output, r1 tf.Output)
- func BroadcastTo(scope *Scope, input tf.Output, shape tf.Output) (output tf.Output)
- func Bucketize(scope *Scope, input tf.Output, boundaries []float32) (output tf.Output)
- func BytesProducedStatsDataset(scope *Scope, input_dataset tf.Output, tag tf.Output, ...) (handle tf.Output)
- func CSRSparseMatrixComponents(scope *Scope, csr_sparse_matrix tf.Output, index tf.Output, type_ tf.DataType) (row_ptrs tf.Output, col_inds tf.Output, values tf.Output)
- func CSRSparseMatrixToDense(scope *Scope, sparse_input tf.Output, type_ tf.DataType) (dense_output tf.Output)
- func CSRSparseMatrixToSparseTensor(scope *Scope, sparse_matrix tf.Output, type_ tf.DataType) (indices tf.Output, values tf.Output, dense_shape tf.Output)
- func CTCBeamSearchDecoder(scope *Scope, inputs tf.Output, sequence_length tf.Output, beam_width int64, ...) (decoded_indices []tf.Output, decoded_values []tf.Output, ...)
- func CTCGreedyDecoder(scope *Scope, inputs tf.Output, sequence_length tf.Output, ...) (decoded_indices tf.Output, decoded_values tf.Output, decoded_shape tf.Output, ...)
- func CTCLoss(scope *Scope, inputs tf.Output, labels_indices tf.Output, ...) (loss tf.Output, gradient tf.Output)
- func CTCLossV2(scope *Scope, inputs tf.Output, labels_indices tf.Output, ...) (loss tf.Output, gradient tf.Output)
- func CacheDataset(scope *Scope, input_dataset tf.Output, filename tf.Output, ...) (handle tf.Output)
- func Cast(scope *Scope, x tf.Output, DstT tf.DataType, optional ...CastAttr) (y tf.Output)
- func Ceil(scope *Scope, x tf.Output) (y tf.Output)
- func CheckNumerics(scope *Scope, tensor tf.Output, message string) (output tf.Output)
- func CheckNumericsV2(scope *Scope, tensor tf.Output, message string) (output tf.Output)
- func Cholesky(scope *Scope, input tf.Output) (output tf.Output)
- func CholeskyGrad(scope *Scope, l tf.Output, grad tf.Output) (output tf.Output)
- func ClipByValue(scope *Scope, t tf.Output, clip_value_min tf.Output, clip_value_max tf.Output) (output tf.Output)
- func CollateTPUEmbeddingMemory(scope *Scope, memory_configs []tf.Output) (merged_memory_config tf.Output)
- func CollectiveAllToAllV2(scope *Scope, input tf.Output, group_size tf.Output, group_key tf.Output, ...) (data tf.Output)
- func CollectiveAllToAllV3(scope *Scope, input tf.Output, communicator tf.Output, ...) (data tf.Output)
- func CollectiveAssignGroupV2(scope *Scope, group_assignment tf.Output, device_index tf.Output, ...) (group_size tf.Output, group_key tf.Output)
- func CollectiveBcastRecv(scope *Scope, T tf.DataType, group_size int64, group_key int64, ...) (data tf.Output)
- func CollectiveBcastRecvV2(scope *Scope, group_size tf.Output, group_key tf.Output, ...) (data tf.Output)
- func CollectiveBcastSend(scope *Scope, input tf.Output, group_size int64, group_key int64, ...) (data tf.Output)
- func CollectiveBcastSendV2(scope *Scope, input tf.Output, group_size tf.Output, group_key tf.Output, ...) (data tf.Output)
- func CollectiveGather(scope *Scope, input tf.Output, group_size int64, group_key int64, ...) (data tf.Output)
- func CollectiveGatherV2(scope *Scope, input tf.Output, group_size tf.Output, group_key tf.Output, ...) (data tf.Output)
- func CollectiveInitializeCommunicator(scope *Scope, group_key tf.Output, rank tf.Output, group_size tf.Output, ...) (communicator tf.Output)
- func CollectivePermute(scope *Scope, input tf.Output, source_target_pairs tf.Output) (output tf.Output)
- func CollectiveReduce(scope *Scope, input tf.Output, group_size int64, group_key int64, ...) (data tf.Output)
- func CollectiveReduceScatterV2(scope *Scope, input tf.Output, group_size tf.Output, group_key tf.Output, ...) (data tf.Output)
- func CollectiveReduceV2(scope *Scope, input tf.Output, group_size tf.Output, group_key tf.Output, ...) (data tf.Output)
- func CollectiveReduceV3(scope *Scope, input tf.Output, communicator tf.Output, ...) (data tf.Output)
- func CombinedNonMaxSuppression(scope *Scope, boxes tf.Output, scores tf.Output, ...) (nmsed_boxes tf.Output, nmsed_scores tf.Output, nmsed_classes tf.Output, ...)
- func Complex(scope *Scope, real tf.Output, imag tf.Output, optional ...ComplexAttr) (out tf.Output)
- func ComplexAbs(scope *Scope, x tf.Output, optional ...ComplexAbsAttr) (y tf.Output)
- func CompositeTensorVariantFromComponents(scope *Scope, components []tf.Output, metadata string) (encoded tf.Output)
- func CompositeTensorVariantToComponents(scope *Scope, encoded tf.Output, metadata string, Tcomponents []tf.DataType) (components []tf.Output)
- func CompressElement(scope *Scope, components []tf.Output) (compressed tf.Output)
- func ComputeAccidentalHits(scope *Scope, true_classes tf.Output, sampled_candidates tf.Output, ...) (indices tf.Output, ids tf.Output, weights tf.Output)
- func ComputeBatchSize(scope *Scope, input_dataset tf.Output) (batch_size tf.Output)
- func ComputeDedupDataSize(scope *Scope, config string) (num_elements tf.Output)
- func ComputeDedupDataSizeV2(scope *Scope, config string, embedding_partitions string, ...) (num_elements tf.Output)
- func ComputeDedupDataTupleMask(scope *Scope, config string) (output_shape tf.Output)
- func ComputeDedupDataTupleMaskV2(scope *Scope, config string, embedding_partitions string, ...) (output_shape tf.Output)
- func Concat(scope *Scope, concat_dim tf.Output, values []tf.Output) (output tf.Output)
- func ConcatOffset(scope *Scope, concat_dim tf.Output, shape []tf.Output) (offset []tf.Output)
- func ConcatV2(scope *Scope, values []tf.Output, axis tf.Output) (output tf.Output)
- func ConcatenateDataset(scope *Scope, input_dataset tf.Output, another_dataset tf.Output, ...) (handle tf.Output)
- func ConfigureAndInitializeGlobalTPU(scope *Scope, optional ...ConfigureAndInitializeGlobalTPUAttr) (output tf.Output)
- func ConfigureDistributedTPU(scope *Scope, optional ...ConfigureDistributedTPUAttr) (topology tf.Output)
- func ConfigureTPUEmbedding(scope *Scope, config string) (o *tf.Operation)
- func ConfigureTPUEmbeddingHost(scope *Scope, common_config tf.Output, memory_config tf.Output, config string) (network_config tf.Output)
- func ConfigureTPUEmbeddingMemory(scope *Scope, common_config tf.Output) (memory_config tf.Output)
- func Conj(scope *Scope, input tf.Output) (output tf.Output)
- func ConjugateTranspose(scope *Scope, x tf.Output, perm tf.Output) (y tf.Output)
- func ConnectTPUEmbeddingHosts(scope *Scope, network_configs []tf.Output) (o *tf.Operation)
- func Const(scope *Scope, value interface{}) (output tf.Output)
- func ConsumeMutexLock(scope *Scope, mutex_lock tf.Output) (o *tf.Operation)
- func ControlTrigger(scope *Scope) (o *tf.Operation)
- func Conv(scope *Scope, input tf.Output, filter tf.Output, strides []int64, ...) (output tf.Output)
- func Conv2D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, ...) (output tf.Output)
- func Conv2DBackpropFilter(scope *Scope, input tf.Output, filter_sizes tf.Output, out_backprop tf.Output, ...) (output tf.Output)
- func Conv2DBackpropFilterV2(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, ...) (output tf.Output)
- func Conv2DBackpropInput(scope *Scope, input_sizes tf.Output, filter tf.Output, out_backprop tf.Output, ...) (output tf.Output)
- func Conv2DBackpropInputV2(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, ...) (output tf.Output)
- func Conv3D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, ...) (output tf.Output)
- func Conv3DBackpropFilter(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, ...) (output tf.Output)
- func Conv3DBackpropFilterV2(scope *Scope, input tf.Output, filter_sizes tf.Output, out_backprop tf.Output, ...) (output tf.Output)
- func Conv3DBackpropInput(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, ...) (output tf.Output)
- func Conv3DBackpropInputV2(scope *Scope, input_sizes tf.Output, filter tf.Output, out_backprop tf.Output, ...) (output tf.Output)
- func Copy(scope *Scope, input tf.Output, optional ...CopyAttr) (output tf.Output)
- func CopyHost(scope *Scope, input tf.Output, optional ...CopyHostAttr) (output tf.Output)
- func Cos(scope *Scope, x tf.Output) (y tf.Output)
- func Cosh(scope *Scope, x tf.Output) (y tf.Output)
- func CropAndResize(scope *Scope, image tf.Output, boxes tf.Output, box_ind tf.Output, ...) (crops tf.Output)
- func CropAndResizeGradBoxes(scope *Scope, grads tf.Output, image tf.Output, boxes tf.Output, ...) (output tf.Output)
- func CropAndResizeGradImage(scope *Scope, grads tf.Output, boxes tf.Output, box_ind tf.Output, ...) (output tf.Output)
- func Cross(scope *Scope, a tf.Output, b tf.Output) (product tf.Output)
- func CrossReplicaSum(scope *Scope, input tf.Output, group_assignment tf.Output) (output tf.Output)
- func CudnnRNN(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, ...) (output tf.Output, output_h tf.Output, output_c tf.Output, ...)
- func CudnnRNNBackprop(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, ...) (input_backprop tf.Output, input_h_backprop tf.Output, ...)
- func CudnnRNNBackpropV2(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, ...) (input_backprop tf.Output, input_h_backprop tf.Output, ...)
- func CudnnRNNBackpropV3(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, ...) (input_backprop tf.Output, input_h_backprop tf.Output, ...)
- func CudnnRNNCanonicalToParams(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, ...) (params tf.Output)
- func CudnnRNNCanonicalToParamsV2(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, ...) (params tf.Output)
- func CudnnRNNParamsSize(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, ...) (params_size tf.Output)
- func CudnnRNNParamsToCanonical(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, ...) (weights []tf.Output, biases []tf.Output)
- func CudnnRNNParamsToCanonicalV2(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, ...) (weights []tf.Output, biases []tf.Output)
- func CudnnRNNV2(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, ...) (output tf.Output, output_h tf.Output, output_c tf.Output, ...)
- func CudnnRNNV3(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, ...) (output tf.Output, output_h tf.Output, output_c tf.Output, ...)
- func Cumprod(scope *Scope, x tf.Output, axis tf.Output, optional ...CumprodAttr) (out tf.Output)
- func Cumsum(scope *Scope, x tf.Output, axis tf.Output, optional ...CumsumAttr) (out tf.Output)
- func CumulativeLogsumexp(scope *Scope, x tf.Output, axis tf.Output, optional ...CumulativeLogsumexpAttr) (out tf.Output)
- func DTensorSetGlobalTPUArray(scope *Scope, topology tf.Output) (o *tf.Operation)
- func DataFormatDimMap(scope *Scope, x tf.Output, optional ...DataFormatDimMapAttr) (y tf.Output)
- func DataFormatVecPermute(scope *Scope, x tf.Output, optional ...DataFormatVecPermuteAttr) (y tf.Output)
- func DataServiceDataset(scope *Scope, dataset_id tf.Output, processing_mode tf.Output, ...) (handle tf.Output)
- func DataServiceDatasetV2(scope *Scope, dataset_id tf.Output, processing_mode tf.Output, ...) (handle tf.Output)
- func DatasetCardinality(scope *Scope, input_dataset tf.Output, optional ...DatasetCardinalityAttr) (cardinality tf.Output)
- func DatasetFingerprint(scope *Scope, input_dataset tf.Output) (fingerprint tf.Output)
- func DatasetFromGraph(scope *Scope, graph_def tf.Output) (handle tf.Output)
- func DatasetToGraph(scope *Scope, input_dataset tf.Output, optional ...DatasetToGraphAttr) (graph tf.Output)
- func DatasetToGraphV2(scope *Scope, input_dataset tf.Output, optional ...DatasetToGraphV2Attr) (graph tf.Output)
- func DatasetToSingleElement(scope *Scope, dataset tf.Output, output_types []tf.DataType, ...) (components []tf.Output)
- func DatasetToTFRecord(scope *Scope, input_dataset tf.Output, filename tf.Output, ...) (o *tf.Operation)
- func DebugGradientIdentity(scope *Scope, input tf.Output) (output tf.Output)
- func DebugIdentity(scope *Scope, input tf.Output, optional ...DebugIdentityAttr) (output tf.Output)
- func DebugIdentityV2(scope *Scope, input tf.Output, optional ...DebugIdentityV2Attr) (output tf.Output)
- func DebugIdentityV3(scope *Scope, input tf.Output, optional ...DebugIdentityV3Attr) (output tf.Output)
- func DebugNanCount(scope *Scope, input tf.Output, optional ...DebugNanCountAttr) (output tf.Output)
- func DebugNumericSummary(scope *Scope, input tf.Output, optional ...DebugNumericSummaryAttr) (output tf.Output)
- func DebugNumericSummaryV2(scope *Scope, input tf.Output, optional ...DebugNumericSummaryV2Attr) (output tf.Output)
- func DecodeAndCropJpeg(scope *Scope, contents tf.Output, crop_window tf.Output, ...) (image tf.Output)
- func DecodeBase64(scope *Scope, input tf.Output) (output tf.Output)
- func DecodeBmp(scope *Scope, contents tf.Output, optional ...DecodeBmpAttr) (image tf.Output)
- func DecodeCSV(scope *Scope, records tf.Output, record_defaults []tf.Output, ...) (output []tf.Output)
- func DecodeCompressed(scope *Scope, bytes tf.Output, optional ...DecodeCompressedAttr) (output tf.Output)
- func DecodeGif(scope *Scope, contents tf.Output) (image tf.Output)
- func DecodeImage(scope *Scope, contents tf.Output, optional ...DecodeImageAttr) (image tf.Output)
- func DecodeJSONExample(scope *Scope, json_examples tf.Output) (binary_examples tf.Output)
- func DecodeJpeg(scope *Scope, contents tf.Output, optional ...DecodeJpegAttr) (image tf.Output)
- func DecodePaddedRaw(scope *Scope, input_bytes tf.Output, fixed_length tf.Output, ...) (output tf.Output)
- func DecodePng(scope *Scope, contents tf.Output, optional ...DecodePngAttr) (image tf.Output)
- func DecodeProtoV2(scope *Scope, bytes tf.Output, message_type string, field_names []string, ...) (sizes tf.Output, values []tf.Output)
- func DecodeRaw(scope *Scope, bytes tf.Output, out_type tf.DataType, optional ...DecodeRawAttr) (output tf.Output)
- func DecodeWav(scope *Scope, contents tf.Output, optional ...DecodeWavAttr) (audio tf.Output, sample_rate tf.Output)
- func DeepCopy(scope *Scope, x tf.Output) (y tf.Output)
- func DeleteIterator(scope *Scope, handle tf.Output, deleter tf.Output) (o *tf.Operation)
- func DeleteMultiDeviceIterator(scope *Scope, multi_device_iterator tf.Output, iterators []tf.Output, ...) (o *tf.Operation)
- func DeleteSessionTensor(scope *Scope, handle tf.Output) (o *tf.Operation)
- func DenseBincount(scope *Scope, input tf.Output, size tf.Output, weights tf.Output, ...) (output tf.Output)
- func DenseCountSparseOutput(scope *Scope, values tf.Output, weights tf.Output, binary_output bool, ...) (output_indices tf.Output, output_values tf.Output, ...)
- func DenseToCSRSparseMatrix(scope *Scope, dense_input tf.Output, indices tf.Output) (sparse_output tf.Output)
- func DenseToDenseSetOperation(scope *Scope, set1 tf.Output, set2 tf.Output, set_operation string, ...) (result_indices tf.Output, result_values tf.Output, result_shape tf.Output)
- func DenseToSparseBatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, ...) (handle tf.Output)
- func DenseToSparseSetOperation(scope *Scope, set1 tf.Output, set2_indices tf.Output, set2_values tf.Output, ...) (result_indices tf.Output, result_values tf.Output, result_shape tf.Output)
- func DepthToSpace(scope *Scope, input tf.Output, block_size int64, optional ...DepthToSpaceAttr) (output tf.Output)
- func DepthwiseConv2dNative(scope *Scope, input tf.Output, filter tf.Output, strides []int64, ...) (output tf.Output)
- func DepthwiseConv2dNativeBackpropFilter(scope *Scope, input tf.Output, filter_sizes tf.Output, out_backprop tf.Output, ...) (output tf.Output)
- func DepthwiseConv2dNativeBackpropInput(scope *Scope, input_sizes tf.Output, filter tf.Output, out_backprop tf.Output, ...) (output tf.Output)
- func Dequantize(scope *Scope, input tf.Output, min_range tf.Output, max_range tf.Output, ...) (output tf.Output)
- func DeserializeIterator(scope *Scope, resource_handle tf.Output, serialized tf.Output) (o *tf.Operation)
- func DeserializeManySparse(scope *Scope, serialized_sparse tf.Output, dtype tf.DataType) (sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output)
- func DeserializeSparse(scope *Scope, serialized_sparse tf.Output, dtype tf.DataType) (sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output)
- func DestroyResourceOp(scope *Scope, resource tf.Output, optional ...DestroyResourceOpAttr) (o *tf.Operation)
- func DeviceIndex(scope *Scope, device_names []string) (index tf.Output)
- func Diag(scope *Scope, diagonal tf.Output) (output tf.Output)
- func DiagPart(scope *Scope, input tf.Output) (diagonal tf.Output)
- func Digamma(scope *Scope, x tf.Output) (y tf.Output)
- func Dilation2D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, ...) (output tf.Output)
- func Dilation2DBackpropFilter(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, ...) (filter_backprop tf.Output)
- func Dilation2DBackpropInput(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, ...) (in_backprop tf.Output)
- func DirectedInterleaveDataset(scope *Scope, selector_input_dataset tf.Output, ...) (handle tf.Output)
- func DisableCopyOnRead(scope *Scope, resource tf.Output) (o *tf.Operation)
- func Div(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func DivNoNan(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func DrawBoundingBoxes(scope *Scope, images tf.Output, boxes tf.Output) (output tf.Output)
- func DrawBoundingBoxesV2(scope *Scope, images tf.Output, boxes tf.Output, colors tf.Output) (output tf.Output)
- func DynamicEnqueueTPUEmbeddingArbitraryTensorBatch(scope *Scope, sample_indices_or_row_splits []tf.Output, ...) (o *tf.Operation)
- func DynamicPartition(scope *Scope, data tf.Output, partitions tf.Output, num_partitions int64) (outputs []tf.Output)
- func DynamicStitch(scope *Scope, indices []tf.Output, data []tf.Output) (merged tf.Output)
- func EagerPyFunc(scope *Scope, input []tf.Output, token string, Tout []tf.DataType, ...) (output []tf.Output)
- func EditDistance(scope *Scope, hypothesis_indices tf.Output, hypothesis_values tf.Output, ...) (output tf.Output)
- func Eig(scope *Scope, input tf.Output, Tout tf.DataType, optional ...EigAttr) (e tf.Output, v tf.Output)
- func Einsum(scope *Scope, inputs []tf.Output, equation string) (output tf.Output)
- func Elu(scope *Scope, features tf.Output) (activations tf.Output)
- func EluGrad(scope *Scope, gradients tf.Output, outputs tf.Output) (backprops tf.Output)
- func Empty(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...EmptyAttr) (output tf.Output)
- func EmptyTensorList(scope *Scope, element_shape tf.Output, max_num_elements tf.Output, ...) (handle tf.Output)
- func EmptyTensorMap(scope *Scope) (handle tf.Output)
- func EncodeBase64(scope *Scope, input tf.Output, optional ...EncodeBase64Attr) (output tf.Output)
- func EncodeJpeg(scope *Scope, image tf.Output, optional ...EncodeJpegAttr) (contents tf.Output)
- func EncodeJpegVariableQuality(scope *Scope, images tf.Output, quality tf.Output) (contents tf.Output)
- func EncodePng(scope *Scope, image tf.Output, optional ...EncodePngAttr) (contents tf.Output)
- func EncodeProto(scope *Scope, sizes tf.Output, values []tf.Output, field_names []string, ...) (bytes tf.Output)
- func EncodeWav(scope *Scope, audio tf.Output, sample_rate tf.Output) (contents tf.Output)
- func EnqueueTPUEmbeddingArbitraryTensorBatch(scope *Scope, sample_indices_or_row_splits []tf.Output, ...) (o *tf.Operation)
- func EnqueueTPUEmbeddingBatch(scope *Scope, batch []tf.Output, mode_override tf.Output, ...) (o *tf.Operation)
- func EnqueueTPUEmbeddingIntegerBatch(scope *Scope, batch []tf.Output, mode_override tf.Output, ...) (o *tf.Operation)
- func EnqueueTPUEmbeddingRaggedTensorBatch(scope *Scope, sample_splits []tf.Output, embedding_indices []tf.Output, ...) (o *tf.Operation)
- func EnqueueTPUEmbeddingSparseBatch(scope *Scope, sample_indices []tf.Output, embedding_indices []tf.Output, ...) (o *tf.Operation)
- func EnqueueTPUEmbeddingSparseTensorBatch(scope *Scope, sample_indices []tf.Output, embedding_indices []tf.Output, ...) (o *tf.Operation)
- func EnsureShape(scope *Scope, input tf.Output, shape tf.Shape) (output tf.Output)
- func Enter(scope *Scope, data tf.Output, frame_name string, optional ...EnterAttr) (output tf.Output)
- func Equal(scope *Scope, x tf.Output, y tf.Output, optional ...EqualAttr) (z tf.Output)
- func Erf(scope *Scope, x tf.Output) (y tf.Output)
- func Erfc(scope *Scope, x tf.Output) (y tf.Output)
- func EuclideanNorm(scope *Scope, input tf.Output, axis tf.Output, optional ...EuclideanNormAttr) (output tf.Output)
- func ExecuteTPUEmbeddingPartitioner(scope *Scope, config string) (common_config tf.Output)
- func Exit(scope *Scope, data tf.Output) (output tf.Output)
- func Exp(scope *Scope, x tf.Output) (y tf.Output)
- func ExpandDims(scope *Scope, input tf.Output, axis tf.Output) (output tf.Output)
- func ExperimentalAutoShardDataset(scope *Scope, input_dataset tf.Output, num_workers tf.Output, index tf.Output, ...) (handle tf.Output)
- func ExperimentalBytesProducedStatsDataset(scope *Scope, input_dataset tf.Output, tag tf.Output, ...) (handle tf.Output)
- func ExperimentalDatasetCardinality(scope *Scope, input_dataset tf.Output) (cardinality tf.Output)
- func ExperimentalDatasetToTFRecord(scope *Scope, input_dataset tf.Output, filename tf.Output, ...) (o *tf.Operation)
- func ExperimentalDenseToSparseBatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, ...) (handle tf.Output)
- func ExperimentalDirectedInterleaveDataset(scope *Scope, selector_input_dataset tf.Output, ...) (handle tf.Output)
- func ExperimentalIgnoreErrorsDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, ...) (handle tf.Output)
- func ExperimentalIteratorGetDevice(scope *Scope, resource tf.Output) (device tf.Output)
- func ExperimentalLatencyStatsDataset(scope *Scope, input_dataset tf.Output, tag tf.Output, ...) (handle tf.Output)
- func ExperimentalMaxIntraOpParallelismDataset(scope *Scope, input_dataset tf.Output, max_intra_op_parallelism tf.Output, ...) (handle tf.Output)
- func ExperimentalParseExampleDataset(scope *Scope, input_dataset tf.Output, num_parallel_calls tf.Output, ...) (handle tf.Output)
- func ExperimentalPrivateThreadPoolDataset(scope *Scope, input_dataset tf.Output, num_threads tf.Output, ...) (handle tf.Output)
- func ExperimentalRandomDataset(scope *Scope, seed tf.Output, seed2 tf.Output, output_types []tf.DataType, ...) (handle tf.Output)
- func ExperimentalRebatchDataset(scope *Scope, input_dataset tf.Output, num_replicas tf.Output, ...) (handle tf.Output)
- func ExperimentalSlidingWindowDataset(scope *Scope, input_dataset tf.Output, window_size tf.Output, ...) (handle tf.Output)
- func ExperimentalSqlDataset(scope *Scope, driver_name tf.Output, data_source_name tf.Output, ...) (handle tf.Output)
- func ExperimentalStatsAggregatorHandle(scope *Scope, optional ...ExperimentalStatsAggregatorHandleAttr) (handle tf.Output)
- func ExperimentalStatsAggregatorSummary(scope *Scope, iterator tf.Output) (summary tf.Output)
- func ExperimentalThreadPoolDataset(scope *Scope, input_dataset tf.Output, thread_pool tf.Output, ...) (handle tf.Output)
- func ExperimentalThreadPoolHandle(scope *Scope, num_threads int64, display_name string, ...) (handle tf.Output)
- func ExperimentalUnbatchDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, ...) (handle tf.Output)
- func ExperimentalUniqueDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, ...) (handle tf.Output)
- func Expm1(scope *Scope, x tf.Output) (y tf.Output)
- func ExtractGlimpse(scope *Scope, input tf.Output, size tf.Output, offsets tf.Output, ...) (glimpse tf.Output)
- func ExtractGlimpseV2(scope *Scope, input tf.Output, size tf.Output, offsets tf.Output, ...) (glimpse tf.Output)
- func ExtractImagePatches(scope *Scope, images tf.Output, ksizes []int64, strides []int64, rates []int64, ...) (patches tf.Output)
- func ExtractJpegShape(scope *Scope, contents tf.Output, optional ...ExtractJpegShapeAttr) (image_shape tf.Output)
- func ExtractVolumePatches(scope *Scope, input tf.Output, ksizes []int64, strides []int64, padding string) (patches tf.Output)
- func FFT(scope *Scope, input tf.Output) (output tf.Output)
- func FFT2D(scope *Scope, input tf.Output) (output tf.Output)
- func FFT3D(scope *Scope, input tf.Output) (output tf.Output)
- func FFTND(scope *Scope, input tf.Output, fft_length tf.Output, axes tf.Output) (output tf.Output)
- func FIFOQueueV2(scope *Scope, component_types []tf.DataType, optional ...FIFOQueueV2Attr) (handle tf.Output)
- func Fact(scope *Scope) (fact tf.Output)
- func FakeParam(scope *Scope, dtype tf.DataType, shape tf.Shape) (output tf.Output)
- func FakeQuantWithMinMaxArgs(scope *Scope, inputs tf.Output, optional ...FakeQuantWithMinMaxArgsAttr) (outputs tf.Output)
- func FakeQuantWithMinMaxArgsGradient(scope *Scope, gradients tf.Output, inputs tf.Output, ...) (backprops tf.Output)
- func FakeQuantWithMinMaxVars(scope *Scope, inputs tf.Output, min tf.Output, max tf.Output, ...) (outputs tf.Output)
- func FakeQuantWithMinMaxVarsGradient(scope *Scope, gradients tf.Output, inputs tf.Output, min tf.Output, ...) (backprops_wrt_input tf.Output, backprop_wrt_min tf.Output, ...)
- func FakeQuantWithMinMaxVarsPerChannel(scope *Scope, inputs tf.Output, min tf.Output, max tf.Output, ...) (outputs tf.Output)
- func FakeQuantWithMinMaxVarsPerChannelGradient(scope *Scope, gradients tf.Output, inputs tf.Output, min tf.Output, ...) (backprops_wrt_input tf.Output, backprop_wrt_min tf.Output, ...)
- func FileSystemSetConfiguration(scope *Scope, scheme tf.Output, key tf.Output, value tf.Output) (o *tf.Operation)
- func Fill(scope *Scope, dims tf.Output, value tf.Output) (output tf.Output)
- func FilterByLastComponentDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, ...) (output tf.Output)
- func FinalizeDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, ...) (handle tf.Output)
- func FinalizeTPUEmbedding(scope *Scope, common_config tf.Output, memory_config tf.Output) (o *tf.Operation)
- func FinalizeTPUEmbeddingV2(scope *Scope, common_config tf.Output, memory_config tf.Output) (embedding_partitions tf.Output, hbm_buffers_config tf.Output)
- func Fingerprint(scope *Scope, data tf.Output, method tf.Output) (fingerprint tf.Output)
- func FixedLengthRecordDataset(scope *Scope, filenames tf.Output, header_bytes tf.Output, ...) (handle tf.Output)
- func FixedLengthRecordReaderV2(scope *Scope, record_bytes int64, optional ...FixedLengthRecordReaderV2Attr) (reader_handle tf.Output)
- func FixedUnigramCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, ...) (sampled_candidates tf.Output, true_expected_count tf.Output, ...)
- func Floor(scope *Scope, x tf.Output) (y tf.Output)
- func FloorDiv(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func FloorMod(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func FractionalAvgPool(scope *Scope, value tf.Output, pooling_ratio []float32, ...) (output tf.Output, row_pooling_sequence tf.Output, ...)
- func FractionalAvgPoolGrad(scope *Scope, orig_input_tensor_shape tf.Output, out_backprop tf.Output, ...) (output tf.Output)
- func FractionalMaxPool(scope *Scope, value tf.Output, pooling_ratio []float32, ...) (output tf.Output, row_pooling_sequence tf.Output, ...)
- func FractionalMaxPoolGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, ...) (output tf.Output)
- func FusedBatchNorm(scope *Scope, x tf.Output, scale tf.Output, offset tf.Output, mean tf.Output, ...) (y tf.Output, batch_mean tf.Output, batch_variance tf.Output, ...)
- func FusedBatchNormGrad(scope *Scope, y_backprop tf.Output, x tf.Output, scale tf.Output, ...) (x_backprop tf.Output, scale_backprop tf.Output, offset_backprop tf.Output, ...)
- func FusedBatchNormGradV2(scope *Scope, y_backprop tf.Output, x tf.Output, scale tf.Output, ...) (x_backprop tf.Output, scale_backprop tf.Output, offset_backprop tf.Output, ...)
- func FusedBatchNormGradV3(scope *Scope, y_backprop tf.Output, x tf.Output, scale tf.Output, ...) (x_backprop tf.Output, scale_backprop tf.Output, offset_backprop tf.Output, ...)
- func FusedBatchNormV2(scope *Scope, x tf.Output, scale tf.Output, offset tf.Output, mean tf.Output, ...) (y tf.Output, batch_mean tf.Output, batch_variance tf.Output, ...)
- func FusedBatchNormV3(scope *Scope, x tf.Output, scale tf.Output, offset tf.Output, mean tf.Output, ...) (y tf.Output, batch_mean tf.Output, batch_variance tf.Output, ...)
- func FusedPadConv2D(scope *Scope, input tf.Output, paddings tf.Output, filter tf.Output, ...) (output tf.Output)
- func FusedResizeAndPadConv2D(scope *Scope, input tf.Output, size tf.Output, paddings tf.Output, ...) (output tf.Output)
- func GRUBlockCell(scope *Scope, x tf.Output, h_prev tf.Output, w_ru tf.Output, w_c tf.Output, ...) (r tf.Output, u tf.Output, c tf.Output, h tf.Output)
- func GRUBlockCellGrad(scope *Scope, x tf.Output, h_prev tf.Output, w_ru tf.Output, w_c tf.Output, ...) (d_x tf.Output, d_h_prev tf.Output, d_c_bar tf.Output, d_r_bar_u_bar tf.Output)
- func Gather(scope *Scope, params tf.Output, indices tf.Output, optional ...GatherAttr) (output tf.Output)
- func GatherNd(scope *Scope, params tf.Output, indices tf.Output, optional ...GatherNdAttr) (output tf.Output)
- func GatherV2(scope *Scope, params tf.Output, indices tf.Output, axis tf.Output, ...) (output tf.Output)
- func GenerateBoundingBoxProposals(scope *Scope, scores tf.Output, bbox_deltas tf.Output, image_info tf.Output, ...) (rois tf.Output, roi_probabilities tf.Output)
- func GenerateVocabRemapping(scope *Scope, new_vocab_file tf.Output, old_vocab_file tf.Output, ...) (remapping tf.Output, num_present tf.Output)
- func GetElementAtIndex(scope *Scope, dataset tf.Output, index tf.Output, output_types []tf.DataType, ...) (components []tf.Output)
- func GetOptions(scope *Scope, input_dataset tf.Output) (serialized_options tf.Output)
- func GetSessionHandle(scope *Scope, value tf.Output) (handle tf.Output)
- func GetSessionHandleV2(scope *Scope, value tf.Output) (handle tf.Output)
- func GetSessionTensor(scope *Scope, handle tf.Output, dtype tf.DataType) (value tf.Output)
- func GetTpuTaskId(scope *Scope) (tpu_task_id tf.Output)
- func Gradients(scope *Scope, y []tf.Output, x []tf.Output, dx ...tf.Output) (output []tf.Output)
- func Greater(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func GreaterEqual(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func GuaranteeConst(scope *Scope, input tf.Output) (output tf.Output)
- func HSVToRGB(scope *Scope, images tf.Output) (output tf.Output)
- func HashTableV2(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, ...) (table_handle tf.Output)
- func HistogramFixedWidth(scope *Scope, values tf.Output, value_range tf.Output, nbins tf.Output, ...) (out tf.Output)
- func HistogramSummary(scope *Scope, tag tf.Output, values tf.Output) (summary tf.Output)
- func HostConst(scope *Scope, value tf.Tensor, dtype tf.DataType) (output tf.Output)
- func IFFT(scope *Scope, input tf.Output) (output tf.Output)
- func IFFT2D(scope *Scope, input tf.Output) (output tf.Output)
- func IFFT3D(scope *Scope, input tf.Output) (output tf.Output)
- func IFFTND(scope *Scope, input tf.Output, fft_length tf.Output, axes tf.Output) (output tf.Output)
- func IRFFT(scope *Scope, input tf.Output, fft_length tf.Output, optional ...IRFFTAttr) (output tf.Output)
- func IRFFT2D(scope *Scope, input tf.Output, fft_length tf.Output, optional ...IRFFT2DAttr) (output tf.Output)
- func IRFFT3D(scope *Scope, input tf.Output, fft_length tf.Output, optional ...IRFFT3DAttr) (output tf.Output)
- func IRFFTND(scope *Scope, input tf.Output, fft_length tf.Output, axes tf.Output, ...) (output tf.Output)
- func Identity(scope *Scope, input tf.Output) (output tf.Output)
- func IdentityN(scope *Scope, input []tf.Output) (output []tf.Output)
- func IdentityReaderV2(scope *Scope, optional ...IdentityReaderV2Attr) (reader_handle tf.Output)
- func Igamma(scope *Scope, a tf.Output, x tf.Output) (z tf.Output)
- func IgammaGradA(scope *Scope, a tf.Output, x tf.Output) (z tf.Output)
- func Igammac(scope *Scope, a tf.Output, x tf.Output) (z tf.Output)
- func IgnoreErrorsDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, ...) (handle tf.Output)
- func Imag(scope *Scope, input tf.Output, optional ...ImagAttr) (output tf.Output)
- func ImageProjectiveTransformV2(scope *Scope, images tf.Output, transforms tf.Output, output_shape tf.Output, ...) (transformed_images tf.Output)
- func ImageProjectiveTransformV3(scope *Scope, images tf.Output, transforms tf.Output, output_shape tf.Output, ...) (transformed_images tf.Output)
- func ImageSummary(scope *Scope, tag tf.Output, tensor tf.Output, optional ...ImageSummaryAttr) (summary tf.Output)
- func ImmutableConst(scope *Scope, dtype tf.DataType, shape tf.Shape, memory_region_name string) (tensor tf.Output)
- func InTopK(scope *Scope, predictions tf.Output, targets tf.Output, k int64) (precision tf.Output)
- func InTopKV2(scope *Scope, predictions tf.Output, targets tf.Output, k tf.Output) (precision tf.Output)
- func InfeedDequeue(scope *Scope, dtype tf.DataType, shape tf.Shape) (output tf.Output)
- func InfeedDequeueTuple(scope *Scope, dtypes []tf.DataType, shapes []tf.Shape) (outputs []tf.Output)
- func InfeedEnqueue(scope *Scope, input tf.Output, optional ...InfeedEnqueueAttr) (o *tf.Operation)
- func InfeedEnqueuePrelinearizedBuffer(scope *Scope, input tf.Output, ...) (o *tf.Operation)
- func InfeedEnqueueTuple(scope *Scope, inputs []tf.Output, shapes []tf.Shape, ...) (o *tf.Operation)
- func InitializeTableFromTextFileV2(scope *Scope, table_handle tf.Output, filename tf.Output, key_index int64, ...) (o *tf.Operation)
- func InitializeTableV2(scope *Scope, table_handle tf.Output, keys tf.Output, values tf.Output) (o *tf.Operation)
- func InplaceAdd(scope *Scope, x tf.Output, i tf.Output, v tf.Output) (y tf.Output)
- func InplaceSub(scope *Scope, x tf.Output, i tf.Output, v tf.Output) (y tf.Output)
- func InplaceUpdate(scope *Scope, x tf.Output, i tf.Output, v tf.Output) (y tf.Output)
- func Inv(scope *Scope, x tf.Output) (y tf.Output)
- func InvGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output)
- func Invert(scope *Scope, x tf.Output) (y tf.Output)
- func InvertPermutation(scope *Scope, x tf.Output) (y tf.Output)
- func IsBoostedTreesEnsembleInitialized(scope *Scope, tree_ensemble_handle tf.Output) (is_initialized tf.Output)
- func IsBoostedTreesQuantileStreamResourceInitialized(scope *Scope, quantile_stream_resource_handle tf.Output) (is_initialized tf.Output)
- func IsFinite(scope *Scope, x tf.Output) (y tf.Output)
- func IsInf(scope *Scope, x tf.Output) (y tf.Output)
- func IsNan(scope *Scope, x tf.Output) (y tf.Output)
- func IsTPUEmbeddingInitialized(scope *Scope, optional ...IsTPUEmbeddingInitializedAttr) (is_tpu_embedding_initialized tf.Output)
- func IsotonicRegression(scope *Scope, input tf.Output, optional ...IsotonicRegressionAttr) (output tf.Output, segments tf.Output)
- func Iterator(scope *Scope, shared_name string, container string, output_types []tf.DataType, ...) (handle tf.Output)
- func IteratorFromStringHandle(scope *Scope, string_handle tf.Output, ...) (resource_handle tf.Output)
- func IteratorGetDevice(scope *Scope, resource tf.Output) (device tf.Output)
- func IteratorGetModelProto(scope *Scope, iterator tf.Output) (model_proto tf.Output)
- func IteratorGetNext(scope *Scope, iterator tf.Output, output_types []tf.DataType, ...) (components []tf.Output)
- func IteratorGetNextAsOptional(scope *Scope, iterator tf.Output, output_types []tf.DataType, ...) (optional tf.Output)
- func IteratorGetNextSync(scope *Scope, iterator tf.Output, output_types []tf.DataType, ...) (components []tf.Output)
- func IteratorToStringHandle(scope *Scope, resource_handle tf.Output) (string_handle tf.Output)
- func KMC2ChainInitialization(scope *Scope, distances tf.Output, seed tf.Output) (index tf.Output)
- func KmeansPlusPlusInitialization(scope *Scope, points tf.Output, num_to_sample tf.Output, seed tf.Output, ...) (samples tf.Output)
- func KthOrderStatistic(scope *Scope, input tf.Output, k int64) (output tf.Output)
- func L2Loss(scope *Scope, t tf.Output) (output tf.Output)
- func LMDBDataset(scope *Scope, filenames tf.Output, output_types []tf.DataType, ...) (handle tf.Output)
- func LRN(scope *Scope, input tf.Output, optional ...LRNAttr) (output tf.Output)
- func LRNGrad(scope *Scope, input_grads tf.Output, input_image tf.Output, ...) (output tf.Output)
- func LSTMBlockCell(scope *Scope, x tf.Output, cs_prev tf.Output, h_prev tf.Output, w tf.Output, ...) (i tf.Output, cs tf.Output, f tf.Output, o tf.Output, ci tf.Output, ...)
- func LSTMBlockCellGrad(scope *Scope, x tf.Output, cs_prev tf.Output, h_prev tf.Output, w tf.Output, ...) (cs_prev_grad tf.Output, dicfo tf.Output, wci_grad tf.Output, ...)
- func LatencyStatsDataset(scope *Scope, input_dataset tf.Output, tag tf.Output, ...) (handle tf.Output)
- func LeakyRelu(scope *Scope, features tf.Output, optional ...LeakyReluAttr) (activations tf.Output)
- func LeakyReluGrad(scope *Scope, gradients tf.Output, features tf.Output, ...) (backprops tf.Output)
- func LearnedUnigramCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, ...) (sampled_candidates tf.Output, true_expected_count tf.Output, ...)
- func LeftShift(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func Less(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func LessEqual(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func Lgamma(scope *Scope, x tf.Output) (y tf.Output)
- func LinSpace(scope *Scope, start tf.Output, stop tf.Output, num tf.Output) (output tf.Output)
- func ListDataset(scope *Scope, tensors []tf.Output, output_types []tf.DataType, ...) (handle tf.Output)
- func ListDiff(scope *Scope, x tf.Output, y tf.Output, optional ...ListDiffAttr) (out tf.Output, idx tf.Output)
- func LoadAllTPUEmbeddingParameters(scope *Scope, parameters []tf.Output, auxiliary1 []tf.Output, ...) (o *tf.Operation)
- func LoadAndRemapMatrix(scope *Scope, ckpt_path tf.Output, old_tensor_name tf.Output, ...) (output_matrix tf.Output)
- func LoadTPUEmbeddingADAMParameters(scope *Scope, parameters tf.Output, momenta tf.Output, velocities tf.Output, ...) (o *tf.Operation)
- func LoadTPUEmbeddingAdadeltaParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, updates tf.Output, ...) (o *tf.Operation)
- func LoadTPUEmbeddingAdagradMomentumParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, momenta tf.Output, ...) (o *tf.Operation)
- func LoadTPUEmbeddingAdagradParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, num_shards int64, ...) (o *tf.Operation)
- func LoadTPUEmbeddingCenteredRMSPropParameters(scope *Scope, parameters tf.Output, ms tf.Output, mom tf.Output, mg tf.Output, ...) (o *tf.Operation)
- func LoadTPUEmbeddingFTRLParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, linears tf.Output, ...) (o *tf.Operation)
- func LoadTPUEmbeddingFrequencyEstimatorParameters(scope *Scope, parameters tf.Output, last_hit_step tf.Output, num_shards int64, ...) (o *tf.Operation)
- func LoadTPUEmbeddingMDLAdagradLightParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, weights tf.Output, ...) (o *tf.Operation)
- func LoadTPUEmbeddingMomentumParameters(scope *Scope, parameters tf.Output, momenta tf.Output, num_shards int64, ...) (o *tf.Operation)
- func LoadTPUEmbeddingProximalAdagradParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, num_shards int64, ...) (o *tf.Operation)
- func LoadTPUEmbeddingRMSPropParameters(scope *Scope, parameters tf.Output, ms tf.Output, mom tf.Output, ...) (o *tf.Operation)
- func LoadTPUEmbeddingStochasticGradientDescentParameters(scope *Scope, parameters tf.Output, num_shards int64, shard_id int64, ...) (o *tf.Operation)
- func Log(scope *Scope, x tf.Output) (y tf.Output)
- func Log1p(scope *Scope, x tf.Output) (y tf.Output)
- func LogMatrixDeterminant(scope *Scope, input tf.Output) (sign tf.Output, log_abs_determinant tf.Output)
- func LogSoftmax(scope *Scope, logits tf.Output) (logsoftmax tf.Output)
- func LogUniformCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, ...) (sampled_candidates tf.Output, true_expected_count tf.Output, ...)
- func LogicalAnd(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func LogicalNot(scope *Scope, x tf.Output) (y tf.Output)
- func LogicalOr(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func LookupTableExportV2(scope *Scope, table_handle tf.Output, Tkeys tf.DataType, Tvalues tf.DataType) (keys tf.Output, values tf.Output)
- func LookupTableFindV2(scope *Scope, table_handle tf.Output, keys tf.Output, default_value tf.Output) (values tf.Output)
- func LookupTableImportV2(scope *Scope, table_handle tf.Output, keys tf.Output, values tf.Output) (o *tf.Operation)
- func LookupTableInsertV2(scope *Scope, table_handle tf.Output, keys tf.Output, values tf.Output) (o *tf.Operation)
- func LookupTableRemoveV2(scope *Scope, table_handle tf.Output, keys tf.Output) (o *tf.Operation)
- func LookupTableSizeV2(scope *Scope, table_handle tf.Output) (size tf.Output)
- func LoopCond(scope *Scope, input tf.Output) (output tf.Output)
- func LowerBound(scope *Scope, sorted_inputs tf.Output, values tf.Output, ...) (output tf.Output)
- func Lu(scope *Scope, input tf.Output, optional ...LuAttr) (lu tf.Output, p tf.Output)
- func MakeIterator(scope *Scope, dataset tf.Output, iterator tf.Output) (o *tf.Operation)
- func MakeUnique(scope *Scope, input tf.Output) (output tf.Output)
- func MapClear(scope *Scope, dtypes []tf.DataType, optional ...MapClearAttr) (o *tf.Operation)
- func MapIncompleteSize(scope *Scope, dtypes []tf.DataType, optional ...MapIncompleteSizeAttr) (size tf.Output)
- func MapPeek(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, ...) (values []tf.Output)
- func MapSize(scope *Scope, dtypes []tf.DataType, optional ...MapSizeAttr) (size tf.Output)
- func MapStage(scope *Scope, key tf.Output, indices tf.Output, values []tf.Output, ...) (o *tf.Operation)
- func MapUnstage(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, ...) (values []tf.Output)
- func MapUnstageNoKey(scope *Scope, indices tf.Output, dtypes []tf.DataType, ...) (key tf.Output, values []tf.Output)
- func MatMul(scope *Scope, a tf.Output, b tf.Output, optional ...MatMulAttr) (product tf.Output)
- func MatchingFiles(scope *Scope, pattern tf.Output) (filenames tf.Output)
- func MatrixBandPart(scope *Scope, input tf.Output, num_lower tf.Output, num_upper tf.Output) (band tf.Output)
- func MatrixDeterminant(scope *Scope, input tf.Output) (output tf.Output)
- func MatrixDiag(scope *Scope, diagonal tf.Output) (output tf.Output)
- func MatrixDiagPart(scope *Scope, input tf.Output) (diagonal tf.Output)
- func MatrixDiagPartV2(scope *Scope, input tf.Output, k tf.Output, padding_value tf.Output) (diagonal tf.Output)
- func MatrixDiagPartV3(scope *Scope, input tf.Output, k tf.Output, padding_value tf.Output, ...) (diagonal tf.Output)
- func MatrixDiagV2(scope *Scope, diagonal tf.Output, k tf.Output, num_rows tf.Output, ...) (output tf.Output)
- func MatrixDiagV3(scope *Scope, diagonal tf.Output, k tf.Output, num_rows tf.Output, ...) (output tf.Output)
- func MatrixExponential(scope *Scope, input tf.Output) (output tf.Output)
- func MatrixInverse(scope *Scope, input tf.Output, optional ...MatrixInverseAttr) (output tf.Output)
- func MatrixLogarithm(scope *Scope, input tf.Output) (output tf.Output)
- func MatrixSetDiag(scope *Scope, input tf.Output, diagonal tf.Output) (output tf.Output)
- func MatrixSetDiagV2(scope *Scope, input tf.Output, diagonal tf.Output, k tf.Output) (output tf.Output)
- func MatrixSetDiagV3(scope *Scope, input tf.Output, diagonal tf.Output, k tf.Output, ...) (output tf.Output)
- func MatrixSolve(scope *Scope, matrix tf.Output, rhs tf.Output, optional ...MatrixSolveAttr) (output tf.Output)
- func MatrixSolveLs(scope *Scope, matrix tf.Output, rhs tf.Output, l2_regularizer tf.Output, ...) (output tf.Output)
- func MatrixSquareRoot(scope *Scope, input tf.Output) (output tf.Output)
- func MatrixTriangularSolve(scope *Scope, matrix tf.Output, rhs tf.Output, ...) (output tf.Output)
- func Max(scope *Scope, input tf.Output, axis tf.Output, optional ...MaxAttr) (output tf.Output)
- func MaxIntraOpParallelismDataset(scope *Scope, input_dataset tf.Output, max_intra_op_parallelism tf.Output, ...) (handle tf.Output)
- func MaxPool(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, ...) (output tf.Output)
- func MaxPool3D(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, ...) (output tf.Output)
- func MaxPool3DGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ...) (output tf.Output)
- func MaxPool3DGradGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ...) (output tf.Output)
- func MaxPoolGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ...) (output tf.Output)
- func MaxPoolGradGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ...) (output tf.Output)
- func MaxPoolGradGradV2(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ...) (output tf.Output)
- func MaxPoolGradGradWithArgmax(scope *Scope, input tf.Output, grad tf.Output, argmax tf.Output, ksize []int64, ...) (output tf.Output)
- func MaxPoolGradV2(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ...) (output tf.Output)
- func MaxPoolGradWithArgmax(scope *Scope, input tf.Output, grad tf.Output, argmax tf.Output, ksize []int64, ...) (output tf.Output)
- func MaxPoolV2(scope *Scope, input tf.Output, ksize tf.Output, strides tf.Output, ...) (output tf.Output)
- func MaxPoolWithArgmax(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, ...) (output tf.Output, argmax tf.Output)
- func Maximum(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func Mean(scope *Scope, input tf.Output, axis tf.Output, optional ...MeanAttr) (output tf.Output)
- func Merge(scope *Scope, inputs []tf.Output) (output tf.Output, value_index tf.Output)
- func MergeDedupData(scope *Scope, integer_tensor tf.Output, float_tensor tf.Output, ...) (output tf.Output)
- func MergeSummary(scope *Scope, inputs []tf.Output) (summary tf.Output)
- func MergeV2Checkpoints(scope *Scope, checkpoint_prefixes tf.Output, destination_prefix tf.Output, ...) (o *tf.Operation)
- func Mfcc(scope *Scope, spectrogram tf.Output, sample_rate tf.Output, ...) (output tf.Output)
- func Min(scope *Scope, input tf.Output, axis tf.Output, optional ...MinAttr) (output tf.Output)
- func Minimum(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func MirrorPad(scope *Scope, input tf.Output, paddings tf.Output, mode string) (output tf.Output)
- func MirrorPadGrad(scope *Scope, input tf.Output, paddings tf.Output, mode string) (output tf.Output)
- func MlirPassthroughOp(scope *Scope, inputs []tf.Output, mlir_module string, Toutputs []tf.DataType) (outputs []tf.Output)
- func Mod(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func ModelDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, ...) (handle tf.Output)
- func Mul(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func MulNoNan(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func MultiDeviceIterator(scope *Scope, devices []string, shared_name string, container string, ...) (handle tf.Output)
- func MultiDeviceIteratorFromStringHandle(scope *Scope, string_handle tf.Output, ...) (multi_device_iterator tf.Output)
- func MultiDeviceIteratorGetNextFromShard(scope *Scope, multi_device_iterator tf.Output, shard_num tf.Output, ...) (components []tf.Output)
- func MultiDeviceIteratorInit(scope *Scope, dataset tf.Output, multi_device_iterator tf.Output, ...) (incarnation_id tf.Output)
- func MultiDeviceIteratorToStringHandle(scope *Scope, multi_device_iterator tf.Output) (string_handle tf.Output)
- func Multinomial(scope *Scope, logits tf.Output, num_samples tf.Output, ...) (output tf.Output)
- func MutableDenseHashTableV2(scope *Scope, empty_key tf.Output, deleted_key tf.Output, ...) (table_handle tf.Output)
- func MutableHashTableOfTensorsV2(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, ...) (table_handle tf.Output)
- func MutableHashTableV2(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, ...) (table_handle tf.Output)
- func MutexLock(scope *Scope, mutex tf.Output) (mutex_lock tf.Output)
- func MutexV2(scope *Scope, optional ...MutexV2Attr) (resource tf.Output)
- func NcclAllReduce(scope *Scope, input tf.Output, reduction string, num_devices int64, ...) (data tf.Output)
- func NcclBroadcast(scope *Scope, input tf.Output, shape tf.Shape) (output tf.Output)
- func NcclReduce(scope *Scope, input []tf.Output, reduction string) (data tf.Output)
- func NearestNeighbors(scope *Scope, points tf.Output, centers tf.Output, k tf.Output) (nearest_center_indices tf.Output, nearest_center_distances tf.Output)
- func Neg(scope *Scope, x tf.Output) (y tf.Output)
- func NextAfter(scope *Scope, x1 tf.Output, x2 tf.Output) (output tf.Output)
- func NextIteration(scope *Scope, data tf.Output) (output tf.Output)
- func NoOp(scope *Scope) (o *tf.Operation)
- func NonDeterministicInts(scope *Scope, shape tf.Output, optional ...NonDeterministicIntsAttr) (output tf.Output)
- func NonMaxSuppression(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, ...) (selected_indices tf.Output)
- func NonMaxSuppressionV2(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, ...) (selected_indices tf.Output)
- func NonMaxSuppressionV3(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, ...) (selected_indices tf.Output)
- func NonMaxSuppressionV4(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, ...) (selected_indices tf.Output, valid_outputs tf.Output)
- func NonMaxSuppressionV5(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, ...) (selected_indices tf.Output, selected_scores tf.Output, valid_outputs tf.Output)
- func NonMaxSuppressionWithOverlaps(scope *Scope, overlaps tf.Output, scores tf.Output, max_output_size tf.Output, ...) (selected_indices tf.Output)
- func NotEqual(scope *Scope, x tf.Output, y tf.Output, optional ...NotEqualAttr) (z tf.Output)
- func NthElement(scope *Scope, input tf.Output, n tf.Output, optional ...NthElementAttr) (values tf.Output)
- func OneHot(scope *Scope, indices tf.Output, depth tf.Output, on_value tf.Output, ...) (output tf.Output)
- func OnesLike(scope *Scope, x tf.Output) (y tf.Output)
- func OptimizeDataset(scope *Scope, input_dataset tf.Output, optimizations tf.Output, ...) (handle tf.Output)
- func OptimizeDatasetV2(scope *Scope, input_dataset tf.Output, optimizations_enabled tf.Output, ...) (handle tf.Output)
- func OptionalFromValue(scope *Scope, components []tf.Output) (optional tf.Output)
- func OptionalGetValue(scope *Scope, optional tf.Output, output_types []tf.DataType, ...) (components []tf.Output)
- func OptionalHasValue(scope *Scope, optional tf.Output) (has_value tf.Output)
- func OptionalNone(scope *Scope) (optional tf.Output)
- func OptionsDataset(scope *Scope, input_dataset tf.Output, serialized_options string, ...) (handle tf.Output)
- func OrderedMapClear(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapClearAttr) (o *tf.Operation)
- func OrderedMapIncompleteSize(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapIncompleteSizeAttr) (size tf.Output)
- func OrderedMapPeek(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, ...) (values []tf.Output)
- func OrderedMapSize(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapSizeAttr) (size tf.Output)
- func OrderedMapStage(scope *Scope, key tf.Output, indices tf.Output, values []tf.Output, ...) (o *tf.Operation)
- func OrderedMapUnstage(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, ...) (values []tf.Output)
- func OrderedMapUnstageNoKey(scope *Scope, indices tf.Output, dtypes []tf.DataType, ...) (key tf.Output, values []tf.Output)
- func OutfeedDequeue(scope *Scope, dtype tf.DataType, shape tf.Shape, ...) (output tf.Output)
- func OutfeedDequeueTuple(scope *Scope, dtypes []tf.DataType, shapes []tf.Shape, ...) (outputs []tf.Output)
- func OutfeedDequeueTupleV2(scope *Scope, device_ordinal tf.Output, dtypes []tf.DataType, ...) (outputs []tf.Output)
- func OutfeedDequeueV2(scope *Scope, device_ordinal tf.Output, dtype tf.DataType, shape tf.Shape) (output tf.Output)
- func OutfeedEnqueue(scope *Scope, input tf.Output) (o *tf.Operation)
- func OutfeedEnqueueTuple(scope *Scope, inputs []tf.Output) (o *tf.Operation)
- func Pack(scope *Scope, values []tf.Output, optional ...PackAttr) (output tf.Output)
- func Pad(scope *Scope, input tf.Output, paddings tf.Output) (output tf.Output)
- func PadV2(scope *Scope, input tf.Output, paddings tf.Output, constant_values tf.Output) (output tf.Output)
- func PaddedBatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, ...) (handle tf.Output)
- func PaddedBatchDatasetV2(scope *Scope, input_dataset tf.Output, batch_size tf.Output, ...) (handle tf.Output)
- func PaddingFIFOQueueV2(scope *Scope, component_types []tf.DataType, ...) (handle tf.Output)
- func ParallelConcat(scope *Scope, values []tf.Output, shape tf.Shape) (output tf.Output)
- func ParallelDynamicStitch(scope *Scope, indices []tf.Output, data []tf.Output) (merged tf.Output)
- func ParameterizedTruncatedNormal(scope *Scope, shape tf.Output, means tf.Output, stdevs tf.Output, ...) (output tf.Output)
- func ParseExample(scope *Scope, serialized tf.Output, names tf.Output, sparse_keys []tf.Output, ...) (sparse_indices []tf.Output, sparse_values []tf.Output, ...)
- func ParseExampleDataset(scope *Scope, input_dataset tf.Output, num_parallel_calls tf.Output, ...) (handle tf.Output)
- func ParseExampleDatasetV2(scope *Scope, input_dataset tf.Output, num_parallel_calls tf.Output, ...) (handle tf.Output)
- func ParseExampleV2(scope *Scope, serialized tf.Output, names tf.Output, sparse_keys tf.Output, ...) (sparse_indices []tf.Output, sparse_values []tf.Output, ...)
- func ParseSequenceExample(scope *Scope, serialized tf.Output, debug_name tf.Output, ...) (context_sparse_indices []tf.Output, context_sparse_values []tf.Output, ...)
- func ParseSequenceExampleV2(scope *Scope, serialized tf.Output, debug_name tf.Output, ...) (context_sparse_indices []tf.Output, context_sparse_values []tf.Output, ...)
- func ParseSingleExample(scope *Scope, serialized tf.Output, dense_defaults []tf.Output, ...) (sparse_indices []tf.Output, sparse_values []tf.Output, ...)
- func ParseSingleSequenceExample(scope *Scope, serialized tf.Output, ...) (context_sparse_indices []tf.Output, context_sparse_values []tf.Output, ...)
- func ParseTensor(scope *Scope, serialized tf.Output, out_type tf.DataType) (output tf.Output)
- func Placeholder(scope *Scope, dtype tf.DataType, optional ...PlaceholderAttr) (output tf.Output)
- func PlaceholderV2(scope *Scope, dtype tf.DataType, shape tf.Shape) (output tf.Output)
- func PlaceholderWithDefault(scope *Scope, input tf.Output, shape tf.Shape) (output tf.Output)
- func Polygamma(scope *Scope, a tf.Output, x tf.Output) (z tf.Output)
- func PopulationCount(scope *Scope, x tf.Output) (y tf.Output)
- func Pow(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func PrefetchDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output, ...) (handle tf.Output)
- func Prelinearize(scope *Scope, input tf.Output, optional ...PrelinearizeAttr) (output tf.Output)
- func PrelinearizeTuple(scope *Scope, inputs []tf.Output, shapes []tf.Shape, ...) (output tf.Output)
- func PreventGradient(scope *Scope, input tf.Output, optional ...PreventGradientAttr) (output tf.Output)
- func Print(scope *Scope, input tf.Output, data []tf.Output, optional ...PrintAttr) (output tf.Output)
- func PrintV2(scope *Scope, input tf.Output, optional ...PrintV2Attr) (o *tf.Operation)
- func PriorityQueueV2(scope *Scope, shapes []tf.Shape, optional ...PriorityQueueV2Attr) (handle tf.Output)
- func PrivateThreadPoolDataset(scope *Scope, input_dataset tf.Output, num_threads tf.Output, ...) (handle tf.Output)
- func Prod(scope *Scope, input tf.Output, axis tf.Output, optional ...ProdAttr) (output tf.Output)
- func Qr(scope *Scope, input tf.Output, optional ...QrAttr) (q tf.Output, r tf.Output)
- func QuantizeAndDequantize(scope *Scope, input tf.Output, optional ...QuantizeAndDequantizeAttr) (output tf.Output)
- func QuantizeAndDequantizeV2(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, ...) (output tf.Output)
- func QuantizeAndDequantizeV3(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, ...) (output tf.Output)
- func QuantizeAndDequantizeV4(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, ...) (output tf.Output)
- func QuantizeAndDequantizeV4Grad(scope *Scope, gradients tf.Output, input tf.Output, input_min tf.Output, ...) (input_backprop tf.Output, input_min_backprop tf.Output, ...)
- func QuantizeDownAndShrinkRange(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, ...) (output tf.Output, output_min tf.Output, output_max tf.Output)
- func QuantizeV2(scope *Scope, input tf.Output, min_range tf.Output, max_range tf.Output, ...) (output tf.Output, output_min tf.Output, output_max tf.Output)
- func QuantizedAdd(scope *Scope, x tf.Output, y tf.Output, min_x tf.Output, max_x tf.Output, ...) (z tf.Output, min_z tf.Output, max_z tf.Output)
- func QuantizedAvgPool(scope *Scope, input tf.Output, min_input tf.Output, max_input tf.Output, ...) (output tf.Output, min_output tf.Output, max_output tf.Output)
- func QuantizedBatchNormWithGlobalNormalization(scope *Scope, t tf.Output, t_min tf.Output, t_max tf.Output, m tf.Output, ...) (result tf.Output, result_min tf.Output, result_max tf.Output)
- func QuantizedBiasAdd(scope *Scope, input tf.Output, bias tf.Output, min_input tf.Output, ...) (output tf.Output, min_out tf.Output, max_out tf.Output)
- func QuantizedConcat(scope *Scope, concat_dim tf.Output, values []tf.Output, input_mins []tf.Output, ...) (output tf.Output, output_min tf.Output, output_max tf.Output)
- func QuantizedConv2D(scope *Scope, input tf.Output, filter tf.Output, min_input tf.Output, ...) (output tf.Output, min_output tf.Output, max_output tf.Output)
- func QuantizedConv2DPerChannel(scope *Scope, input tf.Output, filter tf.Output, min_input tf.Output, ...) (output tf.Output, min_output tf.Output, max_output tf.Output)
- func QuantizedDepthwiseConv2D(scope *Scope, input tf.Output, filter tf.Output, min_input tf.Output, ...) (output tf.Output, min_output tf.Output, max_output tf.Output)
- func QuantizedDepthwiseConv2DWithBias(scope *Scope, input tf.Output, filter tf.Output, bias tf.Output, ...) (output tf.Output, min_output tf.Output, max_output tf.Output)
- func QuantizedDepthwiseConv2DWithBiasAndRelu(scope *Scope, input tf.Output, filter tf.Output, bias tf.Output, ...) (output tf.Output, min_output tf.Output, max_output tf.Output)
- func QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize(scope *Scope, input tf.Output, filter tf.Output, bias tf.Output, ...) (output tf.Output, min_output tf.Output, max_output tf.Output)
- func QuantizedInstanceNorm(scope *Scope, x tf.Output, x_min tf.Output, x_max tf.Output, ...) (y tf.Output, y_min tf.Output, y_max tf.Output)
- func QuantizedMatMul(scope *Scope, a tf.Output, b tf.Output, min_a tf.Output, max_a tf.Output, ...) (out tf.Output, min_out tf.Output, max_out tf.Output)
- func QuantizedMatMulWithBias(scope *Scope, a tf.Output, b tf.Output, bias tf.Output, min_a tf.Output, ...) (out tf.Output, min_out tf.Output, max_out tf.Output)
- func QuantizedMatMulWithBiasAndRelu(scope *Scope, a tf.Output, b tf.Output, bias tf.Output, min_a tf.Output, ...) (out tf.Output, min_out tf.Output, max_out tf.Output)
- func QuantizedMatMulWithBiasAndReluAndRequantize(scope *Scope, a tf.Output, b tf.Output, bias tf.Output, min_a tf.Output, ...) (out tf.Output, min_out tf.Output, max_out tf.Output)
- func QuantizedMaxPool(scope *Scope, input tf.Output, min_input tf.Output, max_input tf.Output, ...) (output tf.Output, min_output tf.Output, max_output tf.Output)
- func QuantizedMul(scope *Scope, x tf.Output, y tf.Output, min_x tf.Output, max_x tf.Output, ...) (z tf.Output, min_z tf.Output, max_z tf.Output)
- func QuantizedRelu(scope *Scope, features tf.Output, min_features tf.Output, ...) (activations tf.Output, min_activations tf.Output, max_activations tf.Output)
- func QuantizedRelu6(scope *Scope, features tf.Output, min_features tf.Output, ...) (activations tf.Output, min_activations tf.Output, max_activations tf.Output)
- func QuantizedReluX(scope *Scope, features tf.Output, max_value tf.Output, min_features tf.Output, ...) (activations tf.Output, min_activations tf.Output, max_activations tf.Output)
- func QuantizedReshape(scope *Scope, tensor tf.Output, shape tf.Output, input_min tf.Output, ...) (output tf.Output, output_min tf.Output, output_max tf.Output)
- func QuantizedResizeBilinear(scope *Scope, images tf.Output, size tf.Output, min tf.Output, max tf.Output, ...) (resized_images tf.Output, out_min tf.Output, out_max tf.Output)
- func QueueCloseV2(scope *Scope, handle tf.Output, optional ...QueueCloseV2Attr) (o *tf.Operation)
- func QueueDequeueManyV2(scope *Scope, handle tf.Output, n tf.Output, component_types []tf.DataType, ...) (components []tf.Output)
- func QueueDequeueUpToV2(scope *Scope, handle tf.Output, n tf.Output, component_types []tf.DataType, ...) (components []tf.Output)
- func QueueDequeueV2(scope *Scope, handle tf.Output, component_types []tf.DataType, ...) (components []tf.Output)
- func QueueEnqueueManyV2(scope *Scope, handle tf.Output, components []tf.Output, ...) (o *tf.Operation)
- func QueueEnqueueV2(scope *Scope, handle tf.Output, components []tf.Output, ...) (o *tf.Operation)
- func QueueIsClosedV2(scope *Scope, handle tf.Output) (is_closed tf.Output)
- func QueueSizeV2(scope *Scope, handle tf.Output) (size tf.Output)
- func RFFT(scope *Scope, input tf.Output, fft_length tf.Output, optional ...RFFTAttr) (output tf.Output)
- func RFFT2D(scope *Scope, input tf.Output, fft_length tf.Output, optional ...RFFT2DAttr) (output tf.Output)
- func RFFT3D(scope *Scope, input tf.Output, fft_length tf.Output, optional ...RFFT3DAttr) (output tf.Output)
- func RFFTND(scope *Scope, input tf.Output, fft_length tf.Output, axes tf.Output, ...) (output tf.Output)
- func RGBToHSV(scope *Scope, images tf.Output) (output tf.Output)
- func RaggedBincount(scope *Scope, splits tf.Output, values tf.Output, size tf.Output, ...) (output tf.Output)
- func RaggedCountSparseOutput(scope *Scope, splits tf.Output, values tf.Output, weights tf.Output, ...) (output_indices tf.Output, output_values tf.Output, ...)
- func RaggedCross(scope *Scope, ragged_values []tf.Output, ragged_row_splits []tf.Output, ...) (output_values tf.Output, output_row_splits tf.Output)
- func RaggedGather(scope *Scope, params_nested_splits []tf.Output, params_dense_values tf.Output, ...) (output_nested_splits []tf.Output, output_dense_values tf.Output)
- func RaggedRange(scope *Scope, starts tf.Output, limits tf.Output, deltas tf.Output, ...) (rt_nested_splits tf.Output, rt_dense_values tf.Output)
- func RaggedTensorFromVariant(scope *Scope, encoded_ragged tf.Output, input_ragged_rank int64, ...) (output_nested_splits []tf.Output, output_dense_values tf.Output)
- func RaggedTensorToSparse(scope *Scope, rt_nested_splits []tf.Output, rt_dense_values tf.Output) (sparse_indices tf.Output, sparse_values tf.Output, ...)
- func RaggedTensorToTensor(scope *Scope, shape tf.Output, values tf.Output, default_value tf.Output, ...) (result tf.Output)
- func RaggedTensorToVariant(scope *Scope, rt_nested_splits []tf.Output, rt_dense_values tf.Output, ...) (encoded_ragged tf.Output)
- func RaggedTensorToVariantGradient(scope *Scope, encoded_ragged_grad tf.Output, row_splits tf.Output, ...) (dense_values_grad tf.Output)
- func RandomCrop(scope *Scope, image tf.Output, size tf.Output, optional ...RandomCropAttr) (output tf.Output)
- func RandomDataset(scope *Scope, seed tf.Output, seed2 tf.Output, output_types []tf.DataType, ...) (handle tf.Output)
- func RandomDatasetV2(scope *Scope, seed tf.Output, seed2 tf.Output, seed_generator tf.Output, ...) (handle tf.Output)
- func RandomGamma(scope *Scope, shape tf.Output, alpha tf.Output, optional ...RandomGammaAttr) (output tf.Output)
- func RandomGammaGrad(scope *Scope, alpha tf.Output, sample tf.Output) (output tf.Output)
- func RandomIndexShuffle(scope *Scope, index tf.Output, seed tf.Output, max_index tf.Output, ...) (output tf.Output)
- func RandomPoisson(scope *Scope, shape tf.Output, rate tf.Output, optional ...RandomPoissonAttr) (output tf.Output)
- func RandomPoissonV2(scope *Scope, shape tf.Output, rate tf.Output, optional ...RandomPoissonV2Attr) (output tf.Output)
- func RandomShuffle(scope *Scope, value tf.Output, optional ...RandomShuffleAttr) (output tf.Output)
- func RandomShuffleQueueV2(scope *Scope, component_types []tf.DataType, ...) (handle tf.Output)
- func RandomStandardNormal(scope *Scope, shape tf.Output, dtype tf.DataType, ...) (output tf.Output)
- func RandomUniform(scope *Scope, shape tf.Output, dtype tf.DataType, ...) (output tf.Output)
- func RandomUniformInt(scope *Scope, shape tf.Output, minval tf.Output, maxval tf.Output, ...) (output tf.Output)
- func Range(scope *Scope, start tf.Output, limit tf.Output, delta tf.Output) (output tf.Output)
- func RangeDataset(scope *Scope, start tf.Output, stop tf.Output, step tf.Output, ...) (handle tf.Output)
- func Rank(scope *Scope, input tf.Output) (output tf.Output)
- func ReadFile(scope *Scope, filename tf.Output) (contents tf.Output)
- func ReadVariableOp(scope *Scope, resource tf.Output, dtype tf.DataType) (value tf.Output)
- func ReadVariableXlaSplitND(scope *Scope, resource tf.Output, T tf.DataType, N int64, num_splits []int64, ...) (outputs []tf.Output)
- func ReaderNumRecordsProducedV2(scope *Scope, reader_handle tf.Output) (records_produced tf.Output)
- func ReaderNumWorkUnitsCompletedV2(scope *Scope, reader_handle tf.Output) (units_completed tf.Output)
- func ReaderReadUpToV2(scope *Scope, reader_handle tf.Output, queue_handle tf.Output, ...) (keys tf.Output, values tf.Output)
- func ReaderReadV2(scope *Scope, reader_handle tf.Output, queue_handle tf.Output) (key tf.Output, value tf.Output)
- func ReaderResetV2(scope *Scope, reader_handle tf.Output) (o *tf.Operation)
- func ReaderRestoreStateV2(scope *Scope, reader_handle tf.Output, state tf.Output) (o *tf.Operation)
- func ReaderSerializeStateV2(scope *Scope, reader_handle tf.Output) (state tf.Output)
- func Real(scope *Scope, input tf.Output, optional ...RealAttr) (output tf.Output)
- func RealDiv(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func RebatchDataset(scope *Scope, input_dataset tf.Output, num_replicas tf.Output, ...) (handle tf.Output)
- func RebatchDatasetV2(scope *Scope, input_dataset tf.Output, batch_sizes tf.Output, ...) (handle tf.Output)
- func Reciprocal(scope *Scope, x tf.Output) (y tf.Output)
- func ReciprocalGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output)
- func RecordInput(scope *Scope, file_pattern string, optional ...RecordInputAttr) (records tf.Output)
- func Recv(scope *Scope, tensor_type tf.DataType, tensor_name string, send_device string, ...) (tensor tf.Output)
- func RecvTPUEmbeddingActivations(scope *Scope, num_outputs int64, config string) (outputs []tf.Output)
- func ReduceJoin(scope *Scope, inputs tf.Output, reduction_indices tf.Output, ...) (output tf.Output)
- func RegexFullMatch(scope *Scope, input tf.Output, pattern tf.Output) (output tf.Output)
- func RegexReplace(scope *Scope, input tf.Output, pattern tf.Output, rewrite tf.Output, ...) (output tf.Output)
- func RegisterDataset(scope *Scope, dataset tf.Output, address tf.Output, protocol tf.Output, ...) (dataset_id tf.Output)
- func RegisterDatasetV2(scope *Scope, dataset tf.Output, address tf.Output, protocol tf.Output, ...) (dataset_id tf.Output)
- func Relu(scope *Scope, features tf.Output) (activations tf.Output)
- func Relu6(scope *Scope, features tf.Output) (activations tf.Output)
- func Relu6Grad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output)
- func ReluGrad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output)
- func RepeatDataset(scope *Scope, input_dataset tf.Output, count tf.Output, ...) (handle tf.Output)
- func RequantizationRange(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output) (output_min tf.Output, output_max tf.Output)
- func RequantizationRangePerChannel(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, ...) (output_min tf.Output, output_max tf.Output)
- func Requantize(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, ...) (output tf.Output, output_min tf.Output, output_max tf.Output)
- func RequantizePerChannel(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, ...) (output tf.Output, output_min tf.Output, output_max tf.Output)
- func Reshape(scope *Scope, tensor tf.Output, shape tf.Output) (output tf.Output)
- func ResizeArea(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeAreaAttr) (resized_images tf.Output)
- func ResizeBicubic(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeBicubicAttr) (resized_images tf.Output)
- func ResizeBicubicGrad(scope *Scope, grads tf.Output, original_image tf.Output, ...) (output tf.Output)
- func ResizeBilinear(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeBilinearAttr) (resized_images tf.Output)
- func ResizeBilinearGrad(scope *Scope, grads tf.Output, original_image tf.Output, ...) (output tf.Output)
- func ResizeNearestNeighbor(scope *Scope, images tf.Output, size tf.Output, ...) (resized_images tf.Output)
- func ResizeNearestNeighborGrad(scope *Scope, grads tf.Output, size tf.Output, ...) (output tf.Output)
- func ResourceAccumulatorApplyGradient(scope *Scope, handle tf.Output, local_step tf.Output, gradient tf.Output) (o *tf.Operation)
- func ResourceAccumulatorNumAccumulated(scope *Scope, handle tf.Output) (num_accumulated tf.Output)
- func ResourceAccumulatorSetGlobalStep(scope *Scope, handle tf.Output, new_global_step tf.Output) (o *tf.Operation)
- func ResourceAccumulatorTakeGradient(scope *Scope, handle tf.Output, num_required tf.Output, dtype tf.DataType) (average tf.Output)
- func ResourceApplyAdaMax(scope *Scope, var_ tf.Output, m tf.Output, v tf.Output, beta1_power tf.Output, ...) (o *tf.Operation)
- func ResourceApplyAdadelta(scope *Scope, var_ tf.Output, accum tf.Output, accum_update tf.Output, ...) (o *tf.Operation)
- func ResourceApplyAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, ...) (o *tf.Operation)
- func ResourceApplyAdagradDA(scope *Scope, var_ tf.Output, gradient_accumulator tf.Output, ...) (o *tf.Operation)
- func ResourceApplyAdagradV2(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, epsilon tf.Output, ...) (o *tf.Operation)
- func ResourceApplyAdam(scope *Scope, var_ tf.Output, m tf.Output, v tf.Output, beta1_power tf.Output, ...) (o *tf.Operation)
- func ResourceApplyAdamWithAmsgrad(scope *Scope, var_ tf.Output, m tf.Output, v tf.Output, vhat tf.Output, ...) (o *tf.Operation)
- func ResourceApplyAddSign(scope *Scope, var_ tf.Output, m tf.Output, lr tf.Output, alpha tf.Output, ...) (o *tf.Operation)
- func ResourceApplyCenteredRMSProp(scope *Scope, var_ tf.Output, mg tf.Output, ms tf.Output, mom tf.Output, ...) (o *tf.Operation)
- func ResourceApplyFtrl(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, ...) (o *tf.Operation)
- func ResourceApplyFtrlV2(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, ...) (o *tf.Operation)
- func ResourceApplyGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, delta tf.Output, ...) (o *tf.Operation)
- func ResourceApplyKerasMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, ...) (o *tf.Operation)
- func ResourceApplyMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, ...) (o *tf.Operation)
- func ResourceApplyPowerSign(scope *Scope, var_ tf.Output, m tf.Output, lr tf.Output, logbase tf.Output, ...) (o *tf.Operation)
- func ResourceApplyProximalAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, l1 tf.Output, ...) (o *tf.Operation)
- func ResourceApplyProximalGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, l1 tf.Output, l2 tf.Output, ...) (o *tf.Operation)
- func ResourceApplyRMSProp(scope *Scope, var_ tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, ...) (o *tf.Operation)
- func ResourceConditionalAccumulator(scope *Scope, dtype tf.DataType, shape tf.Shape, ...) (handle tf.Output)
- func ResourceCountUpTo(scope *Scope, resource tf.Output, limit int64, T tf.DataType) (output tf.Output)
- func ResourceGather(scope *Scope, resource tf.Output, indices tf.Output, dtype tf.DataType, ...) (output tf.Output)
- func ResourceScatterAdd(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation)
- func ResourceScatterDiv(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation)
- func ResourceScatterMax(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation)
- func ResourceScatterMin(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation)
- func ResourceScatterMul(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation)
- func ResourceScatterNdAdd(scope *Scope, ref tf.Output, indices tf.Output, updates tf.Output, ...) (o *tf.Operation)
- func ResourceScatterNdSub(scope *Scope, ref tf.Output, indices tf.Output, updates tf.Output, ...) (o *tf.Operation)
- func ResourceScatterNdUpdate(scope *Scope, ref tf.Output, indices tf.Output, updates tf.Output, ...) (o *tf.Operation)
- func ResourceScatterSub(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation)
- func ResourceScatterUpdate(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation)
- func ResourceSparseApplyAdadelta(scope *Scope, var_ tf.Output, accum tf.Output, accum_update tf.Output, ...) (o *tf.Operation)
- func ResourceSparseApplyAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, ...) (o *tf.Operation)
- func ResourceSparseApplyAdagradDA(scope *Scope, var_ tf.Output, gradient_accumulator tf.Output, ...) (o *tf.Operation)
- func ResourceSparseApplyAdagradV2(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, epsilon tf.Output, ...) (o *tf.Operation)
- func ResourceSparseApplyCenteredRMSProp(scope *Scope, var_ tf.Output, mg tf.Output, ms tf.Output, mom tf.Output, ...) (o *tf.Operation)
- func ResourceSparseApplyFtrl(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, ...) (o *tf.Operation)
- func ResourceSparseApplyFtrlV2(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, ...) (o *tf.Operation)
- func ResourceSparseApplyKerasMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, ...) (o *tf.Operation)
- func ResourceSparseApplyMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, ...) (o *tf.Operation)
- func ResourceSparseApplyProximalAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, l1 tf.Output, ...) (o *tf.Operation)
- func ResourceSparseApplyProximalGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, l1 tf.Output, l2 tf.Output, ...) (o *tf.Operation)
- func ResourceSparseApplyRMSProp(scope *Scope, var_ tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, ...) (o *tf.Operation)
- func ResourceStridedSliceAssign(scope *Scope, ref tf.Output, begin tf.Output, end tf.Output, strides tf.Output, ...) (o *tf.Operation)
- func Restore(scope *Scope, file_pattern tf.Output, tensor_name tf.Output, dt tf.DataType, ...) (tensor tf.Output)
- func RestoreSlice(scope *Scope, file_pattern tf.Output, tensor_name tf.Output, ...) (tensor tf.Output)
- func RestoreV2(scope *Scope, prefix tf.Output, tensor_names tf.Output, ...) (tensors []tf.Output)
- func RetrieveAllTPUEmbeddingParameters(scope *Scope, NumTables int64, config string, num_shards int64, shard_id int64) (parameters []tf.Output, auxiliary1 []tf.Output, auxiliary2 []tf.Output, ...)
- func RetrieveTPUEmbeddingADAMParameters(scope *Scope, num_shards int64, shard_id int64, ...) (parameters tf.Output, momenta tf.Output, velocities tf.Output)
- func RetrieveTPUEmbeddingAdadeltaParameters(scope *Scope, num_shards int64, shard_id int64, ...) (parameters tf.Output, accumulators tf.Output, updates tf.Output)
- func RetrieveTPUEmbeddingAdagradMomentumParameters(scope *Scope, num_shards int64, shard_id int64, ...) (parameters tf.Output, accumulators tf.Output, momenta tf.Output)
- func RetrieveTPUEmbeddingAdagradParameters(scope *Scope, num_shards int64, shard_id int64, ...) (parameters tf.Output, accumulators tf.Output)
- func RetrieveTPUEmbeddingCenteredRMSPropParameters(scope *Scope, num_shards int64, shard_id int64, ...) (parameters tf.Output, ms tf.Output, mom tf.Output, mg tf.Output)
- func RetrieveTPUEmbeddingFTRLParameters(scope *Scope, num_shards int64, shard_id int64, ...) (parameters tf.Output, accumulators tf.Output, linears tf.Output)
- func RetrieveTPUEmbeddingFrequencyEstimatorParameters(scope *Scope, num_shards int64, shard_id int64, ...) (parameters tf.Output, last_hit_step tf.Output)
- func RetrieveTPUEmbeddingMDLAdagradLightParameters(scope *Scope, num_shards int64, shard_id int64, ...) (parameters tf.Output, accumulators tf.Output, weights tf.Output, ...)
- func RetrieveTPUEmbeddingMomentumParameters(scope *Scope, num_shards int64, shard_id int64, ...) (parameters tf.Output, momenta tf.Output)
- func RetrieveTPUEmbeddingProximalAdagradParameters(scope *Scope, num_shards int64, shard_id int64, ...) (parameters tf.Output, accumulators tf.Output)
- func RetrieveTPUEmbeddingRMSPropParameters(scope *Scope, num_shards int64, shard_id int64, ...) (parameters tf.Output, ms tf.Output, mom tf.Output)
- func RetrieveTPUEmbeddingStochasticGradientDescentParameters(scope *Scope, num_shards int64, shard_id int64, ...) (parameters tf.Output)
- func Reverse(scope *Scope, tensor tf.Output, dims tf.Output) (output tf.Output)
- func ReverseSequence(scope *Scope, input tf.Output, seq_lengths tf.Output, seq_dim int64, ...) (output tf.Output)
- func ReverseV2(scope *Scope, tensor tf.Output, axis tf.Output) (output tf.Output)
- func RightShift(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func Rint(scope *Scope, x tf.Output) (y tf.Output)
- func RiscAdd(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func RiscMax(scope *Scope, x tf.Output, y tf.Output) (max tf.Output)
- func RngReadAndSkip(scope *Scope, resource tf.Output, alg tf.Output, delta tf.Output) (value tf.Output)
- func RngSkip(scope *Scope, resource tf.Output, algorithm tf.Output, delta tf.Output) (o *tf.Operation)
- func Roll(scope *Scope, input tf.Output, shift tf.Output, axis tf.Output) (output tf.Output)
- func Round(scope *Scope, x tf.Output) (y tf.Output)
- func Rsqrt(scope *Scope, x tf.Output) (y tf.Output)
- func RsqrtGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output)
- func SampleDistortedBoundingBox(scope *Scope, image_size tf.Output, bounding_boxes tf.Output, ...) (begin tf.Output, size tf.Output, bboxes tf.Output)
- func SampleDistortedBoundingBoxV2(scope *Scope, image_size tf.Output, bounding_boxes tf.Output, ...) (begin tf.Output, size tf.Output, bboxes tf.Output)
- func SamplingDataset(scope *Scope, input_dataset tf.Output, rate tf.Output, seed tf.Output, ...) (handle tf.Output)
- func Save(scope *Scope, filename tf.Output, tensor_names tf.Output, data []tf.Output) (o *tf.Operation)
- func SaveSlices(scope *Scope, filename tf.Output, tensor_names tf.Output, ...) (o *tf.Operation)
- func SaveV2(scope *Scope, prefix tf.Output, tensor_names tf.Output, ...) (o *tf.Operation)
- func ScalarSummary(scope *Scope, tags tf.Output, values tf.Output) (summary tf.Output)
- func ScatterNd(scope *Scope, indices tf.Output, updates tf.Output, shape tf.Output, ...) (output tf.Output)
- func ScatterNdNonAliasingAdd(scope *Scope, input tf.Output, indices tf.Output, updates tf.Output, ...) (output tf.Output)
- func SdcaFprint(scope *Scope, input tf.Output) (output tf.Output)
- func SdcaOptimizer(scope *Scope, sparse_example_indices []tf.Output, ...) (out_example_state_data tf.Output, out_delta_sparse_weights []tf.Output, ...)
- func SdcaOptimizerV2(scope *Scope, sparse_example_indices []tf.Output, ...) (out_example_state_data tf.Output, out_delta_sparse_weights []tf.Output, ...)
- func SegmentMax(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output)
- func SegmentMaxV2(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output)
- func SegmentMean(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output)
- func SegmentMin(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output)
- func SegmentMinV2(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output)
- func SegmentProd(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output)
- func SegmentProdV2(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output)
- func SegmentSum(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output)
- func SegmentSumV2(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output)
- func Select(scope *Scope, condition tf.Output, x tf.Output, y tf.Output) (output tf.Output)
- func SelfAdjointEig(scope *Scope, input tf.Output) (output tf.Output)
- func SelfAdjointEigV2(scope *Scope, input tf.Output, optional ...SelfAdjointEigV2Attr) (e tf.Output, v tf.Output)
- func Selu(scope *Scope, features tf.Output) (activations tf.Output)
- func SeluGrad(scope *Scope, gradients tf.Output, outputs tf.Output) (backprops tf.Output)
- func Send(scope *Scope, tensor tf.Output, tensor_name string, send_device string, ...) (o *tf.Operation)
- func SendTPUEmbeddingGradients(scope *Scope, inputs []tf.Output, learning_rates []tf.Output, config string) (o *tf.Operation)
- func SerializeIterator(scope *Scope, resource_handle tf.Output, optional ...SerializeIteratorAttr) (serialized tf.Output)
- func SerializeManySparse(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, ...) (serialized_sparse tf.Output)
- func SerializeSparse(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, ...) (serialized_sparse tf.Output)
- func SerializeTensor(scope *Scope, tensor tf.Output) (serialized tf.Output)
- func SetSize(scope *Scope, set_indices tf.Output, set_values tf.Output, set_shape tf.Output, ...) (size tf.Output)
- func Shape(scope *Scope, input tf.Output, optional ...ShapeAttr) (output tf.Output)
- func ShapeN(scope *Scope, input []tf.Output, optional ...ShapeNAttr) (output []tf.Output)
- func ShardDataset(scope *Scope, input_dataset tf.Output, num_shards tf.Output, index tf.Output, ...) (handle tf.Output)
- func ShardedFilename(scope *Scope, basename tf.Output, shard tf.Output, num_shards tf.Output) (filename tf.Output)
- func ShardedFilespec(scope *Scope, basename tf.Output, num_shards tf.Output) (filename tf.Output)
- func ShuffleAndRepeatDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output, seed tf.Output, ...) (handle tf.Output)
- func ShuffleDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output, seed tf.Output, ...) (handle tf.Output)
- func ShutdownDistributedTPU(scope *Scope) (o *tf.Operation)
- func ShutdownTPUSystem(scope *Scope) (success tf.Output)
- func Sigmoid(scope *Scope, x tf.Output) (y tf.Output)
- func SigmoidGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output)
- func Sign(scope *Scope, x tf.Output) (y tf.Output)
- func Sin(scope *Scope, x tf.Output) (y tf.Output)
- func Sinh(scope *Scope, x tf.Output) (y tf.Output)
- func Size(scope *Scope, input tf.Output, optional ...SizeAttr) (output tf.Output)
- func SkipDataset(scope *Scope, input_dataset tf.Output, count tf.Output, ...) (handle tf.Output)
- func Skipgram(scope *Scope, filename string, batch_size int64, optional ...SkipgramAttr) (vocab_word tf.Output, vocab_freq tf.Output, words_per_epoch tf.Output, ...)
- func Slice(scope *Scope, input tf.Output, begin tf.Output, size tf.Output) (output tf.Output)
- func SlidingWindowDataset(scope *Scope, input_dataset tf.Output, window_size tf.Output, ...) (handle tf.Output)
- func Snapshot(scope *Scope, input tf.Output) (output tf.Output)
- func SnapshotDataset(scope *Scope, input_dataset tf.Output, path tf.Output, ...) (handle tf.Output)
- func SobolSample(scope *Scope, dim tf.Output, num_results tf.Output, skip tf.Output, ...) (samples tf.Output)
- func Softmax(scope *Scope, logits tf.Output) (softmax tf.Output)
- func SoftmaxCrossEntropyWithLogits(scope *Scope, features tf.Output, labels tf.Output) (loss tf.Output, backprop tf.Output)
- func SoftplusGrad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output)
- func Softsign(scope *Scope, features tf.Output) (activations tf.Output)
- func SoftsignGrad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output)
- func SpaceToBatch(scope *Scope, input tf.Output, paddings tf.Output, block_size int64) (output tf.Output)
- func SpaceToBatchND(scope *Scope, input tf.Output, block_shape tf.Output, paddings tf.Output) (output tf.Output)
- func SpaceToDepth(scope *Scope, input tf.Output, block_size int64, optional ...SpaceToDepthAttr) (output tf.Output)
- func SparseAdd(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, ...) (sum_indices tf.Output, sum_values tf.Output, sum_shape tf.Output)
- func SparseAddGrad(scope *Scope, backprop_val_grad tf.Output, a_indices tf.Output, ...) (a_val_grad tf.Output, b_val_grad tf.Output)
- func SparseBincount(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output, ...) (output tf.Output)
- func SparseConcat(scope *Scope, indices []tf.Output, values []tf.Output, shapes []tf.Output, ...) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output)
- func SparseCountSparseOutput(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output, ...) (output_indices tf.Output, output_values tf.Output, ...)
- func SparseCross(scope *Scope, indices []tf.Output, values []tf.Output, shapes []tf.Output, ...) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output)
- func SparseCrossHashed(scope *Scope, indices []tf.Output, values []tf.Output, shapes []tf.Output, ...) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output)
- func SparseCrossV2(scope *Scope, indices []tf.Output, values []tf.Output, shapes []tf.Output, ...) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output)
- func SparseDenseCwiseAdd(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output, ...) (output tf.Output)
- func SparseDenseCwiseDiv(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output, ...) (output tf.Output)
- func SparseDenseCwiseMul(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output, ...) (output tf.Output)
- func SparseFillEmptyRows(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output, ...) (output_indices tf.Output, output_values tf.Output, ...)
- func SparseFillEmptyRowsGrad(scope *Scope, reverse_index_map tf.Output, grad_values tf.Output) (d_values tf.Output, d_default_value tf.Output)
- func SparseMatMul(scope *Scope, a tf.Output, b tf.Output, optional ...SparseMatMulAttr) (product tf.Output)
- func SparseMatrixAdd(scope *Scope, a tf.Output, b tf.Output, alpha tf.Output, beta tf.Output) (c tf.Output)
- func SparseMatrixMatMul(scope *Scope, a tf.Output, b tf.Output, optional ...SparseMatrixMatMulAttr) (output tf.Output)
- func SparseMatrixMul(scope *Scope, a tf.Output, b tf.Output) (output tf.Output)
- func SparseMatrixNNZ(scope *Scope, sparse_matrix tf.Output) (nnz tf.Output)
- func SparseMatrixOrderingAMD(scope *Scope, input tf.Output) (output tf.Output)
- func SparseMatrixSoftmax(scope *Scope, logits tf.Output, type_ tf.DataType) (softmax tf.Output)
- func SparseMatrixSoftmaxGrad(scope *Scope, softmax tf.Output, grad_softmax tf.Output, type_ tf.DataType) (gradient tf.Output)
- func SparseMatrixSparseCholesky(scope *Scope, input tf.Output, permutation tf.Output, type_ tf.DataType) (output tf.Output)
- func SparseMatrixSparseMatMul(scope *Scope, a tf.Output, b tf.Output, type_ tf.DataType, ...) (c tf.Output)
- func SparseMatrixTranspose(scope *Scope, input tf.Output, type_ tf.DataType, ...) (output tf.Output)
- func SparseMatrixZeros(scope *Scope, dense_shape tf.Output, type_ tf.DataType) (sparse_matrix tf.Output)
- func SparseReduceMax(scope *Scope, input_indices tf.Output, input_values tf.Output, ...) (output tf.Output)
- func SparseReduceMaxSparse(scope *Scope, input_indices tf.Output, input_values tf.Output, ...) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output)
- func SparseReduceSum(scope *Scope, input_indices tf.Output, input_values tf.Output, ...) (output tf.Output)
- func SparseReduceSumSparse(scope *Scope, input_indices tf.Output, input_values tf.Output, ...) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output)
- func SparseReorder(scope *Scope, input_indices tf.Output, input_values tf.Output, ...) (output_indices tf.Output, output_values tf.Output)
- func SparseReshape(scope *Scope, input_indices tf.Output, input_shape tf.Output, ...) (output_indices tf.Output, output_shape tf.Output)
- func SparseSegmentMean(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, ...) (output tf.Output)
- func SparseSegmentMeanGrad(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, ...) (output tf.Output)
- func SparseSegmentMeanGradV2(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, ...) (output tf.Output, sorted_unique_indices tf.Output)
- func SparseSegmentMeanWithNumSegments(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, ...) (output tf.Output)
- func SparseSegmentSqrtN(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, ...) (output tf.Output)
- func SparseSegmentSqrtNGrad(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, ...) (output tf.Output)
- func SparseSegmentSqrtNGradV2(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, ...) (output tf.Output, sorted_unique_indices tf.Output)
- func SparseSegmentSqrtNWithNumSegments(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, ...) (output tf.Output)
- func SparseSegmentSum(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, ...) (output tf.Output)
- func SparseSegmentSumGrad(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, ...) (output tf.Output)
- func SparseSegmentSumGradV2(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, ...) (output tf.Output, sorted_unique_indices tf.Output)
- func SparseSegmentSumWithNumSegments(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, ...) (output tf.Output)
- func SparseSlice(scope *Scope, indices tf.Output, values tf.Output, shape tf.Output, ...) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output)
- func SparseSliceGrad(scope *Scope, backprop_val_grad tf.Output, input_indices tf.Output, ...) (val_grad tf.Output)
- func SparseSoftmax(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output) (output tf.Output)
- func SparseSoftmaxCrossEntropyWithLogits(scope *Scope, features tf.Output, labels tf.Output) (loss tf.Output, backprop tf.Output)
- func SparseSparseMaximum(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, ...) (output_indices tf.Output, output_values tf.Output)
- func SparseSparseMinimum(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, ...) (output_indices tf.Output, output_values tf.Output)
- func SparseSplit(scope *Scope, split_dim tf.Output, indices tf.Output, values tf.Output, ...) (output_indices []tf.Output, output_values []tf.Output, ...)
- func SparseTensorDenseAdd(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, ...) (output tf.Output)
- func SparseTensorDenseMatMul(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, ...) (product tf.Output)
- func SparseTensorSliceDataset(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output) (handle tf.Output)
- func SparseTensorToCSRSparseMatrix(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output) (sparse_matrix tf.Output)
- func SparseToDense(scope *Scope, sparse_indices tf.Output, output_shape tf.Output, ...) (dense tf.Output)
- func SparseToSparseSetOperation(scope *Scope, set1_indices tf.Output, set1_values tf.Output, ...) (result_indices tf.Output, result_values tf.Output, result_shape tf.Output)
- func Split(scope *Scope, axis tf.Output, value tf.Output, num_split int64) (output []tf.Output)
- func SplitDedupData(scope *Scope, input tf.Output, integer_type tf.DataType, ...) (integer_tensor tf.Output, float_tensor tf.Output)
- func SplitV(scope *Scope, value tf.Output, size_splits tf.Output, axis tf.Output, ...) (output []tf.Output)
- func SqlDataset(scope *Scope, driver_name tf.Output, data_source_name tf.Output, ...) (handle tf.Output)
- func Sqrt(scope *Scope, x tf.Output) (y tf.Output)
- func SqrtGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output)
- func Square(scope *Scope, x tf.Output) (y tf.Output)
- func SquaredDifference(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func Squeeze(scope *Scope, input tf.Output, optional ...SqueezeAttr) (output tf.Output)
- func StackCloseV2(scope *Scope, handle tf.Output) (o *tf.Operation)
- func StackPopV2(scope *Scope, handle tf.Output, elem_type tf.DataType) (elem tf.Output)
- func StackPushV2(scope *Scope, handle tf.Output, elem tf.Output, optional ...StackPushV2Attr) (output tf.Output)
- func StackV2(scope *Scope, max_size tf.Output, elem_type tf.DataType, ...) (handle tf.Output)
- func Stage(scope *Scope, values []tf.Output, optional ...StageAttr) (o *tf.Operation)
- func StageClear(scope *Scope, dtypes []tf.DataType, optional ...StageClearAttr) (o *tf.Operation)
- func StagePeek(scope *Scope, index tf.Output, dtypes []tf.DataType, optional ...StagePeekAttr) (values []tf.Output)
- func StageSize(scope *Scope, dtypes []tf.DataType, optional ...StageSizeAttr) (size tf.Output)
- func StatefulStandardNormal(scope *Scope, resource tf.Output, shape tf.Output, ...) (output tf.Output)
- func StatefulStandardNormalV2(scope *Scope, resource tf.Output, algorithm tf.Output, shape tf.Output, ...) (output tf.Output)
- func StatefulTruncatedNormal(scope *Scope, resource tf.Output, algorithm tf.Output, shape tf.Output, ...) (output tf.Output)
- func StatefulUniform(scope *Scope, resource tf.Output, algorithm tf.Output, shape tf.Output, ...) (output tf.Output)
- func StatefulUniformFullInt(scope *Scope, resource tf.Output, algorithm tf.Output, shape tf.Output, ...) (output tf.Output)
- func StatefulUniformInt(scope *Scope, resource tf.Output, algorithm tf.Output, shape tf.Output, ...) (output tf.Output)
- func StatelessMultinomial(scope *Scope, logits tf.Output, num_samples tf.Output, seed tf.Output, ...) (output tf.Output)
- func StatelessRandomBinomial(scope *Scope, shape tf.Output, seed tf.Output, counts tf.Output, ...) (output tf.Output)
- func StatelessRandomGammaV2(scope *Scope, shape tf.Output, seed tf.Output, alpha tf.Output) (output tf.Output)
- func StatelessRandomGammaV3(scope *Scope, shape tf.Output, key tf.Output, counter tf.Output, alg tf.Output, ...) (output tf.Output)
- func StatelessRandomGetAlg(scope *Scope) (alg tf.Output)
- func StatelessRandomGetKeyCounter(scope *Scope, seed tf.Output) (key tf.Output, counter tf.Output)
- func StatelessRandomGetKeyCounterAlg(scope *Scope, seed tf.Output) (key tf.Output, counter tf.Output, alg tf.Output)
- func StatelessRandomNormal(scope *Scope, shape tf.Output, seed tf.Output, ...) (output tf.Output)
- func StatelessRandomNormalV2(scope *Scope, shape tf.Output, key tf.Output, counter tf.Output, alg tf.Output, ...) (output tf.Output)
- func StatelessRandomPoisson(scope *Scope, shape tf.Output, seed tf.Output, lam tf.Output, ...) (output tf.Output)
- func StatelessRandomUniform(scope *Scope, shape tf.Output, seed tf.Output, ...) (output tf.Output)
- func StatelessRandomUniformFullInt(scope *Scope, shape tf.Output, seed tf.Output, ...) (output tf.Output)
- func StatelessRandomUniformFullIntV2(scope *Scope, shape tf.Output, key tf.Output, counter tf.Output, alg tf.Output, ...) (output tf.Output)
- func StatelessRandomUniformInt(scope *Scope, shape tf.Output, seed tf.Output, minval tf.Output, ...) (output tf.Output)
- func StatelessRandomUniformIntV2(scope *Scope, shape tf.Output, key tf.Output, counter tf.Output, alg tf.Output, ...) (output tf.Output)
- func StatelessRandomUniformV2(scope *Scope, shape tf.Output, key tf.Output, counter tf.Output, alg tf.Output, ...) (output tf.Output)
- func StatelessSampleDistortedBoundingBox(scope *Scope, image_size tf.Output, bounding_boxes tf.Output, ...) (begin tf.Output, size tf.Output, bboxes tf.Output)
- func StatelessShuffle(scope *Scope, value tf.Output, key tf.Output, counter tf.Output, alg tf.Output) (output tf.Output)
- func StatelessTruncatedNormal(scope *Scope, shape tf.Output, seed tf.Output, ...) (output tf.Output)
- func StatelessTruncatedNormalV2(scope *Scope, shape tf.Output, key tf.Output, counter tf.Output, alg tf.Output, ...) (output tf.Output)
- func StaticRegexFullMatch(scope *Scope, input tf.Output, pattern string) (output tf.Output)
- func StaticRegexReplace(scope *Scope, input tf.Output, pattern string, rewrite string, ...) (output tf.Output)
- func StatsAggregatorHandle(scope *Scope, optional ...StatsAggregatorHandleAttr) (handle tf.Output)
- func StatsAggregatorSetSummaryWriter(scope *Scope, stats_aggregator tf.Output, summary tf.Output) (o *tf.Operation)
- func StatsAggregatorSummary(scope *Scope, iterator tf.Output) (summary tf.Output)
- func StochasticCastToInt(scope *Scope, input tf.Output, key tf.Output, counter tf.Output, alg tf.Output, ...) (output tf.Output)
- func StopGradient(scope *Scope, input tf.Output) (output tf.Output)
- func StridedSlice(scope *Scope, input tf.Output, begin tf.Output, end tf.Output, ...) (output tf.Output)
- func StridedSliceGrad(scope *Scope, shape tf.Output, begin tf.Output, end tf.Output, ...) (output tf.Output)
- func StringFormat(scope *Scope, inputs []tf.Output, optional ...StringFormatAttr) (output tf.Output)
- func StringJoin(scope *Scope, inputs []tf.Output, optional ...StringJoinAttr) (output tf.Output)
- func StringLength(scope *Scope, input tf.Output, optional ...StringLengthAttr) (output tf.Output)
- func StringLower(scope *Scope, input tf.Output, optional ...StringLowerAttr) (output tf.Output)
- func StringNGrams(scope *Scope, data tf.Output, data_splits tf.Output, separator string, ...) (ngrams tf.Output, ngrams_splits tf.Output)
- func StringSplit(scope *Scope, input tf.Output, delimiter tf.Output, ...) (indices tf.Output, values tf.Output, shape tf.Output)
- func StringSplitV2(scope *Scope, input tf.Output, sep tf.Output, optional ...StringSplitV2Attr) (indices tf.Output, values tf.Output, shape tf.Output)
- func StringStrip(scope *Scope, input tf.Output) (output tf.Output)
- func StringToHashBucket(scope *Scope, string_tensor tf.Output, num_buckets int64) (output tf.Output)
- func StringToHashBucketFast(scope *Scope, input tf.Output, num_buckets int64) (output tf.Output)
- func StringToHashBucketStrong(scope *Scope, input tf.Output, num_buckets int64, key []int64) (output tf.Output)
- func StringToNumber(scope *Scope, string_tensor tf.Output, optional ...StringToNumberAttr) (output tf.Output)
- func StringUpper(scope *Scope, input tf.Output, optional ...StringUpperAttr) (output tf.Output)
- func Sub(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func Substr(scope *Scope, input tf.Output, pos tf.Output, len tf.Output, ...) (output tf.Output)
- func Sum(scope *Scope, input tf.Output, axis tf.Output, optional ...SumAttr) (output tf.Output)
- func Svd(scope *Scope, input tf.Output, optional ...SvdAttr) (s tf.Output, u tf.Output, v tf.Output)
- func Switch(scope *Scope, data tf.Output, pred tf.Output) (output_false tf.Output, output_true tf.Output)
- func SyncDevice(scope *Scope) (o *tf.Operation)
- func TFRecordDataset(scope *Scope, filenames tf.Output, compression_type tf.Output, ...) (handle tf.Output)
- func TFRecordDatasetV2(scope *Scope, filenames tf.Output, compression_type tf.Output, ...) (handle tf.Output)
- func TFRecordReaderV2(scope *Scope, optional ...TFRecordReaderV2Attr) (reader_handle tf.Output)
- func TPUCompilationResult(scope *Scope) (output tf.Output)
- func TPUCompileSucceededAssert(scope *Scope, compilation_status tf.Output) (o *tf.Operation)
- func TPUCopyWithDynamicShape(scope *Scope, tensors []tf.Output, unpadded_sizes []tf.Output) (tpu_tensors []tf.Output)
- func TPUEmbeddingActivations(scope *Scope, embedding_variable tf.Output, sliced_activations tf.Output, ...) (output tf.Output)
- func TPUExecute(scope *Scope, args []tf.Output, key tf.Output, Tresults []tf.DataType) (results []tf.Output)
- func TPUExecuteAndUpdateVariables(scope *Scope, args []tf.Output, key tf.Output, Tresults []tf.DataType, ...) (results []tf.Output)
- func TPUOrdinalSelector(scope *Scope) (device_ordinals tf.Output)
- func TPUPartitionedInput(scope *Scope, inputs []tf.Output, optional ...TPUPartitionedInputAttr) (output tf.Output)
- func TPUPartitionedInputV2(scope *Scope, inputs []tf.Output, partition_dims []int64, ...) (output tf.Output)
- func TPUPartitionedOutput(scope *Scope, inputs tf.Output, num_splits int64, ...) (output []tf.Output)
- func TPUPartitionedOutputV2(scope *Scope, inputs tf.Output, num_splits int64, partition_dims []int64) (output []tf.Output)
- func TPUReplicateMetadata(scope *Scope, num_replicas int64, optional ...TPUReplicateMetadataAttr) (o *tf.Operation)
- func TPUReplicatedInput(scope *Scope, inputs []tf.Output, optional ...TPUReplicatedInputAttr) (output tf.Output)
- func TPUReplicatedOutput(scope *Scope, input tf.Output, num_replicas int64) (outputs []tf.Output)
- func TPUReshardVariables(scope *Scope, vars []tf.Output, new_format_key tf.Output, ...) (o *tf.Operation)
- func TPURoundRobin(scope *Scope) (device_ordinal tf.Output)
- func TakeDataset(scope *Scope, input_dataset tf.Output, count tf.Output, ...) (handle tf.Output)
- func TakeManySparseFromTensorsMap(scope *Scope, sparse_handles tf.Output, dtype tf.DataType, ...) (sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output)
- func Tan(scope *Scope, x tf.Output) (y tf.Output)
- func Tanh(scope *Scope, x tf.Output) (y tf.Output)
- func TanhGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output)
- func TensorArrayCloseV2(scope *Scope, handle tf.Output) (o *tf.Operation)
- func TensorArrayCloseV3(scope *Scope, handle tf.Output) (o *tf.Operation)
- func TensorArrayConcatV2(scope *Scope, handle tf.Output, flow_in tf.Output, dtype tf.DataType, ...) (value tf.Output, lengths tf.Output)
- func TensorArrayConcatV3(scope *Scope, handle tf.Output, flow_in tf.Output, dtype tf.DataType, ...) (value tf.Output, lengths tf.Output)
- func TensorArrayGatherV2(scope *Scope, handle tf.Output, indices tf.Output, flow_in tf.Output, ...) (value tf.Output)
- func TensorArrayGatherV3(scope *Scope, handle tf.Output, indices tf.Output, flow_in tf.Output, ...) (value tf.Output)
- func TensorArrayGradV2(scope *Scope, handle tf.Output, flow_in tf.Output, source string) (grad_handle tf.Output)
- func TensorArrayGradV3(scope *Scope, handle tf.Output, flow_in tf.Output, source string) (grad_handle tf.Output, flow_out tf.Output)
- func TensorArrayGradWithShape(scope *Scope, handle tf.Output, flow_in tf.Output, shape_to_prepend tf.Output, ...) (grad_handle tf.Output, flow_out tf.Output)
- func TensorArrayReadV2(scope *Scope, handle tf.Output, index tf.Output, flow_in tf.Output, ...) (value tf.Output)
- func TensorArrayReadV3(scope *Scope, handle tf.Output, index tf.Output, flow_in tf.Output, ...) (value tf.Output)
- func TensorArrayScatterV2(scope *Scope, handle tf.Output, indices tf.Output, value tf.Output, ...) (flow_out tf.Output)
- func TensorArrayScatterV3(scope *Scope, handle tf.Output, indices tf.Output, value tf.Output, ...) (flow_out tf.Output)
- func TensorArraySizeV2(scope *Scope, handle tf.Output, flow_in tf.Output) (size tf.Output)
- func TensorArraySizeV3(scope *Scope, handle tf.Output, flow_in tf.Output) (size tf.Output)
- func TensorArraySplitV2(scope *Scope, handle tf.Output, value tf.Output, lengths tf.Output, ...) (flow_out tf.Output)
- func TensorArraySplitV3(scope *Scope, handle tf.Output, value tf.Output, lengths tf.Output, ...) (flow_out tf.Output)
- func TensorArrayV2(scope *Scope, size tf.Output, dtype tf.DataType, optional ...TensorArrayV2Attr) (handle tf.Output)
- func TensorArrayV3(scope *Scope, size tf.Output, dtype tf.DataType, optional ...TensorArrayV3Attr) (handle tf.Output, flow tf.Output)
- func TensorArrayWriteV2(scope *Scope, handle tf.Output, index tf.Output, value tf.Output, ...) (flow_out tf.Output)
- func TensorArrayWriteV3(scope *Scope, handle tf.Output, index tf.Output, value tf.Output, ...) (flow_out tf.Output)
- func TensorDataset(scope *Scope, components []tf.Output, output_shapes []tf.Shape, ...) (handle tf.Output)
- func TensorListConcat(scope *Scope, input_handle tf.Output, element_dtype tf.DataType, ...) (tensor tf.Output, lengths tf.Output)
- func TensorListConcatV2(scope *Scope, input_handle tf.Output, element_shape tf.Output, ...) (tensor tf.Output, lengths tf.Output)
- func TensorListElementShape(scope *Scope, input_handle tf.Output, shape_type tf.DataType) (element_shape tf.Output)
- func TensorListFromTensor(scope *Scope, tensor tf.Output, element_shape tf.Output) (output_handle tf.Output)
- func TensorListGather(scope *Scope, input_handle tf.Output, indices tf.Output, ...) (values tf.Output)
- func TensorListGetItem(scope *Scope, input_handle tf.Output, index tf.Output, element_shape tf.Output, ...) (item tf.Output)
- func TensorListLength(scope *Scope, input_handle tf.Output) (length tf.Output)
- func TensorListPopBack(scope *Scope, input_handle tf.Output, element_shape tf.Output, ...) (output_handle tf.Output, tensor tf.Output)
- func TensorListPushBack(scope *Scope, input_handle tf.Output, tensor tf.Output) (output_handle tf.Output)
- func TensorListReserve(scope *Scope, element_shape tf.Output, num_elements tf.Output, ...) (handle tf.Output)
- func TensorListResize(scope *Scope, input_handle tf.Output, size tf.Output) (output_handle tf.Output)
- func TensorListScatter(scope *Scope, tensor tf.Output, indices tf.Output, element_shape tf.Output) (output_handle tf.Output)
- func TensorListScatterIntoExistingList(scope *Scope, input_handle tf.Output, tensor tf.Output, indices tf.Output) (output_handle tf.Output)
- func TensorListScatterV2(scope *Scope, tensor tf.Output, indices tf.Output, element_shape tf.Output, ...) (output_handle tf.Output)
- func TensorListSetItem(scope *Scope, input_handle tf.Output, index tf.Output, item tf.Output, ...) (output_handle tf.Output)
- func TensorListSplit(scope *Scope, tensor tf.Output, element_shape tf.Output, lengths tf.Output) (output_handle tf.Output)
- func TensorListStack(scope *Scope, input_handle tf.Output, element_shape tf.Output, ...) (tensor tf.Output)
- func TensorMapErase(scope *Scope, input_handle tf.Output, key tf.Output, value_dtype tf.DataType) (output_handle tf.Output)
- func TensorMapHasKey(scope *Scope, input_handle tf.Output, key tf.Output) (has_key tf.Output)
- func TensorMapInsert(scope *Scope, input_handle tf.Output, key tf.Output, value tf.Output) (output_handle tf.Output)
- func TensorMapLookup(scope *Scope, input_handle tf.Output, key tf.Output, value_dtype tf.DataType) (value tf.Output)
- func TensorMapSize(scope *Scope, input_handle tf.Output) (size tf.Output)
- func TensorMapStackKeys(scope *Scope, input_handle tf.Output, key_dtype tf.DataType) (keys tf.Output)
- func TensorScatterAdd(scope *Scope, tensor tf.Output, indices tf.Output, updates tf.Output, ...) (output tf.Output)
- func TensorScatterMax(scope *Scope, tensor tf.Output, indices tf.Output, updates tf.Output, ...) (output tf.Output)
- func TensorScatterSub(scope *Scope, tensor tf.Output, indices tf.Output, updates tf.Output, ...) (output tf.Output)
- func TensorScatterUpdate(scope *Scope, tensor tf.Output, indices tf.Output, updates tf.Output, ...) (output tf.Output)
- func TensorSliceDataset(scope *Scope, components []tf.Output, output_shapes []tf.Shape, ...) (handle tf.Output)
- func TensorStridedSliceUpdate(scope *Scope, input tf.Output, begin tf.Output, end tf.Output, ...) (output tf.Output)
- func TensorSummary(scope *Scope, tensor tf.Output, optional ...TensorSummaryAttr) (summary tf.Output)
- func TensorSummaryV2(scope *Scope, tag tf.Output, tensor tf.Output, ...) (summary tf.Output)
- func TextLineDataset(scope *Scope, filenames tf.Output, compression_type tf.Output, ...) (handle tf.Output)
- func TextLineReaderV2(scope *Scope, optional ...TextLineReaderV2Attr) (reader_handle tf.Output)
- func ThreadPoolDataset(scope *Scope, input_dataset tf.Output, thread_pool tf.Output, ...) (handle tf.Output)
- func ThreadPoolHandle(scope *Scope, num_threads int64, display_name string, ...) (handle tf.Output)
- func ThreadUnsafeUnigramCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, ...) (sampled_candidates tf.Output, true_expected_count tf.Output, ...)
- func Tile(scope *Scope, input tf.Output, multiples tf.Output) (output tf.Output)
- func TileGrad(scope *Scope, input tf.Output, multiples tf.Output) (output tf.Output)
- func Timestamp(scope *Scope) (ts tf.Output)
- func ToBool(scope *Scope, input tf.Output) (output tf.Output)
- func TopK(scope *Scope, input tf.Output, k int64, optional ...TopKAttr) (values tf.Output, indices tf.Output)
- func TopKUnique(scope *Scope, input tf.Output, k int64) (topk tf.Output, topk_indices tf.Output)
- func TopKV2(scope *Scope, input tf.Output, k tf.Output, optional ...TopKV2Attr) (values tf.Output, indices tf.Output)
- func TopKWithUnique(scope *Scope, input tf.Output, k int64) (topk tf.Output, topk_indices tf.Output)
- func TpuHandleToProtoKey(scope *Scope, uid tf.Output) (proto_keys tf.Output)
- func Transpose(scope *Scope, x tf.Output, perm tf.Output) (y tf.Output)
- func TridiagonalMatMul(scope *Scope, superdiag tf.Output, maindiag tf.Output, subdiag tf.Output, ...) (output tf.Output)
- func TridiagonalSolve(scope *Scope, diagonals tf.Output, rhs tf.Output, ...) (output tf.Output)
- func TruncateDiv(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func TruncateMod(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func TruncatedNormal(scope *Scope, shape tf.Output, dtype tf.DataType, ...) (output tf.Output)
- func Unbatch(scope *Scope, batched_tensor tf.Output, batch_index tf.Output, id tf.Output, ...) (unbatched_tensor tf.Output)
- func UnbatchDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, ...) (handle tf.Output)
- func UnbatchGrad(scope *Scope, original_input tf.Output, batch_index tf.Output, grad tf.Output, ...) (batched_grad tf.Output)
- func UncompressElement(scope *Scope, compressed tf.Output, output_types []tf.DataType, ...) (components []tf.Output)
- func UnicodeDecode(scope *Scope, input tf.Output, input_encoding string, ...) (row_splits tf.Output, char_values tf.Output)
- func UnicodeDecodeWithOffsets(scope *Scope, input tf.Output, input_encoding string, ...) (row_splits tf.Output, char_values tf.Output, char_to_byte_starts tf.Output)
- func UnicodeEncode(scope *Scope, input_values tf.Output, input_splits tf.Output, ...) (output tf.Output)
- func UnicodeScript(scope *Scope, input tf.Output) (output tf.Output)
- func UnicodeTranscode(scope *Scope, input tf.Output, input_encoding string, output_encoding string, ...) (output tf.Output)
- func UniformCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, ...) (sampled_candidates tf.Output, true_expected_count tf.Output, ...)
- func UniformDequantize(scope *Scope, input tf.Output, scales tf.Output, zero_points tf.Output, ...) (output tf.Output)
- func UniformQuantize(scope *Scope, input tf.Output, scales tf.Output, zero_points tf.Output, ...) (output tf.Output)
- func UniformQuantizedAdd(scope *Scope, lhs tf.Output, rhs tf.Output, lhs_scales tf.Output, ...) (output tf.Output)
- func UniformQuantizedClipByValue(scope *Scope, operand tf.Output, min tf.Output, max tf.Output, ...) (output tf.Output)
- func UniformQuantizedConvolution(scope *Scope, lhs tf.Output, rhs tf.Output, lhs_scales tf.Output, ...) (output tf.Output)
- func UniformQuantizedConvolutionHybrid(scope *Scope, lhs tf.Output, rhs tf.Output, rhs_scales tf.Output, ...) (output tf.Output)
- func UniformQuantizedDot(scope *Scope, lhs tf.Output, rhs tf.Output, lhs_scales tf.Output, ...) (output tf.Output)
- func UniformQuantizedDotHybrid(scope *Scope, lhs tf.Output, rhs tf.Output, rhs_scales tf.Output, ...) (output tf.Output)
- func UniformRequantize(scope *Scope, input tf.Output, input_scales tf.Output, ...) (output tf.Output)
- func Unique(scope *Scope, x tf.Output, optional ...UniqueAttr) (y tf.Output, idx tf.Output)
- func UniqueDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, ...) (handle tf.Output)
- func UniqueV2(scope *Scope, x tf.Output, axis tf.Output, optional ...UniqueV2Attr) (y tf.Output, idx tf.Output)
- func UniqueWithCounts(scope *Scope, x tf.Output, optional ...UniqueWithCountsAttr) (y tf.Output, idx tf.Output, count tf.Output)
- func UniqueWithCountsV2(scope *Scope, x tf.Output, axis tf.Output, optional ...UniqueWithCountsV2Attr) (y tf.Output, idx tf.Output, count tf.Output)
- func Unpack(scope *Scope, value tf.Output, num int64, optional ...UnpackAttr) (output []tf.Output)
- func UnravelIndex(scope *Scope, indices tf.Output, dims tf.Output) (output tf.Output)
- func UnsortedSegmentMax(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output)
- func UnsortedSegmentMin(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output)
- func UnsortedSegmentProd(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output)
- func UnsortedSegmentSum(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output)
- func Unstage(scope *Scope, dtypes []tf.DataType, optional ...UnstageAttr) (values []tf.Output)
- func UpdateTaskIdAndGlobalCoreArray(scope *Scope, tpu_task_id_to_shard_id []tf.Output) (o *tf.Operation)
- func UpperBound(scope *Scope, sorted_inputs tf.Output, values tf.Output, ...) (output tf.Output)
- func VarHandleOp(scope *Scope, dtype tf.DataType, shape tf.Shape, optional ...VarHandleOpAttr) (resource tf.Output)
- func VarIsInitializedOp(scope *Scope, resource tf.Output) (is_initialized tf.Output)
- func VariableShape(scope *Scope, input tf.Output, optional ...VariableShapeAttr) (output tf.Output)
- func Where(scope *Scope, condition tf.Output) (index tf.Output)
- func WholeFileReaderV2(scope *Scope, optional ...WholeFileReaderV2Attr) (reader_handle tf.Output)
- func WindowDataset(scope *Scope, input_dataset tf.Output, size tf.Output, shift tf.Output, ...) (handle tf.Output)
- func WorkerHeartbeat(scope *Scope, request tf.Output) (response tf.Output)
- func WriteAudioSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, ...) (o *tf.Operation)
- func WriteFile(scope *Scope, filename tf.Output, contents tf.Output) (o *tf.Operation)
- func WriteGraphSummary(scope *Scope, writer tf.Output, step tf.Output, tensor tf.Output) (o *tf.Operation)
- func WriteHistogramSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, ...) (o *tf.Operation)
- func WriteImageSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, ...) (o *tf.Operation)
- func WriteRawProtoSummary(scope *Scope, writer tf.Output, step tf.Output, tensor tf.Output) (o *tf.Operation)
- func WriteScalarSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, value tf.Output) (o *tf.Operation)
- func WriteSummary(scope *Scope, writer tf.Output, step tf.Output, tensor tf.Output, ...) (o *tf.Operation)
- func Xdivy(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func XlaAllReduce(scope *Scope, input tf.Output, group_assignment tf.Output, reduce_op string, ...) (output tf.Output)
- func XlaBroadcastHelper(scope *Scope, lhs tf.Output, rhs tf.Output, broadcast_dims tf.Output) (lhs_output tf.Output, rhs_output tf.Output)
- func XlaConcatND(scope *Scope, inputs []tf.Output, num_concats []int64, ...) (output tf.Output)
- func XlaConv(scope *Scope, lhs tf.Output, rhs tf.Output, window_strides tf.Output, ...) (output tf.Output)
- func XlaConvV2(scope *Scope, lhs tf.Output, rhs tf.Output, window_strides tf.Output, ...) (output tf.Output)
- func XlaCustomCall(scope *Scope, args []tf.Output, target_name string, backend_config string, ...) (output tf.Output)
- func XlaCustomCallV2(scope *Scope, operands []tf.Output, call_target_name string, ...) (results []tf.Output)
- func XlaDequantize(scope *Scope, input tf.Output, min_range float32, max_range float32, ...) (output tf.Output)
- func XlaDot(scope *Scope, lhs tf.Output, rhs tf.Output, dimension_numbers string, ...) (output tf.Output)
- func XlaDotV2(scope *Scope, lhs tf.Output, rhs tf.Output, dimension_numbers string, ...) (output tf.Output)
- func XlaDynamicSlice(scope *Scope, input tf.Output, start_indices tf.Output, size_indices tf.Output) (output tf.Output)
- func XlaDynamicUpdateSlice(scope *Scope, input tf.Output, update tf.Output, indices tf.Output) (output tf.Output)
- func XlaEinsum(scope *Scope, a tf.Output, b tf.Output, equation string) (product tf.Output)
- func XlaGather(scope *Scope, operand tf.Output, start_indices tf.Output, ...) (output tf.Output)
- func XlaKeyValueSort(scope *Scope, keys tf.Output, values tf.Output) (sorted_keys tf.Output, sorted_values tf.Output)
- func XlaOptimizationBarrier(scope *Scope, input []tf.Output) (output []tf.Output)
- func XlaPad(scope *Scope, input tf.Output, padding_value tf.Output, padding_low tf.Output, ...) (output tf.Output)
- func XlaRecv(scope *Scope, dtype tf.DataType, tensor_name string, shape tf.Shape) (tensor tf.Output)
- func XlaRecvFromHost(scope *Scope, Toutput tf.DataType, shape tf.Shape, key string) (output tf.Output)
- func XlaRecvTPUEmbeddingActivations(scope *Scope, deduplication_data tf.Output, num_tables int64, config string) (outputs []tf.Output)
- func XlaRecvTPUEmbeddingActivationsV2(scope *Scope, deduplication_data tf.Output, num_tables int64, config string, ...) (outputs []tf.Output)
- func XlaRecvTPUEmbeddingDeduplicationData(scope *Scope, config string) (output tf.Output)
- func XlaRecvTPUEmbeddingDeduplicationDataV2(scope *Scope, config string, embedding_partitions string, ...) (output tf.Output)
- func XlaReducePrecision(scope *Scope, operand tf.Output, exponent_bits int64, mantissa_bits int64) (output tf.Output)
- func XlaReduceScatter(scope *Scope, input tf.Output, group_assignment tf.Output, ...) (output tf.Output)
- func XlaRemoveDynamicDimensionSize(scope *Scope, input tf.Output, dim_index tf.Output) (output tf.Output)
- func XlaReplicaId(scope *Scope) (id tf.Output)
- func XlaRngBitGenerator(scope *Scope, algorithm tf.Output, initial_state tf.Output, shape tf.Output, ...) (output_key tf.Output, output tf.Output)
- func XlaSelfAdjointEig(scope *Scope, a tf.Output, lower bool, max_iter int64, epsilon float32) (w tf.Output, v tf.Output)
- func XlaSend(scope *Scope, tensor tf.Output, tensor_name string) (o *tf.Operation)
- func XlaSendTPUEmbeddingGradients(scope *Scope, gradients []tf.Output, learning_rates []tf.Output, ...) (o *tf.Operation)
- func XlaSendTPUEmbeddingGradientsV2(scope *Scope, gradients []tf.Output, learning_rates []tf.Output, ...) (o *tf.Operation)
- func XlaSendToHost(scope *Scope, input tf.Output, key string) (o *tf.Operation)
- func XlaSetBound(scope *Scope, input tf.Output, bound tf.Output) (output tf.Output)
- func XlaSetDynamicDimensionSize(scope *Scope, input tf.Output, dim_index tf.Output, size tf.Output) (output tf.Output)
- func XlaSharding(scope *Scope, input tf.Output, optional ...XlaShardingAttr) (output tf.Output)
- func XlaSort(scope *Scope, input tf.Output) (output tf.Output)
- func XlaSplitND(scope *Scope, input tf.Output, N int64, num_splits []int64, ...) (outputs []tf.Output)
- func XlaSpmdFullToShardShape(scope *Scope, input tf.Output, manual_sharding string, ...) (output tf.Output)
- func XlaSpmdShardToFullShape(scope *Scope, input tf.Output, manual_sharding string, full_shape tf.Shape, ...) (output tf.Output)
- func XlaSvd(scope *Scope, a tf.Output, max_iter int64, epsilon float32, ...) (s tf.Output, u tf.Output, v tf.Output)
- func Xlog1py(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func Xlogy(scope *Scope, x tf.Output, y tf.Output) (z tf.Output)
- func ZerosLike(scope *Scope, x tf.Output) (y tf.Output)
- func Zeta(scope *Scope, x tf.Output, q tf.Output) (z tf.Output)
- func ZipDataset(scope *Scope, input_datasets []tf.Output, output_types []tf.DataType, ...) (handle tf.Output)
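The function entries above and the Attr types indexed below share one calling convention: each optional attribute of an op is exposed as a setter function that returns an XAttr value, and any number of those values can be passed through the trailing `optional ...XAttr` parameter of the op constructor. The following minimal sketch illustrates the pattern using the DecodeJpegChannels and DecodeJpegRatio setters listed below; it assumes the DecodeJpeg constructor defined earlier in this package has the usual (scope, contents, optional ...DecodeJpegAttr) shape and that tf.String is the string DataType constant from the tensorflow package.

// Sketch of the Attr (functional-option) pattern; not tied to any one op.
s := NewScope()
contents := Placeholder(s, tf.String) // JPEG bytes supplied at Session.Run time.
// Each setter returns a DecodeJpegAttr; append as many as needed.
image := DecodeJpeg(s, contents,
	DecodeJpegChannels(3), // decode to 3 channels (RGB)
	DecodeJpegRatio(2),    // downscale by a factor of 2 while decoding
)
if s.Err() != nil {
	panic(s.Err())
}
_ = image

Passing attributes as functional options keeps the generated constructor signatures stable when new optional attributes are added to an op; omitting a setter leaves that attribute at its default.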
- type AbortAttr
- type AddManySparseToTensorsMapAttr
- type AddSparseToTensorsMapAttr
- type AllAttr
- type AllCandidateSamplerAttr
- type AngleAttr
- type AnonymousMutableDenseHashTableAttr
- type AnonymousMutableHashTableOfTensorsAttr
- type AnyAttr
- type ApproxTopKAttr
- type ApproximateEqualAttr
- type ArgMaxAttr
- type ArgMinAttr
- type AsStringAttr
- type AssertAttr
- type AssignVariableOpAttr
- type AssignVariableXlaConcatNDAttr
- type AudioSpectrogramAttr
- type AudioSummaryAttr
- type AudioSummaryV2Attr
- type AutoShardDatasetAttr
- type AvgPool3DAttr
- type AvgPool3DGradAttr
- type AvgPoolAttr
- type AvgPoolGradAttr
- type BatchAttr
- type BatchDatasetAttr
- type BatchDatasetV2Attr
- type BatchMatMulAttr
- type BatchMatMulV2Attr
- type BatchMatMulV3Attr
- type BiasAddAttr
- type BiasAddGradAttr
- type BlockLSTMAttr
- type BlockLSTMV2Attr
- type BoostedTreesCalculateBestFeatureSplitAttr
- type BoostedTreesCreateQuantileStreamResourceAttr
- type BoostedTreesEnsembleResourceHandleOpAttr
- type BoostedTreesQuantileStreamResourceFlushAttr
- type BoostedTreesQuantileStreamResourceHandleOpAttr
- type BoostedTreesSparseCalculateBestFeatureSplitAttr
- type BoostedTreesUpdateEnsembleV2Attr
- type CTCBeamSearchDecoderAttr
- type CTCGreedyDecoderAttr
- type CTCLossAttr
- type CTCLossV2Attr
- type CacheDatasetAttr
- type CastAttr
- type CollectiveAllToAllV2Attr
- type CollectiveAllToAllV3Attr
- type CollectiveBcastRecvAttr
- type CollectiveBcastRecvV2Attr
- type CollectiveBcastSendAttr
- type CollectiveBcastSendV2Attr
- type CollectiveGatherAttr
- type CollectiveGatherV2Attr
- type CollectiveInitializeCommunicatorAttr
- type CollectiveReduceAttr
- type CollectiveReduceScatterV2Attr
- func CollectiveReduceScatterV2CommunicationHint(value string) CollectiveReduceScatterV2Attr
- func CollectiveReduceScatterV2IsStateless(value bool) CollectiveReduceScatterV2Attr
- func CollectiveReduceScatterV2MaxSubdivsPerDevice(value int64) CollectiveReduceScatterV2Attr
- func CollectiveReduceScatterV2TimeoutSeconds(value float32) CollectiveReduceScatterV2Attr
- type CollectiveReduceV2Attr
- func CollectiveReduceV2CommunicationHint(value string) CollectiveReduceV2Attr
- func CollectiveReduceV2IsStateless(value bool) CollectiveReduceV2Attr
- func CollectiveReduceV2MaxSubdivsPerDevice(value int64) CollectiveReduceV2Attr
- func CollectiveReduceV2TimeoutSeconds(value float32) CollectiveReduceV2Attr
- type CollectiveReduceV3Attr
- type CombinedNonMaxSuppressionAttr
- type ComplexAbsAttr
- type ComplexAttr
- type ComputeAccidentalHitsAttr
- type ConcatenateDatasetAttr
- type ConfigureAndInitializeGlobalTPUAttr
- type ConfigureDistributedTPUAttr
- func ConfigureDistributedTPUCompilationFailureClosesChips(value bool) ConfigureDistributedTPUAttr
- func ConfigureDistributedTPUEmbeddingConfig(value string) ConfigureDistributedTPUAttr
- func ConfigureDistributedTPUEnableWholeMeshCompilations(value bool) ConfigureDistributedTPUAttr
- func ConfigureDistributedTPUIsGlobalInit(value bool) ConfigureDistributedTPUAttr
- func ConfigureDistributedTPUTpuCancellationClosesChips(value int64) ConfigureDistributedTPUAttr
- func ConfigureDistributedTPUTpuEmbeddingConfig(value string) ConfigureDistributedTPUAttr
- type Conv2DAttr
- type Conv2DBackpropFilterAttr
- func Conv2DBackpropFilterDataFormat(value string) Conv2DBackpropFilterAttr
- func Conv2DBackpropFilterDilations(value []int64) Conv2DBackpropFilterAttr
- func Conv2DBackpropFilterExplicitPaddings(value []int64) Conv2DBackpropFilterAttr
- func Conv2DBackpropFilterUseCudnnOnGpu(value bool) Conv2DBackpropFilterAttr
- type Conv2DBackpropFilterV2Attr
- func Conv2DBackpropFilterV2DataFormat(value string) Conv2DBackpropFilterV2Attr
- func Conv2DBackpropFilterV2Dilations(value []int64) Conv2DBackpropFilterV2Attr
- func Conv2DBackpropFilterV2ExplicitPaddings(value []int64) Conv2DBackpropFilterV2Attr
- func Conv2DBackpropFilterV2UseCudnnOnGpu(value bool) Conv2DBackpropFilterV2Attr
- type Conv2DBackpropInputAttr
- type Conv2DBackpropInputV2Attr
- func Conv2DBackpropInputV2DataFormat(value string) Conv2DBackpropInputV2Attr
- func Conv2DBackpropInputV2Dilations(value []int64) Conv2DBackpropInputV2Attr
- func Conv2DBackpropInputV2ExplicitPaddings(value []int64) Conv2DBackpropInputV2Attr
- func Conv2DBackpropInputV2UseCudnnOnGpu(value bool) Conv2DBackpropInputV2Attr
- type Conv3DAttr
- type Conv3DBackpropFilterAttr
- type Conv3DBackpropFilterV2Attr
- type Conv3DBackpropInputAttr
- type Conv3DBackpropInputV2Attr
- type ConvAttr
- type CopyAttr
- type CopyHostAttr
- type CropAndResizeAttr
- type CropAndResizeGradBoxesAttr
- type CropAndResizeGradImageAttr
- type CudnnRNNAttr
- func CudnnRNNDirection(value string) CudnnRNNAttr
- func CudnnRNNDropout(value float32) CudnnRNNAttr
- func CudnnRNNInputMode(value string) CudnnRNNAttr
- func CudnnRNNIsTraining(value bool) CudnnRNNAttr
- func CudnnRNNRnnMode(value string) CudnnRNNAttr
- func CudnnRNNSeed(value int64) CudnnRNNAttr
- func CudnnRNNSeed2(value int64) CudnnRNNAttr
- type CudnnRNNBackpropAttr
- func CudnnRNNBackpropDirection(value string) CudnnRNNBackpropAttr
- func CudnnRNNBackpropDropout(value float32) CudnnRNNBackpropAttr
- func CudnnRNNBackpropInputMode(value string) CudnnRNNBackpropAttr
- func CudnnRNNBackpropRnnMode(value string) CudnnRNNBackpropAttr
- func CudnnRNNBackpropSeed(value int64) CudnnRNNBackpropAttr
- func CudnnRNNBackpropSeed2(value int64) CudnnRNNBackpropAttr
- type CudnnRNNBackpropV2Attr
- func CudnnRNNBackpropV2Direction(value string) CudnnRNNBackpropV2Attr
- func CudnnRNNBackpropV2Dropout(value float32) CudnnRNNBackpropV2Attr
- func CudnnRNNBackpropV2InputMode(value string) CudnnRNNBackpropV2Attr
- func CudnnRNNBackpropV2RnnMode(value string) CudnnRNNBackpropV2Attr
- func CudnnRNNBackpropV2Seed(value int64) CudnnRNNBackpropV2Attr
- func CudnnRNNBackpropV2Seed2(value int64) CudnnRNNBackpropV2Attr
- type CudnnRNNBackpropV3Attr
- func CudnnRNNBackpropV3Direction(value string) CudnnRNNBackpropV3Attr
- func CudnnRNNBackpropV3Dropout(value float32) CudnnRNNBackpropV3Attr
- func CudnnRNNBackpropV3InputMode(value string) CudnnRNNBackpropV3Attr
- func CudnnRNNBackpropV3NumProj(value int64) CudnnRNNBackpropV3Attr
- func CudnnRNNBackpropV3RnnMode(value string) CudnnRNNBackpropV3Attr
- func CudnnRNNBackpropV3Seed(value int64) CudnnRNNBackpropV3Attr
- func CudnnRNNBackpropV3Seed2(value int64) CudnnRNNBackpropV3Attr
- func CudnnRNNBackpropV3TimeMajor(value bool) CudnnRNNBackpropV3Attr
- type CudnnRNNCanonicalToParamsAttr
- func CudnnRNNCanonicalToParamsDirection(value string) CudnnRNNCanonicalToParamsAttr
- func CudnnRNNCanonicalToParamsDropout(value float32) CudnnRNNCanonicalToParamsAttr
- func CudnnRNNCanonicalToParamsInputMode(value string) CudnnRNNCanonicalToParamsAttr
- func CudnnRNNCanonicalToParamsRnnMode(value string) CudnnRNNCanonicalToParamsAttr
- func CudnnRNNCanonicalToParamsSeed(value int64) CudnnRNNCanonicalToParamsAttr
- func CudnnRNNCanonicalToParamsSeed2(value int64) CudnnRNNCanonicalToParamsAttr
- type CudnnRNNCanonicalToParamsV2Attr
- func CudnnRNNCanonicalToParamsV2Direction(value string) CudnnRNNCanonicalToParamsV2Attr
- func CudnnRNNCanonicalToParamsV2Dropout(value float32) CudnnRNNCanonicalToParamsV2Attr
- func CudnnRNNCanonicalToParamsV2InputMode(value string) CudnnRNNCanonicalToParamsV2Attr
- func CudnnRNNCanonicalToParamsV2NumProj(value int64) CudnnRNNCanonicalToParamsV2Attr
- func CudnnRNNCanonicalToParamsV2RnnMode(value string) CudnnRNNCanonicalToParamsV2Attr
- func CudnnRNNCanonicalToParamsV2Seed(value int64) CudnnRNNCanonicalToParamsV2Attr
- func CudnnRNNCanonicalToParamsV2Seed2(value int64) CudnnRNNCanonicalToParamsV2Attr
- type CudnnRNNParamsSizeAttr
- func CudnnRNNParamsSizeDirection(value string) CudnnRNNParamsSizeAttr
- func CudnnRNNParamsSizeDropout(value float32) CudnnRNNParamsSizeAttr
- func CudnnRNNParamsSizeInputMode(value string) CudnnRNNParamsSizeAttr
- func CudnnRNNParamsSizeNumProj(value int64) CudnnRNNParamsSizeAttr
- func CudnnRNNParamsSizeRnnMode(value string) CudnnRNNParamsSizeAttr
- func CudnnRNNParamsSizeSeed(value int64) CudnnRNNParamsSizeAttr
- func CudnnRNNParamsSizeSeed2(value int64) CudnnRNNParamsSizeAttr
- type CudnnRNNParamsToCanonicalAttr
- func CudnnRNNParamsToCanonicalDirection(value string) CudnnRNNParamsToCanonicalAttr
- func CudnnRNNParamsToCanonicalDropout(value float32) CudnnRNNParamsToCanonicalAttr
- func CudnnRNNParamsToCanonicalInputMode(value string) CudnnRNNParamsToCanonicalAttr
- func CudnnRNNParamsToCanonicalRnnMode(value string) CudnnRNNParamsToCanonicalAttr
- func CudnnRNNParamsToCanonicalSeed(value int64) CudnnRNNParamsToCanonicalAttr
- func CudnnRNNParamsToCanonicalSeed2(value int64) CudnnRNNParamsToCanonicalAttr
- type CudnnRNNParamsToCanonicalV2Attr
- func CudnnRNNParamsToCanonicalV2Direction(value string) CudnnRNNParamsToCanonicalV2Attr
- func CudnnRNNParamsToCanonicalV2Dropout(value float32) CudnnRNNParamsToCanonicalV2Attr
- func CudnnRNNParamsToCanonicalV2InputMode(value string) CudnnRNNParamsToCanonicalV2Attr
- func CudnnRNNParamsToCanonicalV2NumProj(value int64) CudnnRNNParamsToCanonicalV2Attr
- func CudnnRNNParamsToCanonicalV2RnnMode(value string) CudnnRNNParamsToCanonicalV2Attr
- func CudnnRNNParamsToCanonicalV2Seed(value int64) CudnnRNNParamsToCanonicalV2Attr
- func CudnnRNNParamsToCanonicalV2Seed2(value int64) CudnnRNNParamsToCanonicalV2Attr
- type CudnnRNNV2Attr
- func CudnnRNNV2Direction(value string) CudnnRNNV2Attr
- func CudnnRNNV2Dropout(value float32) CudnnRNNV2Attr
- func CudnnRNNV2InputMode(value string) CudnnRNNV2Attr
- func CudnnRNNV2IsTraining(value bool) CudnnRNNV2Attr
- func CudnnRNNV2RnnMode(value string) CudnnRNNV2Attr
- func CudnnRNNV2Seed(value int64) CudnnRNNV2Attr
- func CudnnRNNV2Seed2(value int64) CudnnRNNV2Attr
- type CudnnRNNV3Attr
- func CudnnRNNV3Direction(value string) CudnnRNNV3Attr
- func CudnnRNNV3Dropout(value float32) CudnnRNNV3Attr
- func CudnnRNNV3InputMode(value string) CudnnRNNV3Attr
- func CudnnRNNV3IsTraining(value bool) CudnnRNNV3Attr
- func CudnnRNNV3NumProj(value int64) CudnnRNNV3Attr
- func CudnnRNNV3RnnMode(value string) CudnnRNNV3Attr
- func CudnnRNNV3Seed(value int64) CudnnRNNV3Attr
- func CudnnRNNV3Seed2(value int64) CudnnRNNV3Attr
- func CudnnRNNV3TimeMajor(value bool) CudnnRNNV3Attr
- type CumprodAttr
- type CumsumAttr
- type CumulativeLogsumexpAttr
- type DataFormatDimMapAttr
- type DataFormatVecPermuteAttr
- type DataServiceDatasetAttr
- func DataServiceDatasetCrossTrainerCacheOptions(value string) DataServiceDatasetAttr
- func DataServiceDatasetDataTransferProtocol(value string) DataServiceDatasetAttr
- func DataServiceDatasetTargetWorkers(value string) DataServiceDatasetAttr
- func DataServiceDatasetTaskRefreshIntervalHintMs(value int64) DataServiceDatasetAttr
- type DataServiceDatasetV2Attr
- func DataServiceDatasetV2CrossTrainerCacheOptions(value string) DataServiceDatasetV2Attr
- func DataServiceDatasetV2DataTransferProtocol(value string) DataServiceDatasetV2Attr
- func DataServiceDatasetV2TargetWorkers(value string) DataServiceDatasetV2Attr
- func DataServiceDatasetV2TaskRefreshIntervalHintMs(value int64) DataServiceDatasetV2Attr
- type DatasetCardinalityAttr
- type DatasetToGraphAttr
- type DatasetToGraphV2Attr
- type DatasetToSingleElementAttr
- type DebugIdentityAttr
- type DebugIdentityV2Attr
- func DebugIdentityV2CircularBufferSize(value int64) DebugIdentityV2Attr
- func DebugIdentityV2DebugUrls(value []string) DebugIdentityV2Attr
- func DebugIdentityV2OpName(value string) DebugIdentityV2Attr
- func DebugIdentityV2OutputSlot(value int64) DebugIdentityV2Attr
- func DebugIdentityV2TensorDebugMode(value int64) DebugIdentityV2Attr
- func DebugIdentityV2TfdbgContextId(value string) DebugIdentityV2Attr
- func DebugIdentityV2TfdbgRunId(value string) DebugIdentityV2Attr
- type DebugIdentityV3Attr
- func DebugIdentityV3DebugUrls(value []string) DebugIdentityV3Attr
- func DebugIdentityV3DeviceName(value string) DebugIdentityV3Attr
- func DebugIdentityV3GatedGrpc(value bool) DebugIdentityV3Attr
- func DebugIdentityV3IoIndex(value int64) DebugIdentityV3Attr
- func DebugIdentityV3IoOfNode(value string) DebugIdentityV3Attr
- func DebugIdentityV3IsInput(value bool) DebugIdentityV3Attr
- func DebugIdentityV3TensorName(value string) DebugIdentityV3Attr
- type DebugNanCountAttr
- type DebugNumericSummaryAttr
- func DebugNumericSummaryDebugUrls(value []string) DebugNumericSummaryAttr
- func DebugNumericSummaryDeviceName(value string) DebugNumericSummaryAttr
- func DebugNumericSummaryGatedGrpc(value bool) DebugNumericSummaryAttr
- func DebugNumericSummaryLowerBound(value float32) DebugNumericSummaryAttr
- func DebugNumericSummaryMuteIfHealthy(value bool) DebugNumericSummaryAttr
- func DebugNumericSummaryTensorName(value string) DebugNumericSummaryAttr
- func DebugNumericSummaryUpperBound(value float32) DebugNumericSummaryAttr
- type DebugNumericSummaryV2Attr
- type DecodeAndCropJpegAttr
- func DecodeAndCropJpegAcceptableFraction(value float32) DecodeAndCropJpegAttr
- func DecodeAndCropJpegChannels(value int64) DecodeAndCropJpegAttr
- func DecodeAndCropJpegDctMethod(value string) DecodeAndCropJpegAttr
- func DecodeAndCropJpegFancyUpscaling(value bool) DecodeAndCropJpegAttr
- func DecodeAndCropJpegRatio(value int64) DecodeAndCropJpegAttr
- func DecodeAndCropJpegTryRecoverTruncated(value bool) DecodeAndCropJpegAttr
- type DecodeBmpAttr
- type DecodeCSVAttr
- type DecodeCompressedAttr
- type DecodeImageAttr
- type DecodeJpegAttr
- func DecodeJpegAcceptableFraction(value float32) DecodeJpegAttr
- func DecodeJpegChannels(value int64) DecodeJpegAttr
- func DecodeJpegDctMethod(value string) DecodeJpegAttr
- func DecodeJpegFancyUpscaling(value bool) DecodeJpegAttr
- func DecodeJpegRatio(value int64) DecodeJpegAttr
- func DecodeJpegTryRecoverTruncated(value bool) DecodeJpegAttr
- type DecodePaddedRawAttr
- type DecodePngAttr
- type DecodeProtoV2Attr
- type DecodeRawAttr
- type DecodeWavAttr
- type DenseBincountAttr
- type DenseCountSparseOutputAttr
- type DenseToDenseSetOperationAttr
- type DenseToSparseSetOperationAttr
- type DepthToSpaceAttr
- type DepthwiseConv2dNativeAttr
- type DepthwiseConv2dNativeBackpropFilterAttr
- func DepthwiseConv2dNativeBackpropFilterDataFormat(value string) DepthwiseConv2dNativeBackpropFilterAttr
- func DepthwiseConv2dNativeBackpropFilterDilations(value []int64) DepthwiseConv2dNativeBackpropFilterAttr
- func DepthwiseConv2dNativeBackpropFilterExplicitPaddings(value []int64) DepthwiseConv2dNativeBackpropFilterAttr
- type DepthwiseConv2dNativeBackpropInputAttr
- func DepthwiseConv2dNativeBackpropInputDataFormat(value string) DepthwiseConv2dNativeBackpropInputAttr
- func DepthwiseConv2dNativeBackpropInputDilations(value []int64) DepthwiseConv2dNativeBackpropInputAttr
- func DepthwiseConv2dNativeBackpropInputExplicitPaddings(value []int64) DepthwiseConv2dNativeBackpropInputAttr
- type DequantizeAttr
- type DestroyResourceOpAttr
- type DirectedInterleaveDatasetAttr
- type DynamicEnqueueTPUEmbeddingArbitraryTensorBatchAttr
- type EagerPyFuncAttr
- type EditDistanceAttr
- type EigAttr
- type EmptyAttr
- type EncodeBase64Attr
- type EncodeJpegAttr
- func EncodeJpegChromaDownsampling(value bool) EncodeJpegAttr
- func EncodeJpegDensityUnit(value string) EncodeJpegAttr
- func EncodeJpegFormat(value string) EncodeJpegAttr
- func EncodeJpegOptimizeSize(value bool) EncodeJpegAttr
- func EncodeJpegProgressive(value bool) EncodeJpegAttr
- func EncodeJpegQuality(value int64) EncodeJpegAttr
- func EncodeJpegXDensity(value int64) EncodeJpegAttr
- func EncodeJpegXmpMetadata(value string) EncodeJpegAttr
- func EncodeJpegYDensity(value int64) EncodeJpegAttr
- type EncodePngAttr
- type EncodeProtoAttr
- type EnqueueTPUEmbeddingArbitraryTensorBatchAttr
- type EnqueueTPUEmbeddingBatchAttr
- type EnqueueTPUEmbeddingIntegerBatchAttr
- type EnqueueTPUEmbeddingRaggedTensorBatchAttr
- func EnqueueTPUEmbeddingRaggedTensorBatchCombiners(value []string) EnqueueTPUEmbeddingRaggedTensorBatchAttr
- func EnqueueTPUEmbeddingRaggedTensorBatchDeviceOrdinal(value int64) EnqueueTPUEmbeddingRaggedTensorBatchAttr
- func EnqueueTPUEmbeddingRaggedTensorBatchMaxSequenceLengths(value []int64) EnqueueTPUEmbeddingRaggedTensorBatchAttr
- func EnqueueTPUEmbeddingRaggedTensorBatchNumFeatures(value []int64) EnqueueTPUEmbeddingRaggedTensorBatchAttr
- type EnqueueTPUEmbeddingSparseBatchAttr
- type EnqueueTPUEmbeddingSparseTensorBatchAttr
- func EnqueueTPUEmbeddingSparseTensorBatchCombiners(value []string) EnqueueTPUEmbeddingSparseTensorBatchAttr
- func EnqueueTPUEmbeddingSparseTensorBatchDeviceOrdinal(value int64) EnqueueTPUEmbeddingSparseTensorBatchAttr
- func EnqueueTPUEmbeddingSparseTensorBatchMaxSequenceLengths(value []int64) EnqueueTPUEmbeddingSparseTensorBatchAttr
- func EnqueueTPUEmbeddingSparseTensorBatchNumFeatures(value []int64) EnqueueTPUEmbeddingSparseTensorBatchAttr
- type EnterAttr
- type EqualAttr
- type EuclideanNormAttr
- type ExperimentalAutoShardDatasetAttr
- type ExperimentalIgnoreErrorsDatasetAttr
- type ExperimentalParseExampleDatasetAttr
- type ExperimentalRebatchDatasetAttr
- type ExperimentalStatsAggregatorHandleAttr
- type ExperimentalThreadPoolHandleAttr
- type ExtractGlimpseAttr
- type ExtractGlimpseV2Attr
- type ExtractJpegShapeAttr
- type FIFOQueueV2Attr
- type FakeQuantWithMinMaxArgsAttr
- func FakeQuantWithMinMaxArgsMax(value float32) FakeQuantWithMinMaxArgsAttr
- func FakeQuantWithMinMaxArgsMin(value float32) FakeQuantWithMinMaxArgsAttr
- func FakeQuantWithMinMaxArgsNarrowRange(value bool) FakeQuantWithMinMaxArgsAttr
- func FakeQuantWithMinMaxArgsNumBits(value int64) FakeQuantWithMinMaxArgsAttr
- type FakeQuantWithMinMaxArgsGradientAttr
- func FakeQuantWithMinMaxArgsGradientMax(value float32) FakeQuantWithMinMaxArgsGradientAttr
- func FakeQuantWithMinMaxArgsGradientMin(value float32) FakeQuantWithMinMaxArgsGradientAttr
- func FakeQuantWithMinMaxArgsGradientNarrowRange(value bool) FakeQuantWithMinMaxArgsGradientAttr
- func FakeQuantWithMinMaxArgsGradientNumBits(value int64) FakeQuantWithMinMaxArgsGradientAttr
- type FakeQuantWithMinMaxVarsAttr
- type FakeQuantWithMinMaxVarsGradientAttr
- type FakeQuantWithMinMaxVarsPerChannelAttr
- type FakeQuantWithMinMaxVarsPerChannelGradientAttr
- type FinalizeDatasetAttr
- type FixedLengthRecordDatasetAttr
- type FixedLengthRecordReaderV2Attr
- func FixedLengthRecordReaderV2Container(value string) FixedLengthRecordReaderV2Attr
- func FixedLengthRecordReaderV2Encoding(value string) FixedLengthRecordReaderV2Attr
- func FixedLengthRecordReaderV2FooterBytes(value int64) FixedLengthRecordReaderV2Attr
- func FixedLengthRecordReaderV2HeaderBytes(value int64) FixedLengthRecordReaderV2Attr
- func FixedLengthRecordReaderV2HopBytes(value int64) FixedLengthRecordReaderV2Attr
- func FixedLengthRecordReaderV2SharedName(value string) FixedLengthRecordReaderV2Attr
- type FixedUnigramCandidateSamplerAttr
- func FixedUnigramCandidateSamplerDistortion(value float32) FixedUnigramCandidateSamplerAttr
- func FixedUnigramCandidateSamplerNumReservedIds(value int64) FixedUnigramCandidateSamplerAttr
- func FixedUnigramCandidateSamplerNumShards(value int64) FixedUnigramCandidateSamplerAttr
- func FixedUnigramCandidateSamplerSeed(value int64) FixedUnigramCandidateSamplerAttr
- func FixedUnigramCandidateSamplerSeed2(value int64) FixedUnigramCandidateSamplerAttr
- func FixedUnigramCandidateSamplerShard(value int64) FixedUnigramCandidateSamplerAttr
- func FixedUnigramCandidateSamplerUnigrams(value []float32) FixedUnigramCandidateSamplerAttr
- func FixedUnigramCandidateSamplerVocabFile(value string) FixedUnigramCandidateSamplerAttr
- type FractionalAvgPoolAttr
- func FractionalAvgPoolDeterministic(value bool) FractionalAvgPoolAttr
- func FractionalAvgPoolOverlapping(value bool) FractionalAvgPoolAttr
- func FractionalAvgPoolPseudoRandom(value bool) FractionalAvgPoolAttr
- func FractionalAvgPoolSeed(value int64) FractionalAvgPoolAttr
- func FractionalAvgPoolSeed2(value int64) FractionalAvgPoolAttr
- type FractionalAvgPoolGradAttr
- type FractionalMaxPoolAttr
- func FractionalMaxPoolDeterministic(value bool) FractionalMaxPoolAttr
- func FractionalMaxPoolOverlapping(value bool) FractionalMaxPoolAttr
- func FractionalMaxPoolPseudoRandom(value bool) FractionalMaxPoolAttr
- func FractionalMaxPoolSeed(value int64) FractionalMaxPoolAttr
- func FractionalMaxPoolSeed2(value int64) FractionalMaxPoolAttr
- type FractionalMaxPoolGradAttr
- type FusedBatchNormAttr
- type FusedBatchNormGradAttr
- type FusedBatchNormGradV2Attr
- type FusedBatchNormGradV3Attr
- type FusedBatchNormV2Attr
- type FusedBatchNormV3Attr
- type FusedResizeAndPadConv2DAttr
- type GatherAttr
- type GatherNdAttr
- type GatherV2Attr
- type GenerateBoundingBoxProposalsAttr
- type GenerateVocabRemappingAttr
- type HashTableV2Attr
- type HistogramFixedWidthAttr
- type IRFFT2DAttr
- type IRFFT3DAttr
- type IRFFTAttr
- type IRFFTNDAttr
- type IdentityReaderV2Attr
- type IgnoreErrorsDatasetAttr
- type ImagAttr
- type ImageProjectiveTransformV2Attr
- type ImageProjectiveTransformV3Attr
- type ImageSummaryAttr
- type InfeedEnqueueAttr
- type InfeedEnqueuePrelinearizedBufferAttr
- type InfeedEnqueueTupleAttr
- type InitializeTableFromTextFileV2Attr
- type IsTPUEmbeddingInitializedAttr
- type IsotonicRegressionAttr
- type IteratorFromStringHandleAttr
- type LRNAttr
- type LRNGradAttr
- type LSTMBlockCellAttr
- type LeakyReluAttr
- type LeakyReluGradAttr
- type LearnedUnigramCandidateSamplerAttr
- type ListDatasetAttr
- type ListDiffAttr
- type LoadAndRemapMatrixAttr
- type LoadTPUEmbeddingADAMParametersAttr
- type LoadTPUEmbeddingAdadeltaParametersAttr
- type LoadTPUEmbeddingAdagradMomentumParametersAttr
- func LoadTPUEmbeddingAdagradMomentumParametersConfig(value string) LoadTPUEmbeddingAdagradMomentumParametersAttr
- func LoadTPUEmbeddingAdagradMomentumParametersTableId(value int64) LoadTPUEmbeddingAdagradMomentumParametersAttr
- func LoadTPUEmbeddingAdagradMomentumParametersTableName(value string) LoadTPUEmbeddingAdagradMomentumParametersAttr
- type LoadTPUEmbeddingAdagradParametersAttr
- type LoadTPUEmbeddingCenteredRMSPropParametersAttr
- func LoadTPUEmbeddingCenteredRMSPropParametersConfig(value string) LoadTPUEmbeddingCenteredRMSPropParametersAttr
- func LoadTPUEmbeddingCenteredRMSPropParametersTableId(value int64) LoadTPUEmbeddingCenteredRMSPropParametersAttr
- func LoadTPUEmbeddingCenteredRMSPropParametersTableName(value string) LoadTPUEmbeddingCenteredRMSPropParametersAttr
- type LoadTPUEmbeddingFTRLParametersAttr
- type LoadTPUEmbeddingFrequencyEstimatorParametersAttr
- func LoadTPUEmbeddingFrequencyEstimatorParametersConfig(value string) LoadTPUEmbeddingFrequencyEstimatorParametersAttr
- func LoadTPUEmbeddingFrequencyEstimatorParametersTableId(value int64) LoadTPUEmbeddingFrequencyEstimatorParametersAttr
- func LoadTPUEmbeddingFrequencyEstimatorParametersTableName(value string) LoadTPUEmbeddingFrequencyEstimatorParametersAttr
- type LoadTPUEmbeddingMDLAdagradLightParametersAttr
- func LoadTPUEmbeddingMDLAdagradLightParametersConfig(value string) LoadTPUEmbeddingMDLAdagradLightParametersAttr
- func LoadTPUEmbeddingMDLAdagradLightParametersTableId(value int64) LoadTPUEmbeddingMDLAdagradLightParametersAttr
- func LoadTPUEmbeddingMDLAdagradLightParametersTableName(value string) LoadTPUEmbeddingMDLAdagradLightParametersAttr
- type LoadTPUEmbeddingMomentumParametersAttr
- type LoadTPUEmbeddingProximalAdagradParametersAttr
- func LoadTPUEmbeddingProximalAdagradParametersConfig(value string) LoadTPUEmbeddingProximalAdagradParametersAttr
- func LoadTPUEmbeddingProximalAdagradParametersTableId(value int64) LoadTPUEmbeddingProximalAdagradParametersAttr
- func LoadTPUEmbeddingProximalAdagradParametersTableName(value string) LoadTPUEmbeddingProximalAdagradParametersAttr
- type LoadTPUEmbeddingRMSPropParametersAttr
- type LoadTPUEmbeddingStochasticGradientDescentParametersAttr
- func LoadTPUEmbeddingStochasticGradientDescentParametersConfig(value string) LoadTPUEmbeddingStochasticGradientDescentParametersAttr
- func LoadTPUEmbeddingStochasticGradientDescentParametersTableId(value int64) LoadTPUEmbeddingStochasticGradientDescentParametersAttr
- func LoadTPUEmbeddingStochasticGradientDescentParametersTableName(value string) LoadTPUEmbeddingStochasticGradientDescentParametersAttr
- type LogUniformCandidateSamplerAttr
- type LowerBoundAttr
- type LuAttr
- type MapClearAttr
- type MapIncompleteSizeAttr
- type MapPeekAttr
- type MapSizeAttr
- type MapStageAttr
- type MapUnstageAttr
- type MapUnstageNoKeyAttr
- type MatMulAttr
- type MatrixDiagPartV3Attr
- type MatrixDiagV3Attr
- type MatrixInverseAttr
- type MatrixSetDiagV3Attr
- type MatrixSolveAttr
- type MatrixSolveLsAttr
- type MatrixTriangularSolveAttr
- type MaxAttr
- type MaxPool3DAttr
- type MaxPool3DGradAttr
- type MaxPool3DGradGradAttr
- type MaxPoolAttr
- type MaxPoolGradAttr
- type MaxPoolGradGradAttr
- type MaxPoolGradGradV2Attr
- type MaxPoolGradGradWithArgmaxAttr
- type MaxPoolGradV2Attr
- type MaxPoolGradWithArgmaxAttr
- type MaxPoolV2Attr
- type MaxPoolWithArgmaxAttr
- type MeanAttr
- type MergeDedupDataAttr
- type MergeV2CheckpointsAttr
- type MfccAttr
- type MinAttr
- type ModelDatasetAttr
- type MultiDeviceIteratorFromStringHandleAttr
- type MultinomialAttr
- type MutableDenseHashTableV2Attr
- func MutableDenseHashTableV2Container(value string) MutableDenseHashTableV2Attr
- func MutableDenseHashTableV2InitialNumBuckets(value int64) MutableDenseHashTableV2Attr
- func MutableDenseHashTableV2MaxLoadFactor(value float32) MutableDenseHashTableV2Attr
- func MutableDenseHashTableV2SharedName(value string) MutableDenseHashTableV2Attr
- func MutableDenseHashTableV2UseNodeNameSharing(value bool) MutableDenseHashTableV2Attr
- func MutableDenseHashTableV2ValueShape(value tf.Shape) MutableDenseHashTableV2Attr
- type MutableHashTableOfTensorsV2Attr
- func MutableHashTableOfTensorsV2Container(value string) MutableHashTableOfTensorsV2Attr
- func MutableHashTableOfTensorsV2SharedName(value string) MutableHashTableOfTensorsV2Attr
- func MutableHashTableOfTensorsV2UseNodeNameSharing(value bool) MutableHashTableOfTensorsV2Attr
- func MutableHashTableOfTensorsV2ValueShape(value tf.Shape) MutableHashTableOfTensorsV2Attr
- type MutableHashTableV2Attr
- type MutexV2Attr
- type NonDeterministicIntsAttr
- type NonMaxSuppressionAttr
- type NonMaxSuppressionV4Attr
- type NonMaxSuppressionV5Attr
- type NotEqualAttr
- type NthElementAttr
- type OneHotAttr
- type OptimizeDatasetAttr
- type OptimizeDatasetV2Attr
- type OptionsDatasetAttr
- type OrderedMapClearAttr
- type OrderedMapIncompleteSizeAttr
- func OrderedMapIncompleteSizeCapacity(value int64) OrderedMapIncompleteSizeAttr
- func OrderedMapIncompleteSizeContainer(value string) OrderedMapIncompleteSizeAttr
- func OrderedMapIncompleteSizeMemoryLimit(value int64) OrderedMapIncompleteSizeAttr
- func OrderedMapIncompleteSizeSharedName(value string) OrderedMapIncompleteSizeAttr
- type OrderedMapPeekAttr
- type OrderedMapSizeAttr
- type OrderedMapStageAttr
- type OrderedMapUnstageAttr
- type OrderedMapUnstageNoKeyAttr
- func OrderedMapUnstageNoKeyCapacity(value int64) OrderedMapUnstageNoKeyAttr
- func OrderedMapUnstageNoKeyContainer(value string) OrderedMapUnstageNoKeyAttr
- func OrderedMapUnstageNoKeyMemoryLimit(value int64) OrderedMapUnstageNoKeyAttr
- func OrderedMapUnstageNoKeySharedName(value string) OrderedMapUnstageNoKeyAttr
- type OutfeedDequeueAttr
- type OutfeedDequeueTupleAttr
- type PackAttr
- type PaddedBatchDatasetAttr
- type PaddedBatchDatasetV2Attr
- type PaddingFIFOQueueV2Attr
- type ParameterizedTruncatedNormalAttr
- type ParseExampleDatasetAttr
- func ParseExampleDatasetRaggedKeys(value []string) ParseExampleDatasetAttr
- func ParseExampleDatasetRaggedSplitTypes(value []tf.DataType) ParseExampleDatasetAttr
- func ParseExampleDatasetRaggedValueTypes(value []tf.DataType) ParseExampleDatasetAttr
- func ParseExampleDatasetSloppy(value bool) ParseExampleDatasetAttr
- type ParseExampleDatasetV2Attr
- func ParseExampleDatasetV2Deterministic(value string) ParseExampleDatasetV2Attr
- func ParseExampleDatasetV2RaggedKeys(value []string) ParseExampleDatasetV2Attr
- func ParseExampleDatasetV2RaggedSplitTypes(value []tf.DataType) ParseExampleDatasetV2Attr
- func ParseExampleDatasetV2RaggedValueTypes(value []tf.DataType) ParseExampleDatasetV2Attr
- type ParseSequenceExampleAttr
- func ParseSequenceExampleContextDenseShapes(value []tf.Shape) ParseSequenceExampleAttr
- func ParseSequenceExampleContextSparseTypes(value []tf.DataType) ParseSequenceExampleAttr
- func ParseSequenceExampleFeatureListDenseShapes(value []tf.Shape) ParseSequenceExampleAttr
- func ParseSequenceExampleFeatureListDenseTypes(value []tf.DataType) ParseSequenceExampleAttr
- func ParseSequenceExampleFeatureListSparseTypes(value []tf.DataType) ParseSequenceExampleAttr
- func ParseSequenceExampleNcontextDense(value int64) ParseSequenceExampleAttr
- func ParseSequenceExampleNcontextSparse(value int64) ParseSequenceExampleAttr
- func ParseSequenceExampleNfeatureListDense(value int64) ParseSequenceExampleAttr
- func ParseSequenceExampleNfeatureListSparse(value int64) ParseSequenceExampleAttr
- type ParseSequenceExampleV2Attr
- func ParseSequenceExampleV2ContextDenseShapes(value []tf.Shape) ParseSequenceExampleV2Attr
- func ParseSequenceExampleV2ContextRaggedSplitTypes(value []tf.DataType) ParseSequenceExampleV2Attr
- func ParseSequenceExampleV2ContextRaggedValueTypes(value []tf.DataType) ParseSequenceExampleV2Attr
- func ParseSequenceExampleV2ContextSparseTypes(value []tf.DataType) ParseSequenceExampleV2Attr
- func ParseSequenceExampleV2FeatureListDenseShapes(value []tf.Shape) ParseSequenceExampleV2Attr
- func ParseSequenceExampleV2FeatureListDenseTypes(value []tf.DataType) ParseSequenceExampleV2Attr
- func ParseSequenceExampleV2FeatureListRaggedSplitTypes(value []tf.DataType) ParseSequenceExampleV2Attr
- func ParseSequenceExampleV2FeatureListRaggedValueTypes(value []tf.DataType) ParseSequenceExampleV2Attr
- func ParseSequenceExampleV2FeatureListSparseTypes(value []tf.DataType) ParseSequenceExampleV2Attr
- func ParseSequenceExampleV2NcontextSparse(value int64) ParseSequenceExampleV2Attr
- func ParseSequenceExampleV2NfeatureListDense(value int64) ParseSequenceExampleV2Attr
- func ParseSequenceExampleV2NfeatureListSparse(value int64) ParseSequenceExampleV2Attr
- type ParseSingleSequenceExampleAttr
- func ParseSingleSequenceExampleContextDenseShapes(value []tf.Shape) ParseSingleSequenceExampleAttr
- func ParseSingleSequenceExampleContextSparseTypes(value []tf.DataType) ParseSingleSequenceExampleAttr
- func ParseSingleSequenceExampleFeatureListDenseShapes(value []tf.Shape) ParseSingleSequenceExampleAttr
- func ParseSingleSequenceExampleFeatureListDenseTypes(value []tf.DataType) ParseSingleSequenceExampleAttr
- func ParseSingleSequenceExampleFeatureListSparseTypes(value []tf.DataType) ParseSingleSequenceExampleAttr
- type PlaceholderAttr
- type PrefetchDatasetAttr
- type PrelinearizeAttr
- type PrelinearizeTupleAttr
- type PreventGradientAttr
- type PrintAttr
- type PrintV2Attr
- type PriorityQueueV2Attr
- type ProdAttr
- type QrAttr
- type QuantizeAndDequantizeAttr
- func QuantizeAndDequantizeInputMax(value float32) QuantizeAndDequantizeAttr
- func QuantizeAndDequantizeInputMin(value float32) QuantizeAndDequantizeAttr
- func QuantizeAndDequantizeNumBits(value int64) QuantizeAndDequantizeAttr
- func QuantizeAndDequantizeRangeGiven(value bool) QuantizeAndDequantizeAttr
- func QuantizeAndDequantizeSignedInput(value bool) QuantizeAndDequantizeAttr
- type QuantizeAndDequantizeV2Attr
- func QuantizeAndDequantizeV2Axis(value int64) QuantizeAndDequantizeV2Attr
- func QuantizeAndDequantizeV2NarrowRange(value bool) QuantizeAndDequantizeV2Attr
- func QuantizeAndDequantizeV2NumBits(value int64) QuantizeAndDequantizeV2Attr
- func QuantizeAndDequantizeV2RangeGiven(value bool) QuantizeAndDequantizeV2Attr
- func QuantizeAndDequantizeV2RoundMode(value string) QuantizeAndDequantizeV2Attr
- func QuantizeAndDequantizeV2SignedInput(value bool) QuantizeAndDequantizeV2Attr
- type QuantizeAndDequantizeV3Attr
- func QuantizeAndDequantizeV3Axis(value int64) QuantizeAndDequantizeV3Attr
- func QuantizeAndDequantizeV3NarrowRange(value bool) QuantizeAndDequantizeV3Attr
- func QuantizeAndDequantizeV3RangeGiven(value bool) QuantizeAndDequantizeV3Attr
- func QuantizeAndDequantizeV3SignedInput(value bool) QuantizeAndDequantizeV3Attr
- type QuantizeAndDequantizeV4Attr
- func QuantizeAndDequantizeV4Axis(value int64) QuantizeAndDequantizeV4Attr
- func QuantizeAndDequantizeV4NarrowRange(value bool) QuantizeAndDequantizeV4Attr
- func QuantizeAndDequantizeV4NumBits(value int64) QuantizeAndDequantizeV4Attr
- func QuantizeAndDequantizeV4RangeGiven(value bool) QuantizeAndDequantizeV4Attr
- func QuantizeAndDequantizeV4RoundMode(value string) QuantizeAndDequantizeV4Attr
- func QuantizeAndDequantizeV4SignedInput(value bool) QuantizeAndDequantizeV4Attr
- type QuantizeAndDequantizeV4GradAttr
- type QuantizeV2Attr
- type QuantizedAddAttr
- type QuantizedConv2DAttr
- type QuantizedConv2DPerChannelAttr
- type QuantizedDepthwiseConv2DAttr
- type QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr
- func QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeDilations(value []int64) QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr
- func QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeOutType(value tf.DataType) QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr
- func QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizePaddingList(value []int64) QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr
- type QuantizedDepthwiseConv2DWithBiasAndReluAttr
- func QuantizedDepthwiseConv2DWithBiasAndReluDilations(value []int64) QuantizedDepthwiseConv2DWithBiasAndReluAttr
- func QuantizedDepthwiseConv2DWithBiasAndReluOutType(value tf.DataType) QuantizedDepthwiseConv2DWithBiasAndReluAttr
- func QuantizedDepthwiseConv2DWithBiasAndReluPaddingList(value []int64) QuantizedDepthwiseConv2DWithBiasAndReluAttr
- type QuantizedDepthwiseConv2DWithBiasAttr
- type QuantizedInstanceNormAttr
- func QuantizedInstanceNormGivenYMax(value float32) QuantizedInstanceNormAttr
- func QuantizedInstanceNormGivenYMin(value float32) QuantizedInstanceNormAttr
- func QuantizedInstanceNormMinSeparation(value float32) QuantizedInstanceNormAttr
- func QuantizedInstanceNormOutputRangeGiven(value bool) QuantizedInstanceNormAttr
- func QuantizedInstanceNormVarianceEpsilon(value float32) QuantizedInstanceNormAttr
- type QuantizedMatMulAttr
- type QuantizedMatMulWithBiasAndReluAndRequantizeAttr
- func QuantizedMatMulWithBiasAndReluAndRequantizeInputQuantMode(value string) QuantizedMatMulWithBiasAndReluAndRequantizeAttr
- func QuantizedMatMulWithBiasAndReluAndRequantizeToutput(value tf.DataType) QuantizedMatMulWithBiasAndReluAndRequantizeAttr
- func QuantizedMatMulWithBiasAndReluAndRequantizeTransposeA(value bool) QuantizedMatMulWithBiasAndReluAndRequantizeAttr
- func QuantizedMatMulWithBiasAndReluAndRequantizeTransposeB(value bool) QuantizedMatMulWithBiasAndReluAndRequantizeAttr
- type QuantizedMatMulWithBiasAndReluAttr
- func QuantizedMatMulWithBiasAndReluInputQuantMode(value string) QuantizedMatMulWithBiasAndReluAttr
- func QuantizedMatMulWithBiasAndReluToutput(value tf.DataType) QuantizedMatMulWithBiasAndReluAttr
- func QuantizedMatMulWithBiasAndReluTransposeA(value bool) QuantizedMatMulWithBiasAndReluAttr
- func QuantizedMatMulWithBiasAndReluTransposeB(value bool) QuantizedMatMulWithBiasAndReluAttr
- type QuantizedMatMulWithBiasAttr
- func QuantizedMatMulWithBiasInputQuantMode(value string) QuantizedMatMulWithBiasAttr
- func QuantizedMatMulWithBiasToutput(value tf.DataType) QuantizedMatMulWithBiasAttr
- func QuantizedMatMulWithBiasTransposeA(value bool) QuantizedMatMulWithBiasAttr
- func QuantizedMatMulWithBiasTransposeB(value bool) QuantizedMatMulWithBiasAttr
- type QuantizedMulAttr
- type QuantizedRelu6Attr
- type QuantizedReluAttr
- type QuantizedReluXAttr
- type QuantizedResizeBilinearAttr
- type QueueCloseV2Attr
- type QueueDequeueManyV2Attr
- type QueueDequeueUpToV2Attr
- type QueueDequeueV2Attr
- type QueueEnqueueManyV2Attr
- type QueueEnqueueV2Attr
- type RFFT2DAttr
- type RFFT3DAttr
- type RFFTAttr
- type RFFTNDAttr
- type RaggedBincountAttr
- type RaggedCountSparseOutputAttr
- type RaggedRangeAttr
- type RaggedTensorFromVariantAttr
- type RandomCropAttr
- type RandomDatasetAttr
- type RandomDatasetV2Attr
- type RandomGammaAttr
- type RandomIndexShuffleAttr
- type RandomPoissonAttr
- type RandomPoissonV2Attr
- type RandomShuffleAttr
- type RandomShuffleQueueV2Attr
- func RandomShuffleQueueV2Capacity(value int64) RandomShuffleQueueV2Attr
- func RandomShuffleQueueV2Container(value string) RandomShuffleQueueV2Attr
- func RandomShuffleQueueV2MinAfterDequeue(value int64) RandomShuffleQueueV2Attr
- func RandomShuffleQueueV2Seed(value int64) RandomShuffleQueueV2Attr
- func RandomShuffleQueueV2Seed2(value int64) RandomShuffleQueueV2Attr
- func RandomShuffleQueueV2Shapes(value []tf.Shape) RandomShuffleQueueV2Attr
- func RandomShuffleQueueV2SharedName(value string) RandomShuffleQueueV2Attr
- type RandomStandardNormalAttr
- type RandomUniformAttr
- type RandomUniformIntAttr
- type RangeDatasetAttr
- type ReadVariableXlaSplitNDAttr
- type RealAttr
- type RebatchDatasetAttr
- type RecordInputAttr
- func RecordInputBatchSize(value int64) RecordInputAttr
- func RecordInputCompressionType(value string) RecordInputAttr
- func RecordInputFileBufferSize(value int64) RecordInputAttr
- func RecordInputFileParallelism(value int64) RecordInputAttr
- func RecordInputFileRandomSeed(value int64) RecordInputAttr
- func RecordInputFileShuffleShiftRatio(value float32) RecordInputAttr
- type RecvAttr
- type ReduceJoinAttr
- type RegexReplaceAttr
- type RegisterDatasetAttr
- type RegisterDatasetV2Attr
- type RepeatDatasetAttr
- type RequantizePerChannelAttr
- type ResizeAreaAttr
- type ResizeBicubicAttr
- type ResizeBicubicGradAttr
- type ResizeBilinearAttr
- type ResizeBilinearGradAttr
- type ResizeNearestNeighborAttr
- type ResizeNearestNeighborGradAttr
- type ResourceApplyAdaMaxAttr
- type ResourceApplyAdadeltaAttr
- type ResourceApplyAdagradAttr
- type ResourceApplyAdagradDAAttr
- type ResourceApplyAdagradV2Attr
- type ResourceApplyAdamAttr
- type ResourceApplyAdamWithAmsgradAttr
- type ResourceApplyAddSignAttr
- type ResourceApplyCenteredRMSPropAttr
- type ResourceApplyFtrlAttr
- type ResourceApplyFtrlV2Attr
- type ResourceApplyGradientDescentAttr
- type ResourceApplyKerasMomentumAttr
- type ResourceApplyMomentumAttr
- type ResourceApplyPowerSignAttr
- type ResourceApplyProximalAdagradAttr
- type ResourceApplyProximalGradientDescentAttr
- type ResourceApplyRMSPropAttr
- type ResourceConditionalAccumulatorAttr
- type ResourceGatherAttr
- type ResourceScatterNdAddAttr
- type ResourceScatterNdSubAttr
- type ResourceScatterNdUpdateAttr
- type ResourceSparseApplyAdadeltaAttr
- type ResourceSparseApplyAdagradAttr
- type ResourceSparseApplyAdagradDAAttr
- type ResourceSparseApplyAdagradV2Attr
- type ResourceSparseApplyCenteredRMSPropAttr
- type ResourceSparseApplyFtrlAttr
- type ResourceSparseApplyFtrlV2Attr
- type ResourceSparseApplyKerasMomentumAttr
- type ResourceSparseApplyMomentumAttr
- type ResourceSparseApplyProximalAdagradAttr
- type ResourceSparseApplyProximalGradientDescentAttr
- type ResourceSparseApplyRMSPropAttr
- type ResourceStridedSliceAssignAttr
- func ResourceStridedSliceAssignBeginMask(value int64) ResourceStridedSliceAssignAttr
- func ResourceStridedSliceAssignEllipsisMask(value int64) ResourceStridedSliceAssignAttr
- func ResourceStridedSliceAssignEndMask(value int64) ResourceStridedSliceAssignAttr
- func ResourceStridedSliceAssignNewAxisMask(value int64) ResourceStridedSliceAssignAttr
- func ResourceStridedSliceAssignShrinkAxisMask(value int64) ResourceStridedSliceAssignAttr
- type RestoreAttr
- type RestoreSliceAttr
- type RetrieveTPUEmbeddingADAMParametersAttr
- type RetrieveTPUEmbeddingAdadeltaParametersAttr
- func RetrieveTPUEmbeddingAdadeltaParametersConfig(value string) RetrieveTPUEmbeddingAdadeltaParametersAttr
- func RetrieveTPUEmbeddingAdadeltaParametersTableId(value int64) RetrieveTPUEmbeddingAdadeltaParametersAttr
- func RetrieveTPUEmbeddingAdadeltaParametersTableName(value string) RetrieveTPUEmbeddingAdadeltaParametersAttr
- type RetrieveTPUEmbeddingAdagradMomentumParametersAttr
- func RetrieveTPUEmbeddingAdagradMomentumParametersConfig(value string) RetrieveTPUEmbeddingAdagradMomentumParametersAttr
- func RetrieveTPUEmbeddingAdagradMomentumParametersTableId(value int64) RetrieveTPUEmbeddingAdagradMomentumParametersAttr
- func RetrieveTPUEmbeddingAdagradMomentumParametersTableName(value string) RetrieveTPUEmbeddingAdagradMomentumParametersAttr
- type RetrieveTPUEmbeddingAdagradParametersAttr
- func RetrieveTPUEmbeddingAdagradParametersConfig(value string) RetrieveTPUEmbeddingAdagradParametersAttr
- func RetrieveTPUEmbeddingAdagradParametersTableId(value int64) RetrieveTPUEmbeddingAdagradParametersAttr
- func RetrieveTPUEmbeddingAdagradParametersTableName(value string) RetrieveTPUEmbeddingAdagradParametersAttr
- type RetrieveTPUEmbeddingCenteredRMSPropParametersAttr
- func RetrieveTPUEmbeddingCenteredRMSPropParametersConfig(value string) RetrieveTPUEmbeddingCenteredRMSPropParametersAttr
- func RetrieveTPUEmbeddingCenteredRMSPropParametersTableId(value int64) RetrieveTPUEmbeddingCenteredRMSPropParametersAttr
- func RetrieveTPUEmbeddingCenteredRMSPropParametersTableName(value string) RetrieveTPUEmbeddingCenteredRMSPropParametersAttr
- type RetrieveTPUEmbeddingFTRLParametersAttr
- type RetrieveTPUEmbeddingFrequencyEstimatorParametersAttr
- func RetrieveTPUEmbeddingFrequencyEstimatorParametersConfig(value string) RetrieveTPUEmbeddingFrequencyEstimatorParametersAttr
- func RetrieveTPUEmbeddingFrequencyEstimatorParametersTableId(value int64) RetrieveTPUEmbeddingFrequencyEstimatorParametersAttr
- func RetrieveTPUEmbeddingFrequencyEstimatorParametersTableName(value string) RetrieveTPUEmbeddingFrequencyEstimatorParametersAttr
- type RetrieveTPUEmbeddingMDLAdagradLightParametersAttr
- func RetrieveTPUEmbeddingMDLAdagradLightParametersConfig(value string) RetrieveTPUEmbeddingMDLAdagradLightParametersAttr
- func RetrieveTPUEmbeddingMDLAdagradLightParametersTableId(value int64) RetrieveTPUEmbeddingMDLAdagradLightParametersAttr
- func RetrieveTPUEmbeddingMDLAdagradLightParametersTableName(value string) RetrieveTPUEmbeddingMDLAdagradLightParametersAttr
- type RetrieveTPUEmbeddingMomentumParametersAttr
- func RetrieveTPUEmbeddingMomentumParametersConfig(value string) RetrieveTPUEmbeddingMomentumParametersAttr
- func RetrieveTPUEmbeddingMomentumParametersTableId(value int64) RetrieveTPUEmbeddingMomentumParametersAttr
- func RetrieveTPUEmbeddingMomentumParametersTableName(value string) RetrieveTPUEmbeddingMomentumParametersAttr
- type RetrieveTPUEmbeddingProximalAdagradParametersAttr
- func RetrieveTPUEmbeddingProximalAdagradParametersConfig(value string) RetrieveTPUEmbeddingProximalAdagradParametersAttr
- func RetrieveTPUEmbeddingProximalAdagradParametersTableId(value int64) RetrieveTPUEmbeddingProximalAdagradParametersAttr
- func RetrieveTPUEmbeddingProximalAdagradParametersTableName(value string) RetrieveTPUEmbeddingProximalAdagradParametersAttr
- type RetrieveTPUEmbeddingRMSPropParametersAttr
- func RetrieveTPUEmbeddingRMSPropParametersConfig(value string) RetrieveTPUEmbeddingRMSPropParametersAttr
- func RetrieveTPUEmbeddingRMSPropParametersTableId(value int64) RetrieveTPUEmbeddingRMSPropParametersAttr
- func RetrieveTPUEmbeddingRMSPropParametersTableName(value string) RetrieveTPUEmbeddingRMSPropParametersAttr
- type RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr
- func RetrieveTPUEmbeddingStochasticGradientDescentParametersConfig(value string) RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr
- func RetrieveTPUEmbeddingStochasticGradientDescentParametersTableId(value int64) RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr
- func RetrieveTPUEmbeddingStochasticGradientDescentParametersTableName(value string) RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr
- type ReverseSequenceAttr
- type SampleDistortedBoundingBoxAttr
- func SampleDistortedBoundingBoxAreaRange(value []float32) SampleDistortedBoundingBoxAttr
- func SampleDistortedBoundingBoxAspectRatioRange(value []float32) SampleDistortedBoundingBoxAttr
- func SampleDistortedBoundingBoxMaxAttempts(value int64) SampleDistortedBoundingBoxAttr
- func SampleDistortedBoundingBoxMinObjectCovered(value float32) SampleDistortedBoundingBoxAttr
- func SampleDistortedBoundingBoxSeed(value int64) SampleDistortedBoundingBoxAttr
- func SampleDistortedBoundingBoxSeed2(value int64) SampleDistortedBoundingBoxAttr
- func SampleDistortedBoundingBoxUseImageIfNoBoundingBoxes(value bool) SampleDistortedBoundingBoxAttr
- type SampleDistortedBoundingBoxV2Attr
- func SampleDistortedBoundingBoxV2AreaRange(value []float32) SampleDistortedBoundingBoxV2Attr
- func SampleDistortedBoundingBoxV2AspectRatioRange(value []float32) SampleDistortedBoundingBoxV2Attr
- func SampleDistortedBoundingBoxV2MaxAttempts(value int64) SampleDistortedBoundingBoxV2Attr
- func SampleDistortedBoundingBoxV2Seed(value int64) SampleDistortedBoundingBoxV2Attr
- func SampleDistortedBoundingBoxV2Seed2(value int64) SampleDistortedBoundingBoxV2Attr
- func SampleDistortedBoundingBoxV2UseImageIfNoBoundingBoxes(value bool) SampleDistortedBoundingBoxV2Attr
- type ScatterNdAttr
- type ScatterNdNonAliasingAddAttr
- type Scope
- func (s *Scope) AddOperation(args tf.OpSpec) *tf.Operation
- func (s *Scope) Err() error
- func (s *Scope) Finalize() (*tf.Graph, error)
- func (s *Scope) SubScope(namespace string) *Scope
- func (s *Scope) UpdateErr(op string, err error)
- func (s *Scope) WithControlDependencies(ops ...*tf.Operation) *Scope
- func (s *Scope) WithDevice(device string) *Scope
- type SdcaOptimizerAttr
- type SdcaOptimizerV2Attr
- type SelfAdjointEigV2Attr
- type SendAttr
- type SerializeIteratorAttr
- type SerializeManySparseAttr
- type SerializeSparseAttr
- type SetSizeAttr
- type ShapeAttr
- type ShapeNAttr
- type ShardDatasetAttr
- type ShuffleAndRepeatDatasetAttr
- type ShuffleDatasetAttr
- type SizeAttr
- type SkipDatasetAttr
- type SkipgramAttr
- type SlidingWindowDatasetAttr
- type SnapshotDatasetAttr
- func SnapshotDatasetCompression(value string) SnapshotDatasetAttr
- func SnapshotDatasetMode(value string) SnapshotDatasetAttr
- func SnapshotDatasetNumReaderThreads(value int64) SnapshotDatasetAttr
- func SnapshotDatasetNumWriterThreads(value int64) SnapshotDatasetAttr
- func SnapshotDatasetPendingSnapshotExpirySeconds(value int64) SnapshotDatasetAttr
- func SnapshotDatasetReaderBufferSize(value int64) SnapshotDatasetAttr
- func SnapshotDatasetReaderPathPrefix(value string) SnapshotDatasetAttr
- func SnapshotDatasetSeed(value int64) SnapshotDatasetAttr
- func SnapshotDatasetSeed2(value int64) SnapshotDatasetAttr
- func SnapshotDatasetShardSizeBytes(value int64) SnapshotDatasetAttr
- func SnapshotDatasetShuffleOnRead(value bool) SnapshotDatasetAttr
- func SnapshotDatasetSnapshotName(value string) SnapshotDatasetAttr
- func SnapshotDatasetWriterBufferSize(value int64) SnapshotDatasetAttr
- func SnapshotDatasetWriterPathPrefix(value string) SnapshotDatasetAttr
- type SobolSampleAttr
- type SpaceToDepthAttr
- type SparseBincountAttr
- type SparseCountSparseOutputAttr
- type SparseMatMulAttr
- type SparseMatrixMatMulAttr
- func SparseMatrixMatMulAdjointA(value bool) SparseMatrixMatMulAttr
- func SparseMatrixMatMulAdjointB(value bool) SparseMatrixMatMulAttr
- func SparseMatrixMatMulConjugateOutput(value bool) SparseMatrixMatMulAttr
- func SparseMatrixMatMulTransposeA(value bool) SparseMatrixMatMulAttr
- func SparseMatrixMatMulTransposeB(value bool) SparseMatrixMatMulAttr
- func SparseMatrixMatMulTransposeOutput(value bool) SparseMatrixMatMulAttr
- type SparseMatrixSparseMatMulAttr
- func SparseMatrixSparseMatMulAdjointA(value bool) SparseMatrixSparseMatMulAttr
- func SparseMatrixSparseMatMulAdjointB(value bool) SparseMatrixSparseMatMulAttr
- func SparseMatrixSparseMatMulTransposeA(value bool) SparseMatrixSparseMatMulAttr
- func SparseMatrixSparseMatMulTransposeB(value bool) SparseMatrixSparseMatMulAttr
- type SparseMatrixTransposeAttr
- type SparseReduceMaxAttr
- type SparseReduceMaxSparseAttr
- type SparseReduceSumAttr
- type SparseReduceSumSparseAttr
- type SparseSegmentMeanAttr
- type SparseSegmentMeanWithNumSegmentsAttr
- type SparseSegmentSqrtNAttr
- type SparseSegmentSqrtNWithNumSegmentsAttr
- type SparseSegmentSumAttr
- type SparseSegmentSumWithNumSegmentsAttr
- type SparseTensorDenseMatMulAttr
- type SparseToDenseAttr
- type SparseToSparseSetOperationAttr
- type SplitDedupDataAttr
- type SqueezeAttr
- type StackPushV2Attr
- type StackV2Attr
- type StageAttr
- type StageClearAttr
- type StagePeekAttr
- type StageSizeAttr
- type StatefulStandardNormalAttr
- type StatefulStandardNormalV2Attr
- type StatefulTruncatedNormalAttr
- type StatefulUniformAttr
- type StatefulUniformFullIntAttr
- type StatelessMultinomialAttr
- type StatelessRandomBinomialAttr
- type StatelessRandomNormalAttr
- type StatelessRandomNormalV2Attr
- type StatelessRandomUniformAttr
- type StatelessRandomUniformFullIntAttr
- type StatelessRandomUniformFullIntV2Attr
- type StatelessRandomUniformV2Attr
- type StatelessSampleDistortedBoundingBoxAttr
- func StatelessSampleDistortedBoundingBoxAreaRange(value []float32) StatelessSampleDistortedBoundingBoxAttr
- func StatelessSampleDistortedBoundingBoxAspectRatioRange(value []float32) StatelessSampleDistortedBoundingBoxAttr
- func StatelessSampleDistortedBoundingBoxMaxAttempts(value int64) StatelessSampleDistortedBoundingBoxAttr
- func StatelessSampleDistortedBoundingBoxUseImageIfNoBoundingBoxes(value bool) StatelessSampleDistortedBoundingBoxAttr
- type StatelessTruncatedNormalAttr
- type StatelessTruncatedNormalV2Attr
- type StaticRegexReplaceAttr
- type StatsAggregatorHandleAttr
- type StridedSliceAttr
- type StridedSliceGradAttr
- func StridedSliceGradBeginMask(value int64) StridedSliceGradAttr
- func StridedSliceGradEllipsisMask(value int64) StridedSliceGradAttr
- func StridedSliceGradEndMask(value int64) StridedSliceGradAttr
- func StridedSliceGradNewAxisMask(value int64) StridedSliceGradAttr
- func StridedSliceGradShrinkAxisMask(value int64) StridedSliceGradAttr
- type StringFormatAttr
- type StringJoinAttr
- type StringLengthAttr
- type StringLowerAttr
- type StringSplitAttr
- type StringSplitV2Attr
- type StringToNumberAttr
- type StringUpperAttr
- type SubstrAttr
- type SumAttr
- type SvdAttr
- type TFRecordDatasetAttr
- type TFRecordDatasetV2Attr
- type TFRecordReaderV2Attr
- type TPUPartitionedInputAttr
- type TPUPartitionedInputV2Attr
- type TPUPartitionedOutputAttr
- type TPUReplicateMetadataAttr
- func TPUReplicateMetadataAllowSoftPlacement(value bool) TPUReplicateMetadataAttr
- func TPUReplicateMetadataComputationShape(value []int64) TPUReplicateMetadataAttr
- func TPUReplicateMetadataDeviceAssignment(value []int64) TPUReplicateMetadataAttr
- func TPUReplicateMetadataHostComputeCore(value []string) TPUReplicateMetadataAttr
- func TPUReplicateMetadataNumCoresPerReplica(value int64) TPUReplicateMetadataAttr
- func TPUReplicateMetadataPaddingMap(value []string) TPUReplicateMetadataAttr
- func TPUReplicateMetadataStepMarkerLocation(value string) TPUReplicateMetadataAttr
- func TPUReplicateMetadataTopology(value string) TPUReplicateMetadataAttr
- func TPUReplicateMetadataTpuCompileOptionsProto(value string) TPUReplicateMetadataAttr
- func TPUReplicateMetadataUseSpmdForXlaPartitioning(value bool) TPUReplicateMetadataAttr
- func TPUReplicateMetadataUseTpu(value bool) TPUReplicateMetadataAttr
- type TPUReplicatedInputAttr
- type TakeDatasetAttr
- type TakeManySparseFromTensorsMapAttr
- type TensorArrayConcatV2Attr
- type TensorArrayConcatV3Attr
- type TensorArrayGatherV2Attr
- type TensorArrayGatherV3Attr
- type TensorArrayV2Attr
- type TensorArrayV3Attr
- func TensorArrayV3ClearAfterRead(value bool) TensorArrayV3Attr
- func TensorArrayV3DynamicSize(value bool) TensorArrayV3Attr
- func TensorArrayV3ElementShape(value tf.Shape) TensorArrayV3Attr
- func TensorArrayV3IdenticalElementShapes(value bool) TensorArrayV3Attr
- func TensorArrayV3TensorArrayName(value string) TensorArrayV3Attr
- type TensorDatasetAttr
- type TensorListConcatAttr
- type TensorListSetItemAttr
- type TensorListStackAttr
- type TensorScatterAddAttr
- type TensorScatterMaxAttr
- type TensorScatterSubAttr
- type TensorScatterUpdateAttr
- type TensorSliceDatasetAttr
- type TensorStridedSliceUpdateAttr
- func TensorStridedSliceUpdateBeginMask(value int64) TensorStridedSliceUpdateAttr
- func TensorStridedSliceUpdateEllipsisMask(value int64) TensorStridedSliceUpdateAttr
- func TensorStridedSliceUpdateEndMask(value int64) TensorStridedSliceUpdateAttr
- func TensorStridedSliceUpdateNewAxisMask(value int64) TensorStridedSliceUpdateAttr
- func TensorStridedSliceUpdateShrinkAxisMask(value int64) TensorStridedSliceUpdateAttr
- type TensorSummaryAttr
- type TextLineDatasetAttr
- type TextLineReaderV2Attr
- type ThreadPoolHandleAttr
- type ThreadUnsafeUnigramCandidateSamplerAttr
- type TopKAttr
- type TopKV2Attr
- type TridiagonalSolveAttr
- type TruncatedNormalAttr
- type UnbatchAttr
- type UnbatchDatasetAttr
- type UnbatchGradAttr
- type UnicodeDecodeAttr
- type UnicodeDecodeWithOffsetsAttr
- func UnicodeDecodeWithOffsetsErrors(value string) UnicodeDecodeWithOffsetsAttr
- func UnicodeDecodeWithOffsetsReplaceControlCharacters(value bool) UnicodeDecodeWithOffsetsAttr
- func UnicodeDecodeWithOffsetsReplacementChar(value int64) UnicodeDecodeWithOffsetsAttr
- func UnicodeDecodeWithOffsetsTsplits(value tf.DataType) UnicodeDecodeWithOffsetsAttr
- type UnicodeEncodeAttr
- type UnicodeTranscodeAttr
- type UniformCandidateSamplerAttr
- type UniformDequantizeAttr
- type UniformQuantizeAttr
- type UniformQuantizedAddAttr
- type UniformQuantizedClipByValueAttr
- type UniformQuantizedConvolutionAttr
- func UniformQuantizedConvolutionBatchGroupCount(value int64) UniformQuantizedConvolutionAttr
- func UniformQuantizedConvolutionDimensionNumbers(value string) UniformQuantizedConvolutionAttr
- func UniformQuantizedConvolutionExplicitPadding(value []int64) UniformQuantizedConvolutionAttr
- func UniformQuantizedConvolutionFeatureGroupCount(value int64) UniformQuantizedConvolutionAttr
- func UniformQuantizedConvolutionLhsDilation(value []int64) UniformQuantizedConvolutionAttr
- func UniformQuantizedConvolutionLhsQuantizationAxis(value int64) UniformQuantizedConvolutionAttr
- func UniformQuantizedConvolutionOutputQuantizationAxis(value int64) UniformQuantizedConvolutionAttr
- func UniformQuantizedConvolutionRhsDilation(value []int64) UniformQuantizedConvolutionAttr
- func UniformQuantizedConvolutionRhsQuantizationAxis(value int64) UniformQuantizedConvolutionAttr
- func UniformQuantizedConvolutionWindowStrides(value []int64) UniformQuantizedConvolutionAttr
- type UniformQuantizedConvolutionHybridAttr
- func UniformQuantizedConvolutionHybridBatchGroupCount(value int64) UniformQuantizedConvolutionHybridAttr
- func UniformQuantizedConvolutionHybridDimensionNumbers(value string) UniformQuantizedConvolutionHybridAttr
- func UniformQuantizedConvolutionHybridExplicitPadding(value []int64) UniformQuantizedConvolutionHybridAttr
- func UniformQuantizedConvolutionHybridFeatureGroupCount(value int64) UniformQuantizedConvolutionHybridAttr
- func UniformQuantizedConvolutionHybridLhsDilation(value []int64) UniformQuantizedConvolutionHybridAttr
- func UniformQuantizedConvolutionHybridRhsDilation(value []int64) UniformQuantizedConvolutionHybridAttr
- func UniformQuantizedConvolutionHybridRhsQuantizationAxis(value int64) UniformQuantizedConvolutionHybridAttr
- func UniformQuantizedConvolutionHybridWindowStrides(value []int64) UniformQuantizedConvolutionHybridAttr
- type UniformQuantizedDotAttr
- type UniformQuantizedDotHybridAttr
- type UniformRequantizeAttr
- type UniqueAttr
- type UniqueDatasetAttr
- type UniqueV2Attr
- type UniqueWithCountsAttr
- type UniqueWithCountsV2Attr
- type UnpackAttr
- type UnstageAttr
- type UpperBoundAttr
- type VarHandleOpAttr
- type VariableShapeAttr
- type WholeFileReaderV2Attr
- type WindowDatasetAttr
- type WriteAudioSummaryAttr
- type WriteImageSummaryAttr
- type XlaConcatNDAttr
- type XlaConvV2Attr
- type XlaRngBitGeneratorAttr
- type XlaShardingAttr
- type XlaSplitNDAttr
- type XlaSpmdFullToShardShapeAttr
- type XlaSpmdShardToFullShapeAttr
- type ZipDatasetAttr
Examples ¶
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func Abort ¶
Raises an exception to abort the process when called.
If exit_without_error is true, the process will exit normally; otherwise it will exit with a SIGABRT signal.
Returns nothing but an exception.
Returns the created operation.
func Abs ¶
Computes the absolute value of a tensor.
Given a tensor `x`, this operation returns a tensor containing the absolute value of each element in `x`. For example, if x is an input element and y is an output element, this operation computes \\(y = |x|\\).
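For example, the following minimal sketch builds a graph that takes the absolute value of a constant vector and evaluates it in a session. It is written in the style of a godoc example for this package, assuming the usual `tf` (github.com/tensorflow/tensorflow/tensorflow/go) and `fmt` imports; the literal values are illustrative only.

```go
s := NewScope()
x := Const(s, []float32{-1.5, 0, 2.25})
y := Abs(s, x)
graph, err := s.Finalize() // no more ops may be added after Finalize
if err != nil {
	panic(err)
}
sess, err := tf.NewSession(graph, nil)
if err != nil {
	panic(err)
}
defer sess.Close()
out, err := sess.Run(nil, []tf.Output{y}, nil)
if err != nil {
	panic(err)
}
fmt.Println(out[0].Value()) // [1.5 0 2.25]
```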
func AccumulateNV2 ¶
Returns the element-wise sum of a list of tensors.
`tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not wait for all of its inputs to be ready before beginning to sum. This can save memory if inputs are ready at different times, since the minimum temporary storage is proportional to the output size rather than the size of the inputs.
Unlike the original `accumulate_n`, `accumulate_n_v2` is differentiable.
Returns a `Tensor` of same shape and type as the elements of `inputs`.
Arguments:
inputs: A list of `Tensor` objects, each with the same shape and type.
shape: Shape of elements of `inputs`.
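A graph-construction sketch, following the same setup as the `Abs` example above; `tf.MakeShape` describes the known element shape, and the values are illustrative.

```go
s := NewScope()
a := Const(s, []float32{1, 2, 3})
b := Const(s, []float32{10, 20, 30})
c := Const(s, []float32{100, 200, 300})
// shape must describe the common shape of the elements of inputs.
sum := AccumulateNV2(s, []tf.Output{a, b, c}, tf.MakeShape(3))
_ = sum // evaluates to [111 222 333] when run in a session
```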
func Acos ¶
Computes acos of x element-wise.
Provided an input tensor, the `tf.math.acos` operation returns the inverse cosine of each element of the tensor. If `y = tf.math.cos(x)` then, `x = tf.math.acos(y)`. Input range is `[-1, 1]` and the output has a range of `[0, pi]`.
func Acosh ¶
Computes inverse hyperbolic cosine of x element-wise.
Given an input tensor, the function computes inverse hyperbolic cosine of every element. Input range is `[1, inf]`. It returns `nan` if the input lies outside the range.
```python x = tf.constant([-2, -0.5, 1, 1.2, 200, 10000, float("inf")]) tf.math.acosh(x) ==> [nan nan 0. 0.62236255 5.9914584 9.903487 inf] ```
func Add ¶
Returns x + y element-wise.
*NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
Given two input tensors, the `tf.add` operation computes the sum for every element in the tensor.
Both input and output have a range `(-inf, inf)`.
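A short graph-construction sketch of the broadcasting behavior (values illustrative):

```go
s := NewScope()
x := Const(s, [][]float32{{1, 2}, {3, 4}})
y := Const(s, float32(10)) // a scalar broadcasts against the 2x2 matrix
z := Add(s, x, y)
_ = z // evaluates to [[11 12] [13 14]] when run in a session
```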
func AddManySparseToTensorsMap ¶
func AddManySparseToTensorsMap(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...AddManySparseToTensorsMapAttr) (sparse_handles tf.Output)
Add an `N`-minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles.
A `SparseTensor` of rank `R` is represented by three tensors: `sparse_indices`, `sparse_values`, and `sparse_shape`, where
```sparse_indices.shape[1] == sparse_shape.shape[0] == R```
An `N`-minibatch of `SparseTensor` objects is represented as a `SparseTensor` having a first `sparse_indices` column taking values between `[0, N)`, where the minibatch size `N == sparse_shape[0]`.
The input `SparseTensor` must have rank `R` greater than 1, and the first dimension is treated as the minibatch dimension. Elements of the `SparseTensor` must be sorted in increasing order of this first dimension. The stored `SparseTensor` objects pointed to by each row of the output `sparse_handles` will have rank `R-1`.
The `SparseTensor` values can then be read out as part of a minibatch by passing the given keys as vector elements to `TakeManySparseFromTensorsMap`. To ensure the correct `SparseTensorsMap` is accessed, ensure that the same `container` and `shared_name` are passed to that Op. If no `shared_name` is provided here, instead use the *name* of the Operation created by calling `AddManySparseToTensorsMap` as the `shared_name` passed to `TakeManySparseFromTensorsMap`. Ensure the Operations are colocated.
Arguments:
sparse_indices: 2-D. The `indices` of the minibatch `SparseTensor`.
`sparse_indices[:, 0]` must be ordered values in `[0, N)`.
sparse_values: 1-D. The `values` of the minibatch `SparseTensor`.
sparse_shape: 1-D. The `shape` of the minibatch `SparseTensor`.
The minibatch size `N == sparse_shape[0]`.
Returns 1-D. The handles of the `SparseTensor` now stored in the `SparseTensorsMap`. Shape: `[N]`.
func AddN ¶
Add all input tensors element-wise.
Inputs must be of the same size and shape. ```python x = [9, 7, 10] tf.math.add_n(x) ==> 26 ```
func AddSparseToTensorsMap ¶
func AddSparseToTensorsMap(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...AddSparseToTensorsMapAttr) (sparse_handle tf.Output)
Add a `SparseTensor` to a `SparseTensorsMap` and return its handle.
A `SparseTensor` is represented by three tensors: `sparse_indices`, `sparse_values`, and `sparse_shape`.
This operator takes the given `SparseTensor` and adds it to a container object (a `SparseTensorsMap`). A unique key within this container is generated in the form of an `int64`, and this is the value that is returned.
The `SparseTensor` can then be read out as part of a minibatch by passing the key as a vector element to `TakeManySparseFromTensorsMap`. To ensure the correct `SparseTensorsMap` is accessed, ensure that the same `container` and `shared_name` are passed to that Op. If no `shared_name` is provided here, instead use the *name* of the Operation created by calling `AddSparseToTensorsMap` as the `shared_name` passed to `TakeManySparseFromTensorsMap`. Ensure the Operations are colocated.
Arguments:
sparse_indices: 2-D. The `indices` of the `SparseTensor`.
sparse_values: 1-D. The `values` of the `SparseTensor`.
sparse_shape: 1-D. The `shape` of the `SparseTensor`.
Returns 0-D. The handle of the `SparseTensor` now stored in the `SparseTensorsMap`.
func AddV2 ¶
Returns x + y element-wise.
*NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func AdjustContrast ¶
func AdjustContrast(scope *Scope, images tf.Output, contrast_factor tf.Output, min_value tf.Output, max_value tf.Output) (output tf.Output)
Deprecated. Disallowed in GraphDef version >= 2.
DEPRECATED at GraphDef version 2: Use AdjustContrastv2 instead
func AdjustContrastv2 ¶
Adjust the contrast of one or more images.
`images` is a tensor of at least 3 dimensions. The last 3 dimensions are interpreted as `[height, width, channels]`. The other dimensions only represent a collection of images, such as `[batch, height, width, channels]`.
Contrast is adjusted independently for each channel of each image.
For each channel, the Op first computes the mean of the image pixels in the channel and then adjusts each component of each pixel to `(x - mean) * contrast_factor + mean`.
Arguments:
images: Images to adjust. At least 3-D.
contrast_factor: A float multiplier for adjusting contrast.
Returns The contrast-adjusted image or images.
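A graph-construction sketch; the placeholder stands in for image data fed at Session.Run time, and the contrast factor of 2 is illustrative.

```go
s := NewScope()
images := Placeholder(s, tf.Float) // fed as [batch, height, width, channels]
adjusted := AdjustContrastv2(s, images, Const(s, float32(2)))
_ = adjusted
```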
func AdjustHue ¶
Adjust the hue of one or more images.
`images` is a tensor of at least 3 dimensions. The last dimension is interpreted as channels, and must be three.
The input image is considered in the RGB colorspace. Conceptually, the RGB colors are first mapped into HSV. A delta is then applied to all the hue values, and the result is remapped back to the RGB colorspace.
Arguments:
images: Images to adjust. At least 3-D.
delta: A float delta to add to the hue.
Returns The hue-adjusted image or images.
func AdjustSaturation ¶
Adjust the saturation of one or more images.
`images` is a tensor of at least 3 dimensions. The last dimension is interpreted as channels, and must be three.
The input image is considered in the RGB colorspace. Conceptually, the RGB colors are first mapped into HSV. A scale is then applied to all the saturation values, and the result is remapped back to the RGB colorspace.
Arguments:
images: Images to adjust. At least 3-D.
scale: A float scale applied to the saturation.
Returns The saturation-adjusted image or images.
func All ¶
Computes the "logical and" of elements across dimensions of a tensor.
Reduces `input` along the dimensions given in `axis`. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keep_dims` is true, the reduced dimensions are retained with length 1.
Arguments:
input: The tensor to reduce.
axis: The dimensions to reduce. Must be in the range `[-rank(input), rank(input))`.
Returns The reduced tensor.
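A small graph-construction sketch reducing along the last axis (values illustrative):

```go
s := NewScope()
x := Const(s, [][]bool{{true, true}, {true, false}})
axis := Const(s, []int32{1}) // reduce across columns
rows := All(s, x, axis)
_ = rows // evaluates to [true false] when run in a session
```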
func AllCandidateSampler ¶
func AllCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, optional ...AllCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output)
Generates labels for candidate sampling with a learned unigram distribution.
See explanations of candidate sampling and the data formats at go/candidate-sampling.
For each batch, this op picks a single set of sampled candidate labels.
The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the true labels.
Arguments:
true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label.
num_true: Number of true labels per context.
num_sampled: Number of candidates to produce.
unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities.
Returns:
sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
sampled_expected_count: A vector of length num_sampled, for each sampled candidate representing the number of times the candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
func AllToAll ¶
func AllToAll(scope *Scope, input tf.Output, group_assignment tf.Output, concat_dimension int64, split_dimension int64, split_count int64) (output tf.Output)
An Op to exchange data across TPU replicas.
On each replica, the input is split into `split_count` blocks along `split_dimension` and sent to the other replicas given `group_assignment`. After receiving `split_count` - 1 blocks from the other replicas, we concatenate the blocks along `concat_dimension` as the output.
For example, suppose there are 2 TPU replicas: replica 0 receives input: `[[A, B]]` replica 1 receives input: `[[C, D]]`
group_assignment=`[[0, 1]]` concat_dimension=0 split_dimension=1 split_count=2
replica 0's output: `[[A], [C]]` replica 1's output: `[[B], [D]]`
Arguments:
input: The local input to the all-to-all exchange.
group_assignment: An int32 tensor with shape [num_groups, num_replicas_per_group]. `group_assignment[i]` represents the replica ids in the ith subgroup.
concat_dimension: The dimension number to concatenate.
split_dimension: The dimension number to split.
split_count: The number of splits; this number must equal the sub-group size (group_assignment.get_shape()[1]).
Returns The exchanged result.
func Angle ¶
Returns the argument of a complex number.
Given a tensor `input` of complex numbers, this operation returns a tensor of type `float` that is the argument of each element in `input`. All elements in `input` must be complex numbers of the form \\(a + bj\\), where *a* is the real part and *b* is the imaginary part.
The argument returned by this operation is of the form \\(atan2(b, a)\\).
For example:
``` # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] tf.math.angle(input) ==> [2.0132, 1.056] ```
Equivalent to np.angle in NumPy.
func AnonymousHashTable ¶
func AnonymousHashTable(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType) (table_handle tf.Output)
Creates an uninitialized anonymous hash table.
This op creates a new anonymous hash table (as a resource) every time it is executed, with the specified dtype of its keys and values, returning the resource handle. Before using the table you will have to initialize it. After initialization the table will be immutable. The table is anonymous in the sense that it can only be accessed by the returned resource handle (e.g. it cannot be looked up by a name in a resource manager). The table will be automatically deleted when all resource handles pointing to it are gone.
Arguments:
key_dtype: Type of the table keys.
value_dtype: Type of the table values.
Returns The resource handle to the newly created hash-table resource.
func AnonymousIterator ¶
func AnonymousIterator(scope *Scope, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
A container for an iterator resource.
Returns A handle to the iterator that can be passed to a "MakeIterator" or "IteratorGetNext" op. In contrast to Iterator, AnonymousIterator prevents resource sharing by name, and does not keep a reference to the resource container.
func AnonymousIteratorV2 ¶
func AnonymousIteratorV2(scope *Scope, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output, deleter tf.Output)
A container for an iterator resource.
Returns:
handle: A handle to the iterator that can be passed to a "MakeIterator" or
"IteratorGetNext" op. In contrast to Iterator, AnonymousIterator prevents resource sharing by name, and does not keep a reference to the resource container.
deleter: A variant deleter that should be passed into the op that deletes the iterator.
func AnonymousIteratorV3 ¶
func AnonymousIteratorV3(scope *Scope, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
A container for an iterator resource.
Returns A handle to the iterator that can be passed to a "MakeIterator" or "IteratorGetNext" op. In contrast to Iterator, AnonymousIterator prevents resource sharing by name, and does not keep a reference to the resource container.
func AnonymousMultiDeviceIterator ¶
func AnonymousMultiDeviceIterator(scope *Scope, devices []string, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output, deleter tf.Output)
A container for a multi device iterator resource.
Returns:
handle: A handle to a multi device iterator that can be passed to a
"MultiDeviceIteratorGetNextFromShard" op. In contrast to MultiDeviceIterator, AnonymousIterator prevents resource sharing by name, and does not keep a reference to the resource container.
deleter: A variant deleter that should be passed into the op that deletes the iterator.
func AnonymousMultiDeviceIteratorV3 ¶
func AnonymousMultiDeviceIteratorV3(scope *Scope, devices []string, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
A container for a multi device iterator resource.
Returns A handle to a multi device iterator that can be passed to a "MultiDeviceIteratorGetNextFromShard" op. In contrast to MultiDeviceIterator, AnonymousIterator prevents resource sharing by name, and does not keep a reference to the resource container.
func AnonymousMutableDenseHashTable ¶
func AnonymousMutableDenseHashTable(scope *Scope, empty_key tf.Output, deleted_key tf.Output, value_dtype tf.DataType, optional ...AnonymousMutableDenseHashTableAttr) (table_handle tf.Output)
Creates an empty anonymous mutable hash table that uses tensors as the backing store.
This op creates a new anonymous mutable hash table (as a resource) every time it is executed, with the specified dtype of its keys and values, returning the resource handle. Each value must be a scalar. Data can be inserted into the table using the insert operations. It does not support the initialization operation.
It uses "open addressing" with quadratic reprobing to resolve collisions.
The table is anonymous in the sense that it can only be accessed by the returned resource handle (e.g. it cannot be looked up by a name in a resource manager). The table will be automatically deleted when all resource handles pointing to it are gone.
Arguments:
empty_key: The key used to represent empty key buckets internally. Must not
be used in insert or lookup operations.
value_dtype: Type of the table values.
Returns The resource handle to the newly created hash-table resource.
func AnonymousMutableHashTable ¶
func AnonymousMutableHashTable(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType) (table_handle tf.Output)
Creates an empty anonymous mutable hash table.
This op creates a new anonymous mutable hash table (as a resource) every time it is executed, with the specified dtype of its keys and values, returning the resource handle. Each value must be a scalar. Data can be inserted into the table using the insert operations. It does not support the initialization operation. The table is anonymous in the sense that it can only be accessed by the returned resource handle (e.g. it cannot be looked up by a name in a resource manager). The table will be automatically deleted when all resource handles pointing to it are gone.
Arguments:
key_dtype: Type of the table keys.
value_dtype: Type of the table values.
Returns The resource handle to the newly created hash-table resource.
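A sketch of a typical insert-then-lookup pattern, assuming the companion `LookupTableInsertV2` and `LookupTableFindV2` ops from this package and using a control dependency to order the lookup after the insert; the keys, values, and default are illustrative.

```go
s := NewScope()
table := AnonymousMutableHashTable(s, tf.String, tf.Int64)
insert := LookupTableInsertV2(s, table,
	Const(s, []string{"a", "b"}),
	Const(s, []int64{1, 2}))
// Ensure the lookup runs after the insert.
after := s.WithControlDependencies(insert)
found := LookupTableFindV2(after, table,
	Const(s, []string{"a", "c"}),
	Const(s, int64(-1))) // default value for missing keys
_ = found // evaluates to [1 -1] when run in a session
```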
func AnonymousMutableHashTableOfTensors ¶
func AnonymousMutableHashTableOfTensors(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, optional ...AnonymousMutableHashTableOfTensorsAttr) (table_handle tf.Output)
Creates an empty anonymous mutable hash table of vector values.
This op creates a new anonymous mutable hash table (as a resource) every time it is executed, with the specified dtype of its keys and values, returning the resource handle. Each value must be a vector. Data can be inserted into the table using the insert operations. It does not support the initialization operation. The table is anonymous in the sense that it can only be accessed by the returned resource handle (e.g. it cannot be looked up by a name in a resource manager). The table will be automatically deleted when all resource handles pointing to it are gone.
Arguments:
key_dtype: Type of the table keys.
value_dtype: Type of the table values.
Returns The resource handle to the newly created hash-table resource.
func Any ¶
Computes the "logical or" of elements across dimensions of a tensor.
Reduces `input` along the dimensions given in `axis`. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keep_dims` is true, the reduced dimensions are retained with length 1.
Arguments:
input: The tensor to reduce.
axis: The dimensions to reduce. Must be in the range `[-rank(input), rank(input))`.
Returns The reduced tensor.
func ApproxTopK ¶ added in v0.2.0
func ApproxTopK(scope *Scope, input tf.Output, k int64, optional ...ApproxTopKAttr) (values tf.Output, indices tf.Output)
Returns min/max k values and their indices of the input operand in an approximate manner.
See https://arxiv.org/abs/2206.14286 for the algorithm details. This op is only optimized on TPU currently.
Arguments:
input: Array to search. Must be at least 1-D of a floating type.
k: Specifies the number of min/max-k.
Returns:
values: The min/max k values along the `reduction_dimension` of the `input` operand.
The dimensions are the same as the `input` operand except for the `reduction_dimension`: when `aggregate_to_topk` is true, the reduction dimension is `k`; otherwise, it is greater than or equal to `k`, where the size is implementation-defined.
indices: The indices of `values` along the `reduction_dimension` of the `input` operand.
func ApproximateEqual ¶
func ApproximateEqual(scope *Scope, x tf.Output, y tf.Output, optional ...ApproximateEqualAttr) (z tf.Output)
Returns the truth value of abs(x-y) < tolerance element-wise.
func ArgMax ¶
func ArgMax(scope *Scope, input tf.Output, dimension tf.Output, optional ...ArgMaxAttr) (output tf.Output)
Returns the index with the largest value across dimensions of a tensor.
Note that in case of ties the identity of the return value is not guaranteed.
Usage:
```python import tensorflow as tf a = [1, 10, 26.9, 2.8, 166.32, 62.3] b = tf.math.argmax(input = a) c = tf.keras.backend.eval(b) # c = 4 # here a[4] = 166.32 which is the largest element of a across axis 0 ```
Arguments:
dimension: int16, int32 or int64, must be in the range `[-rank(input), rank(input))`.
Describes which dimension of the input Tensor to reduce across. For vectors, use dimension = 0.
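The equivalent graph construction with this package (values illustrative):

```go
s := NewScope()
a := Const(s, []float32{1, 10, 26.9, 2.8, 166.32, 62.3})
dim := Const(s, int32(0)) // reduce across the only axis
idx := ArgMax(s, a, dim)
_ = idx // evaluates to 4 when run in a session, since a[4] = 166.32
```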
func ArgMin ¶
func ArgMin(scope *Scope, input tf.Output, dimension tf.Output, optional ...ArgMinAttr) (output tf.Output)
Returns the index with the smallest value across dimensions of a tensor.
Note that in case of ties the identity of the return value is not guaranteed.
Usage:
```python import tensorflow as tf a = [1, 10, 26.9, 2.8, 166.32, 62.3] b = tf.math.argmin(input = a) c = tf.keras.backend.eval(b) # c = 0 # here a[0] = 1 which is the smallest element of a across axis 0 ```
Arguments:
dimension: int32 or int64, must be in the range `[-rank(input), rank(input))`.
Describes which dimension of the input Tensor to reduce across. For vectors, use dimension = 0.
func AsString ¶
Converts each entry in the given tensor to strings.
Supports many numeric types and boolean.
For Unicode, see the [Working with Unicode text](https://www.tensorflow.org/tutorials/representation/unicode) tutorial.
Examples:
>>> tf.strings.as_string([3, 2]) <tf.Tensor: shape=(2,), dtype=string, numpy=array([b'3', b'2'], dtype=object)> >>> tf.strings.as_string([3.1415926, 2.71828], precision=2).numpy() array([b'3.14', b'2.72'], dtype=object)
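A graph-construction sketch; the `AsStringPrecision` attribute setter used in the float case is assumed from the `AsStringAttr` options, and the values are illustrative.

```go
s := NewScope()
ints := AsString(s, Const(s, []int32{3, 2}))
floats := AsString(s, Const(s, []float32{3.1415926, 2.71828}), AsStringPrecision(2))
_, _ = ints, floats // evaluate to ["3" "2"] and ["3.14" "2.72"] when run in a session
```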
func Asin ¶
Computes the trigonometric inverse sine of x element-wise.
The `tf.math.asin` operation returns the inverse of `tf.math.sin`, such that if `y = tf.math.sin(x)` then, `x = tf.math.asin(y)`.
**Note**: The output of `tf.math.asin` will lie within the invertible range of sine, i.e. [-pi/2, pi/2].
For example:
```python # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] x = tf.constant([1.047, 0.785]) y = tf.math.sin(x) # [0.8659266, 0.7068252]
tf.math.asin(y) # [1.047, 0.785] = x ```
func Asinh ¶
Computes inverse hyperbolic sine of x element-wise.
Given an input tensor, this function computes inverse hyperbolic sine for every element in the tensor. Both input and output have a range of `[-inf, inf]`. ```python x = tf.constant([-float("inf"), -2, -0.5, 1, 1.2, 200, 10000, float("inf")]) tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 9.903487 inf] ```
func Assert ¶
func Assert(scope *Scope, condition tf.Output, data []tf.Output, optional ...AssertAttr) (o *tf.Operation)
Asserts that the given condition is true.
If `condition` evaluates to false, the op prints the list of tensors in `data`. `summarize` determines how many entries of the tensors to print.
Arguments:
condition: The condition to evaluate.
data: The tensors to print out when condition is false.
Returns the created operation.
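A sketch that guards a computation with an assertion via `Scope.WithControlDependencies`; the scalar placeholder and the positivity check are illustrative.

```go
s := NewScope()
x := Placeholder(s, tf.Float) // a scalar in this sketch
positive := Greater(s, x, Const(s, float32(0)))
check := Assert(s, positive, []tf.Output{x})
// Ops built on the returned scope only run after the assertion passes.
guarded := s.WithControlDependencies(check)
y := Log(guarded, x)
_ = y
```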
func AssertNextDataset ¶
func AssertNextDataset(scope *Scope, input_dataset tf.Output, transformations tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
A transformation that asserts which transformations happen next.
This transformation checks whether the camel-case names (i.e. "FlatMap", not "flat_map") of the transformations following this transformation match the list of names in the `transformations` argument. If there is a mismatch, the transformation raises an exception.
The check occurs when iterating over the contents of the dataset, which means that the check happens *after* any static optimizations are applied to the dataset graph.
Arguments:
input_dataset: A variant tensor representing the input dataset.
`AssertNextDataset` passes through the outputs of its input dataset.
transformations: A `tf.string` vector `tf.Tensor` identifying the transformations that are
expected to happen next.
func AssertPrevDataset ¶
func AssertPrevDataset(scope *Scope, input_dataset tf.Output, transformations tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
A transformation that asserts which transformations happened previously.
This transformation checks the names and, optionally, the attribute name-value pairs in the `transformations` argument against those of the transformations that preceded this transformation. If there is a mismatch, the transformation raises an exception.
The check occurs when iterating over the contents of the dataset, which means that the check happens *after* any static optimizations are applied to the dataset graph.
Arguments:
input_dataset: A variant tensor representing the input dataset.
`AssertPrevDataset` passes through the outputs of its input dataset.
transformations: A `tf.string` vector `tf.Tensor` identifying the transformations, with optional
attribute name-value pairs, that are expected to have happened previously.
func AssignAddVariableOp ¶
Adds a value to the current value of a variable.
Any ReadVariableOp with a control dependency on this op is guaranteed to see the incremented value or a subsequent newer one.
Arguments:
resource: handle to the resource in which to store the variable.
value: the value by which the variable will be incremented.
Returns the created operation.
func AssignSubVariableOp ¶
Subtracts a value from the current value of a variable.
Any ReadVariableOp with a control dependency on this op is guaranteed to see the decremented value or a subsequent newer one.
Arguments:
resource: handle to the resource in which to store the variable.
value: the value by which the variable will be decremented.
Returns the created operation.
func AssignVariableOp ¶
func AssignVariableOp(scope *Scope, resource tf.Output, value tf.Output, optional ...AssignVariableOpAttr) (o *tf.Operation)
Assigns a new value to a variable.
Any ReadVariableOp with a control dependency on this op is guaranteed to return this value or a subsequent newer value of the variable.
Arguments:
resource: handle to the resource in which to store the variable.
value: the value to set the variable to.
Returns the created operation.
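A sketch of the usual create/assign/read pattern for resource variables, ordering the read after the writes with control dependencies; the scalar float variable is illustrative.

```go
s := NewScope()
v := VarHandleOp(s, tf.Float, tf.ScalarShape())
initOp := AssignVariableOp(s, v, Const(s, float32(0)))
afterInit := s.WithControlDependencies(initOp)
addOp := AssignAddVariableOp(afterInit, v, Const(s, float32(1)))
afterAdd := afterInit.WithControlDependencies(addOp)
value := ReadVariableOp(afterAdd, v, tf.Float)
_ = value // evaluates to 1 when run in a session
```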
func AssignVariableXlaConcatND ¶
func AssignVariableXlaConcatND(scope *Scope, resource tf.Output, inputs []tf.Output, num_concats []int64, optional ...AssignVariableXlaConcatNDAttr) (o *tf.Operation)
Concatenates input tensor slices across all dimensions.
An op which merges slices of the input tensor based on the given `num_concats` attribute, optionally strips paddings, and writes the merged tensor without paddings to the resource variable.
This op may be generated via the TPU bridge.
For example, with `input` tensors:
```
[[0, 1],
 [4, 5]]
[[2, 3],
 [6, 7]]
[[8, 9],
 [12, 13]]
[[10, 11],
 [14, 15]]
```
`num_concats`:
```
[2, 2]
```
and `paddings`:
```
[1, 1]
```
the expected `outputs` is:
```
[[0, 1, 2],
 [4, 5, 6],
 [8, 9, 10]]
```
Arguments:
resource: Resource variable for concatenated input tensors across all dimensions.
inputs: Input tensor slices in row-major order to merge across all dimensions. All inputs must have the same shape.
num_concats: Number of ways to merge per dimension.
Returns the created operation.
func Atan ¶
Computes the trigonometric inverse tangent of x element-wise.
The `tf.math.atan` operation returns the inverse of `tf.math.tan`, such that if `y = tf.math.tan(x)` then, `x = tf.math.atan(y)`.
**Note**: The output of `tf.math.atan` will lie within the invertible range of tan, i.e. (-pi/2, pi/2).
For example:
```python # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] x = tf.constant([1.047, 0.785]) y = tf.math.tan(x) # [1.731261, 0.99920404]
tf.math.atan(y) # [1.047, 0.785] = x ```
func Atan2 ¶
Computes arctangent of `y/x` element-wise, respecting signs of the arguments.
This is the angle \\( \theta \in [-\pi, \pi] \\) such that \\[ x = r \cos(\theta) \\] and \\[ y = r \sin(\theta) \\] where \\(r = \sqrt{x^2 + y^2} \\).
For example:
>>> x = [1., 1.] >>> y = [1., -1.] >>> print((tf.math.atan2(y,x) * (180 / np.pi)).numpy()) [ 45. -45.]
func Atanh ¶
Computes inverse hyperbolic tangent of x element-wise.
Given an input tensor, this function computes inverse hyperbolic tangent for every element in the tensor. Input range is `[-1,1]` and output range is `[-inf, inf]`. If input is `-1`, output will be `-inf` and if the input is `1`, output will be `inf`. Values outside the range will have `nan` as output. ```python x = tf.constant([-float("inf"), -1, -0.5, 1, 0, 0.5, 10, float("inf")]) tf.math.atanh(x) ==> [nan -inf -0.54930615 inf 0. 0.54930615 nan nan] ```
func AudioSpectrogram ¶
func AudioSpectrogram(scope *Scope, input tf.Output, window_size int64, stride int64, optional ...AudioSpectrogramAttr) (spectrogram tf.Output)
Produces a visualization of audio data over time.
Spectrograms are a standard way of representing audio information as a series of slices of frequency information, one slice for each window of time. By joining these together into a sequence, they form a distinctive fingerprint of the sound over time.
This op expects to receive audio data as an input, stored as floats in the range -1 to 1, together with a window width in samples, and a stride specifying how far to move the window between slices. From this it generates a three dimensional output. The first dimension is for the channels in the input, so a stereo audio input would have two here for example. The second dimension is time, with successive frequency slices. The third dimension has an amplitude value for each frequency during that time slice.
This means the layout when converted and saved as an image is rotated 90 degrees clockwise from a typical spectrogram. Time is descending down the Y axis, and the frequency decreases from left to right.
Each value in the result represents the square root of the sum of the real and imaginary parts of an FFT on the current window of samples. In this way, the lowest dimension represents the power of each frequency in the current window, and adjacent windows are concatenated in the next dimension.
To get a more intuitive and visual look at what this operation does, you can run tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the resulting spectrogram as a PNG image.
Arguments:
input: Float representation of audio data.
window_size: How wide the input window is in samples. For the highest efficiency this should be a power of two, but other values are accepted.
stride: How widely apart the center of adjacent sample windows should be.
Returns 3D representation of the audio frequencies as an image.
func AudioSummary ¶
func AudioSummary(scope *Scope, tag tf.Output, tensor tf.Output, sample_rate float32, optional ...AudioSummaryAttr) (summary tf.Output)
Outputs a `Summary` protocol buffer with audio.
DEPRECATED at GraphDef version 15: Use AudioSummaryV2.
The summary has up to `max_outputs` summary values containing audio. The audio is built from `tensor` which must be 3-D with shape `[batch_size, frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
The `tag` argument is a scalar `Tensor` of type `string`. It is used to build the `tag` of the summary values:
- If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
- If `max_outputs` is greater than 1, the summary value tags are generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
Arguments:
tag: Scalar. Used to build the `tag` attribute of the summary values.
tensor: 2-D of shape `[batch_size, frames]`.
sample_rate: The sample rate of the signal in hertz.
Returns Scalar. Serialized `Summary` protocol buffer.
func AudioSummaryV2 ¶
func AudioSummaryV2(scope *Scope, tag tf.Output, tensor tf.Output, sample_rate tf.Output, optional ...AudioSummaryV2Attr) (summary tf.Output)
Outputs a `Summary` protocol buffer with audio.
The summary has up to `max_outputs` summary values containing audio. The audio is built from `tensor` which must be 3-D with shape `[batch_size, frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
The `tag` argument is a scalar `Tensor` of type `string`. It is used to build the `tag` of the summary values:
- If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
- If `max_outputs` is greater than 1, the summary value tags are generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
Arguments:
tag: Scalar. Used to build the `tag` attribute of the summary values.
tensor: 2-D of shape `[batch_size, frames]`.
sample_rate: The sample rate of the signal in hertz.
Returns Scalar. Serialized `Summary` protocol buffer.
func AutoShardDataset ¶
func AutoShardDataset(scope *Scope, input_dataset tf.Output, num_workers tf.Output, index tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...AutoShardDatasetAttr) (handle tf.Output)
Creates a dataset that shards the input dataset.
Creates a dataset that shards the input dataset by num_workers, returning a sharded dataset for the index-th worker. This attempts to automatically shard a dataset by examining the Dataset graph and inserting a shard op before the inputs to a reader Dataset (e.g. CSVDataset, TFRecordDataset).
This dataset will throw a NotFound error if we cannot shard the dataset automatically.
Arguments:
input_dataset: A variant tensor representing the input dataset.
num_workers: A scalar representing the number of workers to distribute this dataset across.
index: A scalar representing the index of the current worker out of num_workers.
func AvgPool ¶
func AvgPool(scope *Scope, value tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPoolAttr) (output tf.Output)
Performs average pooling on the input.
Each entry in `output` is the mean of the corresponding size `ksize` window in `value`.
Arguments:
value: 4-D with shape `[batch, height, width, channels]`.
ksize: The size of the sliding window for each dimension of `value`.
strides: The stride of the sliding window for each dimension of `value`.
padding: The type of padding algorithm to use.
Returns The average pooled output tensor.
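A graph-construction sketch with a 2x2 window and a stride of 2 in NHWC layout; the placeholder is fed at run time.

```go
s := NewScope()
images := Placeholder(s, tf.Float) // fed as [batch, height, width, channels]
pooled := AvgPool(s, images,
	[]int64{1, 2, 2, 1}, // ksize: 2x2 window over the spatial dimensions
	[]int64{1, 2, 2, 1}, // strides: move the window by 2 in each spatial dimension
	"VALID")
_ = pooled
```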
func AvgPool3D ¶
func AvgPool3D(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPool3DAttr) (output tf.Output)
Performs 3D average pooling on the input.
Each entry in `output` is the mean of the corresponding size `ksize` window in `value`.
Arguments:
input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
ksize: 1-D tensor of length 5. The size of the window for each dimension of the input tensor. Must have `ksize[0] = ksize[4] = 1`.
strides: 1-D tensor of length 5. The stride of the sliding window for each dimension of `input`. Must have `strides[0] = strides[4] = 1`.
padding: The type of padding algorithm to use.
Returns The average pooled output tensor.
func AvgPool3DGrad ¶
func AvgPool3DGrad(scope *Scope, orig_input_shape tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPool3DGradAttr) (output tf.Output)
Computes gradients of average pooling function.
Arguments:
orig_input_shape: The original input dimensions.
grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.
ksize: 1-D tensor of length 5. The size of the window for each dimension of the input tensor. Must have `ksize[0] = ksize[4] = 1`.
strides: 1-D tensor of length 5. The stride of the sliding window for each dimension of `input`. Must have `strides[0] = strides[4] = 1`.
padding: The type of padding algorithm to use.
Returns The backprop for input.
func AvgPoolGrad ¶
func AvgPoolGrad(scope *Scope, orig_input_shape tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPoolGradAttr) (output tf.Output)
Computes gradients of the average pooling function.
Arguments:
orig_input_shape: 1-D. Shape of the original input to `avg_pool`.
grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the output of `avg_pool`.
ksize: The size of the sliding window for each dimension of the input.
strides: The stride of the sliding window for each dimension of the input.
padding: The type of padding algorithm to use.
Returns 4-D. Gradients w.r.t. the input of `avg_pool`.
func Batch ¶
func Batch(scope *Scope, in_tensors []tf.Output, num_batch_threads int64, max_batch_size int64, batch_timeout_micros int64, grad_timeout_micros int64, optional ...BatchAttr) (batched_tensors []tf.Output, batch_index tf.Output, id tf.Output)
Batches all input tensors nondeterministically.
When many instances of this Op are being run concurrently with the same container/shared_name in the same device, some will output zero-shaped Tensors and others will output Tensors of size up to max_batch_size.
All Tensors in in_tensors are batched together (so, for example, labels and features should be batched with a single instance of this operation).
Each invocation of batch emits an `id` scalar which will be used to identify this particular invocation when doing unbatch or its gradient.
Each op which emits a non-empty batch will also emit a non-empty batch_index Tensor, which is a [K, 3] matrix where each row contains the invocation's id, start, and length of elements of each set of Tensors present in batched_tensors.
Batched tensors are concatenated along the first dimension, and all tensors in in_tensors must have the first dimension of the same size.
in_tensors: The tensors to be batched.
num_batch_threads: Number of scheduling threads for processing batches of work. Determines the number of batches processed in parallel.
max_batch_size: Batch sizes will never be bigger than this.
batch_timeout_micros: Maximum number of microseconds to wait before outputting an incomplete batch.
allowed_batch_sizes: Optional list of allowed batch sizes. If left empty, does nothing. Otherwise, supplies a list of batch sizes, causing the op to pad batches up to one of those sizes. The entries must increase monotonically, and the final entry must equal max_batch_size.
grad_timeout_micros: The timeout to use for the gradient. See Unbatch.
batched_tensors: Either empty tensors or a batch of concatenated Tensors.
batch_index: If out_tensors is non-empty, has information to invert it.
container: Controls the scope of sharing of this batch.
id: always contains a scalar with a unique ID for this invocation of Batch.
shared_name: Concurrently running instances of batch in the same device with the same container and shared_name will batch their elements together. If left empty, the op name will be used as the shared name.
T: the types of tensors to be batched.
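A rough sketch of wiring up this op in the package-example style; the thread count, batch size, and timeout values below are arbitrary and chosen only for illustration:

```go
s := NewScope()
x := Placeholder(s, tf.Float) // tensor to be batched, fed at run time
batched, batchIndex, id := Batch(s,
	[]tf.Output{x}, // in_tensors
	4,              // num_batch_threads
	32,             // max_batch_size
	5000,           // batch_timeout_micros
	10000000)       // grad_timeout_micros
if s.Err() != nil {
	panic(s.Err())
}
fmt.Println(len(batched), batchIndex.Shape(), id.Shape())
```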
func BatchDataset ¶
func BatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...BatchDatasetAttr) (handle tf.Output)
Creates a dataset that batches `batch_size` elements from `input_dataset`.
Arguments:
batch_size: A scalar representing the number of elements to accumulate in a batch.
func BatchDatasetV2 ¶
func BatchDatasetV2(scope *Scope, input_dataset tf.Output, batch_size tf.Output, drop_remainder tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...BatchDatasetV2Attr) (handle tf.Output)
Creates a dataset that batches `batch_size` elements from `input_dataset`.
Arguments:
batch_size: A scalar representing the number of elements to accumulate in a batch.
drop_remainder: A scalar representing whether the last batch should be dropped in case its size is smaller than desired.
func BatchMatMul ¶
func BatchMatMul(scope *Scope, x tf.Output, y tf.Output, optional ...BatchMatMulAttr) (output tf.Output)
Multiplies slices of two tensors in batches.
Multiplies all slices of `Tensor` `x` and `y` (each slice can be viewed as an element of a batch), and arranges the individual results in a single output tensor of the same batch size. Each of the individual slices can optionally be adjointed (to adjoint a matrix means to transpose and conjugate it) before multiplication by setting the `adj_x` or `adj_y` flag to `True`, which are by default `False`.
The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]` and `[..., r_y, c_y]`.
The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:
r_o = c_x if adj_x else r_x
c_o = r_y if adj_y else c_y
It is computed as:
output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
Arguments:
x: 2-D or higher with shape `[..., r_x, c_x]`. y: 2-D or higher with shape `[..., r_y, c_y]`.
Returns 3-D or higher with shape `[..., r_o, c_o]`
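For illustration, a minimal sketch in the package-example style that multiplies one batch of 2x2 matrices (values chosen only for demonstration):

```go
s := NewScope()
x := Const(s, [][][]float32{{{1, 2}, {3, 4}}}) // shape [1, 2, 2]
y := Const(s, [][][]float32{{{5, 6}, {7, 8}}}) // shape [1, 2, 2]
prod := BatchMatMul(s, x, y)                   // one 2x2 matmul per batch element
if s.Err() != nil {
	panic(s.Err())
}
fmt.Println(prod.Shape()) // expected [1, 2, 2]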
func BatchMatMulV2 ¶
func BatchMatMulV2(scope *Scope, x tf.Output, y tf.Output, optional ...BatchMatMulV2Attr) (output tf.Output)
Multiplies slices of two tensors in batches.
Multiplies all slices of `Tensor` `x` and `y` (each slice can be viewed as an element of a batch), and arranges the individual results in a single output tensor of the same batch size. Each of the individual slices can optionally be adjointed (to adjoint a matrix means to transpose and conjugate it) before multiplication by setting the `adj_x` or `adj_y` flag to `True`, which are by default `False`.
The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]` and `[..., r_y, c_y]`.
The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:
r_o = c_x if adj_x else r_x
c_o = r_y if adj_y else c_y
It is computed as:
output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
*NOTE*: `BatchMatMulV2` supports broadcasting in the batch dimensions. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
Arguments:
x: 2-D or higher with shape `[..., r_x, c_x]`. y: 2-D or higher with shape `[..., r_y, c_y]`.
Returns 3-D or higher with shape `[..., r_o, c_o]`
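A sketch of the batch-broadcasting behaviour described above (assuming NumPy-style broadcasting of the batch dimensions; the matrices here are arbitrary): a batch of two 2x2 matrices should multiply against a single un-batched 2x2 matrix.

```go
s := NewScope()
x := Const(s, [][][]float32{
	{{1, 2}, {3, 4}},
	{{5, 6}, {7, 8}},
}) // shape [2, 2, 2]
y := Const(s, [][]float32{{1, 0}, {0, 1}}) // shape [2, 2], broadcast across the batch
prod := BatchMatMulV2(s, x, y)
if s.Err() != nil {
	panic(s.Err())
}
fmt.Println(prod.Shape()) // expected [2, 2, 2]
```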
func BatchMatMulV3 ¶
func BatchMatMulV3(scope *Scope, x tf.Output, y tf.Output, Tout tf.DataType, optional ...BatchMatMulV3Attr) (output tf.Output)
Multiplies slices of two tensors in batches.
Multiplies all slices of `Tensor` `x` and `y` (each slice can be viewed as an element of a batch), and arranges the individual results in a single output tensor of the same batch size. Each of the individual slices can optionally be adjointed (to adjoint a matrix means to transpose and conjugate it) before multiplication by setting the `adj_x` or `adj_y` flag to `True`, which are by default `False`.
The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]` and `[..., r_y, c_y]`.
The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:
r_o = c_x if adj_x else r_x
c_o = r_y if adj_y else c_y
It is computed as:
output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
*NOTE*: `BatchMatMulV3` supports broadcasting in the batch dimensions. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
Arguments:
x: 2-D or higher with shape `[..., r_x, c_x]`.
y: 2-D or higher with shape `[..., r_y, c_y]`.
Tout: If not specified, Tout is the same type as the input type.
Returns 3-D or higher with shape `[..., r_o, c_o]`
func BatchNormWithGlobalNormalization ¶
func BatchNormWithGlobalNormalization(scope *Scope, t tf.Output, m tf.Output, v tf.Output, beta tf.Output, gamma tf.Output, variance_epsilon float32, scale_after_normalization bool) (result tf.Output)
Batch normalization.
DEPRECATED at GraphDef version 9: Use tf.nn.batch_normalization()
This op is deprecated. Prefer `tf.nn.batch_normalization`.
Arguments:
t: A 4D input Tensor.
m: A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, or a saved moving average thereof.
v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, or a saved moving average thereof.
beta: A 1D beta Tensor with size matching the last dimension of t. An offset to be added to the normalized tensor.
gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this tensor will be multiplied with the normalized tensor.
variance_epsilon: A small float number to avoid dividing by 0.
scale_after_normalization: A bool indicating whether the resulting tensor needs to be multiplied with gamma.
func BatchNormWithGlobalNormalizationGrad ¶
func BatchNormWithGlobalNormalizationGrad(scope *Scope, t tf.Output, m tf.Output, v tf.Output, gamma tf.Output, backprop tf.Output, variance_epsilon float32, scale_after_normalization bool) (dx tf.Output, dm tf.Output, dv tf.Output, db tf.Output, dg tf.Output)
Gradients for batch normalization.
DEPRECATED at GraphDef version 9: Use tf.nn.batch_normalization()
This op is deprecated. See `tf.nn.batch_normalization`.
Arguments:
t: A 4D input Tensor.
m: A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, or a saved moving average thereof.
v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, or a saved moving average thereof.
gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this Tensor will be multiplied with the normalized Tensor.
backprop: 4D backprop Tensor.
variance_epsilon: A small float number to avoid dividing by 0.
scale_after_normalization: A bool indicating whether the resulting tensor needs to be multiplied with gamma.
Returns:
dx: 4D backprop tensor for input.
dm: 1D backprop tensor for mean.
dv: 1D backprop tensor for variance.
db: 1D backprop tensor for beta.
dg: 1D backprop tensor for gamma.
func BatchToSpace ¶
func BatchToSpace(scope *Scope, input tf.Output, crops tf.Output, block_size int64) (output tf.Output)
BatchToSpace for 4-D tensors of type T.
This is a legacy version of the more general BatchToSpaceND.
Rearranges (permutes) data from batch into blocks of spatial data, followed by cropping. This is the reverse transformation of SpaceToBatch. More specifically, this op outputs a copy of the input tensor where values from the `batch` dimension are moved in spatial blocks to the `height` and `width` dimensions, followed by cropping along the `height` and `width` dimensions.
Arguments:
input: 4-D tensor with shape `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size, depth]`. Note that the batch size of the input tensor must be divisible by `block_size * block_size`.
crops: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies how many elements to crop from the intermediate result across the spatial dimensions as follows:

crops = [[crop_top, crop_bottom], [crop_left, crop_right]]
Returns 4-D with shape `[batch, height, width, depth]`, where:
height = height_pad - crop_top - crop_bottom
width = width_pad - crop_left - crop_right
The attr `block_size` must be greater than one. It indicates the block size.
Some examples:
(1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2:
``` [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] ```
The output tensor has shape `[1, 2, 2, 1]` and value:
``` x = [[[[1], [2]], [[3], [4]]]] ```
(2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:
``` [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] ```
The output tensor has shape `[1, 2, 2, 3]` and value:
``` x = [[[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]]]
```
(3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2:
``` x = [[[[1], [3]], [[9], [11]]],
[[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]]
```
The output tensor has shape `[1, 4, 4, 1]` and value:
``` x = [[[[1], [2], [3], [4]],
[[5], [6], [7], [8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]]]
```
(4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:
``` x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
[[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
```
The output tensor has shape `[2, 2, 4, 1]` and value:
``` x = [[[[1], [2], [3], [4]],
[[5], [6], [7], [8]]], [[[9], [10], [11], [12]], [[13], [14], [15], [16]]]]
```
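A minimal sketch in the package-example style, reproducing example (1) above (constant values chosen only for illustration):

```go
s := NewScope()
// Input of shape [4, 1, 1, 1] with block_size 2 and no cropping.
input := Const(s, [][][][]float32{{{{1}}}, {{{2}}}, {{{3}}}, {{{4}}}})
crops := Const(s, [][]int32{{0, 0}, {0, 0}})
out := BatchToSpace(s, input, crops, 2)
if s.Err() != nil {
	panic(s.Err())
}
fmt.Println(out.Shape()) // expected [1, 2, 2, 1]
```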
func BatchToSpaceND ¶
func BatchToSpaceND(scope *Scope, input tf.Output, block_shape tf.Output, crops tf.Output) (output tf.Output)
BatchToSpace for N-D tensors of type T.
This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape `block_shape + [batch]`, interleaves these blocks back into the grid defined by the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as the input. The spatial dimensions of this intermediate result are then optionally cropped according to `crops` to produce the output. This is the reverse of SpaceToBatch. See below for a precise description.
Arguments:
input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, where spatial_shape has M dimensions.
block_shape: 1-D with shape `[M]`, all values must be >= 1.
crops: 2-D with shape `[M, 2]`, all values must be >= 0. `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input dimension `i + 1`, which corresponds to spatial dimension `i`. It is required that `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.
This operation is equivalent to the following steps:
1. Reshape `input` to `reshaped` of shape: [block_shape[0], ..., block_shape[M-1], batch / prod(block_shape), input_shape[1], ..., input_shape[N-1]]

2. Permute dimensions of `reshaped` to produce `permuted` of shape: [batch / prod(block_shape), input_shape[1], block_shape[0], ..., input_shape[M], block_shape[M-1], input_shape[M+1], ..., input_shape[N-1]]

3. Reshape `permuted` to produce `reshaped_permuted` of shape: [batch / prod(block_shape), input_shape[1] * block_shape[0], ..., input_shape[M] * block_shape[M-1], input_shape[M+1], ..., input_shape[N-1]]

4. Crop the start and end of dimensions `[1, ..., M]` of `reshaped_permuted` according to `crops` to produce the output of shape: [batch / prod(block_shape), input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1], ..., input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1], input_shape[M+1], ..., input_shape[N-1]]
Some examples:
(1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and
`crops = [[0, 0], [0, 0]]`:
``` [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] ```
The output tensor has shape `[1, 2, 2, 1]` and value:
``` x = [[[[1], [2]], [[3], [4]]]] ```
(2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and
`crops = [[0, 0], [0, 0]]`:
``` [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] ```
The output tensor has shape `[1, 2, 2, 3]` and value:
``` x = [[[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]]]
```
(3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and
`crops = [[0, 0], [0, 0]]`:
``` x = [[[[1], [3]], [[9], [11]]],
[[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]]
```
The output tensor has shape `[1, 4, 4, 1]` and value:
``` x = [[[[1], [2], [3], [4]],
[[5], [6], [7], [8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]]]
```
(4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and
`crops = [[0, 0], [2, 0]]`:
``` x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
[[[0], [2], [4]]], [[[0], [10], [12]]], [[[0], [5], [7]]], [[[0], [13], [15]]], [[[0], [6], [8]]], [[[0], [14], [16]]]]
```
The output tensor has shape `[2, 2, 4, 1]` and value:
``` x = [[[[1], [2], [3], [4]],
[[5], [6], [7], [8]]], [[[9], [10], [11], [12]], [[13], [14], [15], [16]]]]
```
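As a sketch in the package-example style, reproducing example (1) above with `block_shape = [2, 2]` and no cropping (values chosen only for illustration):

```go
s := NewScope()
input := Const(s, [][][][]float32{{{{1}}}, {{{2}}}, {{{3}}}, {{{4}}}}) // shape [4, 1, 1, 1]
blockShape := Const(s, []int32{2, 2})
crops := Const(s, [][]int32{{0, 0}, {0, 0}})
out := BatchToSpaceND(s, input, blockShape, crops)
if s.Err() != nil {
	panic(s.Err())
}
fmt.Println(out.Shape()) // expected [1, 2, 2, 1]
```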
func Betainc ¶
Compute the regularized incomplete beta integral \\(I_x(a, b)\\).
The regularized incomplete beta integral is defined as:
\\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\\)
where
\\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\\)
is the incomplete beta function and \\(B(a, b)\\) is the *complete* beta function.
func BiasAdd ¶
func BiasAdd(scope *Scope, value tf.Output, bias tf.Output, optional ...BiasAddAttr) (output tf.Output)
Adds `bias` to `value`.
This is a special case of `tf.add` where `bias` is restricted to be 1-D. Broadcasting is supported, so `value` may have any number of dimensions.
Arguments:
value: Any number of dimensions. bias: 1-D with size the last dimension of `value`.
Returns Broadcasted sum of `value` and `bias`.
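A minimal sketch in the package-example style (the constant values are arbitrary): the 1-D `bias` is broadcast across the rows of `value`.

```go
s := NewScope()
value := Const(s, [][]float32{{1, 2, 3}, {4, 5, 6}}) // shape [2, 3]
bias := Const(s, []float32{10, 20, 30})              // 1-D, sized to the last dimension of value
sum := BiasAdd(s, value, bias)
if s.Err() != nil {
	panic(s.Err())
}
fmt.Println(sum.Shape()) // expected [2, 3]
```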
func BiasAddGrad ¶
func BiasAddGrad(scope *Scope, out_backprop tf.Output, optional ...BiasAddGradAttr) (output tf.Output)
The backward operation for "BiasAdd" on the "bias" tensor.
It accumulates all the values from out_backprop into the feature dimension. For NHWC data format, the feature dimension is the last. For NCHW data format, the feature dimension is the third-to-last.
Arguments:
out_backprop: Any number of dimensions.
Returns 1-D with size the feature dimension of `out_backprop`.
func BiasAddV1 ¶
Adds `bias` to `value`.
This is a deprecated version of BiasAdd and will be soon removed.
This is a special case of `tf.add` where `bias` is restricted to be 1-D. Broadcasting is supported, so `value` may have any number of dimensions.
Arguments:
value: Any number of dimensions. bias: 1-D with size the last dimension of `value`.
Returns Broadcasted sum of `value` and `bias`.
func Bincount ¶
Counts the number of occurrences of each value in an integer array.
Outputs a vector with length `size` and the same dtype as `weights`. If `weights` are empty, then index `i` stores the number of times the value `i` is counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of the value in `weights` at each index where the corresponding value in `arr` is `i`.
Values in `arr` outside of the range [0, size) are ignored.
Arguments:
arr: int32 `Tensor`.
size: non-negative int32 scalar `Tensor`.
weights: is an int32, int64, float32, or float64 `Tensor` with the same shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights equal to 1.
Returns 1D `Tensor` with length equal to `size`. The counts or summed weights for each value in the range [0, size).
func Bitcast ¶
Bitcasts a tensor from one type to another without copying data.
Given a tensor `input`, this operation returns a tensor that has the same buffer data as `input` with datatype `type`.
If the input datatype `T` is larger than the output datatype `type` then the shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].
If `T` is smaller than `type`, the operator requires that the rightmost dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from [..., sizeof(`type`)/sizeof(`T`)] to [...].
tf.bitcast() and tf.cast() work differently when a real dtype is cast to a complex dtype (e.g. tf.complex64 or tf.complex128): tf.cast() makes the imaginary part 0, while tf.bitcast() raises an error (see Example 1 below). For example:
Example 1:
>>> a = [1., 2., 3.]
>>> equality_bitcast = tf.bitcast(a, tf.complex128)
Traceback (most recent call last):
...
InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast]
>>> equality_cast = tf.cast(a, tf.complex128)
>>> print(equality_cast)
tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128)
Example 2:
>>> tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8)
<tf.Tensor: shape=(4,), dtype=uint8, numpy=array([255, 255, 255, 255], dtype=uint8)>
Example 3:
>>> x = [1., 2., 3.]
>>> y = [0., 2., 3.]
>>> equality = tf.equal(x, y)
>>> equality_cast = tf.cast(equality, tf.float32)
>>> equality_bitcast = tf.bitcast(equality_cast, tf.uint8)
>>> print(equality)
tf.Tensor([False True True], shape=(3,), dtype=bool)
>>> print(equality_cast)
tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32)
>>> print(equality_bitcast)
tf.Tensor(
[[ 0  0  0  0]
 [ 0  0 128 63]
 [ 0  0 128 63]], shape=(3, 4), dtype=uint8)
*NOTE*: Bitcast is implemented as a low-level cast, so machines with different endian orderings will give different results. A copy from input buffer to output buffer is made on BE machines when types are of different sizes in order to get the same casting results as on LE machines.
func BitwiseAnd ¶
Elementwise computes the bitwise AND of `x` and `y`.
The result will have those bits set that are set in both `x` and `y`. The computation is performed on the underlying representations of `x` and `y`.
For example:
```python
import tensorflow as tf
from tensorflow.python.ops import bitwise_ops
dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
              tf.uint8, tf.uint16, tf.uint32, tf.uint64]

for dtype in dtype_list:
  lhs = tf.constant([0, 5, 3, 14], dtype=dtype)
  rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
  exp = tf.constant([0, 0, 3, 10], dtype=tf.float32)
  res = bitwise_ops.bitwise_and(lhs, rhs)
  tf.assert_equal(tf.cast(res, tf.float32), exp)  # TRUE
```
func BitwiseOr ¶
Elementwise computes the bitwise OR of `x` and `y`.
The result will have those bits set that are set in `x`, `y` or both. The computation is performed on the underlying representations of `x` and `y`.
For example:
```python
import tensorflow as tf
from tensorflow.python.ops import bitwise_ops
dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
              tf.uint8, tf.uint16, tf.uint32, tf.uint64]

for dtype in dtype_list:
  lhs = tf.constant([0, 5, 3, 14], dtype=dtype)
  rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
  exp = tf.constant([5, 5, 7, 15], dtype=tf.float32)
  res = bitwise_ops.bitwise_or(lhs, rhs)
  tf.assert_equal(tf.cast(res, tf.float32), exp)  # TRUE
```
func BitwiseXor ¶
Elementwise computes the bitwise XOR of `x` and `y`.
The result will have those bits set that are different in `x` and `y`. The computation is performed on the underlying representations of `x` and `y`.
For example:
```python
import tensorflow as tf
from tensorflow.python.ops import bitwise_ops
dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
              tf.uint8, tf.uint16, tf.uint32, tf.uint64]

for dtype in dtype_list:
  lhs = tf.constant([0, 5, 3, 14], dtype=dtype)
  rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
  exp = tf.constant([5, 5, 4, 5], dtype=tf.float32)
  res = bitwise_ops.bitwise_xor(lhs, rhs)
  tf.assert_equal(tf.cast(res, tf.float32), exp)  # TRUE
```
func BlockLSTM ¶
func BlockLSTM(scope *Scope, seq_len_max tf.Output, x tf.Output, cs_prev tf.Output, h_prev tf.Output, w tf.Output, wci tf.Output, wcf tf.Output, wco tf.Output, b tf.Output, optional ...BlockLSTMAttr) (i tf.Output, cs tf.Output, f tf.Output, o tf.Output, ci tf.Output, co tf.Output, h tf.Output)
Computes the LSTM cell forward propagation for all the time steps.
This is equivalent to applying LSTMBlockCell in a loop, like so:
```python
for x1 in unpack(x):
  i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlock(
    x1, cs_prev, h_prev, w, wci, wcf, wco, b)
  cs_prev = cs1
  h_prev = h1
  i.append(i1)
  cs.append(cs1)
  f.append(f1)
  o.append(o1)
  ci.append(ci1)
  co.append(co1)
  h.append(h1)
return pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(co), pack(h)
```
Arguments:
seq_len_max: Maximum time length actually used by this input. Outputs are padded with zeros beyond this length.
x: The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
cs_prev: Value of the initial cell state.
h_prev: Initial output of cell (to be used for peephole).
w: The weight matrix.
wci: The weight matrix for input gate peephole connection.
wcf: The weight matrix for forget gate peephole connection.
wco: The weight matrix for output gate peephole connection.
b: The bias vector.
Returns:
i: The input gate over the whole time sequence.
cs: The cell state before the tanh over the whole time sequence.
f: The forget gate over the whole time sequence.
o: The output gate over the whole time sequence.
ci: The cell input over the whole time sequence.
co: The cell after the tanh over the whole time sequence.
h: The output h vector over the whole time sequence.
func BlockLSTMGrad ¶
func BlockLSTMGrad(scope *Scope, seq_len_max tf.Output, x tf.Output, cs_prev tf.Output, h_prev tf.Output, w tf.Output, wci tf.Output, wcf tf.Output, wco tf.Output, b tf.Output, i tf.Output, cs tf.Output, f tf.Output, o tf.Output, ci tf.Output, co tf.Output, h tf.Output, cs_grad tf.Output, h_grad tf.Output, use_peephole bool) (x_grad tf.Output, cs_prev_grad tf.Output, h_prev_grad tf.Output, w_grad tf.Output, wci_grad tf.Output, wcf_grad tf.Output, wco_grad tf.Output, b_grad tf.Output)
Computes the LSTM cell backward propagation for the entire time sequence.
This implementation is to be used in conjunction with LSTMBlock.
Arguments:
seq_len_max: Maximum time length actually used by this input. Outputs are padded with zeros beyond this length.
x: The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
cs_prev: Value of the initial cell state.
h_prev: Initial output of cell (to be used for peephole).
w: The weight matrix.
wci: The weight matrix for input gate peephole connection.
wcf: The weight matrix for forget gate peephole connection.
wco: The weight matrix for output gate peephole connection.
b: The bias vector.
i: The input gate over the whole time sequence.
cs: The cell state before the tanh over the whole time sequence.
f: The forget gate over the whole time sequence.
o: The output gate over the whole time sequence.
ci: The cell input over the whole time sequence.
co: The cell after the tanh over the whole time sequence.
h: The output h vector over the whole time sequence.
cs_grad: The current gradient of cs.
h_grad: The gradient of h vector.
use_peephole: Whether to use peephole weights.
Returns:
x_grad: The gradient of x to be back-propped.
cs_prev_grad: The gradient of cs_prev to be back-propped.
h_prev_grad: The gradient of h_prev to be back-propped.
w_grad: The gradient for w to be back-propped.
wci_grad: The gradient for wci to be back-propped.
wcf_grad: The gradient for wcf to be back-propped.
wco_grad: The gradient for wco to be back-propped.
b_grad: The gradient for b to be back-propped.
func BlockLSTMGradV2 ¶
func BlockLSTMGradV2(scope *Scope, seq_len_max tf.Output, x tf.Output, cs_prev tf.Output, h_prev tf.Output, w tf.Output, wci tf.Output, wcf tf.Output, wco tf.Output, b tf.Output, i tf.Output, cs tf.Output, f tf.Output, o tf.Output, ci tf.Output, co tf.Output, h tf.Output, cs_grad tf.Output, h_grad tf.Output, use_peephole bool) (x_grad tf.Output, cs_prev_grad tf.Output, h_prev_grad tf.Output, w_grad tf.Output, wci_grad tf.Output, wcf_grad tf.Output, wco_grad tf.Output, b_grad tf.Output)
Computes the LSTM cell backward propagation for the entire time sequence.
This implementation is to be used in conjunction with BlockLSTMV2.
Arguments:
seq_len_max: Maximum time length actually used by this input. Outputs are padded with zeros beyond this length.
x: The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
cs_prev: Value of the initial cell state.
h_prev: Initial output of cell (to be used for peephole).
w: The weight matrix.
wci: The weight matrix for input gate peephole connection.
wcf: The weight matrix for forget gate peephole connection.
wco: The weight matrix for output gate peephole connection.
b: The bias vector.
i: The input gate over the whole time sequence.
cs: The cell state before the tanh over the whole time sequence.
f: The forget gate over the whole time sequence.
o: The output gate over the whole time sequence.
ci: The cell input over the whole time sequence.
co: The cell after the tanh over the whole time sequence.
h: The output h vector over the whole time sequence.
cs_grad: The current gradient of cs.
h_grad: The gradient of h vector.
use_peephole: Whether to use peephole weights.
Returns:
x_grad: The gradient of x to be back-propped.
cs_prev_grad: The gradient of cs_prev to be back-propped.
h_prev_grad: The gradient of h_prev to be back-propped.
w_grad: The gradient for w to be back-propped.
wci_grad: The gradient for wci to be back-propped.
wcf_grad: The gradient for wcf to be back-propped.
wco_grad: The gradient for wco to be back-propped.
b_grad: The gradient for b to be back-propped.
func BlockLSTMV2 ¶
func BlockLSTMV2(scope *Scope, seq_len_max tf.Output, x tf.Output, cs_prev tf.Output, h_prev tf.Output, w tf.Output, wci tf.Output, wcf tf.Output, wco tf.Output, b tf.Output, optional ...BlockLSTMV2Attr) (i tf.Output, cs tf.Output, f tf.Output, o tf.Output, ci tf.Output, co tf.Output, h tf.Output)
Computes the LSTM cell forward propagation for all the time steps.
This is equivalent to applying LSTMBlockCell in a loop, like so:
```python
for x1 in unpack(x):
  i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlock(
    x1, cs_prev, h_prev, w, wci, wcf, wco, b)
  cs_prev = cs1
  h_prev = h1
  i.append(i1)
  cs.append(cs1)
  f.append(f1)
  o.append(o1)
  ci.append(ci1)
  co.append(co1)
  h.append(h1)
return pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(co), pack(h)
```

Note that unlike LSTMBlockCell (and BlockLSTM), which uses an ICFO gate layout, this op uses IFCO. So in order for the above snippet to be equivalent, all gate-related outputs should be reordered.
Arguments:
seq_len_max: Maximum time length actually used by this input. Outputs are padded with zeros beyond this length.
x: The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
cs_prev: Value of the initial cell state.
h_prev: Initial output of cell (to be used for peephole).
w: The weight matrix.
wci: The weight matrix for input gate peephole connection.
wcf: The weight matrix for forget gate peephole connection.
wco: The weight matrix for output gate peephole connection.
b: The bias vector.
Returns:
i: The input gate over the whole time sequence.
cs: The cell state before the tanh over the whole time sequence.
f: The forget gate over the whole time sequence.
o: The output gate over the whole time sequence.
ci: The cell input over the whole time sequence.
co: The cell after the tanh over the whole time sequence.
h: The output h vector over the whole time sequence.
func BoostedTreesAggregateStats ¶
func BoostedTreesAggregateStats(scope *Scope, node_ids tf.Output, gradients tf.Output, hessians tf.Output, feature tf.Output, max_splits int64, num_buckets int64) (stats_summary tf.Output)
Aggregates the summary of accumulated stats for the batch.
The summary stats contains gradients and hessians accumulated for each node, feature dimension id and bucket.
Arguments:
node_ids: int32; Rank 1 Tensor containing node ids for each example, shape [batch_size].
gradients: float32; Rank 2 Tensor (shape=[batch_size, logits_dimension]) with gradients for each example.
hessians: float32; Rank 2 Tensor (shape=[batch_size, hessian_dimension]) with hessians for each example.
feature: int32; Rank 2 feature Tensor (shape=[batch_size, feature_dimension]).
max_splits: int; the maximum number of splits possible in the whole tree.
num_buckets: int; equal to the maximum possible value of the bucketized feature.
Returns output Rank 4 Tensor (shape=[splits, feature_dimension, buckets, logits_dimension + hessian_dimension]) containing accumulated stats for each node, feature dimension and bucket.
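As a sketch of how the arguments fit together, in the package-example style; the placeholders below would be fed with the shapes described above, and the max_splits and num_buckets values are arbitrary:

```go
s := NewScope()
nodeIDs := Placeholder(s, tf.Int32)   // [batch_size]
gradients := Placeholder(s, tf.Float) // [batch_size, logits_dimension]
hessians := Placeholder(s, tf.Float)  // [batch_size, hessian_dimension]
feature := Placeholder(s, tf.Int32)   // [batch_size, feature_dimension]
stats := BoostedTreesAggregateStats(s, nodeIDs, gradients, hessians, feature,
	3,  // max_splits
	10) // num_buckets
if s.Err() != nil {
	panic(s.Err())
}
fmt.Println(stats.Shape())
```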
func BoostedTreesBucketize ¶
func BoostedTreesBucketize(scope *Scope, float_values []tf.Output, bucket_boundaries []tf.Output) (buckets []tf.Output)
Bucketize each feature based on bucket boundaries.
An op that returns a list of float tensors, where each tensor represents the bucketized values for a single feature.
Arguments:
float_values: float; List of Rank 1 Tensors each containing float values for a single feature.
bucket_boundaries: float; List of Rank 1 Tensors each containing the bucket boundaries for a single feature.
Returns int; List of Rank 1 Tensors each containing the bucketized values for a single feature.
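A minimal sketch in the package-example style for a single feature (the values and boundaries are arbitrary):

```go
s := NewScope()
values := Const(s, []float32{0.5, 1.5, 2.5, 3.5}) // values for one feature
boundaries := Const(s, []float32{1.0, 2.0, 3.0})  // its bucket boundaries
buckets := BoostedTreesBucketize(s, []tf.Output{values}, []tf.Output{boundaries})
if s.Err() != nil {
	panic(s.Err())
}
fmt.Println(len(buckets)) // one output tensor per input feature
```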
func BoostedTreesCalculateBestFeatureSplit ¶
func BoostedTreesCalculateBestFeatureSplit(scope *Scope, node_id_range tf.Output, stats_summary tf.Output, l1 tf.Output, l2 tf.Output, tree_complexity tf.Output, min_node_weight tf.Output, logits_dimension int64, optional ...BoostedTreesCalculateBestFeatureSplitAttr) (node_ids tf.Output, gains tf.Output, feature_dimensions tf.Output, thresholds tf.Output, left_node_contribs tf.Output, right_node_contribs tf.Output, split_with_default_directions tf.Output)
Calculates gains for each feature and returns the best possible split information for the feature.
The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.
It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.
In this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).
The output shapes are compatible in a way that the first dimension of all tensors are the same and equal to the number of possible split nodes for each feature.
Arguments:
node_id_range: A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as in `for node_id in range(node_id_range[0], node_id_range[1])` (note that the last index node_id_range[1] is exclusive).
stats_summary: A Rank 4 tensor (#shape=[max_splits, feature_dims, bucket, stats_dims]) for accumulated stats summary (gradient/hessian) per node, per dimension, per bucket for each feature. The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used.
l1: l1 regularization factor on leaf weights, per instance based.
l2: l2 regularization factor on leaf weights, per instance based.
tree_complexity: adjustment to the gain, per leaf based.
min_node_weight: minimum avg of hessians in a node required before the node is considered for splitting.
logits_dimension: The dimension of logit, i.e., number of classes.
Returns:
node_ids: A Rank 1 tensor indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has a different size as each feature provides different possible nodes. See above for details like shapes and sizes.
gains: A Rank 1 tensor indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes.
feature_dimensions: A Rank 1 tensor indicating the best feature dimension for each feature to split for certain nodes if the feature is multi-dimensional. See above for details like shapes and sizes.
thresholds: A Rank 1 tensor indicating the bucket id to compare with (as a threshold) for split in each node. See above for details like shapes and sizes.
left_node_contribs: A Rank 2 tensor indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding to the parent node value. Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes.
right_node_contribs: A Rank 2 tensor, with the same shape/conditions as left_node_contribs_list, but the value is for the right node.
split_with_default_directions: A Rank 1 tensor indicating which direction to go if data is missing. See above for details like shapes and sizes. Inequality with default left returns 0, inequality with default right returns 1, equality with default right returns 2.
func BoostedTreesCalculateBestFeatureSplitV2 ¶
func BoostedTreesCalculateBestFeatureSplitV2(scope *Scope, node_id_range tf.Output, stats_summaries_list []tf.Output, split_types tf.Output, candidate_feature_ids tf.Output, l1 tf.Output, l2 tf.Output, tree_complexity tf.Output, min_node_weight tf.Output, logits_dimension int64) (node_ids tf.Output, gains tf.Output, feature_ids tf.Output, feature_dimensions tf.Output, thresholds tf.Output, left_node_contribs tf.Output, right_node_contribs tf.Output, split_with_default_directions tf.Output)
Calculates gains for each feature and returns the best possible split information for each node. However, if no split is found, then no split information is returned for that node.
The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.
It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.
In this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).
The output shapes are compatible in a way that the first dimension of all tensors are the same and equal to the number of possible split nodes for each feature.
Arguments:
node_id_range: A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as in `for node_id in range(node_id_range[0], node_id_range[1])` (note that the last index node_id_range[1] is exclusive).
stats_summaries_list: A list of Rank 4 tensors (#shape=[max_splits, feature_dims, bucket, stats_dims]) for accumulated stats summary (gradient/hessian) per node, per dimension, per bucket for each feature. The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used.
split_types: A Rank 1 tensor indicating if this Op should perform inequality split or equality split per feature.
candidate_feature_ids: Rank 1 tensor with ids for each feature. This is the real id of the feature.
l1: l1 regularization factor on leaf weights, per instance based.
l2: l2 regularization factor on leaf weights, per instance based.
tree_complexity: adjustment to the gain, per leaf based.
min_node_weight: minimum avg of hessians in a node required before the node is considered for splitting.
logits_dimension: The dimension of logit, i.e., number of classes.
Returns:
node_ids: A Rank 1 tensor indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has a different size as each feature provides different possible nodes. See above for details like shapes and sizes.
gains: A Rank 1 tensor indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes.
feature_ids: A Rank 1 tensor indicating the best feature id for each node. See above for details like shapes and sizes.
feature_dimensions: A Rank 1 tensor indicating the best feature dimension for each feature to split for certain nodes if the feature is multi-dimensional. See above for details like shapes and sizes.
thresholds: A Rank 1 tensor indicating the bucket id to compare with (as a threshold) for split in each node. See above for details like shapes and sizes.
left_node_contribs: A Rank 2 tensor indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding to the parent node value. Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes.
right_node_contribs: A Rank 2 tensor, with the same shape/conditions as left_node_contribs_list, but the value is for the right node.
split_with_default_directions: A Rank 1 tensor indicating which direction to go if data is missing. See above for details like shapes and sizes. Inequality with default left returns 0, inequality with default right returns 1, equality with default right returns 2.
func BoostedTreesCalculateBestGainsPerFeature ¶
func BoostedTreesCalculateBestGainsPerFeature(scope *Scope, node_id_range tf.Output, stats_summary_list []tf.Output, l1 tf.Output, l2 tf.Output, tree_complexity tf.Output, min_node_weight tf.Output, max_splits int64) (node_ids_list []tf.Output, gains_list []tf.Output, thresholds_list []tf.Output, left_node_contribs_list []tf.Output, right_node_contribs_list []tf.Output)
Calculates gains for each feature and returns the best possible split information for the feature.
The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.
It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.
In this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).
The length of output lists are all of the same length, `num_features`. The output shapes are compatible in a way that the first dimension of all tensors of all lists are the same and equal to the number of possible split nodes for each feature.
Arguments:
node_id_range: A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as in `for node_id in range(node_id_range[0], node_id_range[1])` (note that the last index node_id_range[1] is exclusive).
stats_summary_list: A list of Rank 3 tensors (#shape=[max_splits, bucket, 2]) for accumulated stats summary (gradient/hessian) per node per bucket for each feature. The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used.
l1: l1 regularization factor on leaf weights, per instance based.
l2: l2 regularization factor on leaf weights, per instance based.
tree_complexity: adjustment to the gain, per leaf based.
min_node_weight: minimum avg of hessians in a node required before the node is considered for splitting.
max_splits: the number of nodes that can be split in the whole tree. Used as a dimension of output tensors.
Returns:
node_ids_list: An output list of Rank 1 tensors indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has a different size as each feature provides different possible nodes. See above for details like shapes and sizes.
gains_list: An output list of Rank 1 tensors indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes.
thresholds_list: An output list of Rank 1 tensors indicating the bucket id to compare with (as a threshold) for split in each node. See above for details like shapes and sizes.
left_node_contribs_list: A list of Rank 2 tensors indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding to the parent node value. Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes.
right_node_contribs_list: A list of Rank 2 tensors, with the same shape/conditions as left_node_contribs_list, but the value is for the right node.
func BoostedTreesCenterBias ¶
func BoostedTreesCenterBias(scope *Scope, tree_ensemble_handle tf.Output, mean_gradients tf.Output, mean_hessians tf.Output, l1 tf.Output, l2 tf.Output) (continue_centering tf.Output)
Calculates the prior from the training data (the bias) and fills in the first node with the logits' prior. Returns a boolean indicating whether to continue centering.
Arguments:
tree_ensemble_handle: Handle to the tree ensemble.
mean_gradients: A tensor with shape=[logits_dimension] with mean of gradients for a first node.
mean_hessians: A tensor with shape=[logits_dimension] mean of hessians for a first node.
l1: l1 regularization factor on leaf weights, per instance based.
l2: l2 regularization factor on leaf weights, per instance based.
Returns Bool, whether to continue bias centering.
func BoostedTreesCreateEnsemble ¶
func BoostedTreesCreateEnsemble(scope *Scope, tree_ensemble_handle tf.Output, stamp_token tf.Output, tree_ensemble_serialized tf.Output) (o *tf.Operation)
Creates a tree ensemble model and returns a handle to it.
Arguments:
tree_ensemble_handle: Handle to the tree ensemble resource to be created.
stamp_token: Token to use as the initial value of the resource stamp.
tree_ensemble_serialized: Serialized proto of the tree ensemble.
Returns the created operation.
func BoostedTreesCreateQuantileStreamResource ¶
func BoostedTreesCreateQuantileStreamResource(scope *Scope, quantile_stream_resource_handle tf.Output, epsilon tf.Output, num_streams tf.Output, optional ...BoostedTreesCreateQuantileStreamResourceAttr) (o *tf.Operation)
Create the Resource for Quantile Streams.
Arguments:
quantile_stream_resource_handle: resource; Handle to quantile stream resource.
epsilon: float; The required approximation error of the stream resource.
num_streams: int; The number of streams managed by the resource that shares the same epsilon.
Returns the created operation.
func BoostedTreesDeserializeEnsemble ¶
func BoostedTreesDeserializeEnsemble(scope *Scope, tree_ensemble_handle tf.Output, stamp_token tf.Output, tree_ensemble_serialized tf.Output) (o *tf.Operation)
Deserializes a serialized tree ensemble config and replaces the current tree ensemble.
Arguments:
tree_ensemble_handle: Handle to the tree ensemble.
stamp_token: Token to use as the new value of the resource stamp.
tree_ensemble_serialized: Serialized proto of the ensemble.
Returns the created operation.
func BoostedTreesEnsembleResourceHandleOp ¶
func BoostedTreesEnsembleResourceHandleOp(scope *Scope, optional ...BoostedTreesEnsembleResourceHandleOpAttr) (resource tf.Output)
Creates a handle to a BoostedTreesEnsembleResource.
func BoostedTreesExampleDebugOutputs ¶
func BoostedTreesExampleDebugOutputs(scope *Scope, tree_ensemble_handle tf.Output, bucketized_features []tf.Output, logits_dimension int64) (examples_debug_outputs_serialized tf.Output)
Debugging/model interpretability outputs for each example.
It traverses all the trees and computes debug metrics for individual examples, such as getting split feature ids and logits after each split along the decision path used to compute directional feature contributions.
Arguments:
bucketized_features: A list of rank 1 Tensors containing bucket id for each feature.
logits_dimension: scalar, dimension of the logits, to be used for constructing the protos in examples_debug_outputs_serialized.
Returns Output rank 1 Tensor containing a proto serialized as a string for each example.
func BoostedTreesFlushQuantileSummaries ¶
func BoostedTreesFlushQuantileSummaries(scope *Scope, quantile_stream_resource_handle tf.Output, num_features int64) (summaries []tf.Output)
Flush the quantile summaries from each quantile stream resource.
An op that outputs a list of quantile summaries of a quantile stream resource. Each summary Tensor is rank 2, containing summaries (value, weight, min_rank, max_rank) for a single feature.
Arguments:
quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource.
func BoostedTreesGetEnsembleStates ¶
func BoostedTreesGetEnsembleStates(scope *Scope, tree_ensemble_handle tf.Output) (stamp_token tf.Output, num_trees tf.Output, num_finalized_trees tf.Output, num_attempted_layers tf.Output, last_layer_nodes_range tf.Output)
Retrieves the tree ensemble resource stamp token, number of trees and growing statistics.
Arguments:
tree_ensemble_handle: Handle to the tree ensemble.
Returns:
stamp_token: Stamp token of the tree ensemble resource.
num_trees: The number of trees in the tree ensemble resource.
num_finalized_trees: The number of trees that were finished successfully.
num_attempted_layers: The number of layers we attempted to build (but not necessarily succeeded).
last_layer_nodes_range: Rank size 2 tensor that contains start and end ids of the nodes in the latest layer.
func BoostedTreesMakeQuantileSummaries ¶
func BoostedTreesMakeQuantileSummaries(scope *Scope, float_values []tf.Output, example_weights tf.Output, epsilon tf.Output) (summaries []tf.Output)
Makes the summary of quantiles for the batch.
An op that takes a list of tensors (one tensor per feature) and outputs the quantile summaries for each tensor.
Arguments:
float_values: float; List of Rank 1 Tensors each containing values for a single feature.
example_weights: float; Rank 1 Tensor with weights per instance.
epsilon: float; The required maximum approximation error.
Returns float; List of Rank 2 Tensors each containing the quantile summary (value, weight, min_rank, max_rank) of a single feature.
func BoostedTreesMakeStatsSummary ¶
func BoostedTreesMakeStatsSummary(scope *Scope, node_ids tf.Output, gradients tf.Output, hessians tf.Output, bucketized_features_list []tf.Output, max_splits int64, num_buckets int64) (stats_summary tf.Output)
Makes the summary of accumulated stats for the batch.
The summary stats contains gradients and hessians accumulated into the corresponding node and bucket for each example.
Arguments:
node_ids: int32 Rank 1 Tensor containing node ids, which each example falls into for the requested layer.
gradients: float32; Rank 2 Tensor (shape=[#examples, 1]) for gradients.
hessians: float32; Rank 2 Tensor (shape=[#examples, 1]) for hessians.
bucketized_features_list: int32 list of Rank 1 Tensors, each containing the bucketized feature (for each feature column).
max_splits: int; the maximum number of splits possible in the whole tree.
num_buckets: int; equal to the maximum possible value of the bucketized feature.
Returns output Rank 4 Tensor (shape=[#features, #splits, #buckets, 2]) containing accumulated stats put into the corresponding node and bucket. The first index of 4th dimension refers to gradients, and the second to hessians.
func BoostedTreesPredict ¶
func BoostedTreesPredict(scope *Scope, tree_ensemble_handle tf.Output, bucketized_features []tf.Output, logits_dimension int64) (logits tf.Output)
Runs multiple additive regression ensemble predictors on input instances and computes the logits. It is designed to be used during prediction. It traverses all the trees and calculates the final score for each instance.
Arguments:
bucketized_features: A list of rank 1 Tensors containing bucket id for each feature.
logits_dimension: scalar, dimension of the logits, to be used for partial logits shape.
Returns Output rank 2 Tensor containing logits for each example.
func BoostedTreesQuantileStreamResourceAddSummaries ¶
func BoostedTreesQuantileStreamResourceAddSummaries(scope *Scope, quantile_stream_resource_handle tf.Output, summaries []tf.Output) (o *tf.Operation)
Add the quantile summaries to each quantile stream resource.
An op that adds a list of quantile summaries to a quantile stream resource. Each summary Tensor is rank 2, containing summaries (value, weight, min_rank, max_rank) for a single feature.
Arguments:
quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource.
summaries: string; List of Rank 2 Tensors each containing the summaries for a single feature.
Returns the created operation.
func BoostedTreesQuantileStreamResourceDeserialize ¶
func BoostedTreesQuantileStreamResourceDeserialize(scope *Scope, quantile_stream_resource_handle tf.Output, bucket_boundaries []tf.Output) (o *tf.Operation)
Deserialize bucket boundaries and ready flag into current QuantileAccumulator.
An op that deserializes bucket boundaries and the ready flag into the current QuantileAccumulator.
Arguments:
quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource. bucket_boundaries: float; List of Rank 1 Tensors each containing the bucket boundaries for a feature.
Returns the created operation.
func BoostedTreesQuantileStreamResourceFlush ¶
func BoostedTreesQuantileStreamResourceFlush(scope *Scope, quantile_stream_resource_handle tf.Output, num_buckets tf.Output, optional ...BoostedTreesQuantileStreamResourceFlushAttr) (o *tf.Operation)
Flush the summaries for a quantile stream resource.
An op that flushes the summaries for a quantile stream resource.
Arguments:
quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource. num_buckets: int; approximate number of buckets unless using generate_quantiles.
Returns the created operation.
func BoostedTreesQuantileStreamResourceGetBucketBoundaries ¶
func BoostedTreesQuantileStreamResourceGetBucketBoundaries(scope *Scope, quantile_stream_resource_handle tf.Output, num_features int64) (bucket_boundaries []tf.Output)
Generate the bucket boundaries for each feature based on accumulated summaries.
An op that returns a list of float tensors for a quantile stream resource. Each tensor is Rank 1 containing bucket boundaries for a single feature.
Arguments:
quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource. num_features: inferred int; number of features to get bucket boundaries for.
Returns float; List of Rank 1 Tensors each containing the bucket boundaries for a feature.
func BoostedTreesQuantileStreamResourceHandleOp ¶
func BoostedTreesQuantileStreamResourceHandleOp(scope *Scope, optional ...BoostedTreesQuantileStreamResourceHandleOpAttr) (resource tf.Output)
Creates a handle to a BoostedTreesQuantileStreamResource.
func BoostedTreesSerializeEnsemble ¶
func BoostedTreesSerializeEnsemble(scope *Scope, tree_ensemble_handle tf.Output) (stamp_token tf.Output, tree_ensemble_serialized tf.Output)
Serializes the tree ensemble to a proto.
Arguments:
tree_ensemble_handle: Handle to the tree ensemble.
Returns:
stamp_token: Stamp token of the tree ensemble resource. tree_ensemble_serialized: Serialized proto of the ensemble.
func BoostedTreesSparseAggregateStats ¶
func BoostedTreesSparseAggregateStats(scope *Scope, node_ids tf.Output, gradients tf.Output, hessians tf.Output, feature_indices tf.Output, feature_values tf.Output, feature_shape tf.Output, max_splits int64, num_buckets int64) (stats_summary_indices tf.Output, stats_summary_values tf.Output, stats_summary_shape tf.Output)
Aggregates the summary of accumulated stats for the batch.
The summary stats contains gradients and hessians accumulated for each node, bucket and dimension id.
Arguments:
node_ids: int32; Rank 1 Tensor containing node ids for each example, shape [batch_size].
gradients: float32; Rank 2 Tensor (shape=[batch_size, logits_dimension]) with gradients for each example.
hessians: float32; Rank 2 Tensor (shape=[batch_size, hessian_dimension]) with hessians for each example.
feature_indices: int32; Rank 2 indices of feature sparse Tensors (shape=[number of sparse entries, 2]). Number of sparse entries across all instances from the batch. The first value is the index of the instance, the second is the dimension of the feature. The second axis can only have 2 values, i.e., the input dense version of the Tensor can only be a matrix.
feature_values: int32; Rank 1 values of feature sparse Tensors (shape=[number of sparse entries]). Number of sparse entries across all instances from the batch. The first value is the index of the instance, the second is the dimension of the feature.
feature_shape: int32; Rank 1 dense shape of feature sparse Tensors (shape=[2]). The first axis can only have 2 values, [batch_size, feature_dimension].
max_splits: int; the maximum number of splits possible in the whole tree.
num_buckets: int; equal to the maximum possible value of the bucketized feature + 1.
Returns:
stats_summary_indices: int32; Rank 2 indices of summary sparse Tensors (shape=[number of non zero statistics, 4]). The second axis can only be 4, including node id, feature dimension, bucket id, and statistics_dimension. statistics_dimension = logits_dimension + hessian_dimension.
stats_summary_values: output Rank 1 Tensor (shape=[number of non zero statistics]).
stats_summary_shape: output Rank 1 Tensor (shape=[4]). The tensor has the following 4 values: [max_splits, feature_dimension, num_buckets, statistics_dimension], where statistics_dimension = gradient_dimension + hessian_dimension. gradient_dimension is the same as label_dimension, i.e., the output space. hessian_dimension can be the same as the logits dimension when diagonal hessian is used, or label_dimension^2 when full hessian is used.
func BoostedTreesSparseCalculateBestFeatureSplit ¶
func BoostedTreesSparseCalculateBestFeatureSplit(scope *Scope, node_id_range tf.Output, stats_summary_indices tf.Output, stats_summary_values tf.Output, stats_summary_shape tf.Output, l1 tf.Output, l2 tf.Output, tree_complexity tf.Output, min_node_weight tf.Output, logits_dimension int64, optional ...BoostedTreesSparseCalculateBestFeatureSplitAttr) (node_ids tf.Output, gains tf.Output, feature_dimensions tf.Output, thresholds tf.Output, left_node_contribs tf.Output, right_node_contribs tf.Output, split_with_default_directions tf.Output)
Calculates gains for each feature and returns the best possible split information for the feature.
The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.
It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that can be split using this feature.
In this manner, the output is the best split per feature and per node, so it needs to be combined later to produce the best split for each node (among all possible features).
The output shapes are compatible in the sense that the first dimension of all tensors is the same and equal to the number of possible split nodes for each feature.
Arguments:
node_id_range: A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as in `for node_id in range(node_id_range[0], node_id_range[1])` (note that the last index node_id_range[1] is exclusive).
stats_summary_indices: A Rank 2 int64 tensor of dense shape [N, 4] (N specifies the number of non-zero values) for accumulated stats summary (gradient/hessian) per node per bucket for each feature. The second dimension contains node id, feature dimension, bucket id, and stats dim.
stats dim is the sum of logits dimension and hessian dimension, hessian dimension can either be logits dimension if diagonal hessian is used, or logits dimension^2 if full hessian is used.
stats_summary_values: A Rank 1 float tensor of dense shape [N] (N specifies the number of non-zero values), which supplies the values for each element in summary_indices.
stats_summary_shape: A Rank 1 float tensor of dense shape [4], which specifies the dense shape of the sparse tensor, which is [num tree nodes, feature dimensions, num buckets, stats dim].
l1: l1 regularization factor on leaf weights, per instance based.
l2: l2 regularization factor on leaf weights, per instance based.
tree_complexity: adjustment to the gain, per leaf based.
min_node_weight: minimum average of hessians in a node required before the node is considered for splitting.
logits_dimension: The dimension of logits, i.e., the number of classes.
Returns:
node_ids: A Rank 1 tensor indicating possible node ids that can be split.
gains: A Rank 1 tensor indicating the best gains to split each node.
feature_dimensions: A Rank 1 tensor indicating the best feature dimension for each feature to split for each node.
thresholds: A Rank 1 tensor indicating the bucket id to compare with (as a threshold) for split in each node.
left_node_contribs: A Rank 2 tensor indicating the contribution of the left nodes when branching from parent nodes to the left direction by the given threshold for each feature.
This value will be used to make the left node value by adding to the parent node value. Second dimension size is logits dimension.
right_node_contribs: A Rank 2 tensor, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node.
split_with_default_directions: A Rank 1 tensor indicating which direction to go if data is missing.
Inequality with default left returns 0, inequality with default right returns 1, equality with default right returns 2.
func BoostedTreesTrainingPredict ¶
func BoostedTreesTrainingPredict(scope *Scope, tree_ensemble_handle tf.Output, cached_tree_ids tf.Output, cached_node_ids tf.Output, bucketized_features []tf.Output, logits_dimension int64) (partial_logits tf.Output, tree_ids tf.Output, node_ids tf.Output)
Runs multiple additive regression ensemble predictors on input instances and
computes the update to cached logits. It is designed to be used during training. It traverses the trees starting from cached tree id and cached node id and calculates the updates to be pushed to the cache.
Arguments:
cached_tree_ids: Rank 1 Tensor containing cached tree ids which is the starting
tree of prediction.
cached_node_ids: Rank 1 Tensor containing cached node id which is the starting
node of prediction.
bucketized_features: A list of rank 1 Tensors containing bucket id for each
feature.
logits_dimension: scalar, dimension of the logits, to be used for partial logits
shape.
Returns:
partial_logits: Rank 2 Tensor containing logits update (with respect to cached
values stored) for each example.
tree_ids: Rank 1 Tensor containing new tree ids for each example. node_ids: Rank 1 Tensor containing new node ids in the new tree_ids.
func BoostedTreesUpdateEnsemble ¶
func BoostedTreesUpdateEnsemble(scope *Scope, tree_ensemble_handle tf.Output, feature_ids tf.Output, node_ids []tf.Output, gains []tf.Output, thresholds []tf.Output, left_node_contribs []tf.Output, right_node_contribs []tf.Output, max_depth tf.Output, learning_rate tf.Output, pruning_mode int64) (o *tf.Operation)
Updates the tree ensemble by either adding a layer to the last tree being grown
or by starting a new tree.
Arguments:
tree_ensemble_handle: Handle to the ensemble variable. feature_ids: Rank 1 tensor with ids for each feature. This is the real id of
the feature that will be used in the split.
node_ids: List of rank 1 tensors representing the nodes for which this feature
has a split.
gains: List of rank 1 tensors representing the gains for each of the feature's
split.
thresholds: List of rank 1 tensors representing the thresholds for each of the
feature's split.
left_node_contribs: List of rank 2 tensors with left leaf contribs for each of
the feature's splits. Will be added to the previous node values to constitute the values of the left nodes.
right_node_contribs: List of rank 2 tensors with right leaf contribs for each
of the feature's splits. Will be added to the previous node values to constitute the values of the right nodes.
max_depth: Max depth of the tree to build.
learning_rate: shrinkage const for each new tree.
pruning_mode: 0-No pruning, 1-Pre-pruning, 2-Post-pruning.
Returns the created operation.
func BoostedTreesUpdateEnsembleV2 ¶
func BoostedTreesUpdateEnsembleV2(scope *Scope, tree_ensemble_handle tf.Output, feature_ids []tf.Output, dimension_ids []tf.Output, node_ids []tf.Output, gains []tf.Output, thresholds []tf.Output, left_node_contribs []tf.Output, right_node_contribs []tf.Output, split_types []tf.Output, max_depth tf.Output, learning_rate tf.Output, pruning_mode tf.Output, optional ...BoostedTreesUpdateEnsembleV2Attr) (o *tf.Operation)
Updates the tree ensemble by adding a layer to the last tree being grown
or by starting a new tree.
Arguments:
tree_ensemble_handle: Handle to the ensemble variable. feature_ids: Rank 1 tensor with ids for each feature. This is the real id of
the feature that will be used in the split.
dimension_ids: List of rank 1 tensors representing the dimension in each feature. node_ids: List of rank 1 tensors representing the nodes for which this feature
has a split.
gains: List of rank 1 tensors representing the gains for each of the feature's
split.
thresholds: List of rank 1 tensors representing the thresholds for each of the
feature's split.
left_node_contribs: List of rank 2 tensors with left leaf contribs for each of
the feature's splits. Will be added to the previous node values to constitute the values of the left nodes.
right_node_contribs: List of rank 2 tensors with right leaf contribs for each
of the feature's splits. Will be added to the previous node values to constitute the values of the right nodes.
split_types: List of rank 1 tensors representing the split type for each feature.
max_depth: Max depth of the tree to build.
learning_rate: shrinkage const for each new tree.
pruning_mode: 0-No pruning, 1-Pre-pruning, 2-Post-pruning.
Returns the created operation.
func BroadcastArgs ¶
Return the shape of s0 op s1 with broadcast.
Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the broadcasted shape. `s0`, `s1` and `r0` are all integer vectors.
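For example, a minimal Go sketch in the style of the package example (the BroadcastArgs signature is not shown above and is assumed here to take the two shape vectors after the scope):

```
s := NewScope()
s0 := Const(s, []int32{1, 3})  // shape of a [1, 3] tensor
s1 := Const(s, []int32{2, 3})  // shape of a [2, 3] tensor
r0 := BroadcastArgs(s, s0, s1) // assumed signature: (scope, s0, s1)
if s.Err() != nil {
	panic(s.Err())
}
_ = r0 // evaluates to the broadcasted shape [2, 3] when the graph is run
```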
func BroadcastGradientArgs ¶
Return the reduction indices for computing gradients of s0 op s1 with broadcast.
This is typically used by gradient computations for a broadcasting operation.
func BroadcastTo ¶
Broadcast an array for a compatible shape.
Broadcasting is the process of making arrays have compatible shapes for arithmetic operations. Two shapes are compatible if, for each dimension pair, they are either equal or one of them is one.
For example:
>>> x = tf.constant([[1, 2, 3]])   # Shape (1, 3,)
>>> y = tf.broadcast_to(x, [2, 3])
>>> print(y)
tf.Tensor(
    [[1 2 3]
     [1 2 3]], shape=(2, 3), dtype=int32)
In the above example, the input Tensor with the shape of `[1, 3]` is broadcasted to output Tensor with shape of `[2, 3]`.
When broadcasting, if a tensor has fewer axes than necessary its shape is padded on the left with ones. So this gives the same result as the previous example:
>>> x = tf.constant([1, 2, 3])   # Shape (3,)
>>> y = tf.broadcast_to(x, [2, 3])
When doing broadcasted operations such as multiplying a tensor by a scalar, broadcasting (usually) confers some time or space benefit, as the broadcasted tensor is never materialized.
However, `broadcast_to` does not carry with it any such benefits. The newly-created tensor takes the full memory of the broadcasted shape. (In a graph context, `broadcast_to` might be fused to subsequent operation and then be optimized away, however.)
Arguments:
input: A Tensor to broadcast.
shape: A 1-D `int` Tensor. The shape of the desired output.
Returns A Tensor.
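A minimal Go sketch in the package's style (the BroadcastTo signature is not shown above; it is assumed here to take the input and the target shape as tensors):

```
s := NewScope()
x := Const(s, [][]int32{{1, 2, 3}}) // shape [1, 3]
shape := Const(s, []int32{2, 3})    // desired output shape
y := BroadcastTo(s, x, shape)       // assumed signature: (scope, input, shape)
if s.Err() != nil {
	panic(s.Err())
}
_ = y // evaluates to [[1 2 3] [1 2 3]] when the graph is run
```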
func Bucketize ¶
Bucketizes 'input' based on 'boundaries'.
For example, if the inputs are
boundaries = [0, 10, 100]
input = [[-5, 10000]
         [150, 10]
         [5, 100]]
then the output will be
output = [[0, 3]
          [3, 2]
          [1, 3]]
Arguments:
input: A Tensor of int or float type, of any shape.
boundaries: A sorted list of floats giving the boundaries of the buckets.
Returns a Tensor with the same shape as 'input', where each value of input is replaced with its bucket index.
@compatibility(numpy) Equivalent to np.digitize. @end_compatibility
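A minimal Go sketch of the example above (the Bucketize signature is not shown here; boundaries is assumed to be passed as a []float32 attribute after the input):

```
s := NewScope()
input := Const(s, [][]int32{{-5, 10000}, {150, 10}, {5, 100}})
output := Bucketize(s, input, []float32{0, 10, 100}) // assumed signature: (scope, input, boundaries)
if s.Err() != nil {
	panic(s.Err())
}
_ = output // evaluates to [[0 3] [3 2] [1 3]] when the graph is run
```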
func BytesProducedStatsDataset ¶
func BytesProducedStatsDataset(scope *Scope, input_dataset tf.Output, tag tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
Records the bytes size of each element of `input_dataset` in a StatsAggregator.
func CSRSparseMatrixComponents ¶
func CSRSparseMatrixComponents(scope *Scope, csr_sparse_matrix tf.Output, index tf.Output, type_ tf.DataType) (row_ptrs tf.Output, col_inds tf.Output, values tf.Output)
Reads out the CSR components at batch `index`.
This op is meant only for debugging / testing, and its interface is not expected to be stable.
Arguments:
csr_sparse_matrix: A batched CSRSparseMatrix. index: The index in `csr_sparse_matrix`'s batch.
Returns:
row_ptrs: An array containing CSR matrix row pointers.
col_inds: An array containing CSR matrix column indices.
values: An array containing CSR matrix nonzero values.
func CSRSparseMatrixToDense ¶
func CSRSparseMatrixToDense(scope *Scope, sparse_input tf.Output, type_ tf.DataType) (dense_output tf.Output)
Convert a (possibly batched) CSRSparseMatrix to dense.
Arguments:
sparse_input: A batched CSRSparseMatrix.
Returns A dense tensor.
func CSRSparseMatrixToSparseTensor ¶
func CSRSparseMatrixToSparseTensor(scope *Scope, sparse_matrix tf.Output, type_ tf.DataType) (indices tf.Output, values tf.Output, dense_shape tf.Output)
Converts a (possibly batched) CSRSparseMatrix to a SparseTensor.
Arguments:
sparse_matrix: A (possibly batched) CSRSparseMatrix.
Returns:
indices: SparseTensor indices.
values: SparseTensor values.
dense_shape: SparseTensor dense shape.
func CTCBeamSearchDecoder ¶
func CTCBeamSearchDecoder(scope *Scope, inputs tf.Output, sequence_length tf.Output, beam_width int64, top_paths int64, optional ...CTCBeamSearchDecoderAttr) (decoded_indices []tf.Output, decoded_values []tf.Output, decoded_shape []tf.Output, log_probability tf.Output)
Performs beam search decoding on the logits given in input.
A note about the attribute merge_repeated: For the beam search decoder, this means that if consecutive entries in a beam are the same, only the first of these is emitted. That is, when the top path is "A B B B B", "A B" is returned if merge_repeated = True but "A B B B B" is returned if merge_repeated = False.
Arguments:
inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
sequence_length: A vector containing sequence lengths, size `(batch)`.
beam_width: A scalar >= 0 (beam search beam width).
top_paths: A scalar >= 0, <= beam_width (controls output size).
Returns:
decoded_indices: A list (length: top_paths) of indices matrices. Matrix j,
size `(total_decoded_outputs[j] x 2)`, has indices of a `SparseTensor<int64, 2>`. The rows store: [batch, time].
decoded_values: A list (length: top_paths) of values vectors. Vector j,
size `(length total_decoded_outputs[j])`, has the values of a `SparseTensor<int64, 2>`. The vector stores the decoded classes for beam j.
decoded_shape: A list (length: top_paths) of shape vector. Vector j,
size `(2)`, stores the shape of the decoded `SparseTensor[j]`. Its values are: `[batch_size, max_decoded_length[j]]`.
log_probability: A matrix, shaped: `(batch_size x top_paths)`. The
sequence log-probabilities.
func CTCGreedyDecoder ¶
func CTCGreedyDecoder(scope *Scope, inputs tf.Output, sequence_length tf.Output, optional ...CTCGreedyDecoderAttr) (decoded_indices tf.Output, decoded_values tf.Output, decoded_shape tf.Output, log_probability tf.Output)
Performs greedy decoding on the logits given in inputs.
A note about the attribute merge_repeated: if enabled, when consecutive logits' maximum indices are the same, only the first of these is emitted. Labeling the blank '*', the sequence "A B B * B B" becomes "A B B" if merge_repeated = True and "A B B B B" if merge_repeated = False.
Regardless of the value of merge_repeated, if the maximum index of a given time and batch corresponds to the blank, index `(num_classes - 1)`, no new element is emitted.
Arguments:
inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. sequence_length: A vector containing sequence lengths, size `(batch_size)`.
Returns:
decoded_indices: Indices matrix, size `(total_decoded_outputs x 2)`,
of a `SparseTensor<int64, 2>`. The rows store: [batch, time].
decoded_values: Values vector, size: `(total_decoded_outputs)`,
of a `SparseTensor<int64, 2>`. The vector stores the decoded classes.
decoded_shape: Shape vector, size `(2)`, of the decoded SparseTensor.
Values are: `[batch_size, max_decoded_length]`.
log_probability: Matrix, size `(batch_size x 1)`, containing sequence
log-probabilities.
func CTCLoss ¶
func CTCLoss(scope *Scope, inputs tf.Output, labels_indices tf.Output, labels_values tf.Output, sequence_length tf.Output, optional ...CTCLossAttr) (loss tf.Output, gradient tf.Output)
Calculates the CTC Loss (log probability) for each batch entry. Also calculates
the gradient. This op performs the softmax operation for you, so inputs should be, e.g., linear projections of outputs by an LSTM.
Arguments:
inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. labels_indices: The indices of a `SparseTensor<int32, 2>`.
`labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for `(batch b, time t)`.
labels_values: The values (labels) associated with the given batch and time. sequence_length: A vector containing sequence lengths (batch).
Returns:
loss: A vector (batch) containing log-probabilities. gradient: The gradient of `loss`. 3-D, shape:
`(max_time x batch_size x num_classes)`.
func CTCLossV2 ¶
func CTCLossV2(scope *Scope, inputs tf.Output, labels_indices tf.Output, labels_values tf.Output, sequence_length tf.Output, optional ...CTCLossV2Attr) (loss tf.Output, gradient tf.Output)
Calculates the CTC Loss (log probability) for each batch entry. Also calculates
the gradient. This op performs the softmax operation for you, so inputs should be, e.g., linear projections of outputs by an LSTM.
Arguments:
inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. Default blank
label is 0 rather num_classes - 1.
labels_indices: The indices of a `SparseTensor<int32, 2>`.
`labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for `(batch b, time t)`.
labels_values: The values (labels) associated with the given batch and time. sequence_length: A vector containing sequence lengths (batch).
Returns:
loss: A vector (batch) containing log-probabilities. gradient: The gradient of `loss`. 3-D, shape:
`(max_time x batch_size x num_classes)`.
func CacheDataset ¶
func CacheDataset(scope *Scope, input_dataset tf.Output, filename tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...CacheDatasetAttr) (handle tf.Output)
Creates a dataset that caches elements from `input_dataset`.
A CacheDataset will iterate over the input_dataset and store tensors. If the cache already exists, the cache will be used. If the cache is inappropriate (e.g. cannot be opened, or contains tensors of the wrong shape / size), an error will be returned when it is used.
Arguments:
filename: A path on the filesystem where we should cache the dataset. Note: this
will be a directory.
func CheckNumerics ¶
Checks a tensor for NaN and Inf values.
When run, reports an `InvalidArgument` error if `tensor` has any values that are not a number (NaN) or infinity (Inf). Otherwise, returns the input tensor.
Example usage:
``` python
a = tf.Variable(1.0)
tf.debugging.check_numerics(a, message='')

b = tf.Variable(np.nan)
try:
  tf.debugging.check_numerics(b, message='Checking b')
except Exception as e:
  assert "Checking b : Tensor had NaN values" in e.message

c = tf.Variable(np.inf)
try:
  tf.debugging.check_numerics(c, message='Checking c')
except Exception as e:
  assert "Checking c : Tensor had Inf values" in e.message
```
Arguments:
message: Prefix of the error message.
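A minimal Go sketch (the CheckNumerics signature is not shown above; it is assumed here to take the tensor and the message string after the scope):

```
s := NewScope()
t := Placeholder(s, tf.Float)
checked := CheckNumerics(s, t, "Checking t") // assumed signature: (scope, tensor, message)
if s.Err() != nil {
	panic(s.Err())
}
_ = checked // forwards t unchanged unless it contains NaN or Inf values
```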
func CheckNumericsV2 ¶
Checks a tensor for NaN, -Inf and +Inf values.
When run, reports an `InvalidArgument` error if `tensor` has any values that are not a number (NaN) or infinity (Inf). Otherwise, returns the input tensor. Unlike CheckNumerics (V1), CheckNumericsV2 distinguishes -Inf and +Inf in the errors it throws.
Arguments:
message: Prefix of the error message.
func Cholesky ¶
Computes the Cholesky decomposition of one or more square matrices.
The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices.
The input has to be symmetric and positive definite. Only the lower-triangular part of the input will be used for this operation. The upper-triangular part will not be read.
The output is a tensor of the same shape as the input containing the Cholesky decompositions for all input submatrices `[..., :, :]`.
**Note**: The gradient computation on GPU is faster for large matrices but not for large batch dimensions when the submatrices are small. In this case it might be faster to use the CPU.
Arguments:
input: Shape is `[..., M, M]`.
Returns Shape is `[..., M, M]`.
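A minimal Go sketch (the Cholesky signature is not shown above; it is assumed here to take a single input tensor after the scope):

```
s := NewScope()
a := Const(s, [][]float32{{4, 2}, {2, 3}}) // symmetric positive-definite matrix
l := Cholesky(s, a)                        // assumed signature: (scope, input)
if s.Err() != nil {
	panic(s.Err())
}
_ = l // lower-triangular factor with l * transpose(l) == a
```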
func CholeskyGrad ¶
Computes the reverse mode backpropagated gradient of the Cholesky algorithm.
For an explanation see "Differentiation of the Cholesky algorithm" by Iain Murray http://arxiv.org/abs/1602.07527.
Arguments:
l: Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`.
Algorithm depends only on lower triangular part of the innermost matrices of this tensor.
grad: df/dl where f is some scalar function. Shape is `[..., M, M]`.
Algorithm depends only on lower triangular part of the innermost matrices of this tensor.
Returns Symmetrized version of df/dA. Shape is `[..., M, M]`
func ClipByValue ¶
func ClipByValue(scope *Scope, t tf.Output, clip_value_min tf.Output, clip_value_max tf.Output) (output tf.Output)
Clips tensor values to a specified min and max.
Given a tensor `t`, this operation returns a tensor of the same type and shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`. Any values less than `clip_value_min` are set to `clip_value_min`. Any values greater than `clip_value_max` are set to `clip_value_max`.
Arguments:
t: A `Tensor`. clip_value_min: A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
as `t`. The minimum value to clip by.
clip_value_max: A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
as `t`. The maximum value to clip by.
Returns A clipped `Tensor` with the same shape as input 't'.
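A minimal Go sketch using the signature above:

```
s := NewScope()
t := Const(s, []float32{-1, 0.5, 2, 10})
clipped := ClipByValue(s, t, Const(s, float32(0)), Const(s, float32(1)))
if s.Err() != nil {
	panic(s.Err())
}
_ = clipped // evaluates to [0, 0.5, 1, 1] when the graph is run
```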
func CollateTPUEmbeddingMemory ¶ added in v0.2.0
func CollateTPUEmbeddingMemory(scope *Scope, memory_configs []tf.Output) (merged_memory_config tf.Output)
An op that merges the string-encoded memory config protos from all hosts.
Arguments:
memory_configs: String-encoded memory config protos containing metadata about
the memory allocations reserved for TPUEmbedding across all hosts.
func CollectiveAllToAllV2 ¶ added in v0.5.0
func CollectiveAllToAllV2(scope *Scope, input tf.Output, group_size tf.Output, group_key tf.Output, instance_key tf.Output, ordering_token []tf.Output, optional ...CollectiveAllToAllV2Attr) (data tf.Output)
Mutually exchanges multiple tensors of identical type and shape.
`is_stateless` means each op does not need control dependencies to other collective ops. In this case, keys that are unique at runtime (e.g. `instance_key`) should be used to distinguish collective groups.
func CollectiveAllToAllV3 ¶
func CollectiveAllToAllV3(scope *Scope, input tf.Output, communicator tf.Output, group_assignment tf.Output, optional ...CollectiveAllToAllV3Attr) (data tf.Output)
Mutually exchanges multiple tensors of identical type and shape.
func CollectiveAssignGroupV2 ¶
func CollectiveAssignGroupV2(scope *Scope, group_assignment tf.Output, device_index tf.Output, base_key tf.Output) (group_size tf.Output, group_key tf.Output)
Assign group keys based on group assignment.
func CollectiveBcastRecv ¶
func CollectiveBcastRecv(scope *Scope, T tf.DataType, group_size int64, group_key int64, instance_key int64, shape tf.Shape, optional ...CollectiveBcastRecvAttr) (data tf.Output)
Receives a tensor value broadcast from another device.
func CollectiveBcastRecvV2 ¶
func CollectiveBcastRecvV2(scope *Scope, group_size tf.Output, group_key tf.Output, instance_key tf.Output, shape tf.Output, T tf.DataType, optional ...CollectiveBcastRecvV2Attr) (data tf.Output)
Receives a tensor value broadcast from another device.
func CollectiveBcastSend ¶
func CollectiveBcastSend(scope *Scope, input tf.Output, group_size int64, group_key int64, instance_key int64, shape tf.Shape, optional ...CollectiveBcastSendAttr) (data tf.Output)
Broadcasts a tensor value to one or more other devices.
func CollectiveBcastSendV2 ¶
func CollectiveBcastSendV2(scope *Scope, input tf.Output, group_size tf.Output, group_key tf.Output, instance_key tf.Output, optional ...CollectiveBcastSendV2Attr) (data tf.Output)
Broadcasts a tensor value to one or more other devices.
func CollectiveGather ¶
func CollectiveGather(scope *Scope, input tf.Output, group_size int64, group_key int64, instance_key int64, shape tf.Shape, optional ...CollectiveGatherAttr) (data tf.Output)
Mutually accumulates multiple tensors of identical type and shape.
func CollectiveGatherV2 ¶
func CollectiveGatherV2(scope *Scope, input tf.Output, group_size tf.Output, group_key tf.Output, instance_key tf.Output, ordering_token []tf.Output, optional ...CollectiveGatherV2Attr) (data tf.Output)
Mutually accumulates multiple tensors of identical type and shape.
`is_stateless` means each op does not need control dependencies to other collective ops. In this case, keys that are unique at runtime (e.g. `instance_key`) should be used to distinguish collective groups.
func CollectiveInitializeCommunicator ¶
func CollectiveInitializeCommunicator(scope *Scope, group_key tf.Output, rank tf.Output, group_size tf.Output, optional ...CollectiveInitializeCommunicatorAttr) (communicator tf.Output)
Initializes a group for collective operations.
func CollectivePermute ¶
func CollectivePermute(scope *Scope, input tf.Output, source_target_pairs tf.Output) (output tf.Output)
An Op to permute tensors across replicated TPU instances.
Each instance supplies its own input.
For example, suppose there are 4 TPU instances: `[A, B, C, D]`. Passing source_target_pairs=`[[0,1],[1,2],[2,3],[3,0]]` gets the outputs: `[D, A, B, C]`.
Arguments:
input: The local input to be permuted. Currently only supports float and
bfloat16.
source_target_pairs: A tensor with shape [num_pairs, 2].
Returns The permuted input.
func CollectiveReduce ¶
func CollectiveReduce(scope *Scope, input tf.Output, group_size int64, group_key int64, instance_key int64, merge_op string, final_op string, subdiv_offsets []int64, optional ...CollectiveReduceAttr) (data tf.Output)
Mutually reduces multiple tensors of identical type and shape.
func CollectiveReduceScatterV2 ¶ added in v0.4.0
func CollectiveReduceScatterV2(scope *Scope, input tf.Output, group_size tf.Output, group_key tf.Output, instance_key tf.Output, ordering_token []tf.Output, merge_op string, final_op string, optional ...CollectiveReduceScatterV2Attr) (data tf.Output)
Mutually reduces multiple tensors of identical type and shape and scatters the result.
`is_stateless` means each op does not need control dependencies to other collective ops. In this case, keys that are unique at runtime (e.g. `instance_key`) should be used to distinguish collective groups.
func CollectiveReduceV2 ¶
func CollectiveReduceV2(scope *Scope, input tf.Output, group_size tf.Output, group_key tf.Output, instance_key tf.Output, ordering_token []tf.Output, merge_op string, final_op string, optional ...CollectiveReduceV2Attr) (data tf.Output)
Mutually reduces multiple tensors of identical type and shape.
`is_stateless` means each op does not need control dependencies to other collective ops. In this case, keys that are unique at runtime (e.g. `instance_key`) should be used to distinguish collective groups.
func CollectiveReduceV3 ¶
func CollectiveReduceV3(scope *Scope, input tf.Output, communicator tf.Output, group_assignment tf.Output, reduction string, optional ...CollectiveReduceV3Attr) (data tf.Output)
Mutually reduces multiple tensors of identical type and shape.
func CombinedNonMaxSuppression ¶
func CombinedNonMaxSuppression(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size_per_class tf.Output, max_total_size tf.Output, iou_threshold tf.Output, score_threshold tf.Output, optional ...CombinedNonMaxSuppressionAttr) (nmsed_boxes tf.Output, nmsed_scores tf.Output, nmsed_classes tf.Output, valid_detections tf.Output)
Greedily selects a subset of bounding boxes in descending order of score.
This operation performs non_max_suppression on the inputs per batch, across all classes. Prunes away boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes are supplied as [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm is agnostic to where the origin is in the coordinate system. Also note that this algorithm is invariant to orthogonal transformations and translations of the coordinate system; thus translation or reflection of the coordinate system results in the same boxes being selected by the algorithm. The output of this operation is the final boxes, scores and classes tensor returned after performing non_max_suppression.
Arguments:
boxes: A 4-D float tensor of shape `[batch_size, num_boxes, q, 4]`. If `q` is 1 then
same boxes are used for all classes otherwise, if `q` is equal to number of classes, class-specific boxes are used.
scores: A 3-D float tensor of shape `[batch_size, num_boxes, num_classes]`
representing a single score corresponding to each box (each row of boxes).
max_output_size_per_class: A scalar integer tensor representing the maximum number of
boxes to be selected by non max suppression per class
max_total_size: An int32 scalar representing the maximum number of boxes retained over all
classes. Note that setting this value to a large number may result in OOM error depending on the system workload.
iou_threshold: A 0-D float tensor representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
boxes based on score.
Returns:
nmsed_boxes: A [batch_size, max_detections, 4] float32 tensor
containing the non-max suppressed boxes.
nmsed_scores: A [batch_size, max_detections] float32 tensor
containing the scores for the boxes.
nmsed_classes: A [batch_size, max_detections] float32 tensor
containing the classes for the boxes.
valid_detections: A [batch_size] int32 tensor indicating the number of
valid detections per batch item. Only the top num_detections[i] entries in nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of the entries are zero paddings.
func Complex ¶
Converts two real numbers to a complex number.
Given a tensor `real` representing the real part of a complex number, and a tensor `imag` representing the imaginary part of a complex number, this operation returns complex numbers elementwise of the form \\(a + bj\\), where *a* represents the `real` part and *b* represents the `imag` part.
The input tensors `real` and `imag` must have the same shape.
For example:
```
# tensor 'real' is [2.25, 3.25]
# tensor `imag` is [4.75, 5.75]
tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
```
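A minimal Go sketch of the same computation (the Complex signature is not shown above; it is assumed here to take the real and imaginary tensors after the scope):

```
s := NewScope()
re := Const(s, []float32{2.25, 3.25})
im := Const(s, []float32{4.75, 5.75})
z := Complex(s, re, im) // assumed signature: (scope, real, imag)
if s.Err() != nil {
	panic(s.Err())
}
_ = z // evaluates to [2.25+4.75j, 3.25+5.75j] when the graph is run
```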
func ComplexAbs ¶
Computes the complex absolute value of a tensor.
Given a tensor `x` of complex numbers, this operation returns a tensor of type `float` or `double` that is the absolute value of each element in `x`. All elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute value is computed as \\( \sqrt{a^2 + b^2}\\).
For example:
>>> x = tf.complex(3.0, 4.0)
>>> print((tf.raw_ops.ComplexAbs(x=x, Tout=tf.dtypes.float32, name=None)).numpy())
5.0
func CompositeTensorVariantFromComponents ¶
func CompositeTensorVariantFromComponents(scope *Scope, components []tf.Output, metadata string) (encoded tf.Output)
Encodes an `ExtensionType` value into a `variant` scalar Tensor.
Returns a scalar variant tensor containing a single `CompositeTensorVariant` with the specified Tensor components and TypeSpec.
Arguments:
components: The component tensors for the extension type value. metadata: String serialization for the TypeSpec. (Note: the encoding for the TypeSpec
may change in future versions of TensorFlow.)
Returns A `variant` Tensor containing the encoded value.
func CompositeTensorVariantToComponents ¶
func CompositeTensorVariantToComponents(scope *Scope, encoded tf.Output, metadata string, Tcomponents []tf.DataType) (components []tf.Output)
Decodes a `variant` scalar Tensor into an `ExtensionType` value.
Returns the Tensor components encoded in a `CompositeTensorVariant`.
Raises an error if `type_spec_proto` doesn't match the TypeSpec in `encoded`.
Arguments:
encoded: A scalar `variant` Tensor containing an encoded ExtensionType value. metadata: String serialization for the TypeSpec. Must be compatible with the
`TypeSpec` contained in `encoded`. (Note: the encoding for the TypeSpec may change in future versions of TensorFlow.)
Tcomponents: Expected dtypes for components.
Returns The component tensors for the ExtensionType value in `encoded`.
func CompressElement ¶
Compresses a dataset element.
func ComputeAccidentalHits ¶
func ComputeAccidentalHits(scope *Scope, true_classes tf.Output, sampled_candidates tf.Output, num_true int64, optional ...ComputeAccidentalHitsAttr) (indices tf.Output, ids tf.Output, weights tf.Output)
Computes the ids of the positions in sampled_candidates that match true_labels.
When doing log-odds NCE, the result of this op should be passed through a SparseToDense op, then added to the logits of the sampled candidates. This has the effect of 'removing' the sampled labels that match the true labels by making the classifier sure that they are sampled labels.
Arguments:
true_classes: The true_classes output of UnpackSparseLabels.
sampled_candidates: The sampled_candidates output of CandidateSampler.
num_true: Number of true labels per context.
Returns:
indices: A vector of indices corresponding to rows of true_candidates. ids: A vector of IDs of positions in sampled_candidates that match a true_label
for the row with the corresponding index in indices.
weights: A vector of the same length as indices and ids, in which each element
is -FLOAT_MAX.
func ComputeBatchSize ¶
Computes the static batch size of a dataset sans partial batches.
func ComputeDedupDataSize ¶ added in v0.8.0
An op that computes the size of the deduplication data from the embedding core and returns the updated config.
This op computes the size of the deduplication data so that the op that computes the tuple mask of the deduplication data can have a static output shape.
Arguments:
config: Serialized TPUEmbeddingConfiguration proto.
Returns The size of the deduplicated data from infeed.
func ComputeDedupDataSizeV2 ¶ added in v0.8.2
func ComputeDedupDataSizeV2(scope *Scope, config string, embedding_partitions string, hbm_buffers_config string, tpu_topology string) (num_elements tf.Output)
An op that computes the size of the deduplication data from the embedding core and returns the updated config.
This op computes the size of the deduplication data so that the op that computes the tuple mask of the deduplication data can have a static output shape.
Arguments:
config: Serialized TPUEmbeddingConfiguration proto.
embedding_partitions: Serialized EmbeddingPartitionsProto proto.
hbm_buffers_config: Serialized HbmBuffersConfig proto.
tpu_topology: Serialized TpuTopologyArgsProto proto.
Returns The size of the deduplicated data from infeed.
func ComputeDedupDataTupleMask ¶ added in v0.4.0
An op that computes the tuple mask of the deduplication data from the embedding core.
The deduplication data received from the embedding core is a Tensor with type=DT_VARIANT. The tensor itself is an XLA nested tuple whose elements are rank 1 tensors. This op represents the types and lengths of these elements.
Arguments:
config: Serialized TPUEmbeddingConfiguration proto.
Returns A 2-D int tensor representing the mask of the deduplication data tuple generated by `XlaRecvTPUEmbeddingDeduplicationData`. The tuple has several integer and float type 1-D tensor tuple elements. The first dimension of this 2-D `output_shape` tensor gives the tensor type of each tuple element: `0` represents an integer tensor and `1` represents a float tensor. The second dimension of `output_shape` gives the length of each tuple element.
func ComputeDedupDataTupleMaskV2 ¶ added in v0.8.2
func ComputeDedupDataTupleMaskV2(scope *Scope, config string, embedding_partitions string, hbm_buffers_config string, tpu_topology string) (output_shape tf.Output)
An op that computes the tuple mask of the deduplication data from the embedding core.
The deduplication data received from the embedding core is a Tensor with type=DT_VARIANT. The tensor itself is an XLA nested tuple whose elements are rank 1 tensors. This op represents the types and lengths of these elements.
Arguments:
config: Serialized TPUEmbeddingConfiguration proto.
embedding_partitions: Serialized EmbeddingPartitionsProto proto.
hbm_buffers_config: Serialized HbmBuffersConfig proto.
tpu_topology: Serialized TpuTopologyArgsProto proto.
Returns A 2-D int tensor representing the mask of the deduplication data tuple generated by `XlaRecvTPUEmbeddingDeduplicationData`. The tuple has several integer and float type 1-D tensor tuple elements. The first dimension of this 2-D `output_shape` tensor gives the tensor type of each tuple element: `0` represents an integer tensor and `1` represents a float tensor. The second dimension of `output_shape` gives the length of each tuple element.
func Concat ¶
Concatenates tensors along one dimension.
Arguments:
concat_dim: 0-D. The dimension along which to concatenate. Must be in the
range [0, rank(values)).
values: The `N` Tensors to concatenate. Their ranks and types must match,
and their sizes must match in all dimensions except `concat_dim`.
Returns A `Tensor` with the concatenation of values stacked along the `concat_dim` dimension. This tensor's shape matches that of `values` except in `concat_dim` where it has the sum of the sizes.
func ConcatOffset ¶
Computes offsets of concat inputs within its output.
For example:
>>> x = [2, 2, 7]
>>> y = [2, 3, 7]
>>> z = [2, 9, 7]
>>> offsets = concat_offset(1, [x, y, z])
>>> [list(off.numpy()) for off in offsets]
[[0, 0, 0], [0, 2, 0], [0, 5, 0]]
This is typically used by gradient computations for a concat operation.
Arguments:
concat_dim: The dimension along which to concatenate. shape: The `N` int32 or int64 vectors representing shape of tensors being concatenated.
Returns The `N` vectors representing the starting offset of input tensors within the concatenated output with type matching `shape`.
func ConcatV2 ¶
Concatenates tensors along one dimension.
Arguments:
values: List of `N` Tensors to concatenate. Their ranks and types must match,
and their sizes must match in all dimensions except `concat_dim`.
axis: 0-D. The dimension along which to concatenate. Must be in the
range [-rank(values), rank(values)).
Returns A `Tensor` with the concatenation of values stacked along the `concat_dim` dimension. This tensor's shape matches that of `values` except in `concat_dim` where it has the sum of the sizes.
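A minimal Go sketch (the ConcatV2 signature is not shown above; it is assumed here to take the slice of values followed by the axis tensor):

```
s := NewScope()
a := Const(s, [][]float32{{1, 2}, {3, 4}})
b := Const(s, [][]float32{{5, 6}})
out := ConcatV2(s, []tf.Output{a, b}, Const(s, int32(0))) // assumed signature: (scope, values, axis)
if s.Err() != nil {
	panic(s.Err())
}
_ = out // shape [3, 2]: the rows of a followed by the rows of b
```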
func ConcatenateDataset ¶
func ConcatenateDataset(scope *Scope, input_dataset tf.Output, another_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ConcatenateDatasetAttr) (handle tf.Output)
Creates a dataset that concatenates `input_dataset` with `another_dataset`.
func ConfigureAndInitializeGlobalTPU ¶ added in v0.2.0
func ConfigureAndInitializeGlobalTPU(scope *Scope, optional ...ConfigureAndInitializeGlobalTPUAttr) (output tf.Output)
An op that sets up the centralized structures for a distributed TPU system.
Returns A vector containing the global TPU id of each TPU on the host.
func ConfigureDistributedTPU ¶
func ConfigureDistributedTPU(scope *Scope, optional ...ConfigureDistributedTPUAttr) (topology tf.Output)
Sets up the centralized structures for a distributed TPU system.
Returns A serialized tensorflow.tpu.TopologyProto that describes the TPU topology.
func ConfigureTPUEmbedding ¶
Sets up TPUEmbedding in a distributed TPU system.
Arguments:
config: Serialized tensorflow.tpu.TPUEmbeddingConfiguration that
describes the embedding lookups of the program.
Returns the created operation.
func ConfigureTPUEmbeddingHost ¶ added in v0.2.0
func ConfigureTPUEmbeddingHost(scope *Scope, common_config tf.Output, memory_config tf.Output, config string) (network_config tf.Output)
An op that configures the TPUEmbedding software on a host.
Arguments:
common_config: A string-encoded common configuration proto containing metadata
about the TPUEmbedding partitioner output.
memory_config: A string-encoded memory config proto containing metadata about
the memory allocations reserved for TPUEmbedding.
config: A TPUEmbeddingConfiguration proto serialized to a string,
describing the desired TPUEmbedding configuration.
Returns A string containing metadata about the hostname and RPC port used for communication with this host.
func ConfigureTPUEmbeddingMemory ¶ added in v0.2.0
An op that configures the TPUEmbedding software on a host.
Arguments:
common_config: A string-encoded CommonConfiguration proto containing metadata
about the TPUEmbedding partitioner output and the HBM size (in bytes) required for operation.
Returns A string-encoded memory configuration containing metadata about the memory allocations reserved for TPUEmbedding.
func Conj ¶
Returns the complex conjugate of a complex number.
Given a tensor `input` of complex numbers, this operation returns a tensor of complex numbers that are the complex conjugate of each element in `input`. The complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the real part and *b* is the imaginary part.
The complex conjugate returned by this operation is of the form \\(a - bj\\).
For example:
```
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
```
func ConjugateTranspose ¶
Shuffle dimensions of x according to a permutation and conjugate the result.
The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
`y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
`y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])`
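A minimal Go sketch (the ConjugateTranspose and Complex signatures are not shown above; both are assumed here, with ConjugateTranspose taking the input and the permutation vector):

```
s := NewScope()
re := Const(s, [][]float32{{1, 2}, {3, 4}})
im := Const(s, [][]float32{{5, 6}, {7, 8}})
x := Complex(s, re, im)             // assumed signature: (scope, real, imag)
perm := Const(s, []int32{1, 0})     // swap the two axes
y := ConjugateTranspose(s, x, perm) // assumed signature: (scope, x, perm)
if s.Err() != nil {
	panic(s.Err())
}
_ = y // y[i, j] == conj(x[j, i]) when the graph is run
```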
func ConnectTPUEmbeddingHosts ¶ added in v0.2.0
An op that sets up communication between TPUEmbedding host software instances
after ConfigureTPUEmbeddingHost has been called on each host.
Arguments:
network_configs: Strings containing metadata about the hostname and RPC port
used for communication with all hosts.
Returns the created operation.
func ConsumeMutexLock ¶
This op consumes a lock created by `MutexLock`.
This op exists to consume a tensor created by `MutexLock` (other than direct control dependencies). It should be the only op that consumes the tensor, and will raise an error if it is not. Its only purpose is to keep the mutex lock tensor alive until it is consumed by this op.
**NOTE**: This operation must run on the same device as its input. This may be enforced via the `colocate_with` mechanism.
Arguments:
mutex_lock: A tensor returned by `MutexLock`.
Returns the created operation.
func ControlTrigger ¶
Does nothing. Serves as a control trigger for scheduling.
Only useful as a placeholder for control edges.
Returns the created operation.
func Conv ¶ added in v0.6.0
func Conv(scope *Scope, input tf.Output, filter tf.Output, strides []int64, padding string, optional ...ConvAttr) (output tf.Output)
Computes a N-D convolution given (N+1+batch_dims)-D `input` and (N+2)-D `filter` tensors.
General function for computing a N-D convolution. It is required that `1 <= N <= 3`.
Arguments:
input: Tensor of type T and shape `batch_shape + spatial_shape + [in_channels]` in the
case that `channels_last_format = true` or shape `batch_shape + [in_channels] + spatial_shape` if `channels_last_format = false`. spatial_shape is N-dimensional with `N=2` or `N=3`. Also note that `batch_shape` is dictated by the parameter `batch_dims` and defaults to 1.
filter: An `(N+2)-D` Tensor with the same type as `input` and shape
`spatial_filter_shape + [in_channels, out_channels]`, where spatial_filter_shape is N-dimensional with `N=2` or `N=3`.
strides: 1-D tensor of length `N+2`. The stride of the sliding window for each
dimension of `input`. Must have `strides[0] = strides[N+1] = 1`.
padding: The type of padding algorithm to use.
Returns A (N+1+batch_dims)-D tensor. The dimension order is determined by the value of `channels_last_format`, see below for details.
func Conv2D ¶
func Conv2D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, padding string, optional ...Conv2DAttr) (output tf.Output)
Computes a 2-D convolution given 4-D `input` and `filter` tensors.
Given an input tensor of shape `[batch, in_height, in_width, in_channels]` and a filter / kernel tensor of shape `[filter_height, filter_width, in_channels, out_channels]`, this op performs the following:
- Flattens the filter to a 2-D matrix with shape `[filter_height * filter_width * in_channels, output_channels]`.
- Extracts image patches from the input tensor to form a *virtual* tensor of shape `[batch, out_height, out_width, filter_height * filter_width * in_channels]`.
- For each patch, right-multiplies the filter matrix and the image patch vector.
In detail, with the default NHWC format,
output[b, i, j, k] = sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] * filter[di, dj, q, k]
Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
Arguments:
input: A 4-D tensor. The dimension order is interpreted according to the value
of `data_format`, see below for details.
filter: A 4-D tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`
strides: 1-D tensor of length 4. The stride of the sliding window for each
dimension of `input`. The dimension order is determined by the value of `data_format`, see below for details.
padding: The type of padding algorithm to use.
Returns A 4-D tensor. The dimension order is determined by the value of `data_format`, see below for details.
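A minimal Go sketch using the signature above, with placeholders standing in for the NHWC input and the filter:

```
s := NewScope()
input := Placeholder(s, tf.Float)  // expected shape [batch, in_height, in_width, in_channels]
filter := Placeholder(s, tf.Float) // expected shape [filter_height, filter_width, in_channels, out_channels]
output := Conv2D(s, input, filter, []int64{1, 1, 1, 1}, "SAME")
if s.Err() != nil {
	panic(s.Err())
}
_ = output // 4-D NHWC tensor of convolved feature maps
```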
func Conv2DBackpropFilter ¶
func Conv2DBackpropFilter(scope *Scope, input tf.Output, filter_sizes tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv2DBackpropFilterAttr) (output tf.Output)
Computes the gradients of convolution with respect to the filter.
Arguments:
input: 4-D with shape `[batch, in_height, in_width, in_channels]`. filter_sizes: An integer vector representing the tensor shape of `filter`,
where `filter` is a 4-D `[filter_height, filter_width, in_channels, out_channels]` tensor.
out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: The stride of the sliding window for each dimension of the input
of the convolution. Must be in the same order as the dimension specified with format.
padding: The type of padding algorithm to use.
Returns 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t. the `filter` input of the convolution.
func Conv2DBackpropFilterV2 ¶ added in v0.4.0
func Conv2DBackpropFilterV2(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv2DBackpropFilterV2Attr) (output tf.Output)
Computes the gradients of convolution with respect to the filter.
Arguments:
input: 4-D with shape `[batch, in_height, in_width, in_channels]`. filter: 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`.
Only shape of tensor is used.
out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: The stride of the sliding window for each dimension of the input
of the convolution. Must be in the same order as the dimension specified with format.
padding: The type of padding algorithm to use.
Returns 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t. the `filter` input of the convolution.
func Conv2DBackpropInput ¶
func Conv2DBackpropInput(scope *Scope, input_sizes tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv2DBackpropInputAttr) (output tf.Output)
Computes the gradients of convolution with respect to the input.
Arguments:
input_sizes: An integer vector representing the shape of `input`,
where `input` is a 4-D `[batch, height, width, channels]` tensor.
filter: 4-D with shape
`[filter_height, filter_width, in_channels, out_channels]`.
out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: The stride of the sliding window for each dimension of the input
of the convolution. Must be in the same order as the dimension specified with format.
padding: The type of padding algorithm to use.
Returns 4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient w.r.t. the input of the convolution.
func Conv2DBackpropInputV2 ¶ added in v0.4.0
func Conv2DBackpropInputV2(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv2DBackpropInputV2Attr) (output tf.Output)
Computes the gradients of convolution with respect to the input.
Arguments:
input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
Only shape of tensor is used.
filter: 4-D with shape
`[filter_height, filter_width, in_channels, out_channels]`.
out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: The stride of the sliding window for each dimension of the input
of the convolution. Must be in the same order as the dimension specified with format.
padding: The type of padding algorithm to use.
Returns 4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient w.r.t. the input of the convolution.
func Conv3D ¶
func Conv3D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, padding string, optional ...Conv3DAttr) (output tf.Output)
Computes a 3-D convolution given 5-D `input` and `filter` tensors.
In signal processing, cross-correlation is a measure of similarity of two waveforms as a function of a time-lag applied to one of them. This is also known as a sliding dot product or sliding inner-product.
Our Conv3D implements a form of cross-correlation.
Arguments:
input: Shape `[batch, in_depth, in_height, in_width, in_channels]`. filter: Shape `[filter_depth, filter_height, filter_width, in_channels,
out_channels]`. `in_channels` must match between `input` and `filter`.
strides: 1-D tensor of length 5. The stride of the sliding window for each
dimension of `input`. Must have `strides[0] = strides[4] = 1`.
padding: The type of padding algorithm to use.
func Conv3DBackpropFilter ¶
func Conv3DBackpropFilter(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropFilterAttr) (output tf.Output)
Computes the gradients of 3-D convolution with respect to the filter.
DEPRECATED at GraphDef version 10: Use Conv3DBackpropFilterV2
Arguments:
input: Shape `[batch, depth, rows, cols, in_channels]`. filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
`in_channels` must match between `input` and `filter`.
out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
out_channels]`.
strides: 1-D tensor of length 5. The stride of the sliding window for each
dimension of `input`. Must have `strides[0] = strides[4] = 1`.
padding: The type of padding algorithm to use.
func Conv3DBackpropFilterV2 ¶
func Conv3DBackpropFilterV2(scope *Scope, input tf.Output, filter_sizes tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropFilterV2Attr) (output tf.Output)
Computes the gradients of 3-D convolution with respect to the filter.
Arguments:
input: Shape `[batch, depth, rows, cols, in_channels]`. filter_sizes: An integer vector representing the tensor shape of `filter`,
where `filter` is a 5-D `[filter_depth, filter_height, filter_width, in_channels, out_channels]` tensor.
out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
out_channels]`.
strides: 1-D tensor of length 5. The stride of the sliding window for each
dimension of `input`. Must have `strides[0] = strides[4] = 1`.
padding: The type of padding algorithm to use.
func Conv3DBackpropInput ¶
func Conv3DBackpropInput(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropInputAttr) (output tf.Output)
Computes the gradients of 3-D convolution with respect to the input.
DEPRECATED at GraphDef version 10: Use Conv3DBackpropInputV2
Arguments:
input: Shape `[batch, depth, rows, cols, in_channels]`. filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
`in_channels` must match between `input` and `filter`.
out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
out_channels]`.
strides: 1-D tensor of length 5. The stride of the sliding window for each
dimension of `input`. Must have `strides[0] = strides[4] = 1`.
padding: The type of padding algorithm to use.
func Conv3DBackpropInputV2 ¶
func Conv3DBackpropInputV2(scope *Scope, input_sizes tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropInputV2Attr) (output tf.Output)
Computes the gradients of 3-D convolution with respect to the input.
Arguments:
input_sizes: An integer vector representing the tensor shape of `input`,
where `input` is a 5-D `[batch, depth, rows, cols, in_channels]` tensor.
filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
`in_channels` must match between `input` and `filter`.
out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
out_channels]`.
strides: 1-D tensor of length 5. The stride of the sliding window for each
dimension of `input`. Must have `strides[0] = strides[4] = 1`.
padding: The type of padding algorithm to use.
func Copy ¶
Copy a tensor from CPU-to-CPU or GPU-to-GPU.
Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the device on which the tensor is allocated. N.B.: If all the downstream attached debug ops are disabled given the current gRPC gating status, the output will simply forward the input tensor without deep-copying. See the documentation of Debug* ops for more details.
Unlike the CopyHost Op, this op does not have HostMemory constraint on its input or output.
Arguments:
input: Input tensor.
func CopyHost ¶
Copy a tensor to host.
Performs CPU-to-CPU deep-copying of tensor. N.B.: If all the downstream attached debug ops are disabled given the current gRPC gating status, the output will simply forward the input tensor without deep-copying. See the documentation of Debug* ops for more details.
Unlike the Copy Op, this op has HostMemory constraint on its input or output.
Arguments:
input: Input tensor.
func Cos ¶
Computes cos of x element-wise.
Given an input tensor, this function computes cosine of every element in the tensor. Input range is `(-inf, inf)` and output range is `[-1,1]`. If input lies outside the boundary, `nan` is returned.
```python
x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")])
tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 0.48718765 -0.95215535 nan]
```
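A minimal Go sketch (the Cos signature is not shown above; it is assumed here to take a single input tensor after the scope):

```
s := NewScope()
x := Const(s, []float32{-0.5, 1, 1.2})
y := Cos(s, x) // assumed signature: (scope, x)
if s.Err() != nil {
	panic(s.Err())
}
_ = y // element-wise cosine of x when the graph is run
```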
func Cosh ¶
Computes hyperbolic cosine of x element-wise.
Given an input tensor, this function computes hyperbolic cosine of every element in the tensor. Input range is `[-inf, inf]` and output range is `[1, inf]`.
```python
x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")])
tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 3.7621956e+00 1.1013233e+04 inf]
```
func CropAndResize ¶
func CropAndResize(scope *Scope, image tf.Output, boxes tf.Output, box_ind tf.Output, crop_size tf.Output, optional ...CropAndResizeAttr) (crops tf.Output)
Extracts crops from the input image tensor and resizes them.
Extracts crops from the input image tensor and resizes them using bilinear sampling or nearest neighbor sampling (possibly with aspect ratio change) to a common output size specified by `crop_size`. This is more general than the `crop_to_bounding_box` op which extracts a fixed size slice from the input image and does not allow resizing or aspect ratio change.
Returns a tensor with `crops` from the input `image` at positions defined at the bounding box locations in `boxes`. The cropped boxes are all resized (with bilinear or nearest neighbor interpolation) to a fixed `size = [crop_height, crop_width]`. The result is a 4-D tensor `[num_boxes, crop_height, crop_width, depth]`. The resizing is corner aligned. In particular, if `boxes = [[0, 0, 1, 1]]`, the method will give identical results to using `tf.image.resize_bilinear()` or `tf.image.resize_nearest_neighbor()`(depends on the `method` argument) with `align_corners=True`.
Arguments:
image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
Both `image_height` and `image_width` need to be positive.
boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
specifies the coordinates of a box in the `box_ind[i]` image and is specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the `[0, 1]` interval of normalized image height is mapped to `[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in which case the sampled crop is an up-down flipped version of the original image. The width dimension is treated similarly. Normalized coordinates outside the `[0, 1]` range are allowed, in which case we use `extrapolation_value` to extrapolate the input image values.
box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
crop_size: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All
cropped image patches are resized to this size. The aspect ratio of the image content is not preserved. Both `crop_height` and `crop_width` need to be positive.
Returns A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
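As a rough illustration of wiring up the arguments above in Go (a sketch only; the box, index, and crop-size constants are made up for the example):
```go
// Crop one full-image box from the batch and resize it to 24x24.
s := NewScope()
image := Placeholder(s, tf.Float)            // [batch, image_height, image_width, depth]
boxes := Const(s, [][]float32{{0, 0, 1, 1}}) // one box in normalized [y1, x1, y2, x2] form
boxInd := Const(s, []int32{0})               // the box samples from image 0 in the batch
cropSize := Const(s, []int32{24, 24})        // [crop_height, crop_width]
crops := CropAndResize(s, image, boxes, boxInd, cropSize)
if s.Err() != nil {
	panic(s.Err())
}
_ = crops // 4-D tensor of shape [num_boxes, 24, 24, depth]
```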
func CropAndResizeGradBoxes ¶
func CropAndResizeGradBoxes(scope *Scope, grads tf.Output, image tf.Output, boxes tf.Output, box_ind tf.Output, optional ...CropAndResizeGradBoxesAttr) (output tf.Output)
Computes the gradient of the crop_and_resize op wrt the input boxes tensor.
Arguments:
grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. Both `image_height` and `image_width` need to be positive.
boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
specifies the coordinates of a box in the `box_ind[i]` image and is specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of `y` is mapped to the image coordinate at `y * (image_height - 1)`, so that the `[0, 1]` interval of normalized image height is mapped to `[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in which case the sampled crop is an up-down flipped version of the original image. The width dimension is treated similarly. Normalized coordinates outside the `[0, 1]` range are allowed, in which case we use `extrapolation_value` to extrapolate the input image values.
box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
Returns A 2-D tensor of shape `[num_boxes, 4]`.
func CropAndResizeGradImage ¶
func CropAndResizeGradImage(scope *Scope, grads tf.Output, boxes tf.Output, box_ind tf.Output, image_size tf.Output, T tf.DataType, optional ...CropAndResizeGradImageAttr) (output tf.Output)
Computes the gradient of the crop_and_resize op wrt the input image tensor.
Arguments:
grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor specifies the coordinates of a box in the `box_ind[i]` image and is specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of `y` is mapped to the image coordinate at `y * (image_height - 1)`, so that the `[0, 1]` interval of normalized image height is mapped to `[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in which case the sampled crop is an up-down flipped version of the original image. The width dimension is treated similarly. Normalized coordinates outside the `[0, 1]` range are allowed, in which case we use `extrapolation_value` to extrapolate the input image values.
box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
image_size: A 1-D tensor with value `[batch, image_height, image_width, depth]`
containing the original image size. Both `image_height` and `image_width` need to be positive.
Returns A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
func Cross ¶
Compute the pairwise cross product.
`a` and `b` must be the same shape; they can either be simple 3-element vectors, or any shape where the innermost dimension is 3. In the latter case, each pair of corresponding 3-element vectors is cross-multiplied independently.
Arguments:
a: A tensor containing 3-element vectors.
b: Another tensor, of same type and shape as `a`.
Returns Pairwise cross product of the vectors in `a` and `b`.
func CrossReplicaSum ¶
An Op to sum inputs across replicated TPU instances.
Each instance supplies its own input.
For example, suppose there are 8 TPU instances: `[A, B, C, D, E, F, G, H]`. Passing group_assignment=`[[0,2,4,6],[1,3,5,7]]` sets `A, C, E, G` as group 0, and `B, D, F, H` as group 1. Thus we get the outputs: `[A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H]`.
Arguments:
input: The local input to the sum.
group_assignment: An int32 tensor with shape [num_groups, num_replicas_per_group]. `group_assignment[i]` represents the replica ids in the ith subgroup.
Returns The sum of all the distributed inputs.
func CudnnRNN ¶
func CudnnRNN(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, optional ...CudnnRNNAttr) (output tf.Output, output_h tf.Output, output_c tf.Output, reserve_space tf.Output)
An RNN backed by cuDNN.
Computes the RNN from the input and initial states, with respect to the params buffer.
rnn_mode: Indicates the type of the RNN model.
input_mode: Indicates whether there is a linear projection between the input and the actual computation before the first layer. 'skip_input' is only allowed when input_size == num_units; 'auto_select' implies 'skip_input' when input_size == num_units; otherwise, it implies 'linear_input'.
direction: Indicates whether a bidirectional model will be used. Should be "unidirectional" or "bidirectional".
dropout: Dropout probability. When set to 0., dropout is disabled.
seed: The 1st part of a seed to initialize dropout.
seed2: The 2nd part of a seed to initialize dropout.
input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size, num_units].
input_c: For LSTM, a 3-D tensor with the shape of [num_layer * dir, batch, num_units]. For other models, it is ignored.
params: A 1-D tensor that contains the weights and biases in an opaque layout. The size must be created through CudnnRNNParamsSize, and initialized separately. Note that they might not be compatible across different generations. So it is a good idea to save and restore this buffer in its canonical (weights and biases) form.
output: A 3-D tensor with the shape of [seq_length, batch_size, dir * num_units].
output_h: The same shape as input_h.
output_c: The same shape as input_c for LSTM. An empty tensor for other models.
is_training: Indicates whether this operation is used for inference or training.
reserve_space: An opaque tensor that can be used in backprop calculation. It is only produced if is_training is true.
func CudnnRNNBackprop ¶
func CudnnRNNBackprop(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, output tf.Output, output_h tf.Output, output_c tf.Output, output_backprop tf.Output, output_h_backprop tf.Output, output_c_backprop tf.Output, reserve_space tf.Output, optional ...CudnnRNNBackpropAttr) (input_backprop tf.Output, input_h_backprop tf.Output, input_c_backprop tf.Output, params_backprop tf.Output)
Backprop step of CudnnRNN.
Compute the backprop of both data and weights in an RNN.
rnn_mode: Indicates the type of the RNN model.
input_mode: Indicates whether there is a linear projection between the input and the actual computation before the first layer. 'skip_input' is only allowed when input_size == num_units; 'auto_select' implies 'skip_input' when input_size == num_units; otherwise, it implies 'linear_input'.
direction: Indicates whether a bidirectional model will be used. Should be "unidirectional" or "bidirectional".
dropout: Dropout probability. When set to 0., dropout is disabled.
seed: The 1st part of a seed to initialize dropout.
seed2: The 2nd part of a seed to initialize dropout.
input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size, num_units].
input_c: For LSTM, a 3-D tensor with the shape of [num_layer * dir, batch, num_units]. For other models, it is ignored.
params: A 1-D tensor that contains the weights and biases in an opaque layout. The size must be created through CudnnRNNParamsSize, and initialized separately. Note that they might not be compatible across different generations. So it is a good idea to save and restore this buffer in its canonical (weights and biases) form.
output: A 3-D tensor with the shape of [seq_length, batch_size, dir * num_units].
output_h: The same shape as input_h.
output_c: The same shape as input_c for LSTM. An empty tensor for other models.
output_backprop: A 3-D tensor with the same shape as output in the forward pass.
output_h_backprop: A 3-D tensor with the same shape as output_h in the forward pass.
output_c_backprop: A 3-D tensor with the same shape as output_c in the forward pass.
reserve_space: The same reserve_space produced in the forward operation.
input_backprop: The backprop to input in the forward pass. Has the same shape as input.
input_h_backprop: The backprop to input_h in the forward pass. Has the same shape as input_h.
input_c_backprop: The backprop to input_c in the forward pass. Has the same shape as input_c.
params_backprop: The backprop to the params buffer in the forward pass. Has the same shape as params.
func CudnnRNNBackpropV2 ¶
func CudnnRNNBackpropV2(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, output tf.Output, output_h tf.Output, output_c tf.Output, output_backprop tf.Output, output_h_backprop tf.Output, output_c_backprop tf.Output, reserve_space tf.Output, host_reserved tf.Output, optional ...CudnnRNNBackpropV2Attr) (input_backprop tf.Output, input_h_backprop tf.Output, input_c_backprop tf.Output, params_backprop tf.Output)
Backprop step of CudnnRNN.
Compute the backprop of both data and weights in an RNN. Takes an extra "host_reserved" input compared to CudnnRNNBackprop, which is used to determine RNN cudnnRNNAlgo_t and cudnnMathType_t.
rnn_mode: Indicates the type of the RNN model.
input_mode: Indicates whether there is a linear projection between the input and the actual computation before the first layer. 'skip_input' is only allowed when input_size == num_units; 'auto_select' implies 'skip_input' when input_size == num_units; otherwise, it implies 'linear_input'.
direction: Indicates whether a bidirectional model will be used. Should be "unidirectional" or "bidirectional".
dropout: Dropout probability. When set to 0., dropout is disabled.
seed: The 1st part of a seed to initialize dropout.
seed2: The 2nd part of a seed to initialize dropout.
input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size, num_units].
input_c: For LSTM, a 3-D tensor with the shape of [num_layer * dir, batch, num_units]. For other models, it is ignored.
params: A 1-D tensor that contains the weights and biases in an opaque layout. The size must be created through CudnnRNNParamsSize, and initialized separately. Note that they might not be compatible across different generations. So it is a good idea to save and restore this buffer in its canonical (weights and biases) form.
output: A 3-D tensor with the shape of [seq_length, batch_size, dir * num_units].
output_h: The same shape as input_h.
output_c: The same shape as input_c for LSTM. An empty tensor for other models.
output_backprop: A 3-D tensor with the same shape as output in the forward pass.
output_h_backprop: A 3-D tensor with the same shape as output_h in the forward pass.
output_c_backprop: A 3-D tensor with the same shape as output_c in the forward pass.
reserve_space: The same reserve_space produced in the forward operation.
host_reserved: The same host_reserved produced in the forward operation.
input_backprop: The backprop to input in the forward pass. Has the same shape as input.
input_h_backprop: The backprop to input_h in the forward pass. Has the same shape as input_h.
input_c_backprop: The backprop to input_c in the forward pass. Has the same shape as input_c.
params_backprop: The backprop to the params buffer in the forward pass. Has the same shape as params.
func CudnnRNNBackpropV3 ¶
func CudnnRNNBackpropV3(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, sequence_lengths tf.Output, output tf.Output, output_h tf.Output, output_c tf.Output, output_backprop tf.Output, output_h_backprop tf.Output, output_c_backprop tf.Output, reserve_space tf.Output, host_reserved tf.Output, optional ...CudnnRNNBackpropV3Attr) (input_backprop tf.Output, input_h_backprop tf.Output, input_c_backprop tf.Output, params_backprop tf.Output)
Backprop step of CudnnRNNV3.
Compute the backprop of both data and weights in an RNN. Takes an extra "sequence_lengths" input compared to CudnnRNNBackprop.
rnn_mode: Indicates the type of the RNN model.
input_mode: Indicates whether there is a linear projection between the input and the actual computation before the first layer. 'skip_input' is only allowed when input_size == num_units; 'auto_select' implies 'skip_input' when input_size == num_units; otherwise, it implies 'linear_input'.
direction: Indicates whether a bidirectional model will be used. Should be "unidirectional" or "bidirectional".
dropout: Dropout probability. When set to 0., dropout is disabled.
seed: The 1st part of a seed to initialize dropout.
seed2: The 2nd part of a seed to initialize dropout.
input: If time_major is true, this is a 3-D tensor with the shape of [seq_length, batch_size, input_size]. If time_major is false, the shape is [batch_size, seq_length, input_size].
input_h: If time_major is true, this is a 3-D tensor with the shape of [num_layer * dir, batch_size, num_units]. If time_major is false, the shape is [batch_size, num_layer * dir, num_units].
input_c: For LSTM, a 3-D tensor with the shape of [num_layer * dir, batch, num_units]. For other models, it is ignored.
params: A 1-D tensor that contains the weights and biases in an opaque layout. The size must be created through CudnnRNNParamsSize, and initialized separately. Note that they might not be compatible across different generations. So it is a good idea to save and restore this buffer in its canonical (weights and biases) form.
sequence_lengths: A vector of lengths of each input sequence.
output: If time_major is true, this is a 3-D tensor with the shape of [seq_length, batch_size, dir * num_units]. If time_major is false, the shape is [batch_size, seq_length, dir * num_units].
output_h: The same shape as input_h.
output_c: The same shape as input_c for LSTM. An empty tensor for other models.
output_backprop: A 3-D tensor with the same shape as output in the forward pass.
output_h_backprop: A 3-D tensor with the same shape as output_h in the forward pass.
output_c_backprop: A 3-D tensor with the same shape as output_c in the forward pass.
time_major: Indicates whether the input/output format is time major or batch major.
reserve_space: The same reserve_space produced in the forward operation.
input_backprop: The backprop to input in the forward pass. Has the same shape as input.
input_h_backprop: The backprop to input_h in the forward pass. Has the same shape as input_h.
input_c_backprop: The backprop to input_c in the forward pass. Has the same shape as input_c.
params_backprop: The backprop to the params buffer in the forward pass. Has the same shape as params.
func CudnnRNNCanonicalToParams ¶
func CudnnRNNCanonicalToParams(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, weights []tf.Output, biases []tf.Output, optional ...CudnnRNNCanonicalToParamsAttr) (params tf.Output)
Converts CudnnRNN params from canonical form to usable form.
Writes a set of weights into the opaque params buffer so they can be used in upcoming training or inference.
Note that the params buffer may not be compatible across different GPUs. So any save and restoration should be converted to and from the canonical weights and biases.
num_layers: Specifies the number of layers in the RNN model.
num_units: Specifies the size of the hidden state.
input_size: Specifies the size of the input state.
weights: The canonical form of weights that can be used for saving and restoration. They are more likely to be compatible across different generations.
biases: The canonical form of biases that can be used for saving and restoration. They are more likely to be compatible across different generations.
num_params: Number of parameter sets for all layers. Each layer may contain multiple parameter sets, with each set consisting of a weight matrix and a bias vector.
rnn_mode: Indicates the type of the RNN model.
input_mode: Indicates whether there is a linear projection between the input and the actual computation before the first layer. 'skip_input' is only allowed when input_size == num_units; 'auto_select' implies 'skip_input' when input_size == num_units; otherwise, it implies 'linear_input'.
direction: Indicates whether a bidirectional model will be used. dir = (direction == bidirectional) ? 2 : 1
dropout: Dropout probability. When set to 0., dropout is disabled.
seed: The 1st part of a seed to initialize dropout.
seed2: The 2nd part of a seed to initialize dropout.
func CudnnRNNCanonicalToParamsV2 ¶
func CudnnRNNCanonicalToParamsV2(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, weights []tf.Output, biases []tf.Output, optional ...CudnnRNNCanonicalToParamsV2Attr) (params tf.Output)
Converts CudnnRNN params from canonical form to usable form. It supports the projection in LSTM.
Writes a set of weights into the opaque params buffer so they can be used in upcoming training or inference.
Note that the params buffer may not be compatible across different GPUs. So any save and restoration should be converted to and from the canonical weights and biases.
num_layers: Specifies the number of layers in the RNN model.
num_units: Specifies the size of the hidden state.
input_size: Specifies the size of the input state.
weights: The canonical form of weights that can be used for saving and restoration. They are more likely to be compatible across different generations.
biases: The canonical form of biases that can be used for saving and restoration. They are more likely to be compatible across different generations.
num_params_weights: Number of weight parameter matrices for all layers.
num_params_biases: Number of bias parameter vectors for all layers.
rnn_mode: Indicates the type of the RNN model.
input_mode: Indicates whether there is a linear projection between the input and the actual computation before the first layer. 'skip_input' is only allowed when input_size == num_units; 'auto_select' implies 'skip_input' when input_size == num_units; otherwise, it implies 'linear_input'.
direction: Indicates whether a bidirectional model will be used. dir = (direction == bidirectional) ? 2 : 1
dropout: Dropout probability. When set to 0., dropout is disabled.
seed: The 1st part of a seed to initialize dropout.
seed2: The 2nd part of a seed to initialize dropout.
num_proj: The output dimensionality for the projection matrices. If None or 0, no projection is performed.
func CudnnRNNParamsSize ¶
func CudnnRNNParamsSize(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, T tf.DataType, S tf.DataType, optional ...CudnnRNNParamsSizeAttr) (params_size tf.Output)
Computes size of weights that can be used by a Cudnn RNN model.
Return the params size that can be used by the Cudnn RNN model. Subsequent weight allocation and initialization should use this size.
num_layers: Specifies the number of layers in the RNN model.
num_units: Specifies the size of the hidden state.
input_size: Specifies the size of the input state.
rnn_mode: Indicates the type of the RNN model.
input_mode: Indicates whether there is a linear projection between the input and the actual computation before the first layer. 'skip_input' is only allowed when input_size == num_units; 'auto_select' implies 'skip_input' when input_size == num_units; otherwise, it implies 'linear_input'.
direction: Indicates whether a bidirectional model will be used. dir = (direction == bidirectional) ? 2 : 1
dropout: Dropout probability. When set to 0., dropout is disabled.
seed: The 1st part of a seed to initialize dropout.
seed2: The 2nd part of a seed to initialize dropout.
params_size: The size of the params buffer that should be allocated and initialized for this RNN model. Note that this params buffer may not be compatible across GPUs. Please use CudnnRNNParamsWeights and CudnnRNNParamsBiases to save and restore them in a way that is compatible across different runs.
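A hedged Go sketch of querying the params size for one configuration (the layer/unit/input sizes are arbitrary; T and S are the dtypes of the params buffer and of the returned size, as in the signature above):
```go
// Ask cuDNN how large the opaque params buffer must be for this RNN shape.
s := NewScope()
numLayers := Const(s, int32(2))
numUnits := Const(s, int32(128))
inputSize := Const(s, int32(64))
paramsSize := CudnnRNNParamsSize(s, numLayers, numUnits, inputSize, tf.Float, tf.Int32)
if s.Err() != nil {
	panic(s.Err())
}
_ = paramsSize // scalar int32 used to allocate and initialize the 1-D params tensor
```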
func CudnnRNNParamsToCanonical ¶
func CudnnRNNParamsToCanonical(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, params tf.Output, num_params int64, optional ...CudnnRNNParamsToCanonicalAttr) (weights []tf.Output, biases []tf.Output)
Retrieves CudnnRNN params in canonical form.
Retrieves a set of weights from the opaque params buffer that can be saved and restored in a way compatible with future runs.
Note that the params buffer may not be compatible across different GPUs. So any save and restoration should be converted to and from the canonical weights and biases.
num_layers: Specifies the number of layers in the RNN model.
num_units: Specifies the size of the hidden state.
input_size: Specifies the size of the input state.
num_params: Number of parameter sets for all layers. Each layer may contain multiple parameter sets, with each set consisting of a weight matrix and a bias vector.
weights: The canonical form of weights that can be used for saving and restoration. They are more likely to be compatible across different generations.
biases: The canonical form of biases that can be used for saving and restoration. They are more likely to be compatible across different generations.
rnn_mode: Indicates the type of the RNN model.
input_mode: Indicates whether there is a linear projection between the input and the actual computation before the first layer. 'skip_input' is only allowed when input_size == num_units; 'auto_select' implies 'skip_input' when input_size == num_units; otherwise, it implies 'linear_input'.
direction: Indicates whether a bidirectional model will be used. dir = (direction == bidirectional) ? 2 : 1
dropout: Dropout probability. When set to 0., dropout is disabled.
seed: The 1st part of a seed to initialize dropout.
seed2: The 2nd part of a seed to initialize dropout.
func CudnnRNNParamsToCanonicalV2 ¶
func CudnnRNNParamsToCanonicalV2(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, params tf.Output, num_params_weights int64, num_params_biases int64, optional ...CudnnRNNParamsToCanonicalV2Attr) (weights []tf.Output, biases []tf.Output)
Retrieves CudnnRNN params in canonical form. It supports the projection in LSTM.
Retrieves a set of weights from the opaque params buffer that can be saved and restored in a way compatible with future runs.
Note that the params buffer may not be compatible across different GPUs. So any save and restoration should be converted to and from the canonical weights and biases.
num_layers: Specifies the number of layers in the RNN model.
num_units: Specifies the size of the hidden state.
input_size: Specifies the size of the input state.
num_params_weights: Number of weight parameter matrices for all layers.
num_params_biases: Number of bias parameter vectors for all layers.
weights: The canonical form of weights that can be used for saving and restoration. They are more likely to be compatible across different generations.
biases: The canonical form of biases that can be used for saving and restoration. They are more likely to be compatible across different generations.
rnn_mode: Indicates the type of the RNN model.
input_mode: Indicates whether there is a linear projection between the input and the actual computation before the first layer. 'skip_input' is only allowed when input_size == num_units; 'auto_select' implies 'skip_input' when input_size == num_units; otherwise, it implies 'linear_input'.
direction: Indicates whether a bidirectional model will be used. dir = (direction == bidirectional) ? 2 : 1
dropout: Dropout probability. When set to 0., dropout is disabled.
seed: The 1st part of a seed to initialize dropout.
seed2: The 2nd part of a seed to initialize dropout.
num_proj: The output dimensionality for the projection matrices. If None or 0, no projection is performed.
func CudnnRNNV2 ¶
func CudnnRNNV2(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, optional ...CudnnRNNV2Attr) (output tf.Output, output_h tf.Output, output_c tf.Output, reserve_space tf.Output, host_reserved tf.Output)
An RNN backed by cuDNN.
Computes the RNN from the input and initial states, with respect to the params buffer. Produces one extra output "host_reserved" than CudnnRNN.
rnn_mode: Indicates the type of the RNN model.
input_mode: Indicates whether there is a linear projection between the input and the actual computation before the first layer. 'skip_input' is only allowed when input_size == num_units; 'auto_select' implies 'skip_input' when input_size == num_units; otherwise, it implies 'linear_input'.
direction: Indicates whether a bidirectional model will be used. Should be "unidirectional" or "bidirectional".
dropout: Dropout probability. When set to 0., dropout is disabled.
seed: The 1st part of a seed to initialize dropout.
seed2: The 2nd part of a seed to initialize dropout.
input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size, num_units].
input_c: For LSTM, a 3-D tensor with the shape of [num_layer * dir, batch, num_units]. For other models, it is ignored.
params: A 1-D tensor that contains the weights and biases in an opaque layout. The size must be created through CudnnRNNParamsSize, and initialized separately. Note that they might not be compatible across different generations. So it is a good idea to save and restore this buffer in its canonical (weights and biases) form.
output: A 3-D tensor with the shape of [seq_length, batch_size, dir * num_units].
output_h: The same shape as input_h.
output_c: The same shape as input_c for LSTM. An empty tensor for other models.
is_training: Indicates whether this operation is used for inference or training.
reserve_space: An opaque tensor that can be used in backprop calculation. It is only produced if is_training is true.
host_reserved: An opaque tensor that can be used in backprop calculation. It is only produced if is_training is true. It is output on host memory rather than device memory.
func CudnnRNNV3 ¶
func CudnnRNNV3(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, sequence_lengths tf.Output, optional ...CudnnRNNV3Attr) (output tf.Output, output_h tf.Output, output_c tf.Output, reserve_space tf.Output, host_reserved tf.Output)
An RNN backed by cuDNN.
Computes the RNN from the input and initial states, with respect to the params buffer. Accepts one extra input "sequence_lengths" than CudnnRNN.
rnn_mode: Indicates the type of the RNN model.
input_mode: Indicates whether there is a linear projection between the input and the actual computation before the first layer. 'skip_input' is only allowed when input_size == num_units; 'auto_select' implies 'skip_input' when input_size == num_units; otherwise, it implies 'linear_input'.
direction: Indicates whether a bidirectional model will be used. Should be "unidirectional" or "bidirectional".
dropout: Dropout probability. When set to 0., dropout is disabled.
seed: The 1st part of a seed to initialize dropout.
seed2: The 2nd part of a seed to initialize dropout.
input: If time_major is true, this is a 3-D tensor with the shape of [seq_length, batch_size, input_size]. If time_major is false, the shape is [batch_size, seq_length, input_size].
input_h: If time_major is true, this is a 3-D tensor with the shape of [num_layer * dir, batch_size, num_units]. If time_major is false, the shape is [batch_size, num_layer * dir, num_units].
input_c: For LSTM, a 3-D tensor with the shape of [num_layer * dir, batch, num_units]. For other models, it is ignored.
params: A 1-D tensor that contains the weights and biases in an opaque layout. The size must be created through CudnnRNNParamsSize, and initialized separately. Note that they might not be compatible across different generations. So it is a good idea to save and restore this buffer in its canonical (weights and biases) form.
sequence_lengths: A vector of lengths of each input sequence.
output: If time_major is true, this is a 3-D tensor with the shape of [seq_length, batch_size, dir * num_units]. If time_major is false, the shape is [batch_size, seq_length, dir * num_units].
output_h: The same shape as input_h.
output_c: The same shape as input_c for LSTM. An empty tensor for other models.
is_training: Indicates whether this operation is used for inference or training.
time_major: Indicates whether the input/output format is time major or batch major.
reserve_space: An opaque tensor that can be used in backprop calculation. It is only produced if is_training is true.
func Cumprod ¶
Compute the cumulative product of the tensor `x` along `axis`.
By default, this op performs an inclusive cumprod, which means that the first element of the input is identical to the first element of the output:
```python tf.cumprod([a, b, c]) # => [a, a * b, a * b * c] ```
By setting the `exclusive` kwarg to `True`, an exclusive cumprod is performed instead:
```python tf.cumprod([a, b, c], exclusive=True) # => [1, a, a * b] ```
By setting the `reverse` kwarg to `True`, the cumprod is performed in the opposite direction:
```python tf.cumprod([a, b, c], reverse=True) # => [a * b * c, b * c, c] ```
This is more efficient than using separate `tf.reverse` ops.
The `reverse` and `exclusive` kwargs can also be combined:
```python tf.cumprod([a, b, c], exclusive=True, reverse=True) # => [b * c, c, 1] ```
Arguments:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
axis: A `Tensor` of type `int32` (default: 0). Must be in the range
`[-rank(x), rank(x))`.
func Cumsum ¶
Compute the cumulative sum of the tensor `x` along `axis`.
By default, this op performs an inclusive cumsum, which means that the first element of the input is identical to the first element of the output:
```python tf.cumsum([a, b, c]) # => [a, a + b, a + b + c] ```
By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed instead:
```python tf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b] ```
By setting the `reverse` kwarg to `True`, the cumsum is performed in the opposite direction:
```python tf.cumsum([a, b, c], reverse=True) # => [a + b + c, b + c, c] ```
This is more efficient than using separate `tf.reverse` ops.
The `reverse` and `exclusive` kwargs can also be combined:
```python tf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0] ```
Arguments:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
axis: A `Tensor` of type `int32` (default: 0). Must be in the range
`[-rank(x), rank(x))`.
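The same flags map onto optional attrs in the Go bindings; a sketch, assuming the generated `Cumsum(scope, x, axis, ...)` signature and the `CumsumExclusive`/`CumsumReverse` attribute setters:
```go
// Exclusive, reversed cumulative sum along axis 0.
s := NewScope()
x := Const(s, []float32{1, 2, 3})
axis := Const(s, int32(0))
out := Cumsum(s, x, axis,
	CumsumExclusive(true), // assumed setter for the `exclusive` kwarg
	CumsumReverse(true))   // assumed setter for the `reverse` kwarg
if s.Err() != nil {
	panic(s.Err())
}
_ = out // for the input above this yields [5, 3, 0]
```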
func CumulativeLogsumexp ¶
func CumulativeLogsumexp(scope *Scope, x tf.Output, axis tf.Output, optional ...CumulativeLogsumexpAttr) (out tf.Output)
Compute the cumulative log-sum-exp of the tensor `x` along `axis`.
By default, this op performs an inclusive cumulative log-sum-exp, which means that the first element of the input is identical to the first element of the output: ```python tf.math.cumulative_logsumexp([a, b, c]) # => [a, log(exp(a) + exp(b)), log(exp(a) + exp(b) + exp(c))] ```
By setting the `exclusive` kwarg to `True`, an exclusive cumulative log-sum-exp is performed instead: ```python tf.cumulative_logsumexp([a, b, c], exclusive=True) # => [-inf, a, log(exp(a) + exp(b))] ``` Note that the neutral element of the log-sum-exp operation is `-inf`, however, for performance reasons, the minimal value representable by the floating point type is used instead.
By setting the `reverse` kwarg to `True`, the cumulative log-sum-exp is performed in the opposite direction.
Arguments:
x: A `Tensor`. Must be one of the following types: `float16`, `float32`, `float64`.
axis: A `Tensor` of type `int32` (default: 0). Must be in the range `[-rank(x), rank(x))`.
func DTensorSetGlobalTPUArray ¶ added in v0.2.0
An op that informs a host of the global ids of all the TPUs in the system.
Arguments:
topology: A serialized tensorflow.tpu.TopologyProto that describes the TPU topology.
Returns the created operation.
func DataFormatDimMap ¶
Returns the dimension index in the destination data format given the one in
the source data format.
Arguments:
x: A Tensor with each element as a dimension index in source data format.
Must be in the range [-4, 4).
Returns A Tensor with each element as a dimension index in destination data format.
func DataFormatVecPermute ¶
func DataFormatVecPermute(scope *Scope, x tf.Output, optional ...DataFormatVecPermuteAttr) (y tf.Output)
Permute input tensor from `src_format` to `dst_format`.
Given source and destination format strings of length n=4 or 5, the input tensor must be a vector of size n or n-2, or a 2D tensor of shape (n, 2) or (n-2, 2).
If the first dimension of the input tensor is n-2, it is assumed that non-spatial dimensions are omitted (i.e. `N`, `C`).
For example, with `src_format` of `NHWC`, `dst_format` of `NCHW`, and input: ``` [1, 2, 3, 4] ``` , the output will be: ``` [1, 4, 2, 3] ``` With `src_format` of `NDHWC`, `dst_format` of `NCDHW`, and input: ``` [[1, 6], [2, 7], [3, 8], [4, 9], [5, 10]] ``` , the output will be: ``` [[1, 6], [5, 10], [2, 7], [3, 8], [4, 9]] ``` With `src_format` of `NHWC`, `dst_format` of `NCHW`, and input: ``` [1, 2] ``` , the output will be: ``` [1, 2] ```
Arguments:
x: Tensor of rank 1 or 2 in source data format.
Returns Tensor of rank 1 or 2 in destination data format.
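A small Go sketch of the first example above, relying only on the default `src_format`/`dst_format` of NHWC/NCHW:
```go
// Permute a length-4 layout vector from NHWC order to NCHW order.
s := NewScope()
x := Const(s, []int32{1, 2, 3, 4})
y := DataFormatVecPermute(s, x) // with the default formats this yields [1, 4, 2, 3]
if s.Err() != nil {
	panic(s.Err())
}
_ = y
```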
func DataServiceDataset ¶
func DataServiceDataset(scope *Scope, dataset_id tf.Output, processing_mode tf.Output, address tf.Output, protocol tf.Output, job_name tf.Output, max_outstanding_requests tf.Output, iteration_counter tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...DataServiceDatasetAttr) (handle tf.Output)
Creates a dataset that reads data from the tf.data service.
func DataServiceDatasetV2 ¶
func DataServiceDatasetV2(scope *Scope, dataset_id tf.Output, processing_mode tf.Output, address tf.Output, protocol tf.Output, job_name tf.Output, consumer_index tf.Output, num_consumers tf.Output, max_outstanding_requests tf.Output, iteration_counter tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...DataServiceDatasetV2Attr) (handle tf.Output)
Creates a dataset that reads data from the tf.data service.
func DatasetCardinality ¶
func DatasetCardinality(scope *Scope, input_dataset tf.Output, optional ...DatasetCardinalityAttr) (cardinality tf.Output)
Returns the cardinality of `input_dataset`.
Returns the cardinality of `input_dataset`.
Arguments:
input_dataset: A variant tensor representing the dataset to return cardinality for.
Returns The cardinality of `input_dataset`. Named constants are used to represent infinite and unknown cardinality.
func DatasetFingerprint ¶ added in v0.8.0
Returns the fingerprint of `input_dataset`.
Returns the fingerprint of `input_dataset`.
Arguments:
input_dataset: A variant tensor representing the dataset to return fingerprint for.
Returns The fingerprint of `input_dataset` in `uint64`
func DatasetFromGraph ¶
Creates a dataset from the given `graph_def`.
Creates a dataset from the provided `graph_def`.
Arguments:
graph_def: The graph representation of the dataset (as serialized GraphDef).
Returns A variant tensor representing the dataset.
func DatasetToGraph ¶
func DatasetToGraph(scope *Scope, input_dataset tf.Output, optional ...DatasetToGraphAttr) (graph tf.Output)
Returns a serialized GraphDef representing `input_dataset`.
Returns a graph representation for `input_dataset`.
Arguments:
input_dataset: A variant tensor representing the dataset to return the graph representation for.
Returns The graph representation of the dataset (as serialized GraphDef).
func DatasetToGraphV2 ¶
func DatasetToGraphV2(scope *Scope, input_dataset tf.Output, optional ...DatasetToGraphV2Attr) (graph tf.Output)
Returns a serialized GraphDef representing `input_dataset`.
Returns a graph representation for `input_dataset`.
Arguments:
input_dataset: A variant tensor representing the dataset to return the graph representation for.
Returns The graph representation of the dataset (as serialized GraphDef).
func DatasetToSingleElement ¶
func DatasetToSingleElement(scope *Scope, dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...DatasetToSingleElementAttr) (components []tf.Output)
Outputs the single element from the given dataset.
Arguments:
dataset: A handle to a dataset that contains a single element.
Returns The components of the single element of `input`.
func DatasetToTFRecord ¶
func DatasetToTFRecord(scope *Scope, input_dataset tf.Output, filename tf.Output, compression_type tf.Output) (o *tf.Operation)
Writes the given dataset to the given file using the TFRecord format.
Arguments:
input_dataset: A variant tensor representing the dataset to write.
filename: A scalar string tensor representing the filename to use.
compression_type: A scalar string tensor containing either (i) the empty string (no compression), (ii) "ZLIB", or (iii) "GZIP".
Returns the created operation.
func DebugGradientIdentity ¶
Identity op for gradient debugging.
This op is hidden from public in Python. It is used by TensorFlow Debugger to register gradient tensors for gradient debugging. This op operates on non-reference-type tensors.
func DebugIdentity ¶
Provides an identity mapping of the non-Ref type input tensor for debugging.
Provides an identity mapping of the non-Ref type input tensor for debugging.
Arguments:
input: Input tensor, non-Reference type
func DebugIdentityV2 ¶
func DebugIdentityV2(scope *Scope, input tf.Output, optional ...DebugIdentityV2Attr) (output tf.Output)
Debug Identity V2 Op.
Provides an identity mapping from input to output, while writing the content of the input tensor by calling DebugEventsWriter.
The semantics of the input tensor depends on tensor_debug_mode. In typical usage, the input tensor comes directly from the user computation only when graph_debug_mode is FULL_TENSOR (see protobuf/debug_event.proto for a list of all the possible values of graph_debug_mode). For the other debug modes, the input tensor should be produced by an additional op or subgraph that computes summary information about one or more tensors.
Arguments:
input: Input tensor, non-Reference type
func DebugIdentityV3 ¶ added in v0.5.0
func DebugIdentityV3(scope *Scope, input tf.Output, optional ...DebugIdentityV3Attr) (output tf.Output)
Provides an identity mapping of the non-Ref type input tensor for debugging.
Provides an identity mapping of the non-Ref type input tensor for debugging.
Arguments:
input: Input tensor, non-Reference type
func DebugNanCount ¶
Debug NaN Value Counter Op.
Counts number of NaNs in the input tensor, for debugging.
Arguments:
input: Input tensor, non-Reference type.
func DebugNumericSummary ¶
func DebugNumericSummary(scope *Scope, input tf.Output, optional ...DebugNumericSummaryAttr) (output tf.Output)
Debug Numeric Summary Op.
Provide a basic summary of numeric value types, range and distribution.
output: A double tensor of shape [14 + nDimensions], where nDimensions is the number of dimensions of the tensor's shape. The elements of output are:
[0]: is initialized (1.0) or not (0.0).
[1]: total number of elements
[2]: NaN element count
[3]: generalized -inf count: elements <= lower_bound. lower_bound is -inf by default.
[4]: negative element count (excluding -inf), if lower_bound is the default -inf. Otherwise, this is the count of elements > lower_bound and < 0.
[5]: zero element count
[6]: positive element count (excluding +inf), if upper_bound is the default +inf. Otherwise, this is the count of elements < upper_bound and > 0.
[7]: generalized +inf count, elements >= upper_bound. upper_bound is +inf by default.
Output elements [1:8] are all zero, if the tensor is uninitialized.
[8]: minimum of all non-inf and non-NaN elements. If uninitialized or no such element exists: +inf.
[9]: maximum of all non-inf and non-NaN elements. If uninitialized or no such element exists: -inf.
[10]: mean of all non-inf and non-NaN elements. If uninitialized or no such element exists: NaN.
[11]: variance of all non-inf and non-NaN elements. If uninitialized or no such element exists: NaN.
[12]: Data type of the tensor encoded as an enum integer. See the DataType proto for more details.
[13]: Number of dimensions of the tensor (ndims).
[14+]: Sizes of the dimensions.
Arguments:
input: Input tensor, non-Reference type.
func DebugNumericSummaryV2 ¶
func DebugNumericSummaryV2(scope *Scope, input tf.Output, optional ...DebugNumericSummaryV2Attr) (output tf.Output)
Debug Numeric Summary V2 Op.
Computes a numeric summary of the input tensor. The shape of the output depends on the tensor_debug_mode attribute. This op is used internally by TensorFlow Debugger (tfdbg) v2.
Arguments:
input: Input tensor, to be summarized by the op.
func DecodeAndCropJpeg ¶
func DecodeAndCropJpeg(scope *Scope, contents tf.Output, crop_window tf.Output, optional ...DecodeAndCropJpegAttr) (image tf.Output)
Decode and Crop a JPEG-encoded image to a uint8 tensor.
The attr `channels` indicates the desired number of color channels for the decoded image.
Accepted values are:
* 0: Use the number of channels in the JPEG-encoded image.
* 1: output a grayscale image.
* 3: output an RGB image.
If needed, the JPEG-encoded image is transformed to match the requested number of color channels.
The attr `ratio` allows downscaling the image by an integer factor during decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than downscaling the image later.
It is equivalent to a combination of decode and crop, but much faster by only decoding partial jpeg image.
Arguments:
contents: 0-D. The JPEG-encoded image.
crop_window: 1-D. The crop window: [crop_y, crop_x, crop_height, crop_width].
Returns 3-D with shape `[height, width, channels]`.
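A Go sketch of decoding only a window of a JPEG (the crop offsets and sizes here are illustrative):
```go
// Decode a 100x100 window starting at (y=10, x=20) of a JPEG fed at run time.
s := NewScope()
contents := Placeholder(s, tf.String)             // 0-D string: the raw JPEG bytes
cropWindow := Const(s, []int32{10, 20, 100, 100}) // [crop_y, crop_x, crop_height, crop_width]
image := DecodeAndCropJpeg(s, contents, cropWindow)
if s.Err() != nil {
	panic(s.Err())
}
_ = image // uint8 tensor of shape [100, 100, channels]
```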
func DecodeBase64 ¶
Decode web-safe base64-encoded strings.
Input may or may not have padding at the end. See EncodeBase64(https://www.tensorflow.org/api_docs/python/tf/io/encode_base64) for padding. Web-safe means that input must use - and _ instead of + and /.
Arguments:
input: Base64 strings to decode.
Returns Decoded strings.
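For example, assuming the usual generated `DecodeBase64(scope, input)` signature:
```go
// Decode a web-safe base64 constant ("aGVsbG8" decodes to "hello"; padding is optional).
s := NewScope()
encoded := Const(s, "aGVsbG8")
decoded := DecodeBase64(s, encoded)
if s.Err() != nil {
	panic(s.Err())
}
_ = decoded
```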
func DecodeBmp ¶
Decode the first frame of a BMP-encoded image to a uint8 tensor.
The attr `channels` indicates the desired number of color channels for the decoded image.
Accepted values are:
* 0: Use the number of channels in the BMP-encoded image.
* 3: output an RGB image.
* 4: output an RGBA image.
Arguments:
contents: 0-D. The BMP-encoded image.
Returns 3-D with shape `[height, width, channels]`. RGB order
func DecodeCSV ¶
func DecodeCSV(scope *Scope, records tf.Output, record_defaults []tf.Output, optional ...DecodeCSVAttr) (output []tf.Output)
Convert CSV records to tensors. Each column maps to one tensor.
RFC 4180 format is expected for the CSV records. (https://tools.ietf.org/html/rfc4180) Note that we allow leading and trailing spaces with int or float fields.
Arguments:
records: Each string is a record/row in the csv and all records should have
the same format.
record_defaults: One tensor per column of the input record, with either a
scalar default value for that column or an empty vector if the column is required.
Returns Each tensor will have the same shape as records.
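A Go sketch of parsing two-column records; the defaults both supply fallback values and fix each column's dtype (the sample records are made up):
```go
// Parse "int,float" CSV rows into one int32 column tensor and one float32 column tensor.
s := NewScope()
records := Const(s, []string{"1,2.5", "3,4.0"})
recordDefaults := []tf.Output{
	Const(s, []int32{0}),   // default (and dtype) for column 0
	Const(s, []float32{0}), // default (and dtype) for column 1
}
cols := DecodeCSV(s, records, recordDefaults)
if s.Err() != nil {
	panic(s.Err())
}
_ = cols // cols[0] holds [1, 3]; cols[1] holds [2.5, 4.0]
```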
func DecodeCompressed ¶
func DecodeCompressed(scope *Scope, bytes tf.Output, optional ...DecodeCompressedAttr) (output tf.Output)
Decompress strings.
This op decompresses each element of the `bytes` input `Tensor`, which is assumed to be compressed using the given `compression_type`.
The `output` is a string `Tensor` of the same shape as `bytes`, each element containing the decompressed data from the corresponding element in `bytes`.
Arguments:
bytes: A Tensor of string which is compressed.
Returns A Tensor with the same shape as input `bytes`, uncompressed from bytes.
func DecodeGif ¶
Decode the frame(s) of a GIF-encoded image to a uint8 tensor.
GIF images with frame or transparency compression are not supported. On Linux and MacOS systems, convert animated GIFs from compressed to uncompressed by running:
convert $src.gif -coalesce $dst.gif
This op also supports decoding JPEGs and PNGs, though it is cleaner to use `tf.io.decode_image`.
Arguments:
contents: 0-D. The GIF-encoded image.
Returns 4-D with shape `[num_frames, height, width, 3]`. RGB channel order.
func DecodeImage ¶
Function for decode_bmp, decode_gif, decode_jpeg, and decode_png.
Detects whether an image is a BMP, GIF, JPEG, or PNG, and performs the appropriate operation to convert the input bytes string into a Tensor of type dtype.
*NOTE*: decode_gif returns a 4-D array [num_frames, height, width, 3], as opposed to decode_bmp, decode_jpeg and decode_png, which return 3-D arrays [height, width, num_channels]. Make sure to take this into account when constructing your graph if you are intermixing GIF files with BMP, JPEG, and/or PNG files. Alternately, set the expand_animations argument of this function to False, in which case the op will return 3-dimensional tensors and will truncate animated GIF files to the first frame.
*NOTE*: If the first frame of an animated GIF does not occupy the entire canvas (maximum frame width x maximum frame height), then it fills the unoccupied areas (in the first frame) with zeros (black). For frames after the first that do not occupy the entire canvas, it uses the previous frame to fill the unoccupied areas.
Arguments:
contents: 0-D. The encoded image bytes.
Returns 3-D with shape `[height, width, channels]` or 4-D with shape `[frame, height, width, channels]`.
func DecodeJSONExample ¶
Convert JSON-encoded Example records to binary protocol buffer strings.
Note: This is **not** a general purpose JSON parsing op.
This op converts JSON-serialized `tf.train.Example` (created with `json_format.MessageToJson`, following the [standard JSON mapping](https://developers.google.com/protocol-buffers/docs/proto3#json)) to a binary-serialized `tf.train.Example` (equivalent to `Example.SerializeToString()`) suitable for conversion to tensors with `tf.io.parse_example`.
Arguments:
json_examples: Each string is a JSON object serialized according to the JSON
mapping of the Example proto.
Returns Each string is a binary Example protocol buffer corresponding to the respective element of `json_examples`.
func DecodeJpeg ¶
Decode a JPEG-encoded image to a uint8 tensor.
The attr `channels` indicates the desired number of color channels for the decoded image.
Accepted values are:
* 0: Use the number of channels in the JPEG-encoded image.
* 1: output a grayscale image.
* 3: output an RGB image.
If needed, the JPEG-encoded image is transformed to match the requested number of color channels.
The attr `ratio` allows downscaling the image by an integer factor during decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than downscaling the image later.
This op also supports decoding PNGs and non-animated GIFs since the interface is the same, though it is cleaner to use `tf.io.decode_image`.
Arguments:
contents: 0-D. The JPEG-encoded image.
Returns 3-D with shape `[height, width, channels]`.
func DecodePaddedRaw ¶
func DecodePaddedRaw(scope *Scope, input_bytes tf.Output, fixed_length tf.Output, out_type tf.DataType, optional ...DecodePaddedRawAttr) (output tf.Output)
Reinterpret the bytes of a string as a vector of numbers.
Arguments:
input_bytes: Tensor of string to be decoded.
fixed_length: Length in bytes for each element of the decoded output. Must be a multiple of the size of the output type.
Returns A Tensor with one more dimension than the input `bytes`. The added dimension will have size equal to the length of the elements of `bytes` divided by the number of bytes to represent `out_type`.
func DecodePng ¶
Decode a PNG-encoded image to a uint8 or uint16 tensor.
The attr `channels` indicates the desired number of color channels for the decoded image.
Accepted values are:
* 0: Use the number of channels in the PNG-encoded image.
* 1: output a grayscale image.
* 3: output an RGB image.
* 4: output an RGBA image.
If needed, the PNG-encoded image is transformed to match the requested number of color channels.
This op also supports decoding JPEGs and non-animated GIFs since the interface is the same, though it is cleaner to use `tf.io.decode_image`.
Arguments:
contents: 0-D. The PNG-encoded image.
Returns 3-D with shape `[height, width, channels]`.
func DecodeProtoV2 ¶
func DecodeProtoV2(scope *Scope, bytes tf.Output, message_type string, field_names []string, output_types []tf.DataType, optional ...DecodeProtoV2Attr) (sizes tf.Output, values []tf.Output)
The op extracts fields from a serialized protocol buffers message into tensors.
Note: This API is designed for orthogonality rather than human-friendliness. It can be used to parse input protos by hand, but it is intended for use in generated code.
The `decode_proto` op extracts fields from a serialized protocol buffers message into tensors. The fields in `field_names` are decoded and converted to the corresponding `output_types` if possible.
A `message_type` name must be provided to give context for the field names. The actual message descriptor can be looked up either in the linked-in descriptor pool or a filename provided by the caller using the `descriptor_source` attribute.
Each output tensor is a dense tensor. This means that it is padded to hold the largest number of repeated elements seen in the input minibatch. (The shape is also padded by one to prevent zero-sized dimensions). The actual repeat counts for each example in the minibatch can be found in the `sizes` output. In many cases the output of `decode_proto` is fed immediately into tf.squeeze if missing values are not a concern. When using tf.squeeze, always pass the squeeze dimension explicitly to avoid surprises.
For the most part, the mapping between Proto field types and TensorFlow dtypes is straightforward. However, there are a few special cases:
- A proto field that contains a submessage or group can only be converted to `DT_STRING` (the serialized submessage). This is to reduce the complexity of the API. The resulting string can be used as input to another instance of the decode_proto op.
- TensorFlow lacks support for unsigned integers. The ops represent uint64 types as a `DT_INT64` with the same twos-complement bit pattern (the obvious way). Unsigned int32 values can be represented exactly by specifying type `DT_INT64`, or using twos-complement if the caller specifies `DT_INT32` in the `output_types` attribute.
- `map` fields are not directly decoded. They are treated as `repeated` fields, of the appropriate entry type. The proto-compiler defines entry types for each map field. The type-name is the field name, converted to "CamelCase" with "Entry" appended. The `tf.train.Features.FeatureEntry` message is an example of one of these implicit `Entry` types.
- `enum` fields should be read as int32.
Both binary and text proto serializations are supported, and can be chosen using the `format` attribute.
The `descriptor_source` attribute selects the source of protocol descriptors to consult when looking up `message_type`. This may be:
- An empty string or "local://", in which case protocol descriptors are created for C++ (not Python) proto definitions linked to the binary.
- A file, in which case protocol descriptors are created from the file, which is expected to contain a `FileDescriptorSet` serialized as a string. NOTE: You can build a `descriptor_source` file using the `--descriptor_set_out` and `--include_imports` options to the protocol compiler `protoc`.
- A "bytes://<bytes>", in which protocol descriptors are created from `<bytes>`, which is expected to be a `FileDescriptorSet` serialized as a string.
Arguments:
bytes: Tensor of serialized protos with shape `batch_shape`.
message_type: Name of the proto message type to decode.
field_names: List of strings containing proto field names. An extension field can be decoded by using its full name, e.g. EXT_PACKAGE.EXT_FIELD_NAME.
output_types: List of TF types to use for the respective field in field_names.
Returns:
sizes: Tensor of int32 with shape `[batch_shape, len(field_names)]`.
Each entry is the number of values found for the corresponding field. Optional fields may have 0 or 1 values.
values: List of tensors containing values for the corresponding field.
`values[i]` has datatype `output_types[i]` and shape `[batch_shape, max(sizes[...,i])]`.
func DecodeRaw ¶
func DecodeRaw(scope *Scope, bytes tf.Output, out_type tf.DataType, optional ...DecodeRawAttr) (output tf.Output)
Reinterpret the bytes of a string as a vector of numbers.
Arguments:
bytes: All the elements must have the same length.
Returns A Tensor with one more dimension than the input `bytes`. The added dimension will have size equal to the length of the elements of `bytes` divided by the number of bytes to represent `out_type`.
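A minimal Go sketch (leaving the optional little-endian attr at its default):
```go
// Reinterpret each string element's bytes as int32 values.
s := NewScope()
bytes := Placeholder(s, tf.String) // every element must have the same byte length
nums := DecodeRaw(s, bytes, tf.Int32)
if s.Err() != nil {
	panic(s.Err())
}
_ = nums // adds one trailing dimension of size len(element)/4
```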
func DecodeWav ¶
func DecodeWav(scope *Scope, contents tf.Output, optional ...DecodeWavAttr) (audio tf.Output, sample_rate tf.Output)
Decode a 16-bit PCM WAV file to a float tensor.
The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float.
If desired_channels is set and the input contains fewer channels than requested, the last channel is duplicated to give the requested number; if the input has more channels than requested, the additional channels are ignored.
If desired_samples is set, then the audio will be cropped or padded with zeroes to the requested length.
The first output contains a Tensor with the content of the audio samples. The innermost dimension holds the channels and the outer dimension holds the samples. For example, a ten-sample-long stereo WAV file should give an output shape of [10, 2].
Arguments:
contents: The WAV-encoded audio, usually from a file.
Returns:
audio: 2-D with shape `[length, channels]`.
sample_rate: Scalar holding the sample rate found in the WAV header.
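A Go sketch that pairs this op with ReadFile to decode a WAV from disk (the filename is illustrative; ReadFile is assumed to be the file-reading op in this package):
```go
// Read a WAV file and decode it into float samples plus its sample rate.
s := NewScope()
filename := Const(s, "speech.wav") // hypothetical path
contents := ReadFile(s, filename)
audio, sampleRate := DecodeWav(s, contents)
if s.Err() != nil {
	panic(s.Err())
}
_, _ = audio, sampleRate // audio: [length, channels] float32; sample_rate: scalar int32
```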
func DeepCopy ¶
Makes a copy of `x`.
Arguments:
x: The source tensor of type `T`.
Returns y: A `Tensor` of type `T`. A copy of `x`. Guaranteed that `y`
is not an alias of `x`.
func DeleteIterator ¶
A container for an iterator resource.
Arguments:
handle: A handle to the iterator to delete.
deleter: A variant deleter.
Returns the created operation.
func DeleteMultiDeviceIterator ¶
func DeleteMultiDeviceIterator(scope *Scope, multi_device_iterator tf.Output, iterators []tf.Output, deleter tf.Output) (o *tf.Operation)
A container for an iterator resource.
Arguments:
multi_device_iterator: A handle to the multi device iterator to delete.
iterators: A list of iterator handles (unused). This is added so that automatic control dependencies get added during function tracing that ensure this op runs after all the dependent iterators are deleted.
deleter: A variant deleter.
Returns the created operation.
func DeleteSessionTensor ¶
Delete the tensor specified by its handle in the session.
Arguments:
handle: The handle for a tensor stored in the session state.
Returns the created operation.
func DenseBincount ¶
func DenseBincount(scope *Scope, input tf.Output, size tf.Output, weights tf.Output, optional ...DenseBincountAttr) (output tf.Output)
Counts the number of occurrences of each value in an integer array.
Outputs a vector with length `size` and the same dtype as `weights`. If `weights` are empty, then index `i` stores the number of times the value `i` is counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of the value in `weights` at each index where the corresponding value in `arr` is `i`.
Values in `arr` outside of the range [0, size) are ignored.
Arguments:
input: 1D or 2D int `Tensor`.
size: non-negative int scalar `Tensor`.
weights: is an int32, int64, float32, or float64 `Tensor` with the same shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights equal to 1.
Returns 1D `Tensor` with length equal to `size` or 2D `Tensor` with [batch_size, `size`]. The counts or summed weights for each value in the range [0, size).
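A Go sketch of plain counting with explicit unit weights (which also fix the output dtype); the input values are illustrative:
```go
// Count how often each value in [0, 5) occurs in the input.
s := NewScope()
input := Const(s, []int32{1, 1, 2, 4})
size := Const(s, int32(5))
weights := Const(s, []float32{1, 1, 1, 1}) // unit weights; counts come back as float32
counts := DenseBincount(s, input, size, weights)
if s.Err() != nil {
	panic(s.Err())
}
_ = counts // => [0, 2, 1, 0, 1]
```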
func DenseCountSparseOutput ¶
func DenseCountSparseOutput(scope *Scope, values tf.Output, weights tf.Output, binary_output bool, optional ...DenseCountSparseOutputAttr) (output_indices tf.Output, output_values tf.Output, output_dense_shape tf.Output)
Performs sparse-output bin counting for a tf.tensor input.
Counts the number of times each value occurs in the input.
Arguments:
values: Tensor containing data to count.
weights: A Tensor of the same shape as indices containing per-index weight values. May also be the empty tensor if no weights are used.
binary_output: Whether to output the number of occurrences of each value or 1.
Returns:
output_indices: Indices tensor for the resulting sparse tensor object. output_values: Values tensor for the resulting sparse tensor object. output_dense_shape: Shape tensor for the resulting sparse tensor object.
func DenseToCSRSparseMatrix ¶
func DenseToCSRSparseMatrix(scope *Scope, dense_input tf.Output, indices tf.Output) (sparse_output tf.Output)
Converts a dense tensor to a (possibly batched) CSRSparseMatrix.
Arguments:
dense_input: A Dense tensor. indices: Indices of nonzero elements.
Returns A (possibly batched) CSRSparseMatrix.
func DenseToDenseSetOperation ¶
func DenseToDenseSetOperation(scope *Scope, set1 tf.Output, set2 tf.Output, set_operation string, optional ...DenseToDenseSetOperationAttr) (result_indices tf.Output, result_values tf.Output, result_shape tf.Output)
Applies set operation along last dimension of 2 `Tensor` inputs.
See SetOperationOp::SetOperationFromContext for values of `set_operation`.
Output `result` is a `SparseTensor` represented by `result_indices`, `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` dimension contains the result of `set_operation` applied to the corresponding `[0...n-1]` dimension of `set`.
Arguments:
set1: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.
Dimension `n` contains values in a set, duplicates are allowed but ignored.
set2: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`.
Dimension `n` contains values in a set, duplicates are allowed but ignored.
Returns:
result_indices: 2D indices of a `SparseTensor`. result_values: 1D values of a `SparseTensor`. result_shape: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]` is the max result set size across all `0...n-1` dimensions.
func DenseToSparseBatchDataset ¶
func DenseToSparseBatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, row_shape tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
Creates a dataset that batches input elements into a SparseTensor.
Arguments:
input_dataset: A handle to an input dataset. Must have a single component. batch_size: A scalar representing the number of elements to accumulate in a
batch.
row_shape: A vector representing the dense shape of each row in the produced
SparseTensor. The shape may be partially specified, using `-1` to indicate that a particular dimension should use the maximum size of all batch elements.
func DenseToSparseSetOperation ¶
func DenseToSparseSetOperation(scope *Scope, set1 tf.Output, set2_indices tf.Output, set2_values tf.Output, set2_shape tf.Output, set_operation string, optional ...DenseToSparseSetOperationAttr) (result_indices tf.Output, result_values tf.Output, result_shape tf.Output)
Applies set operation along last dimension of `Tensor` and `SparseTensor`.
See SetOperationOp::SetOperationFromContext for values of `set_operation`.
Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`, and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same as `set1`. Dimension `n` contains values in a set, duplicates are allowed but ignored.
If `validate_indices` is `True`, this op validates the order and range of `set2` indices.
Output `result` is a `SparseTensor` represented by `result_indices`, `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` dimension contains the result of `set_operation` applied to the corresponding `[0...n-1]` dimension of `set`.
Arguments:
set1: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.
Dimension `n` contains values in a set, duplicates are allowed but ignored.
set2_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
order.
set2_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
order.
set2_shape: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must
be the same as the 1st `n-1` dimensions of `set1`, `result_shape[n]` is the max set size across `n-1` dimensions.
Returns:
result_indices: 2D indices of a `SparseTensor`. result_values: 1D values of a `SparseTensor`. result_shape: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]` is the max result set size across all `0...n-1` dimensions.
func DepthToSpace ¶
func DepthToSpace(scope *Scope, input tf.Output, block_size int64, optional ...DepthToSpaceAttr) (output tf.Output)
DepthToSpace for tensors of type T.
Rearranges data from depth into blocks of spatial data. This is the reverse transformation of SpaceToDepth. More specifically, this op outputs a copy of the input tensor where values from the `depth` dimension are moved in spatial blocks to the `height` and `width` dimensions. The attr `block_size` indicates the input block size and how the data is moved.
- Chunks of data of size `block_size * block_size` from depth are rearranged into non-overlapping blocks of size `block_size x block_size`
- The width of the output tensor is `input_width * block_size`, whereas the height is `input_height * block_size`.
- The Y, X coordinates within each block of the output image are determined by the high order component of the input channel index.
- The depth of the input tensor must be divisible by `block_size * block_size`.
The `data_format` attr specifies the layout of the input and output tensors with the following options:
"NHWC": `[ batch, height, width, channels ]` "NCHW": `[ batch, channels, height, width ]` "NCHW_VECT_C": `qint8 [ batch, channels / 4, height, width, 4 ]`
It is useful to consider the operation as transforming a 6-D Tensor. e.g. for data_format = NHWC,
Each element in the input tensor can be specified via 6 coordinates, ordered by decreasing memory layout significance as: n,iY,iX,bY,bX,oC (where n=batch index, iX, iY means X or Y coordinates within the input image, bX, bY means coordinates within the output block, oC means output channels). The output would be the input transposed to the following layout: n,iY,bY,iX,bX,oC
This operation is useful for resizing the activations between convolutions (but keeping all data), e.g. instead of pooling. It is also useful for training purely convolutional models.
For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and block_size = 2:
```
x = [[[[1, 2, 3, 4]]]]
```
This operation will output a tensor of shape `[1, 2, 2, 1]`:
```
[[[[1], [2]], [[3], [4]]]]
```
Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`, the corresponding output will have 2x2 elements and will have a depth of 1 channel (1 = `4 / (block_size * block_size)`). The output element shape is `[2, 2, 1]`.
For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.
```
x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
```
This operation, for block size of 2, will return the following tensor of shape `[1, 2, 2, 3]`
```
[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]
```
Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2:
```
x = [[[[1,  2,  3,  4],
       [5,  6,  7,  8]],
      [[9,  10, 11, 12],
       [13, 14, 15, 16]]]]
```
the operator will return the following tensor of shape `[1 4 4 1]`:
```
x = [[[ [1],  [2],  [5],  [6]],
      [ [3],  [4],  [7],  [8]],
      [ [9],  [10], [13], [14]],
      [ [11], [12], [15], [16]]]]
```
Arguments:
block_size: The size of the spatial block, same as in Space2Depth.
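A minimal sketch mirroring the first example above (input of shape [1, 1, 1, 4] with block_size = 2):

```go
s := NewScope()
x := Const(s, [][][][]float32{{{{1, 2, 3, 4}}}}) // shape [1, 1, 1, 4]
// block_size = 2 moves the 4 depth values into a 2x2 spatial block: output shape [1, 2, 2, 1].
y := DepthToSpace(s, x, 2)
if s.Err() != nil {
	panic(s.Err())
}
_ = y
```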
func DepthwiseConv2dNative ¶
func DepthwiseConv2dNative(scope *Scope, input tf.Output, filter tf.Output, strides []int64, padding string, optional ...DepthwiseConv2dNativeAttr) (output tf.Output)
Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors.
Given an input tensor of shape `[batch, in_height, in_width, in_channels]` and a filter / kernel tensor of shape `[filter_height, filter_width, in_channels, channel_multiplier]`, containing `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies a different filter to each input channel (expanding from 1 channel to `channel_multiplier` channels for each), then concatenates the results together. Thus, the output has `in_channels * channel_multiplier` channels.
```
for k in 0..in_channels-1
  for q in 0..channel_multiplier-1
    output[b, i, j, k * channel_multiplier + q] =
      sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
                   filter[di, dj, k, q]
```
Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
Arguments:
strides: 1-D of length 4. The stride of the sliding window for each dimension
of `input`.
padding: The type of padding algorithm to use.
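A minimal graph-building sketch, with the actual tensor shapes left to be provided at run time:

```go
s := NewScope()
input := Placeholder(s, tf.Float)  // [batch, in_height, in_width, in_channels]
filter := Placeholder(s, tf.Float) // [filter_height, filter_width, in_channels, channel_multiplier]
output := DepthwiseConv2dNative(s, input, filter, []int64{1, 1, 1, 1}, "SAME")
if s.Err() != nil {
	panic(s.Err())
}
_ = output // [batch, out_height, out_width, in_channels * channel_multiplier]
```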
func DepthwiseConv2dNativeBackpropFilter ¶
func DepthwiseConv2dNativeBackpropFilter(scope *Scope, input tf.Output, filter_sizes tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...DepthwiseConv2dNativeBackpropFilterAttr) (output tf.Output)
Computes the gradients of depthwise convolution with respect to the filter.
Arguments:
input: 4-D with shape based on `data_format`. For example, if
`data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height, in_width, in_channels]` tensor.
filter_sizes: An integer vector representing the tensor shape of `filter`,
where `filter` is a 4-D `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.
out_backprop: 4-D with shape based on `data_format`.
For example, if `data_format` is 'NHWC' then out_backprop shape is `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution.
strides: The stride of the sliding window for each dimension of the input
of the convolution.
padding: The type of padding algorithm to use.
Returns 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t. the `filter` input of the convolution.
func DepthwiseConv2dNativeBackpropInput ¶
func DepthwiseConv2dNativeBackpropInput(scope *Scope, input_sizes tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...DepthwiseConv2dNativeBackpropInputAttr) (output tf.Output)
Computes the gradients of depthwise convolution with respect to the input.
Arguments:
input_sizes: An integer vector representing the shape of `input`, based
on `data_format`. For example, if `data_format` is 'NHWC' then
`input` is a 4-D `[batch, height, width, channels]` tensor. filter: 4-D with shape
`[filter_height, filter_width, in_channels, depthwise_multiplier]`.
out_backprop: 4-D with shape based on `data_format`.
For example, if `data_format` is 'NHWC' then out_backprop shape is `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution.
strides: The stride of the sliding window for each dimension of the input
of the convolution.
padding: The type of padding algorithm to use.
Returns 4-D with shape according to `data_format`. For example, if `data_format` is 'NHWC', output shape is `[batch, in_height, in_width, in_channels]`. Gradient w.r.t. the input of the convolution.
func Dequantize ¶
func Dequantize(scope *Scope, input tf.Output, min_range tf.Output, max_range tf.Output, optional ...DequantizeAttr) (output tf.Output)
Dequantize the 'input' tensor into a float or bfloat16 Tensor.
[min_range, max_range] are scalar floats that specify the range for the output. The 'mode' attribute controls exactly which calculations are used to convert the float values to their quantized equivalents.
In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
```
if T == qint8:
  in[i] += (range(T) + 1) / 2.0

out[i] = min_range + (in[i] * (max_range - min_range) / range(T))
```
here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
*MIN_COMBINED Mode Example*
If the input comes from a QuantizedRelu6, the output type is quint8 (range of 0-255) but the possible range of QuantizedRelu6 is 0-6. The min_range and max_range values are therefore 0.0 and 6.0. Dequantize on quint8 will take each value, cast to float, and multiply by 6 / 255. Note that if the quantized type is qint8, the operation will additionally add 128 to each value before casting.
If the mode is 'MIN_FIRST', then this approach is used:
```c++
num_discrete_values = 1 << (# of bits in T)
range_adjust = num_discrete_values / (num_discrete_values - 1)
range = (range_max - range_min) * range_adjust
range_scale = range / num_discrete_values
const double offset_input = static_cast<double>(input) - lowest_quantized;
result = range_min + ((input - numeric_limits<T>::min()) * range_scale)
```
If the mode is `SCALED`, dequantization is performed by multiplying each input value by a scaling_factor. (Thus an input of 0 always maps to 0.0).
The scaling_factor is determined from `min_range`, `max_range`, and `narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3}` and `QuantizeV2`, using the following algorithm:
```c++
const int min_expected_T = std::numeric_limits<T>::min() +
    (narrow_range ? 1 : 0);
const int max_expected_T = std::numeric_limits<T>::max();
const float scale_factor =
    (std::numeric_limits<T>::min() == 0)
        ? (max_range / max_expected_T)
        : std::max(min_range / min_expected_T,
                   max_range / max_expected_T);
```
Arguments:
min_range: The minimum scalar value possibly produced for the input. max_range: The maximum scalar value possibly produced for the input.
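A minimal sketch for the MIN_COMBINED example above, assuming `tf.Quint8` is available as a DataType in the TensorFlow Go package:

```go
s := NewScope()
quantized := Placeholder(s, tf.Quint8) // quantized values fed at run time
minRange := Const(s, float32(0.0))
maxRange := Const(s, float32(6.0)) // e.g. the range produced by a QuantizedRelu6
out := Dequantize(s, quantized, minRange, maxRange)
if s.Err() != nil {
	panic(s.Err())
}
_ = out // float values in [0.0, 6.0] under the default MIN_COMBINED mode
```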
func DeserializeIterator ¶
func DeserializeIterator(scope *Scope, resource_handle tf.Output, serialized tf.Output) (o *tf.Operation)
Converts the given variant tensor to an iterator and stores it in the given resource.
Arguments:
resource_handle: A handle to an iterator resource. serialized: A variant tensor storing the state of the iterator contained in the
resource.
Returns the created operation.
func DeserializeManySparse ¶
func DeserializeManySparse(scope *Scope, serialized_sparse tf.Output, dtype tf.DataType) (sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output)
Deserialize and concatenate `SparseTensors` from a serialized minibatch.
The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where `N` is the minibatch size and the rows correspond to packed outputs of `SerializeSparse`. The ranks of the original `SparseTensor` objects must all match. When the final `SparseTensor` is created, it has rank one higher than the ranks of the incoming `SparseTensor` objects (they have been concatenated along a new row dimension).
The output `SparseTensor` object's shape values for all dimensions but the first are the max across the input `SparseTensor` objects' shape values for the corresponding dimensions. Its first shape value is `N`, the minibatch size.
The input `SparseTensor` objects' indices are assumed ordered in standard lexicographic order. If this is not the case, after this step run `SparseReorder` to restore index ordering.
For example, if the serialized input is a `[2 x 3]` matrix representing two original `SparseTensor` objects:
index = [ 0]
        [10]
        [20]
values = [1, 2, 3]
shape = [50]

and

index = [ 2]
        [10]
values = [4, 5]
shape = [30]

then the final deserialized `SparseTensor` will be:

index = [0  0]
        [0 10]
        [0 20]
        [1  2]
        [1 10]
values = [1, 2, 3, 4, 5]
shape = [2 50]
Arguments:
serialized_sparse: 2-D, The `N` serialized `SparseTensor` objects.
Must have 3 columns.
dtype: The `dtype` of the serialized `SparseTensor` objects.
func DeserializeSparse ¶
func DeserializeSparse(scope *Scope, serialized_sparse tf.Output, dtype tf.DataType) (sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output)
Deserialize `SparseTensor` objects.
The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where the last dimension stores serialized `SparseTensor` objects and the other N dimensions (N >= 0) correspond to a batch. The ranks of the original `SparseTensor` objects must all match. When the final `SparseTensor` is created, its rank is the rank of the incoming `SparseTensor` objects plus N; the sparse tensors have been concatenated along new dimensions, one for each batch.
The output `SparseTensor` object's shape values for the original dimensions are the max across the input `SparseTensor` objects' shape values for the corresponding dimensions. The new dimensions match the size of the batch.
The input `SparseTensor` objects' indices are assumed ordered in standard lexicographic order. If this is not the case, after this step run `SparseReorder` to restore index ordering.
For example, if the serialized input is a `[2 x 3]` matrix representing two original `SparseTensor` objects:
index = [ 0]
        [10]
        [20]
values = [1, 2, 3]
shape = [50]

and

index = [ 2]
        [10]
values = [4, 5]
shape = [30]

then the final deserialized `SparseTensor` will be:

index = [0  0]
        [0 10]
        [0 20]
        [1  2]
        [1 10]
values = [1, 2, 3, 4, 5]
shape = [2 50]
Arguments:
serialized_sparse: The serialized `SparseTensor` objects. The last dimension
must have 3 columns.
dtype: The `dtype` of the serialized `SparseTensor` objects.
func DestroyResourceOp ¶
func DestroyResourceOp(scope *Scope, resource tf.Output, optional ...DestroyResourceOpAttr) (o *tf.Operation)
Deletes the resource specified by the handle.
All subsequent operations using the resource will result in a NotFound error status.
Arguments:
resource: handle to the resource to delete.
Returns the created operation.
func DeviceIndex ¶
Returns the index of the device the op runs on.
Given a list of device names, this operation returns the index of the device this op runs on. The length of the list is returned in two cases: (1) the device does not exist in the given device list; (2) it is in XLA compilation.
func Diag ¶
Returns a diagonal tensor with given diagonal values.
Given a `diagonal`, this operation returns a tensor with the `diagonal` and everything else padded with zeros. The diagonal is computed as follows:
Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:
`output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.
For example:
```
# 'diagonal' is [1, 2, 3, 4]
tf.diag(diagonal) ==> [[1, 0, 0, 0]
                       [0, 2, 0, 0]
                       [0, 0, 3, 0]
                       [0, 0, 0, 4]]
```
Arguments:
diagonal: Rank k tensor where k is at most 1.
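A minimal sketch, assuming the generated signature is `Diag(scope *Scope, diagonal tf.Output) (output tf.Output)`:

```go
s := NewScope()
diagonal := Const(s, []float32{1, 2, 3, 4})
d := Diag(s, diagonal) // 4x4 tensor with 1, 2, 3, 4 on the diagonal, zeros elsewhere
if s.Err() != nil {
	panic(s.Err())
}
_ = d
```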
func DiagPart ¶
Returns the diagonal part of the tensor.
This operation returns a tensor with the `diagonal` part of the `input`. The `diagonal` part is computed as follows:
Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a tensor of rank `k` with dimensions `[D1,..., Dk]` where:
`diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.
For example:
```
# 'input' is [[1, 0, 0, 0]
              [0, 2, 0, 0]
              [0, 0, 3, 0]
              [0, 0, 0, 4]]

tf.diag_part(input) ==> [1, 2, 3, 4]
```
Arguments:
input: Rank k tensor where k is even and not zero.
Returns The extracted diagonal.
func Digamma ¶
Computes Psi, the derivative of Lgamma (the log of the absolute value of `Gamma(x)`), element-wise.
func Dilation2D ¶
func Dilation2D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, rates []int64, padding string) (output tf.Output)
Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors.
The `input` tensor has shape `[batch, in_height, in_width, depth]` and the `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each input channel is processed independently of the others with its own structuring function. The `output` tensor has shape `[batch, out_height, out_width, depth]`. The spatial dimensions of the output tensor depend on the `padding` algorithm. We currently only support the default "NHWC" `data_format`.
In detail, the grayscale morphological 2-D dilation is the max-sum correlation (for consistency with `conv2d`, we use unmirrored filters):
output[b, y, x, c] = max_{dy, dx} input[b, strides[1] * y + rates[1] * dy, strides[2] * x + rates[2] * dx, c] + filter[dy, dx, c]
Max-pooling is a special case when the filter has size equal to the pooling kernel size and contains all zeros.
Note on duality: The dilation of `input` by the `filter` is equal to the negation of the erosion of `-input` by the reflected `filter`.
Arguments:
input: 4-D with shape `[batch, in_height, in_width, depth]`. filter: 3-D with shape `[filter_height, filter_width, depth]`. strides: The stride of the sliding window for each dimension of the input
tensor. Must be: `[1, stride_height, stride_width, 1]`.
rates: The input stride for atrous morphological dilation. Must be:
`[1, rate_height, rate_width, 1]`.
padding: The type of padding algorithm to use.
Returns 4-D with shape `[batch, out_height, out_width, depth]`.
func Dilation2DBackpropFilter ¶
func Dilation2DBackpropFilter(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, rates []int64, padding string) (filter_backprop tf.Output)
Computes the gradient of morphological 2-D dilation with respect to the filter.
Arguments:
input: 4-D with shape `[batch, in_height, in_width, depth]`. filter: 3-D with shape `[filter_height, filter_width, depth]`. out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`. strides: 1-D of length 4. The stride of the sliding window for each dimension of
the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
rates: 1-D of length 4. The input stride for atrous morphological dilation.
Must be: `[1, rate_height, rate_width, 1]`.
padding: The type of padding algorithm to use.
Returns 3-D with shape `[filter_height, filter_width, depth]`.
func Dilation2DBackpropInput ¶
func Dilation2DBackpropInput(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, rates []int64, padding string) (in_backprop tf.Output)
Computes the gradient of morphological 2-D dilation with respect to the input.
Arguments:
input: 4-D with shape `[batch, in_height, in_width, depth]`. filter: 3-D with shape `[filter_height, filter_width, depth]`. out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`. strides: 1-D of length 4. The stride of the sliding window for each dimension of
the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
rates: 1-D of length 4. The input stride for atrous morphological dilation.
Must be: `[1, rate_height, rate_width, 1]`.
padding: The type of padding algorithm to use.
Returns 4-D with shape `[batch, in_height, in_width, depth]`.
func DirectedInterleaveDataset ¶
func DirectedInterleaveDataset(scope *Scope, selector_input_dataset tf.Output, data_input_datasets []tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...DirectedInterleaveDatasetAttr) (handle tf.Output)
A substitute for `InterleaveDataset` on a fixed list of `N` datasets.
Arguments:
selector_input_dataset: A dataset of scalar `DT_INT64` elements that determines which of the
`N` data inputs should produce the next output element.
data_input_datasets: `N` datasets with the same type that will be interleaved according to
the values of `selector_input_dataset`.
func DisableCopyOnRead ¶ added in v0.2.0
Turns off the copy-on-read mode.
Turns off the copy-on-read mode of a resource variable. If the variable is not in copy-on-read mode, this op has no effect.
Arguments:
resource: The resource handle of the resource variable.
Returns the created operation.
func Div ¶
Returns x / y element-wise.
*NOTE*: `Div` supports broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func DivNoNan ¶
Returns 0 if the denominator is zero.
*NOTE*: `DivNoNan` supports broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func DrawBoundingBoxes ¶
Draw bounding boxes on a batch of images.
Outputs a copy of `images` but draws on top of the pixels zero or more bounding boxes specified by the locations in `boxes`. The coordinates of the each bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and height of the underlying image.
For example, if an image is 100 x 200 pixels (height x width) and the bounding box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates).
Parts of the bounding box may fall outside the image.
Arguments:
images: 4-D with shape `[batch, height, width, depth]`. A batch of images. boxes: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding
boxes.
Returns 4-D with the same shape as `images`. The batch of input images with bounding boxes drawn on the images.
func DrawBoundingBoxesV2 ¶
func DrawBoundingBoxesV2(scope *Scope, images tf.Output, boxes tf.Output, colors tf.Output) (output tf.Output)
Draw bounding boxes on a batch of images.
Outputs a copy of `images` but draws on top of the pixels zero or more bounding boxes specified by the locations in `boxes`. The coordinates of the each bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and height of the underlying image.
For example, if an image is 100 x 200 pixels (height x width) and the bounding box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates).
Parts of the bounding box may fall outside the image.
Arguments:
images: 4-D with shape `[batch, height, width, depth]`. A batch of images. boxes: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding
boxes.
colors: 2-D. A list of RGBA colors to cycle through for the boxes.
Returns 4-D with the same shape as `images`. The batch of input images with bounding boxes drawn on the images.
func DynamicEnqueueTPUEmbeddingArbitraryTensorBatch ¶
func DynamicEnqueueTPUEmbeddingArbitraryTensorBatch(scope *Scope, sample_indices_or_row_splits []tf.Output, embedding_indices []tf.Output, aggregation_weights []tf.Output, mode_override tf.Output, device_ordinal tf.Output, optional ...DynamicEnqueueTPUEmbeddingArbitraryTensorBatchAttr) (o *tf.Operation)
Eases the porting of code that uses tf.nn.embedding_lookup_sparse().
embedding_indices[i] and aggregation_weights[i] correspond to the ith feature.
The tensors at corresponding positions in the three input lists (sample_indices, embedding_indices and aggregation_weights) must have the same shape, i.e. rank 1 with dim_size() equal to the total number of lookups into the table described by the corresponding feature.
Arguments:
sample_indices_or_row_splits: A list of rank 2 Tensors specifying the training example to which the
corresponding embedding_indices and aggregation_weights values belong. If the size of its first dimension is 0, we assume each embedding_indices belongs to a different sample. Both int32 and int64 are allowed and will be converted to int32 internally.
Or a list of rank 1 Tensors specifying the row splits for splitting embedding_indices and aggregation_weights into rows. It corresponds to ids.row_splits in embedding_lookup(), when ids is a RaggedTensor. When enqueuing an N-D ragged tensor, only the last dimension is allowed to be ragged, and the row splits is a 1-D dense tensor. When empty, we assume a dense tensor is passed to the op. Both int32 and int64 are allowed and will be converted to int32 internally.
embedding_indices: A list of rank 1 Tensors, indices into the embedding
tables. Both int32 and int64 are allowed and will be converted to int32 internally.
aggregation_weights: A list of rank 1 Tensors containing per training
example aggregation weights. Both float32 and float64 are allowed and will be converted to float32 internally.
mode_override: A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set in TPUEmbeddingConfiguration is used, otherwise mode_override is used.
device_ordinal: The TPU device to use. Should be >= 0 and less than the number
of TPU cores in the task on which the node is placed.
Returns the created operation.
func DynamicPartition ¶
func DynamicPartition(scope *Scope, data tf.Output, partitions tf.Output, num_partitions int64) (outputs []tf.Output)
Partitions `data` into `num_partitions` tensors using indices from `partitions`.
For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]` becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = i` are placed in `outputs[i]` in lexicographic order of `js`, and the first dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`. In detail,
```python
outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]
outputs[i] = pack([data[js, ...] for js if partitions[js] == i])
```
`data.shape` must start with `partitions.shape`.
For example:
```python
# Scalar partitions.
partitions = 1
num_partitions = 2
data = [10, 20]
outputs[0] = []          # Empty with shape [0, 2]
outputs[1] = [[10, 20]]

# Vector partitions.
partitions = [0, 0, 1, 1, 0]
num_partitions = 2
data = [10, 20, 30, 40, 50]
outputs[0] = [10, 20, 50]
outputs[1] = [30, 40]
```
See `dynamic_stitch` for an example on how to merge partitions back.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> <img style="width:100%" src="https://www.tensorflow.org/images/DynamicPartition.png" alt> </div>
Raises:
- `InvalidArgumentError` in following cases:
- If partitions is not in range `[0, num_partitions)`
- If `partitions.shape` does not match prefix of `data.shape` argument.
Arguments:
partitions: Any shape. Indices in the range `[0, num_partitions)`. num_partitions: The number of partitions to output.
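A minimal sketch of the vector-partitions example above:

```go
s := NewScope()
data := Const(s, []float32{10, 20, 30, 40, 50})
partitions := Const(s, []int32{0, 0, 1, 1, 0})
outs := DynamicPartition(s, data, partitions, 2)
if s.Err() != nil {
	panic(s.Err())
}
_ = outs // outs[0] = [10, 20, 50], outs[1] = [30, 40]
```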
func DynamicStitch ¶
Interleave the values from the `data` tensors into a single tensor.
Builds a merged tensor such that ¶
```python
merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
```
For example, if each `indices[m]` is scalar or vector, we have
```python
# Scalar indices:
merged[indices[m], ...] = data[m][...]

# Vector indices:
merged[indices[m][i], ...] = data[m][i, ...]
```
Each `data[i].shape` must start with the corresponding `indices[i].shape`, and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we must have `data[i].shape = indices[i].shape + constant`. In terms of this `constant`, the output shape is
merged.shape = [max(indices) + 1] + constant
Values are merged in order, so if an index appears in both `indices[m][i]` and `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the merged result. If you do not need this guarantee, ParallelDynamicStitch might perform better on some devices.
For example:
```python
indices[0] = 6
indices[1] = [4, 1]
indices[2] = [[5, 2], [0, 3]]
data[0] = [61, 62]
data[1] = [[41, 42], [11, 12]]
data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
          [51, 52], [61, 62]]
```
This method can be used to merge partitions created by `dynamic_partition` as illustrated on the following example:
```python
# Apply function (increments x_i) on elements for which a certain condition
# applies (x_i != -1 in this example).
x = tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
condition_mask = tf.not_equal(x, tf.constant(-1.))
partitioned_data = tf.dynamic_partition(
    x, tf.cast(condition_mask, tf.int32), 2)
partitioned_data[1] = partitioned_data[1] + 1.0
condition_indices = tf.dynamic_partition(
    tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32), 2)
x = tf.dynamic_stitch(condition_indices, partitioned_data)
# Here x = [1.1, -1., 6.2, 5.3, -1, 8.4]; the -1. values remain unchanged.
```
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> <img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt> </div>
func EagerPyFunc ¶
func EagerPyFunc(scope *Scope, input []tf.Output, token string, Tout []tf.DataType, optional ...EagerPyFuncAttr) (output []tf.Output)
Eagerly executes a python function to compute func(input)->output. The semantics of the input, output, and attributes are the same as those for PyFunc.
func EditDistance ¶
func EditDistance(scope *Scope, hypothesis_indices tf.Output, hypothesis_values tf.Output, hypothesis_shape tf.Output, truth_indices tf.Output, truth_values tf.Output, truth_shape tf.Output, optional ...EditDistanceAttr) (output tf.Output)
Computes the (possibly normalized) Levenshtein Edit Distance.
The inputs are variable-length sequences provided by SparseTensors
(hypothesis_indices, hypothesis_values, hypothesis_shape)
and
(truth_indices, truth_values, truth_shape).
The inputs are:
Arguments:
hypothesis_indices: The indices of the hypothesis list SparseTensor.
This is an N x R int64 matrix.
hypothesis_values: The values of the hypothesis list SparseTensor.
This is an N-length vector.
hypothesis_shape: The shape of the hypothesis list SparseTensor.
This is an R-length vector.
truth_indices: The indices of the truth list SparseTensor.
This is an M x R int64 matrix.
truth_values: The values of the truth list SparseTensor.
This is an M-length vector.
truth_shape: truth indices, vector.
Returns A dense float tensor with rank R - 1.
For the example input:
// hypothesis represents a 2x1 matrix with variable-length values:
//   (0,0) = ["a"]
//   (1,0) = ["b"]
hypothesis_indices = [[0, 0, 0],
                      [1, 0, 0]]
hypothesis_values = ["a", "b"]
hypothesis_shape = [2, 1, 1]

// truth represents a 2x2 matrix with variable-length values:
//   (0,0) = []
//   (0,1) = ["a"]
//   (1,0) = ["b", "c"]
//   (1,1) = ["a"]
truth_indices = [[0, 1, 0],
                 [1, 0, 0],
                 [1, 0, 1],
                 [1, 1, 0]]
truth_values = ["a", "b", "c", "a"]
truth_shape = [2, 2, 2]
normalize = true
The output will be:
// output is a 2x2 matrix with edit distances normalized by truth lengths.
output = [[inf, 1.0],  // (0,0): no truth, (0,1): no hypothesis
          [0.5, 1.0]]  // (1,0): addition, (1,1): no hypothesis
func Eig ¶
func Eig(scope *Scope, input tf.Output, Tout tf.DataType, optional ...EigAttr) (e tf.Output, v tf.Output)
Computes the eigen decomposition of one or more square matrices.
Computes the eigenvalues and (optionally) right eigenvectors of each inner matrix in `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The eigenvalues are sorted in non-decreasing order.
```python
# a is a tensor.
# e is a tensor of eigenvalues.
# v is a tensor of eigenvectors.
e, v = eig(a)
e = eig(a, compute_v=False)
```
Arguments:
input: `Tensor` input of shape `[N, N]`.
Returns:
e: Eigenvalues. Shape is `[N]`. v: Eigenvectors. Shape is `[N, N]`.
func Einsum ¶
Tensor contraction according to Einstein summation convention.
Implements generalized Tensor contraction and reduction. Each input Tensor must have a corresponding input subscript appearing in the comma-separated left-hand side of the equation. The right-hand side of the equation consists of the output subscript. The input subscripts and the output subscript should consist of zero or more named axis labels and at most one ellipsis (`...`).
The named axis labels may be any single character other than those having special meaning, namely `,.->`. The behavior of this Op is undefined if it receives an ill-formatted equation; since the validation is done at graph-building time, we omit format validation checks at runtime.
Note: This Op is *not* intended to be called by the user; instead users should call `tf.einsum` directly. It is a hidden Op used by `tf.einsum`.
Operations are applied to the input(s) according to the following rules:
(a) Generalized Diagonals: For input dimensions corresponding to axis labels appearing more than once in the same input subscript, we take the generalized (`k`-dimensional) diagonal. For example, in the equation `iii->i` with input shape `[3, 3, 3]`, the generalized diagonal would consist of `3` elements at indices `(0, 0, 0)`, `(1, 1, 1)` and `(2, 2, 2)` to create a Tensor of shape `[3]`.

(b) Reduction: Axes corresponding to labels appearing only in one input subscript but not in the output subscript are summed over prior to Tensor contraction. For example, in the equation `ab,bc->b`, the axis labels `a` and `c` are the reduction axis labels.

(c) Batch Dimensions: Axes corresponding to labels appearing in each of the input subscripts and also in the output subscript make up the batch dimensions in Tensor contraction. Unnamed axis labels corresponding to ellipsis (`...`) also correspond to batch dimensions. For example, for the equation denoting batch matrix multiplication, `bij,bjk->bik`, the axis label `b` corresponds to a batch dimension.

(d) Contraction: In case of binary einsum, axes corresponding to labels appearing in two different inputs (and not in the output) are contracted against each other. Considering the batch matrix multiplication equation again (`bij,bjk->bik`), the contracted axis label is `j`.

(e) Expand Diagonal: If the output subscripts contain repeated (explicit) axis labels, the opposite operation of (a) is applied. For example, in the equation `i->iii`, and input shape `[3]`, the output of shape `[3, 3, 3]` are all zeros, except for the (generalized) diagonal which is populated with values from the input. Note: This operation is not supported by `np.einsum` or `tf.einsum`; it is provided to enable computing the symbolic gradient of `tf.einsum`.
The output subscripts must contain only labels appearing in at least one of the input subscripts. Furthermore, all dimensions mapping to the same axis label must be equal.
Any of the input and output subscripts may contain at most a single ellipsis (`...`). These ellipsis are mapped against dimensions not corresponding to any named axis label. If two inputs contain ellipsis, then they are broadcasted according to standard NumPy broadcasting [rules](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
The broadcasted dimensions are placed in the corresponding location of the ellipsis in the output subscript. If the broadcasted dimensions are non-empty and the output subscripts do not contain ellipsis, then an InvalidArgument error is raised.
@compatibility(numpy) Similar to [`numpy.einsum`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html).
Comparison with `numpy.einsum`:
- This Op only supports unary and binary forms of `numpy.einsum`.
- This Op does not support implicit form. (i.e. equations without `->`).
- This Op also supports repeated indices in the output subscript, which is not supported by `numpy.einsum`.
@end_compatibility
Arguments:
inputs: List of 1 or 2 Tensors. equation: String describing the Einstein Summation operation; in the format of np.einsum.
Returns Output Tensor with shape depending upon `equation`.
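A minimal sketch of plain matrix multiplication, assuming the generated signature is `Einsum(scope *Scope, inputs []tf.Output, equation string) (output tf.Output)`:

```go
s := NewScope()
a := Const(s, [][]float32{{1, 2}, {3, 4}})
b := Const(s, [][]float32{{5, 6}, {7, 8}})
out := Einsum(s, []tf.Output{a, b}, "ij,jk->ik") // contraction over the shared label j
if s.Err() != nil {
	panic(s.Err())
}
_ = out // [[19, 22], [43, 50]]
```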
func Elu ¶
Computes the exponential linear function.
The ELU function is defined as:
- $ e ^ x - 1 $ if $ x < 0 $
- $ x $ if $ x >= 0 $
Examples:
>>> tf.nn.elu(1.0)
<tf.Tensor: shape=(), dtype=float32, numpy=1.0>
>>> tf.nn.elu(0.0)
<tf.Tensor: shape=(), dtype=float32, numpy=0.0>
>>> tf.nn.elu(-1000.0)
<tf.Tensor: shape=(), dtype=float32, numpy=-1.0>
See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) ](http://arxiv.org/abs/1511.07289)
func EluGrad ¶
Computes gradients for the exponential linear (Elu) operation.
Arguments:
gradients: The backpropagated gradients to the corresponding Elu operation. outputs: The outputs of the corresponding Elu operation.
Returns The gradients: `gradients * (outputs + 1)` if outputs < 0, `gradients` otherwise.
func Empty ¶
func Empty(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...EmptyAttr) (output tf.Output)
Creates a tensor with the given shape.
This operation creates a tensor of `shape` and `dtype`.
Arguments:
shape: 1-D. Represents the shape of the output tensor.
Returns A `Tensor` of type `T`.
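A minimal sketch using the documented signature:

```go
s := NewScope()
shape := Const(s, []int32{2, 3})
out := Empty(s, shape, tf.Float) // a 2x3 float32 tensor of the requested shape
if s.Err() != nil {
	panic(s.Err())
}
_ = out
```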
func EmptyTensorList ¶
func EmptyTensorList(scope *Scope, element_shape tf.Output, max_num_elements tf.Output, element_dtype tf.DataType) (handle tf.Output)
Creates and returns an empty tensor list.
All list elements must be tensors of dtype element_dtype and shape compatible with element_shape.
handle: an empty tensor list. element_dtype: the type of elements in the list. element_shape: a shape compatible with that of elements in the list.
func EncodeBase64 ¶
Encode strings into web-safe base64 format.
Refer to [this article](https://en.wikipedia.org/wiki/Base64) for more information on base64 format. Base64 strings may have padding with '=' at the end so that the encoded string has a length that is a multiple of 4. See the Padding section of the link above.
Web-safe means that the encoder uses - and _ instead of + and /.
Arguments:
input: Strings to be encoded.
Returns Input strings encoded in base64.
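A minimal sketch, assuming the generated signature is `EncodeBase64(scope *Scope, input tf.Output, optional ...EncodeBase64Attr) (output tf.Output)`:

```go
s := NewScope()
input := Const(s, []string{"hello", "tensorflow"})
// Element-wise web-safe base64 ("hello" -> "aGVsbG8", with '=' padding only if the pad attr is set).
encoded := EncodeBase64(s, input)
if s.Err() != nil {
	panic(s.Err())
}
_ = encoded
```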
func EncodeJpeg ¶
JPEG-encode an image.
`image` is a 3-D uint8 Tensor of shape `[height, width, channels]`.
The attr `format` can be used to override the color format of the encoded output. Values can be:
- `''`: Use a default format based on the number of channels in the image.
- `grayscale`: Output a grayscale JPEG image. The `channels` dimension of `image` must be 1.
- `rgb`: Output an RGB JPEG image. The `channels` dimension of `image` must be 3.
If `format` is not specified or is the empty string, a default format is picked in function of the number of channels in `image`:
* 1: Output a grayscale image.
* 3: Output an RGB image.
Arguments:
image: 3-D with shape `[height, width, channels]`.
Returns 0-D. JPEG-encoded image.
func EncodeJpegVariableQuality ¶
func EncodeJpegVariableQuality(scope *Scope, images tf.Output, quality tf.Output) (contents tf.Output)
JPEG encode input image with provided compression quality.
`image` is a 3-D uint8 Tensor of shape `[height, width, channels]`. `quality` is an int32 jpeg compression quality value between 0 and 100.
Arguments:
images: Images to adjust. At least 3-D. quality: An int quality to encode to.
Returns 0-D. JPEG-encoded image.
func EncodePng ¶
PNG-encode an image.
`image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]` where `channels` is:
* 1: for grayscale.
* 2: for grayscale + alpha.
* 3: for RGB.
* 4: for RGBA.
The ZLIB compression level, `compression`, can be -1 for the PNG-encoder default or a value from 0 to 9. 9 is the highest compression level, generating the smallest output, but is slower.
Arguments:
image: 3-D with shape `[height, width, channels]`.
Returns 0-D. PNG-encoded image.
func EncodeProto ¶
func EncodeProto(scope *Scope, sizes tf.Output, values []tf.Output, field_names []string, message_type string, optional ...EncodeProtoAttr) (bytes tf.Output)
The op serializes protobuf messages provided in the input tensors.
The types of the tensors in `values` must match the schema for the fields specified in `field_names`. All the tensors in `values` must have a common shape prefix, *batch_shape*.
The `sizes` tensor specifies repeat counts for each field. The repeat count (last dimension) of each tensor in `values` must be greater than or equal to the corresponding repeat count in `sizes`.
A `message_type` name must be provided to give context for the field names. The actual message descriptor can be looked up either in the linked-in descriptor pool or a filename provided by the caller using the `descriptor_source` attribute.
For the most part, the mapping between Proto field types and TensorFlow dtypes is straightforward. However, there are a few special cases:
- A proto field that contains a submessage or group can only be converted to `DT_STRING` (the serialized submessage). This is to reduce the complexity of the API. The resulting string can be used as input to another instance of the decode_proto op.
- TensorFlow lacks support for unsigned integers. The ops represent uint64 types as a `DT_INT64` with the same twos-complement bit pattern (the obvious way). Unsigned int32 values can be represented exactly by specifying type `DT_INT64`, or using twos-complement if the caller specifies `DT_INT32` in the `output_types` attribute.
The `descriptor_source` attribute selects the source of protocol descriptors to consult when looking up `message_type`. This may be:
- An empty string or "local://", in which case protocol descriptors are created for C++ (not Python) proto definitions linked to the binary.
- A file, in which case protocol descriptors are created from the file, which is expected to contain a `FileDescriptorSet` serialized as a string. NOTE: You can build a `descriptor_source` file using the `--descriptor_set_out` and `--include_imports` options to the protocol compiler `protoc`.
- A "bytes://<bytes>", in which protocol descriptors are created from `<bytes>`, which is expected to be a `FileDescriptorSet` serialized as a string.
Arguments:
sizes: Tensor of int32 with shape `[batch_shape, len(field_names)]`. values: List of tensors containing values for the corresponding field. field_names: List of strings containing proto field names. message_type: Name of the proto message type to decode.
Returns Tensor of serialized protos with shape `batch_shape`.
func EncodeWav ¶
Encode audio data using the WAV file format.
This operation will generate a string suitable to be saved out to create a .wav audio file. It will be encoded in the 16-bit PCM format. It takes in float values in the range -1.0f to 1.0f, and any values outside that range will be clamped to it.
`audio` is a 2-D float Tensor of shape `[length, channels]`. `sample_rate` is a scalar Tensor holding the rate to use (e.g. 44100).
Arguments:
audio: 2-D with shape `[length, channels]`. sample_rate: Scalar containing the sample frequency.
Returns 0-D. WAV-encoded file contents.
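A minimal sketch, assuming the generated signature is `EncodeWav(scope *Scope, audio tf.Output, sample_rate tf.Output, optional ...EncodeWavAttr) (contents tf.Output)`:

```go
s := NewScope()
// Four mono samples in [-1.0, 1.0]: shape [length, channels] = [4, 1].
audio := Const(s, [][]float32{{0.0}, {0.5}, {-0.5}, {1.0}})
sampleRate := Const(s, int32(44100))
contents := EncodeWav(s, audio, sampleRate) // 0-D string holding the WAV file bytes
if s.Err() != nil {
	panic(s.Err())
}
_ = contents
```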
func EnqueueTPUEmbeddingArbitraryTensorBatch ¶
func EnqueueTPUEmbeddingArbitraryTensorBatch(scope *Scope, sample_indices_or_row_splits []tf.Output, embedding_indices []tf.Output, aggregation_weights []tf.Output, mode_override tf.Output, optional ...EnqueueTPUEmbeddingArbitraryTensorBatchAttr) (o *tf.Operation)
Eases the porting of code that uses tf.nn.embedding_lookup_sparse().
embedding_indices[i] and aggregation_weights[i] correspond to the ith feature.
The tensors at corresponding positions in the three input lists (sample_indices, embedding_indices and aggregation_weights) must have the same shape, i.e. rank 1 with dim_size() equal to the total number of lookups into the table described by the corresponding feature.
Arguments:
sample_indices_or_row_splits: A list of rank 2 Tensors specifying the training example to which the
corresponding embedding_indices and aggregation_weights values belong. If the size of its first dimension is 0, we assume each embedding_indices belongs to a different sample. Both int32 and int64 are allowed and will be converted to int32 internally.
Or a list of rank 1 Tensors specifying the row splits for splitting embedding_indices and aggregation_weights into rows. It corresponds to ids.row_splits in embedding_lookup(), when ids is a RaggedTensor. When enqueuing an N-D ragged tensor, only the last dimension is allowed to be ragged, and the row splits is a 1-D dense tensor. When empty, we assume a dense tensor is passed to the op. Both int32 and int64 are allowed and will be converted to int32 internally.
embedding_indices: A list of rank 1 Tensors, indices into the embedding
tables. Both int32 and int64 are allowed and will be converted to int32 internally.
aggregation_weights: A list of rank 1 Tensors containing per training
example aggregation weights. Both float32 and float64 are allowed and will be converted to float32 internally.
mode_override: A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set in TPUEmbeddingConfiguration is used, otherwise mode_override is used.
Returns the created operation.
func EnqueueTPUEmbeddingBatch ¶
func EnqueueTPUEmbeddingBatch(scope *Scope, batch []tf.Output, mode_override tf.Output, optional ...EnqueueTPUEmbeddingBatchAttr) (o *tf.Operation)
An op that enqueues a list of input batch tensors to TPUEmbedding.
Arguments:
batch: A list of 1D tensors, one for each embedding table, containing the
batch inputs encoded as dist_belief.SparseFeatures protos. If the weight field in the SparseFeatures proto is not populated for an ID, a weight of 1.0 is assumed.
mode_override: A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set in TPUEmbeddingConfiguration is used, otherwise mode_override is used.
Returns the created operation.
func EnqueueTPUEmbeddingIntegerBatch ¶
func EnqueueTPUEmbeddingIntegerBatch(scope *Scope, batch []tf.Output, mode_override tf.Output, optional ...EnqueueTPUEmbeddingIntegerBatchAttr) (o *tf.Operation)
An op that enqueues a list of input batch tensors to TPUEmbedding.
Arguments:
batch: A list of 1D tensors, one for each embedding table, containing the
indices into the tables.
mode_override: A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set in TPUEmbeddingConfiguration is used, otherwise mode_override is used.
Returns the created operation.
func EnqueueTPUEmbeddingRaggedTensorBatch ¶
func EnqueueTPUEmbeddingRaggedTensorBatch(scope *Scope, sample_splits []tf.Output, embedding_indices []tf.Output, aggregation_weights []tf.Output, mode_override tf.Output, table_ids []int64, optional ...EnqueueTPUEmbeddingRaggedTensorBatchAttr) (o *tf.Operation)
Eases the porting of code that uses tf.nn.embedding_lookup().
sample_splits[i], embedding_indices[i] and aggregation_weights[i] correspond to the ith feature. table_ids[i] indicates which embedding table to look up ith feature.
The tensors at corresponding positions in two of the input lists, embedding_indices and aggregation_weights, must have the same shape, i.e. rank 1 with dim_size() equal to the total number of lookups into the table described by the corresponding feature.
Arguments:
sample_splits: A list of rank 1 Tensors specifying the break points for splitting
embedding_indices and aggregation_weights into rows. It corresponds to ids.row_splits in embedding_lookup(), when ids is a RaggedTensor.
embedding_indices: A list of rank 1 Tensors, indices into the embedding tables.
It corresponds to ids.values in embedding_lookup(), when ids is a RaggedTensor.
aggregation_weights: A list of rank 1 Tensors containing per training example
aggregation weights. It corresponds to the values field of a RaggedTensor with the same row_splits as ids in embedding_lookup(), when ids is a RaggedTensor.
mode_override: A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set in TPUEmbeddingConfiguration is used, otherwise mode_override is used.
table_ids: A list of integers specifying the identifier of the embedding table
(offset of TableDescriptor in the TPUEmbeddingConfiguration) to lookup the corresponding input. The ith input is looked up using table_ids[i]. The size of the table_ids list must be equal to that of sample_indices, embedding_indices and aggregation_weights.
Returns the created operation.
func EnqueueTPUEmbeddingSparseBatch ¶
func EnqueueTPUEmbeddingSparseBatch(scope *Scope, sample_indices []tf.Output, embedding_indices []tf.Output, aggregation_weights []tf.Output, mode_override tf.Output, optional ...EnqueueTPUEmbeddingSparseBatchAttr) (o *tf.Operation)
An op that enqueues TPUEmbedding input indices from a SparseTensor.
This Op eases the porting of code that uses embedding_lookup_sparse(), although some Python preprocessing of the SparseTensor arguments to embedding_lookup_sparse() is required to produce the arguments to this Op, since only a single EnqueueTPUEmbeddingSparseBatch Op is allowed per training step.
The tensors at corresponding positions in the three input lists must have the same shape, i.e. rank 1 with dim_size() equal to the total number of lookups into the table described by the corresponding table_id.
Arguments:
sample_indices: A list of rank 1 Tensors specifying the training example and
feature to which the corresponding embedding_indices and aggregation_weights values belong. sample_indices[i] must equal b * nf + f, where nf is the number of features from the corresponding table, f is in [0, nf), and b is in [0, batch size).
embedding_indices: A list of rank 1 Tensors, indices into the embedding tables. aggregation_weights: A list of rank 1 Tensors containing per sample -- i.e. per
(training example, feature) -- aggregation weights.
mode_override: A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set in TPUEmbeddingConfiguration is used, otherwise mode_override is used.
Returns the created operation.
func EnqueueTPUEmbeddingSparseTensorBatch ¶
func EnqueueTPUEmbeddingSparseTensorBatch(scope *Scope, sample_indices []tf.Output, embedding_indices []tf.Output, aggregation_weights []tf.Output, mode_override tf.Output, table_ids []int64, optional ...EnqueueTPUEmbeddingSparseTensorBatchAttr) (o *tf.Operation)
Eases the porting of code that uses tf.nn.embedding_lookup_sparse().
sample_indices[i], embedding_indices[i] and aggregation_weights[i] correspond to the ith feature. table_ids[i] indicates which embedding table to look up ith feature.
The tensors at corresponding positions in the three input lists (sample_indices, embedding_indices and aggregation_weights) must have the same shape, i.e. rank 1 with dim_size() equal to the total number of lookups into the table described by the corresponding feature.
Arguments:
sample_indices: A list of rank 1 Tensors specifying the training example to
which the corresponding embedding_indices and aggregation_weights values belong. It corresponds to sp_ids.indices[:,0] in embedding_lookup_sparse().
embedding_indices: A list of rank 1 Tensors, indices into the embedding tables.
It corresponds to sp_ids.values in embedding_lookup_sparse().
aggregation_weights: A list of rank 1 Tensors containing per training example
aggregation weights. It corresponds to sp_weights.values in embedding_lookup_sparse().
mode_override: A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set in TPUEmbeddingConfiguration is used, otherwise mode_override is used.
table_ids: A list of integers specifying the identifier of the embedding table
(offset of TableDescriptor in the TPUEmbeddingConfiguration) to lookup the corresponding input. The ith input is looked up using table_ids[i]. The size of the table_ids list must be equal to that of sample_indices, embedding_indices and aggregation_weights.
Returns the created operation.
func EnsureShape ¶
Ensures that the tensor's shape matches the expected shape.
Raises an error if the input tensor's shape does not match the specified shape. Returns the input tensor otherwise.
Arguments:
input: A tensor, whose shape is to be validated. shape: The expected (possibly partially specified) shape of the input tensor.
Returns A tensor with the same shape and contents as the input tensor or value.
func Enter ¶
func Enter(scope *Scope, data tf.Output, frame_name string, optional ...EnterAttr) (output tf.Output)
Creates or finds a child frame, and makes `data` available to the child frame.
This op is used together with `Exit` to create loops in the graph. The unique `frame_name` is used by the `Executor` to identify frames. If `is_constant` is true, `output` is a constant in the child frame; otherwise it may be changed in the child frame. At most `parallel_iterations` iterations are run in parallel in the child frame.
Arguments:
data: The tensor to be made available to the child frame. frame_name: The name of the child frame.
Returns The same tensor as `data`.
func Equal ¶
Returns the truth value of (x == y) element-wise.
*NOTE*: `Equal` supports broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
```python
x = tf.constant([2, 4])
y = tf.constant(2)
tf.math.equal(x, y) ==> array([True, False])

x = tf.constant([2, 4])
y = tf.constant([2, 4])
tf.math.equal(x, y) ==> array([True, True])
```
func Erf ¶
Computes the [Gauss error function](https://en.wikipedia.org/wiki/Error_function) of `x` element-wise. In statistics, for non-negative values of $x$, the error function has the following interpretation: for a random variable $Y$ that is normally distributed with mean 0 and variance $1/2$, $erf(x)$ is the probability that $Y$ falls in the range $[-x, x]$.
func EuclideanNorm ¶
func EuclideanNorm(scope *Scope, input tf.Output, axis tf.Output, optional ...EuclideanNormAttr) (output tf.Output)
Computes the euclidean norm of elements across dimensions of a tensor.
Reduces `input` along the dimensions given in `axis`. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keep_dims` is true, the reduced dimensions are retained with length 1.
Arguments:
input: The tensor to reduce. axis: The dimensions to reduce. Must be in the range
`[-rank(input), rank(input))`.
Returns The reduced tensor.
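A minimal Go sketch of wiring this op up with the package's Scope API (the constant values are illustrative; evaluating the result requires running the graph via tensorflow.Session):

```
s := NewScope()
// 2x2 matrix whose rows will be reduced.
input := Const(s, [][]float32{{3, 4}, {5, 12}})
// Reduce along axis 1 (the columns), producing one norm per row (5 and 13 at run time).
axis := Const(s, int32(1))
norm := EuclideanNorm(s, input, axis)
if s.Err() != nil {
	panic(s.Err())
}
fmt.Println(norm.Shape()) // [2]
```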
func ExecuteTPUEmbeddingPartitioner ¶ added in v0.2.0
An op that executes the TPUEmbedding partitioner on the central configuration
device and computes the HBM size (in bytes) required for TPUEmbedding operation.
Arguments:
config: A TPUEmbeddingConfiguration proto serialized to a string,
describing the desired TPUEmbedding configuration.
Returns A string-encoded common configuration proto containing metadata about the TPUEmbedding partitioner output and the HBM size (in bytes) required for operation.
func Exit ¶
Exits the current frame to its parent frame.
Exit makes its input `data` available to the parent frame.
Arguments:
data: The tensor to be made available to the parent frame.
Returns The same tensor as `data`.
func Exp ¶
Computes exponential of x element-wise. \\(y = e^x\\).
This function computes the exponential of every element in the input tensor, i.e. `exp(x)` or `e^(x)`, where `x` is the input tensor. `e` denotes Euler's number and is approximately equal to 2.718281. Output is positive for any real input.

```python
x = tf.constant(2.0)
tf.math.exp(x) ==> 7.389056

x = tf.constant([2.0, 8.0])
tf.math.exp(x) ==> array([7.389056, 2980.958], dtype=float32)
```

For complex numbers, the exponential value is calculated as follows:

```
e^(x+iy) = e^x * e^iy = e^x * (cos y + i sin y)
```

Let's consider complex number 1+1j as an example. e^1 * (cos 1 + i sin 1) = 2.7182818284590 * (0.54030230586+0.8414709848j)

```python
x = tf.constant(1 + 1j)
tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j
```
func ExpandDims ¶
Inserts a dimension of 1 into a tensor's shape.
Given a tensor `input`, this operation inserts a dimension of 1 at the dimension index `axis` of `input`'s shape. The dimension index `axis` starts at zero; if you specify a negative number for `axis` it is counted backward from the end.
This operation is useful if you want to add a batch dimension to a single element. For example, if you have a single image of shape `[height, width, channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`, which will make the shape `[1, height, width, channels]`.
Other examples:
```
# 't' is a tensor of shape [2]
shape(expand_dims(t, 0)) ==> [1, 2]
shape(expand_dims(t, 1)) ==> [2, 1]
shape(expand_dims(t, -1)) ==> [2, 1]

# 't2' is a tensor of shape [2, 3, 5]
shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
```
This operation requires that:
`-1-input.dims() <= dim <= input.dims()`
This operation is related to `squeeze()`, which removes dimensions of size 1.
Arguments:
axis: 0-D (scalar). Specifies the dimension index at which to
expand the shape of `input`. Must be in the range `[-rank(input) - 1, rank(input)]`.
Returns Contains the same data as `input`, but its shape has an additional dimension of size 1 added.
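A minimal Go sketch of the same idea, assuming the wrapper takes `(scope, input, axis)` in that order (the signature is not listed here):

```
s := NewScope()
t := Const(s, []float32{1, 2}) // shape [2]
expanded := ExpandDims(s, t, Const(s, int32(0)))
if s.Err() != nil {
	panic(s.Err())
}
fmt.Println(expanded.Shape()) // [1, 2]
```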
func ExperimentalAutoShardDataset ¶
func ExperimentalAutoShardDataset(scope *Scope, input_dataset tf.Output, num_workers tf.Output, index tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ExperimentalAutoShardDatasetAttr) (handle tf.Output)
Creates a dataset that shards the input dataset.
Creates a dataset that shards the input dataset by num_workers, returning a sharded dataset for the index-th worker. This attempts to automatically shard a dataset by examining the Dataset graph and inserting a shard op before the inputs to a reader Dataset (e.g. CSVDataset, TFRecordDataset).
This dataset will throw a NotFound error if we cannot shard the dataset automatically.
Arguments:
input_dataset: A variant tensor representing the input dataset. num_workers: A scalar representing the number of workers to distribute this dataset across. index: A scalar representing the index of the current worker out of num_workers.
func ExperimentalBytesProducedStatsDataset ¶
func ExperimentalBytesProducedStatsDataset(scope *Scope, input_dataset tf.Output, tag tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
Records the bytes size of each element of `input_dataset` in a StatsAggregator.
func ExperimentalDatasetCardinality ¶
Returns the cardinality of `input_dataset`.
Arguments:
input_dataset: A variant tensor representing the dataset to return cardinality for.
Returns The cardinality of `input_dataset`. Named constants are used to represent infinite and unknown cardinality.
func ExperimentalDatasetToTFRecord ¶
func ExperimentalDatasetToTFRecord(scope *Scope, input_dataset tf.Output, filename tf.Output, compression_type tf.Output) (o *tf.Operation)
Writes the given dataset to the given file using the TFRecord format.
Arguments:
input_dataset: A variant tensor representing the dataset to write. filename: A scalar string tensor representing the filename to use. compression_type: A scalar string tensor containing either (i) the empty string (no
compression), (ii) "ZLIB", or (iii) "GZIP".
Returns the created operation.
func ExperimentalDenseToSparseBatchDataset ¶
func ExperimentalDenseToSparseBatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, row_shape tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
Creates a dataset that batches input elements into a SparseTensor.
Arguments:
input_dataset: A handle to an input dataset. Must have a single component. batch_size: A scalar representing the number of elements to accumulate in a
batch.
row_shape: A vector representing the dense shape of each row in the produced
SparseTensor. The shape may be partially specified, using `-1` to indicate that a particular dimension should use the maximum size of all batch elements.
func ExperimentalDirectedInterleaveDataset ¶
func ExperimentalDirectedInterleaveDataset(scope *Scope, selector_input_dataset tf.Output, data_input_datasets []tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
A substitute for `InterleaveDataset` on a fixed list of `N` datasets.
Arguments:
selector_input_dataset: A dataset of scalar `DT_INT64` elements that determines which of the
`N` data inputs should produce the next output element.
data_input_datasets: `N` datasets with the same type that will be interleaved according to
the values of `selector_input_dataset`.
func ExperimentalIgnoreErrorsDataset ¶
func ExperimentalIgnoreErrorsDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ExperimentalIgnoreErrorsDatasetAttr) (handle tf.Output)
Creates a dataset that contains the elements of `input_dataset` ignoring errors.
func ExperimentalIteratorGetDevice ¶
Returns the name of the device on which `resource` has been placed.
func ExperimentalLatencyStatsDataset ¶
func ExperimentalLatencyStatsDataset(scope *Scope, input_dataset tf.Output, tag tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
Records the latency of producing `input_dataset` elements in a StatsAggregator.
func ExperimentalMaxIntraOpParallelismDataset ¶
func ExperimentalMaxIntraOpParallelismDataset(scope *Scope, input_dataset tf.Output, max_intra_op_parallelism tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
Creates a dataset that overrides the maximum intra-op parallelism.
Arguments:
max_intra_op_parallelism: Identifies the maximum intra-op parallelism to use.
func ExperimentalParseExampleDataset ¶
func ExperimentalParseExampleDataset(scope *Scope, input_dataset tf.Output, num_parallel_calls tf.Output, dense_defaults []tf.Output, sparse_keys []string, dense_keys []string, sparse_types []tf.DataType, dense_shapes []tf.Shape, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ExperimentalParseExampleDatasetAttr) (handle tf.Output)
Transforms `input_dataset` containing `Example` protos as vectors of DT_STRING into a dataset of `Tensor` or `SparseTensor` objects representing the parsed features.
Arguments:
dense_defaults: A dict mapping string keys to `Tensor`s.
The keys of the dict must match the dense_keys of the feature.
sparse_keys: A list of string keys in the examples features.
The results for these keys will be returned as `SparseTensor` objects.
dense_keys: A list of Ndense string Tensors (scalars).
The keys expected in the Examples features associated with dense values.
sparse_types: A list of `DTypes` of the same length as `sparse_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`), and `tf.string` (`BytesList`) are supported.
dense_shapes: List of tuples with the same length as `dense_keys`.
The shape of the data for each dense feature referenced by `dense_keys`. Required for any input tensors identified by `dense_keys`. Must be either fully defined, or may contain an unknown first dimension. An unknown first dimension means the feature is treated as having a variable number of blocks, and the output shape along this dimension is considered unknown at graph build time. Padding is applied for minibatch elements smaller than the maximum number of blocks for the given feature along this dimension.
output_types: The type list for the return values. output_shapes: The list of shapes being produced.
func ExperimentalPrivateThreadPoolDataset ¶
func ExperimentalPrivateThreadPoolDataset(scope *Scope, input_dataset tf.Output, num_threads tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
Creates a dataset that uses a custom thread pool to compute `input_dataset`.
Arguments:
num_threads: Identifies the number of threads to use for the private threadpool.
func ExperimentalRandomDataset ¶
func ExperimentalRandomDataset(scope *Scope, seed tf.Output, seed2 tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
Creates a Dataset that returns pseudorandom numbers.
Arguments:
seed: A scalar seed for the random number generator. If either seed or
seed2 is set to be non-zero, the random number generator is seeded by the given seed. Otherwise, a random seed is used.
seed2: A second scalar seed to avoid seed collision.
func ExperimentalRebatchDataset ¶
func ExperimentalRebatchDataset(scope *Scope, input_dataset tf.Output, num_replicas tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ExperimentalRebatchDatasetAttr) (handle tf.Output)
Creates a dataset that changes the batch size.
Creates a dataset that changes the batch size of the dataset to current batch size // num_replicas.
Arguments:
input_dataset: A variant tensor representing the input dataset. num_replicas: A scalar representing the number of replicas to distribute this batch across. As
a result of this transformation the current batch size would end up being divided by this parameter.
func ExperimentalSlidingWindowDataset ¶
func ExperimentalSlidingWindowDataset(scope *Scope, input_dataset tf.Output, window_size tf.Output, window_shift tf.Output, window_stride tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
Creates a dataset that passes a sliding window over `input_dataset`.
Arguments:
window_size: A scalar representing the number of elements in the
sliding window.
window_shift: A scalar representing the steps moving the sliding window
forward in one iteration. It must be positive.
window_stride: A scalar representing the stride of the input elements of the sliding window.
It must be positive.
func ExperimentalSqlDataset ¶
func ExperimentalSqlDataset(scope *Scope, driver_name tf.Output, data_source_name tf.Output, query tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
Creates a dataset that executes a SQL query and emits rows of the result set.
Arguments:
driver_name: The database type. Currently, the only supported type is 'sqlite'. data_source_name: A connection string to connect to the database. query: A SQL query to execute.
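A minimal Go sketch of constructing this op with the signature above; the connection string and query are invented for illustration, and `tf.ScalarShape` is assumed to be available in the tensorflow Go package:

```
s := NewScope()
handle := ExperimentalSqlDataset(s,
	Const(s, "sqlite"),                 // driver_name: only 'sqlite' is supported
	Const(s, "/tmp/example.db"),        // data_source_name (illustrative)
	Const(s, "SELECT name FROM users"), // query (illustrative)
	[]tf.DataType{tf.String},           // output_types
	[]tf.Shape{tf.ScalarShape()},       // output_shapes
)
if s.Err() != nil {
	panic(s.Err())
}
_ = handle // consumed by iterator ops when the graph is executed
```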
func ExperimentalStatsAggregatorHandle ¶
func ExperimentalStatsAggregatorHandle(scope *Scope, optional ...ExperimentalStatsAggregatorHandleAttr) (handle tf.Output)
Creates a statistics manager resource.
func ExperimentalStatsAggregatorSummary ¶
Produces a summary of any statistics recorded by the given statistics manager.
func ExperimentalThreadPoolDataset ¶
func ExperimentalThreadPoolDataset(scope *Scope, input_dataset tf.Output, thread_pool tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
Creates a dataset that uses a custom thread pool to compute `input_dataset`.
Arguments:
thread_pool: A resource produced by the ThreadPoolHandle op.
func ExperimentalThreadPoolHandle ¶
func ExperimentalThreadPoolHandle(scope *Scope, num_threads int64, display_name string, optional ...ExperimentalThreadPoolHandleAttr) (handle tf.Output)
Creates a custom thread pool resource that can be used to compute datasets.
Arguments:
num_threads: The number of threads in the thread pool. display_name: A human-readable name for the threads that may be visible in some
visualizations.
Returns A resource that can be consumed by one or more ExperimentalThreadPoolDataset ops.
func ExperimentalUnbatchDataset ¶
func ExperimentalUnbatchDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
A dataset that splits the elements of its input into multiple elements.
func ExperimentalUniqueDataset ¶
func ExperimentalUniqueDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
Creates a dataset that contains the unique elements of `input_dataset`.
func Expm1 ¶
Computes `exp(x) - 1` element-wise.
i.e. `exp(x) - 1` or `e^(x) - 1`, where `x` is the input tensor. `e` denotes Euler's number and is approximately equal to 2.718281.

```python
x = tf.constant(2.0)
tf.math.expm1(x) ==> 6.389056

x = tf.constant([2.0, 8.0])
tf.math.expm1(x) ==> array([6.389056, 2979.958], dtype=float32)

x = tf.constant(1 + 1j)
tf.math.expm1(x) ==> (0.46869393991588515+2.2873552871788423j)
```
func ExtractGlimpse ¶
func ExtractGlimpse(scope *Scope, input tf.Output, size tf.Output, offsets tf.Output, optional ...ExtractGlimpseAttr) (glimpse tf.Output)
Extracts a glimpse from the input tensor.
Returns a set of windows called glimpses extracted at location `offsets` from the input tensor. If the windows only partially overlap the inputs, the non-overlapping areas will be filled with random noise.
The result is a 4-D tensor of shape `[batch_size, glimpse_height, glimpse_width, channels]`. The channels and batch dimensions are the same as that of the input tensor. The height and width of the output windows are specified in the `size` parameter.
The arguments `normalized` and `centered` control how the windows are built:
- If the coordinates are normalized but not centered, 0.0 and 1.0 correspond to the minimum and maximum of each height and width dimension.
- If the coordinates are both normalized and centered, they range from -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper left corner, the lower right corner is located at (1.0, 1.0) and the center is at (0, 0).
- If the coordinates are not normalized they are interpreted as numbers of pixels.
Arguments:
input: A 4-D float tensor of shape `[batch_size, height, width, channels]`. size: A 1-D tensor of 2 elements containing the size of the glimpses
to extract. The glimpse height must be specified first, followed by the glimpse width.
offsets: A 2-D integer tensor of shape `[batch_size, 2]` containing
the y, x locations of the center of each window.
Returns A tensor representing the glimpses `[batch_size, glimpse_height, glimpse_width, channels]`.
func ExtractGlimpseV2 ¶
func ExtractGlimpseV2(scope *Scope, input tf.Output, size tf.Output, offsets tf.Output, optional ...ExtractGlimpseV2Attr) (glimpse tf.Output)
Extracts a glimpse from the input tensor.
Returns a set of windows called glimpses extracted at location `offsets` from the input tensor. If the windows only partially overlap the inputs, the non-overlapping areas will be filled with random noise.
The result is a 4-D tensor of shape `[batch_size, glimpse_height, glimpse_width, channels]`. The channels and batch dimensions are the same as that of the input tensor. The height and width of the output windows are specified in the `size` parameter.
The arguments `normalized` and `centered` control how the windows are built:
- If the coordinates are normalized but not centered, 0.0 and 1.0 correspond to the minimum and maximum of each height and width dimension.
- If the coordinates are both normalized and centered, they range from -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper left corner, the lower right corner is located at (1.0, 1.0) and the center is at (0, 0).
- If the coordinates are not normalized they are interpreted as numbers of pixels.
Arguments:
input: A 4-D float tensor of shape `[batch_size, height, width, channels]`. size: A 1-D tensor of 2 elements containing the size of the glimpses
to extract. The glimpse height must be specified first, followed by the glimpse width.
offsets: A 2-D integer tensor of shape `[batch_size, 2]` containing
the y, x locations of the center of each window.
Returns A tensor representing the glimpses `[batch_size, glimpse_height, glimpse_width, channels]`.
func ExtractImagePatches ¶
func ExtractImagePatches(scope *Scope, images tf.Output, ksizes []int64, strides []int64, rates []int64, padding string) (patches tf.Output)
Extract `patches` from `images` and put them in the "depth" output dimension.
Arguments:
images: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`. ksizes: The size of the sliding window for each dimension of `images`. strides: How far the centers of two consecutive patches are in
the images. Must be: `[1, stride_rows, stride_cols, 1]`.
rates: Must be: `[1, rate_rows, rate_cols, 1]`. This is the
input stride, specifying how far two consecutive patch samples are in the input. Equivalent to extracting patches with `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by subsampling them spatially by a factor of `rates`. This is equivalent to `rate` in dilated (a.k.a. Atrous) convolutions.
padding: The type of padding algorithm to use.
Returns 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows * ksize_cols * depth]` containing image patches with size `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension. Note `out_rows` and `out_cols` are the dimensions of the output patches.
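A minimal Go sketch using the signature above; the image batch is fed at Session.Run time:

```
s := NewScope()
// Batch of images; shape [batch, in_rows, in_cols, depth] supplied at run time.
images := Placeholder(s, tf.Float)
patches := ExtractImagePatches(s, images,
	[]int64{1, 3, 3, 1}, // ksizes: 3x3 patches
	[]int64{1, 1, 1, 1}, // strides
	[]int64{1, 1, 1, 1}, // rates: no dilation
	"VALID")
if s.Err() != nil {
	panic(s.Err())
}
fmt.Println(patches.Shape())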
func ExtractJpegShape ¶
func ExtractJpegShape(scope *Scope, contents tf.Output, optional ...ExtractJpegShapeAttr) (image_shape tf.Output)
Extract the shape information of a JPEG-encoded image.
This op only parses the image header, so it is much faster than DecodeJpeg.
Arguments:
contents: 0-D. The JPEG-encoded image.
Returns 1-D. The image shape with format [height, width, channels].
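A minimal Go sketch using the signature above; the JPEG bytes are fed at Session.Run time:

```
s := NewScope()
// JPEG-encoded image bytes (e.g. the contents of a .jpg file) fed at run time.
contents := Placeholder(s, tf.String)
imgShape := ExtractJpegShape(s, contents)
if s.Err() != nil {
	panic(s.Err())
}
fmt.Println(imgShape.Shape()) // [3]: height, width, channels
```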
func ExtractVolumePatches ¶
func ExtractVolumePatches(scope *Scope, input tf.Output, ksizes []int64, strides []int64, padding string) (patches tf.Output)
Extract `patches` from `input` and put them in the `"depth"` output dimension. 3D extension of `extract_image_patches`.
Arguments:
input: 5-D Tensor with shape `[batch, in_planes, in_rows, in_cols, depth]`. ksizes: The size of the sliding window for each dimension of `input`. strides: 1-D of length 5. How far the centers of two consecutive patches are in
`input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`.
padding: The type of padding algorithm to use.
The size-related attributes are specified as follows:
```python
ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1]
strides = [1, stride_planes, strides_rows, strides_cols, 1]
```
Returns 5-D Tensor with shape `[batch, out_planes, out_rows, out_cols, ksize_planes * ksize_rows * ksize_cols * depth]` containing patches with size `ksize_planes x ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension. Note `out_planes`, `out_rows` and `out_cols` are the dimensions of the output patches.
func FFT ¶
Fast Fourier transform.
Computes the 1-dimensional discrete Fourier transform over the inner-most dimension of `input`.
Arguments:
input: A complex tensor.
Returns A complex tensor of the same shape as `input`. The inner-most
dimension of `input` is replaced with its 1D Fourier transform.
@compatibility(numpy) Equivalent to np.fft.fft @end_compatibility
func FFT2D ¶
2D fast Fourier transform.
Computes the 2-dimensional discrete Fourier transform over the inner-most 2 dimensions of `input`.
Arguments:
input: A complex tensor.
Returns A complex tensor of the same shape as `input`. The inner-most 2
dimensions of `input` are replaced with their 2D Fourier transform.
@compatibility(numpy) Equivalent to np.fft.fft2 @end_compatibility
func FFT3D ¶
3D fast Fourier transform.
Computes the 3-dimensional discrete Fourier transform over the inner-most 3 dimensions of `input`.
Arguments:
input: A complex tensor.
Returns A complex tensor of the same shape as `input`. The inner-most 3
dimensions of `input` are replaced with their 3D Fourier transform.
@compatibility(numpy) Equivalent to np.fft.fftn with 3 dimensions. @end_compatibility
func FFTND ¶ added in v0.7.0
ND fast Fourier transform.
Computes the n-dimensional discrete Fourier transform over designated dimensions of `input`. The designated dimensions of `input` are assumed to be the result of `FFTND`.
If `fft_length[i] < shape(input)[i]`, the input is cropped. If `fft_length[i] > shape(input)[i]`, the input is padded with zeros. If fft_length is not given, the default shape(input) is used.
`axes` specifies the dimensions over which to perform the transform. By default, the transform is performed over all axes.
Arguments:
input: A complex tensor. fft_length: An int32 tensor. The FFT length for each dimension. axes: An int32 tensor with a same shape as fft_length. Axes to perform the transform.
Returns A complex tensor of the same shape as `input`. The designated dimensions of `input` are replaced with their Fourier transforms.
@compatibility(numpy) Equivalent to np.fft.fftn. @end_compatibility
func FIFOQueueV2 ¶
func FIFOQueueV2(scope *Scope, component_types []tf.DataType, optional ...FIFOQueueV2Attr) (handle tf.Output)
A queue that produces elements in first-in first-out order.
Arguments:
component_types: The type of each component in a value.
Returns The handle to the queue.
func FakeParam ¶
This op is used as a placeholder in If branch functions. It doesn't provide a valid output when run, so must either be removed (e.g. replaced with a function input) or guaranteed not to be used (e.g. if mirroring an intermediate output needed for the gradient computation of the other branch).
Arguments:
dtype: The type of the output. shape: The purported shape of the output. This is only used for shape inference; the output will not necessarily have this shape. Can be a partial shape.
Returns \"Fake\" output value. This should not be consumed by another op.
func FakeQuantWithMinMaxArgs ¶
func FakeQuantWithMinMaxArgs(scope *Scope, inputs tf.Output, optional ...FakeQuantWithMinMaxArgsAttr) (outputs tf.Output)
Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same shape and type.
Quantization is called fake since the output is still in floating point. The API converts inputs into values within the range [min, max] and returns them as output.
Attributes ¶
* `[min; max]` define the clamping range for the `inputs` data.
* `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and then de-quantized and output as floats in the `[min; max]` interval.
* `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
Before quantization, `min` and `max` values are adjusted with the following logic. It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, the behavior can be unexpected:
* If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
* If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
* If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1)`, `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
Examples ¶
```python
inp = tf.constant([10.03, -10.23, 3])
out = tf.quantization.fake_quant_with_min_max_args(inp, min=-5, max=5,
                                                   num_bits=16)
print(out)

# Output:
# tf.Tensor([ 4.9999237 -5.0000763  3.0000763], shape=(3,), dtype=float32)
```
Raises:
- InvalidArgumentError:
- If num_bits is outside the range [2, 16].
- If min >= max.
- ValueError: If `inputs` are of any other type than float32.
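A minimal Go sketch mirroring the Python example above; the attribute helpers (`FakeQuantWithMinMaxArgsMin`, `...Max`, `...NumBits`) are assumed to follow the package's usual <Op><Attribute> naming convention:

```
s := NewScope()
inp := Const(s, []float32{10.03, -10.23, 3})
out := FakeQuantWithMinMaxArgs(s, inp,
	FakeQuantWithMinMaxArgsMin(-5),
	FakeQuantWithMinMaxArgsMax(5),
	FakeQuantWithMinMaxArgsNumBits(16))
if s.Err() != nil {
	panic(s.Err())
}
fmt.Println(out.Shape()) // [3], same shape as the input
```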
func FakeQuantWithMinMaxArgsGradient ¶
func FakeQuantWithMinMaxArgsGradient(scope *Scope, gradients tf.Output, inputs tf.Output, optional ...FakeQuantWithMinMaxArgsGradientAttr) (backprops tf.Output)
Compute gradients for a FakeQuantWithMinMaxArgs operation.
Arguments:
gradients: Backpropagated gradients above the FakeQuantWithMinMaxArgs operation. inputs: Values passed as inputs to the FakeQuantWithMinMaxArgs operation.
Returns Backpropagated gradients below the FakeQuantWithMinMaxArgs operation: `gradients * (inputs >= min && inputs <= max)`.
```
import tensorflow as tf

# Define some sample data
gradients = tf.random.uniform((2, 3), minval=-5.0, maxval=5.0, dtype=tf.float32)
inputs = tf.random.uniform((2, 3), minval=-10.0, maxval=10.0, dtype=tf.float32)

# Define quantization parameters (adjust as needed)
min_val = -2.0
max_val = 8.0
num_bits = 4  # Number of bits for quantization

# Calculate gradients for fake quantization with specified parameters
output_gradients = tf.quantization.fake_quant_with_min_max_args_gradient(
    gradients=gradients, inputs=inputs, min=min_val, max=max_val,
    num_bits=num_bits, narrow_range=False, name=None
)

# Print the original gradients and the gradients after the fake-quant operation
print("Original Gradients:")
print(gradients)
print("\nGradients after Fake-Quantization:")
print(output_gradients)

# Original Gradients:
# tf.Tensor(
# [[ 1.242547    3.217492    3.568469  ]
#  [-0.55371046  0.23130894  2.608243  ]], shape=(2, 3), dtype=float32)
#
# Gradients after Fake-Quantization:
# tf.Tensor(
# [[ 0.          3.217492    3.568469  ]
#  [-0.55371046  0.23130894  2.608243  ]], shape=(2, 3), dtype=float32)
```
func FakeQuantWithMinMaxVars ¶
func FakeQuantWithMinMaxVars(scope *Scope, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsAttr) (outputs tf.Output)
Fake-quantize the 'inputs' tensor of type float via global float scalars
Fake-quantize the `inputs` tensor of type float via global float scalars `min` and `max` to `outputs` tensor of same shape as `inputs`.
Attributes ¶
* `[min; max]` define the clamping range for the `inputs` data.
* `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and then de-quantized and output as floats in the `[min; max]` interval.
* `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
Before quantization, `min` and `max` values are adjusted with the following logic. It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, the behavior can be unexpected:
* If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
* If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
* If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1)`, `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
This operation has a gradient and thus allows for training `min` and `max` values.
```
>>> constant_input = tf.constant([[1.2, -0.3, 0.7], [2.1, 0.5, -1.0]], dtype=tf.float32)
>>>
>>> min_val = -0.5
>>> max_val = 0.8
>>> num_bits = 8
>>> narrow_range = False  # False: for the quantization range [0; 2^num_bits - 1]
>>>
>>> quantized_data = tf.quantization.fake_quant_with_min_max_vars(
...     inputs=constant_input, min=min_val, max=max_val, num_bits=num_bits, narrow_range=narrow_range
... )
>>>
>>> print("Input:\n", constant_input.numpy())
Input:
 [[ 1.2 -0.3  0.7]
 [ 2.1  0.5 -1. ]]
>>> print("Output:\n", quantized_data.numpy())
Output:
 [[ 0.8003921 -0.3007843  0.6984313]
 [ 0.8003921  0.4996078 -0.4996078]]
```
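A minimal Go sketch using the signature above, with `min` and `max` supplied as constant scalars:

```
s := NewScope()
inputs := Const(s, [][]float32{{1.2, -0.3, 0.7}, {2.1, 0.5, -1.0}})
minVal := Const(s, float32(-0.5))
maxVal := Const(s, float32(0.8))
outputs := FakeQuantWithMinMaxVars(s, inputs, minVal, maxVal)
if s.Err() != nil {
	panic(s.Err())
}
fmt.Println(outputs.Shape()) // [2, 3], same shape as inputs
```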
func FakeQuantWithMinMaxVarsGradient ¶
func FakeQuantWithMinMaxVarsGradient(scope *Scope, gradients tf.Output, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsGradientAttr) (backprops_wrt_input tf.Output, backprop_wrt_min tf.Output, backprop_wrt_max tf.Output)
Compute gradients for a FakeQuantWithMinMaxVars operation.
Arguments:
gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation. inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation.
min, max: Quantization interval, scalar floats.
Returns:
backprops_wrt_input: Backpropagated gradients w.r.t. inputs:
`gradients * (inputs >= min && inputs <= max)`.
backprop_wrt_min: Backpropagated gradients w.r.t. min parameter:
`sum(gradients * (inputs < min))`.
backprop_wrt_max: Backpropagated gradients w.r.t. max parameter:
`sum(gradients * (inputs > max))`.
func FakeQuantWithMinMaxVarsPerChannel ¶
func FakeQuantWithMinMaxVarsPerChannel(scope *Scope, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsPerChannelAttr) (outputs tf.Output)
Fake-quantize the 'inputs' tensor of type float via per-channel floats
Fake-quantize the `inputs` tensor of type float per-channel and one of the shapes: `[d]`, `[b, d]`, `[b, h, w, d]` via per-channel floats `min` and `max` of shape `[d]` to `outputs` tensor of same shape as `inputs`.
Attributes ¶
* `[min; max]` define the clamping range for the `inputs` data.
* `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and then de-quantized and output as floats in the `[min; max]` interval.
* `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
Before quantization, `min` and `max` values are adjusted with the following logic. It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, the behavior can be unexpected:
* If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
* If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
* If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1)`, `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
This operation has a gradient and thus allows for training `min` and `max` values.
func FakeQuantWithMinMaxVarsPerChannelGradient ¶
func FakeQuantWithMinMaxVarsPerChannelGradient(scope *Scope, gradients tf.Output, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsPerChannelGradientAttr) (backprops_wrt_input tf.Output, backprop_wrt_min tf.Output, backprop_wrt_max tf.Output)
Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation.
Arguments:
gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation,
shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`.
inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape same as `gradients`.
min, max: Quantization interval, floats of shape `[d]`.
Returns:
backprops_wrt_input: Backpropagated gradients w.r.t. inputs, shape same as `inputs`:
`gradients * (inputs >= min && inputs <= max)`.
backprop_wrt_min: Backpropagated gradients w.r.t. min parameter, shape `[d]`:
`sum_per_d(gradients * (inputs < min))`.
backprop_wrt_max: Backpropagated gradients w.r.t. max parameter, shape `[d]`:
`sum_per_d(gradients * (inputs > max))`.
func FileSystemSetConfiguration ¶
func FileSystemSetConfiguration(scope *Scope, scheme tf.Output, key tf.Output, value tf.Output) (o *tf.Operation)
Set configuration of the file system.
Arguments:
scheme: File system scheme. key: The name of the configuration option. value: The value of the configuration option.
Returns the created operation.
func Fill ¶
Creates a tensor filled with a scalar value.
This operation creates a tensor of shape `dims` and fills it with `value`.
For example:
```
# Output tensor has shape [2, 3].
fill([2, 3], 9) ==> [[9, 9, 9]
                     [9, 9, 9]]
```
`tf.fill` differs from `tf.constant` in a few ways:
- `tf.fill` only supports scalar contents, whereas `tf.constant` supports Tensor values.
- `tf.fill` creates an Op in the computation graph that constructs the actual Tensor value at runtime. This is in contrast to `tf.constant` which embeds the entire Tensor into the graph with a `Const` node.
- Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes based on other runtime Tensors, unlike `tf.constant`.
Arguments:
dims: 1-D. Represents the shape of the output tensor. value: 0-D (scalar). Value to fill the returned tensor.
@compatibility(numpy) Equivalent to np.full @end_compatibility
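A minimal Go sketch mirroring the example above, assuming the wrapper takes `(scope, dims, value)` in that order (the signature is not listed here):

```
s := NewScope()
dims := Const(s, []int32{2, 3})  // output shape [2, 3]
value := Const(s, float32(9))    // scalar fill value
filled := Fill(s, dims, value)
if s.Err() != nil {
	panic(s.Err())
}
fmt.Println(filled.Shape()) // expected [2, 3]
```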
func FilterByLastComponentDataset ¶
func FilterByLastComponentDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (output tf.Output)
Creates a dataset containing the elements of the first component of `input_dataset` for which the last component is true.
func FinalizeDataset ¶
func FinalizeDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...FinalizeDatasetAttr) (handle tf.Output)
Creates a dataset by applying `tf.data.Options` to `input_dataset`.
Arguments:
input_dataset: A variant tensor representing the input dataset.
func FinalizeTPUEmbedding ¶ added in v0.2.0
func FinalizeTPUEmbedding(scope *Scope, common_config tf.Output, memory_config tf.Output) (o *tf.Operation)
An op that finalizes the TPUEmbedding configuration.
Arguments:
common_config: A string-encoded common configuration proto containing metadata
about the TPUEmbedding partitioner output and the HBM size (in bytes) required for operation.
memory_config: A string-encoded memory config proto containing metadata about
the memory allocations reserved for TPUEmbedding.
Returns the created operation.
func FinalizeTPUEmbeddingV2 ¶ added in v0.8.2
func FinalizeTPUEmbeddingV2(scope *Scope, common_config tf.Output, memory_config tf.Output) (embedding_partitions tf.Output, hbm_buffers_config tf.Output)
An op that finalizes the TPUEmbedding configuration.
Arguments:
common_config: A string-encoded common configuration proto containing metadata
about the TPUEmbedding partitioner output and the HBM size (in bytes) required for operation.
memory_config: A string-encoded memory config proto containing metadata about
the memory allocations reserved for TPUEmbedding.
Returns:
embedding_partitions: A string-encoded embedding partitions proto describing how embedding tables are
partitioned along their feature and ID.
hbm_buffers_config: A string-encoded HBM buffers config proto specifies where HBM buffers are
located.
func Fingerprint ¶
Generates fingerprint values.
Generates fingerprint values of `data`.
Fingerprint op considers the first dimension of `data` as the batch dimension, and `output[i]` contains the fingerprint value generated from contents in `data[i, ...]` for all `i`.
Fingerprint op writes fingerprint values as byte arrays. For example, the default method `farmhash64` generates a 64-bit fingerprint value at a time. This 8-byte value is written out as a `uint8` array of size 8, in little-endian order.
For example, suppose that `data` has data type `DT_INT32` and shape (2, 3, 4), and that the fingerprint method is `farmhash64`. In this case, the output shape is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the size of each fingerprint value in bytes. `output[0, :]` is generated from 12 integers in `data[0, :, :]` and similarly `output[1, :]` is generated from other 12 integers in `data[1, :, :]`.
Note that this op fingerprints the raw underlying buffer, and it does not fingerprint the Tensor's metadata such as data type and/or shape. For example, the fingerprint values are invariant under reshapes and bitcasts as long as the batch dimension remains the same:
```
Fingerprint(data) == Fingerprint(Reshape(data, ...))
Fingerprint(data) == Fingerprint(Bitcast(data, ...))
```
For string data, one should expect `Fingerprint(data) != Fingerprint(ReduceJoin(data))` in general.
Arguments:
data: Must have rank 1 or higher. method: Fingerprint method used by this op. Currently available method is
`farmhash::fingerprint64`.
Returns A two-dimensional `Tensor` of type `tf.uint8`. The first dimension equals to `data`'s first dimension, and the second dimension size depends on the fingerprint algorithm.
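A minimal Go sketch, assuming the wrapper takes `(scope, data, method)` with `method` supplied as a string tensor (the signature is not listed here):

```
s := NewScope()
data := Const(s, [][]int32{{1, 2, 3, 4}, {5, 6, 7, 8}}) // batch of 2
method := Const(s, "farmhash64")
fp := Fingerprint(s, data, method)
if s.Err() != nil {
	panic(s.Err())
}
fmt.Println(fp.Shape()) // [2, 8]: one 8-byte fingerprint per batch element
```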
func FixedLengthRecordDataset ¶
func FixedLengthRecordDataset(scope *Scope, filenames tf.Output, header_bytes tf.Output, record_bytes tf.Output, footer_bytes tf.Output, buffer_size tf.Output, optional ...FixedLengthRecordDatasetAttr) (handle tf.Output)
Creates a dataset that emits the records from one or more binary files.
Arguments:
filenames: A scalar or a vector containing the name(s) of the file(s) to be
read.
header_bytes: A scalar representing the number of bytes to skip at the
beginning of a file.
record_bytes: A scalar representing the number of bytes in each record. footer_bytes: A scalar representing the number of bytes to skip at the end
of a file.
buffer_size: A scalar representing the number of bytes to buffer. Must be > 0.
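A minimal Go sketch using the signature above; the file name and record size are invented for illustration:

```
s := NewScope()
handle := FixedLengthRecordDataset(s,
	Const(s, []string{"records.bin"}), // filenames (illustrative)
	Const(s, int64(0)),                // header_bytes
	Const(s, int64(16)),               // record_bytes
	Const(s, int64(0)),                // footer_bytes
	Const(s, int64(1024)))             // buffer_size (must be > 0)
if s.Err() != nil {
	panic(s.Err())
}
_ = handle // feed to iterator ops to actually read records
```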
func FixedLengthRecordReaderV2 ¶
func FixedLengthRecordReaderV2(scope *Scope, record_bytes int64, optional ...FixedLengthRecordReaderV2Attr) (reader_handle tf.Output)
A Reader that outputs fixed-length records from a file.
Arguments:
record_bytes: Number of bytes in the record.
Returns The handle to reference the Reader.
func FixedUnigramCandidateSampler ¶
func FixedUnigramCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...FixedUnigramCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output)
Generates labels for candidate sampling with a learned unigram distribution.
A unigram sampler could use a fixed unigram distribution read from a file or passed in as an in-memory array instead of building up the distribution from data on the fly. There is also an option to skew the distribution by applying a distortion power to the weights.
The vocabulary file should be in CSV-like format, with the last field being the weight associated with the word.
For each batch, this op picks a single set of sampled candidate labels.
The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the true labels.
Arguments:
true_classes: A batch_size * num_true matrix, in which each row contains the
IDs of the num_true target_classes in the corresponding original label.
num_true: Number of true labels per context. num_sampled: Number of candidates to randomly sample. unique: If unique is true, we sample with rejection, so that all sampled
candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities.
range_max: The sampler will sample integers from the interval [0, range_max).
Returns:
sampled_candidates: A vector of length num_sampled, in which each element is
the ID of a sampled candidate.
true_expected_count: A batch_size * num_true matrix, representing
the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
sampled_expected_count: A vector of length num_sampled, for each sampled
candidate representing the number of times the candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
func FloorDiv ¶
Returns x // y element-wise.
*NOTE*: `FloorDiv` supports broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func FloorMod ¶
Returns element-wise remainder of division.
This follows Python semantics in that the result here is consistent with a flooring divide. E.g. `floor(x / y) * y + floormod(x, y) = x`, regardless of the signs of x and y.
*NOTE*: `FloorMod` supports broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func FractionalAvgPool ¶
func FractionalAvgPool(scope *Scope, value tf.Output, pooling_ratio []float32, optional ...FractionalAvgPoolAttr) (output tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output)
Performs fractional average pooling on the input.
Fractional average pooling is similar to Fractional max pooling in the pooling region generation step. The only difference is that after pooling regions are generated, a mean operation is performed instead of a max operation in each pooling region.
Arguments:
value: 4-D with shape `[batch, height, width, channels]`. pooling_ratio: Pooling ratio for each dimension of `value`, currently only
supports row and col dimension and should be >= 1.0. For example, a valid pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions respectively.
Returns:
output: output tensor after fractional avg pooling. row_pooling_sequence: row pooling sequence, needed to calculate gradient. col_pooling_sequence: column pooling sequence, needed to calculate gradient.
func FractionalAvgPoolGrad ¶
func FractionalAvgPoolGrad(scope *Scope, orig_input_tensor_shape tf.Output, out_backprop tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output, optional ...FractionalAvgPoolGradAttr) (output tf.Output)
Computes gradient of the FractionalAvgPool function.
Unlike FractionalMaxPoolGrad, we don't need to find arg_max for FractionalAvgPoolGrad, we just need to evenly back-propagate each element of out_backprop to those indices that form the same pooling cell. Therefore, we just need to know the shape of original input tensor, instead of the whole tensor.
Arguments:
orig_input_tensor_shape: Original input tensor shape for `fractional_avg_pool` out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients
w.r.t. the output of `fractional_avg_pool`.
row_pooling_sequence: row pooling sequence, form pooling region with
col_pooling_sequence.
col_pooling_sequence: column pooling sequence, form pooling region with
row_pooling sequence.
Returns 4-D. Gradients w.r.t. the input of `fractional_avg_pool`.
func FractionalMaxPool ¶
func FractionalMaxPool(scope *Scope, value tf.Output, pooling_ratio []float32, optional ...FractionalMaxPoolAttr) (output tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output)
Performs fractional max pooling on the input.
Fractional max pooling is slightly different than regular max pooling. In regular max pooling, you downsize an input set by taking the maximum value of smaller N x N subsections of the set (often 2x2), and try to reduce the set by a factor of N, where N is an integer. Fractional max pooling, as you might expect from the word "fractional", means that the overall reduction ratio N does not have to be an integer.
The sizes of the pooling regions are generated randomly but are fairly uniform. For example, let's look at the height dimension, and the constraints on the list of rows that will be pool boundaries.
First we define the following:
1. input_row_length : the number of rows from the input set
2. output_row_length : which will be smaller than the input
3. alpha = input_row_length / output_row_length : our reduction ratio
4. K = floor(alpha)
5. row_pooling_sequence : this is the result list of pool boundary rows
Then, row_pooling_sequence should satisfy:
1. a[0] = 0 : the first value of the sequence is 0
2. a[end] = input_row_length : the last value of the sequence is the size
3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
4. length(row_pooling_sequence) = output_row_length+1
For more details on fractional max pooling, see this paper: [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)
Arguments:
value: 4-D with shape `[batch, height, width, channels]`. pooling_ratio: Pooling ratio for each dimension of `value`, currently only
supports row and col dimension and should be >= 1.0. For example, a valid pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions respectively.
Returns:
output: output tensor after fractional max pooling. row_pooling_sequence: row pooling sequence, needed to calculate gradient. col_pooling_sequence: column pooling sequence, needed to calculate gradient.
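A minimal Go sketch using the signature above; the input is fed at Session.Run time:

```
s := NewScope()
// 4-D input [batch, height, width, channels] supplied at run time.
value := Placeholder(s, tf.Float)
out, rowSeq, colSeq := FractionalMaxPool(s, value, []float32{1.0, 1.44, 1.73, 1.0})
if s.Err() != nil {
	panic(s.Err())
}
_, _, _ = out, rowSeq, colSeq // rowSeq/colSeq are needed by FractionalMaxPoolGrad
```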
func FractionalMaxPoolGrad ¶
func FractionalMaxPoolGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, out_backprop tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output, optional ...FractionalMaxPoolGradAttr) (output tf.Output)
Computes gradient of the FractionalMaxPool function.
Arguments:
orig_input: Original input for `fractional_max_pool` orig_output: Original output for `fractional_max_pool` out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients
w.r.t. the output of `fractional_max_pool`.
row_pooling_sequence: row pooling sequence, form pooling region with
col_pooling_sequence.
col_pooling_sequence: column pooling sequence, form pooling region with
row_pooling sequence.
Returns 4-D. Gradients w.r.t. the input of `fractional_max_pool`.
func FusedBatchNorm ¶
func FusedBatchNorm(scope *Scope, x tf.Output, scale tf.Output, offset tf.Output, mean tf.Output, variance tf.Output, optional ...FusedBatchNormAttr) (y tf.Output, batch_mean tf.Output, batch_variance tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output)
Batch normalization.
Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW". The size of 1D Tensors matches the dimension C of the 4D Tensors.
Arguments:
x: A 4D Tensor for input data. scale: A 1D Tensor for scaling factor, to scale the normalized x. offset: A 1D Tensor for offset, to shift to the normalized x. mean: A 1D Tensor for population mean. Used for inference only;
must be empty for training.
variance: A 1D Tensor for population variance. Used for inference only;
must be empty for training.
Returns:
y: A 4D Tensor for output data. batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow
to compute the running mean.
batch_variance: A 1D Tensor for the computed batch variance, to be used by
TensorFlow to compute the running variance.
reserve_space_1: A 1D Tensor for the computed batch mean, to be reused
in the gradient computation.
reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance
in the cuDNN case), to be reused in the gradient computation.
func FusedBatchNormGrad ¶
func FusedBatchNormGrad(scope *Scope, y_backprop tf.Output, x tf.Output, scale tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output, optional ...FusedBatchNormGradAttr) (x_backprop tf.Output, scale_backprop tf.Output, offset_backprop tf.Output, reserve_space_3 tf.Output, reserve_space_4 tf.Output)
Gradient for batch normalization.
Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW". The size of 1D Tensors matches the dimension C of the 4D Tensors.
Arguments:
y_backprop: A 4D Tensor for the gradient with respect to y. x: A 4D Tensor for input data. scale: A 1D Tensor for scaling factor, to scale the normalized x. reserve_space_1: When is_training is True, a 1D Tensor for the computed batch
mean to be reused in gradient computation. When is_training is False, a 1D Tensor for the population mean to be reused in both 1st and 2nd order gradient computation.
reserve_space_2: When is_training is True, a 1D Tensor for the computed batch
variance (inverted variance in the cuDNN case) to be reused in gradient computation. When is_training is False, a 1D Tensor for the population variance to be reused in both 1st and 2nd order gradient computation.
Returns:
x_backprop: A 4D Tensor for the gradient with respect to x. scale_backprop: A 1D Tensor for the gradient with respect to scale. offset_backprop: A 1D Tensor for the gradient with respect to offset. reserve_space_3: Unused placeholder to match the mean input in FusedBatchNorm. reserve_space_4: Unused placeholder to match the variance input
in FusedBatchNorm.
func FusedBatchNormGradV2 ¶
func FusedBatchNormGradV2(scope *Scope, y_backprop tf.Output, x tf.Output, scale tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output, optional ...FusedBatchNormGradV2Attr) (x_backprop tf.Output, scale_backprop tf.Output, offset_backprop tf.Output, reserve_space_3 tf.Output, reserve_space_4 tf.Output)
Gradient for batch normalization.
Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW". The size of 1D Tensors matches the dimension C of the 4D Tensors.
Arguments:
y_backprop: A 4D Tensor for the gradient with respect to y. x: A 4D Tensor for input data. scale: A 1D Tensor for scaling factor, to scale the normalized x. reserve_space_1: When is_training is True, a 1D Tensor for the computed batch
mean to be reused in gradient computation. When is_training is False, a 1D Tensor for the population mean to be reused in both 1st and 2nd order gradient computation.
reserve_space_2: When is_training is True, a 1D Tensor for the computed batch
variance (inverted variance in the cuDNN case) to be reused in gradient computation. When is_training is False, a 1D Tensor for the population variance to be reused in both 1st and 2nd order gradient computation.
Returns:
x_backprop: A 4D Tensor for the gradient with respect to x. scale_backprop: A 1D Tensor for the gradient with respect to scale. offset_backprop: A 1D Tensor for the gradient with respect to offset. reserve_space_3: Unused placeholder to match the mean input in FusedBatchNorm. reserve_space_4: Unused placeholder to match the variance input
in FusedBatchNorm.
func FusedBatchNormGradV3 ¶
func FusedBatchNormGradV3(scope *Scope, y_backprop tf.Output, x tf.Output, scale tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output, reserve_space_3 tf.Output, optional ...FusedBatchNormGradV3Attr) (x_backprop tf.Output, scale_backprop tf.Output, offset_backprop tf.Output, reserve_space_4 tf.Output, reserve_space_5 tf.Output)
Gradient for batch normalization.
Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW". The size of 1D Tensors matches the dimension C of the 4D Tensors.
Arguments:
y_backprop: A 4D Tensor for the gradient with respect to y. x: A 4D Tensor for input data. scale: A 1D Tensor for scaling factor, to scale the normalized x. reserve_space_1: When is_training is True, a 1D Tensor for the computed batch
mean to be reused in gradient computation. When is_training is False, a 1D Tensor for the population mean to be reused in both 1st and 2nd order gradient computation.
reserve_space_2: When is_training is True, a 1D Tensor for the computed batch
variance (inverted variance in the cuDNN case) to be reused in gradient computation. When is_training is False, a 1D Tensor for the population variance to be reused in both 1st and 2nd order gradient computation.
reserve_space_3: When is_training is True, a 1D Tensor for some intermediate results to be reused
in gradient computation. When is_training is False, a dummy empty Tensor will be created.
Returns:
x_backprop: A 4D Tensor for the gradient with respect to x. scale_backprop: A 1D Tensor for the gradient with respect to scale. offset_backprop: A 1D Tensor for the gradient with respect to offset. reserve_space_4: Unused placeholder to match the mean input in FusedBatchNorm. reserve_space_5: Unused placeholder to match the variance input
in FusedBatchNorm.
func FusedBatchNormV2 ¶
func FusedBatchNormV2(scope *Scope, x tf.Output, scale tf.Output, offset tf.Output, mean tf.Output, variance tf.Output, optional ...FusedBatchNormV2Attr) (y tf.Output, batch_mean tf.Output, batch_variance tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output)
Batch normalization.
Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW". The size of 1D Tensors matches the dimension C of the 4D Tensors.
Arguments:
x: A 4D Tensor for input data. scale: A 1D Tensor for scaling factor, to scale the normalized x. offset: A 1D Tensor for offset, to shift to the normalized x. mean: A 1D Tensor for population mean. Used for inference only;
must be empty for training.
variance: A 1D Tensor for population variance. Used for inference only;
must be empty for training.
Returns:
y: A 4D Tensor for output data. batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow
to compute the running mean.
batch_variance: A 1D Tensor for the computed batch variance, to be used by
TensorFlow to compute the running variance.
reserve_space_1: A 1D Tensor for the computed batch mean, to be reused
in the gradient computation.
reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance
in the cuDNN case), to be reused in the gradient computation.
func FusedBatchNormV3 ¶
func FusedBatchNormV3(scope *Scope, x tf.Output, scale tf.Output, offset tf.Output, mean tf.Output, variance tf.Output, optional ...FusedBatchNormV3Attr) (y tf.Output, batch_mean tf.Output, batch_variance tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output, reserve_space_3 tf.Output)
Batch normalization.
Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW". The size of 1D Tensors matches the dimension C of the 4D Tensors.
Arguments:
x: A 4D Tensor for input data. scale: A 1D Tensor for scaling factor, to scale the normalized x. offset: A 1D Tensor for offset, to shift to the normalized x. mean: A 1D Tensor for population mean. Used for inference only;
must be empty for training.
variance: A 1D Tensor for population variance. Used for inference only;
must be empty for training.
Returns:
y: A 4D Tensor for output data. batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow
to compute the running mean.
batch_variance: A 1D Tensor for the computed batch variance, to be used by
TensorFlow to compute the running variance.
reserve_space_1: A 1D Tensor for the computed batch mean, to be reused
in the gradient computation.
reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance
in the cuDNN case), to be reused in the gradient computation.
reserve_space_3: A 1D Tensor for some intermediate results, to be reused in the gradient
computation for better efficiency.
func FusedPadConv2D ¶
func FusedPadConv2D(scope *Scope, input tf.Output, paddings tf.Output, filter tf.Output, mode string, strides []int64, padding string) (output tf.Output)
Performs a padding as a preprocess during a convolution.
Similar to FusedResizeAndPadConv2d, this op allows for an optimized implementation where the spatial padding transformation stage is fused with the im2col lookup, but in this case without the bilinear filtering required for resizing. Fusing the padding prevents the need to write out the intermediate results as whole tensors, reducing memory pressure, and we can get some latency gains by merging the transformation calculations. The data_format attribute for Conv2D isn't supported by this op, and 'NHWC' order is used instead. Internally this op uses a single per-graph scratch buffer, which means that it will block if multiple versions are being run in parallel. This is because this operator is primarily an optimization to minimize memory usage.
Arguments:
input: 4-D with shape `[batch, in_height, in_width, in_channels]`. paddings: A two-column matrix specifying the padding sizes. The number of
rows must be the same as the rank of `input`.
filter: 4-D with shape
`[filter_height, filter_width, in_channels, out_channels]`.
strides: 1-D of length 4. The stride of the sliding window for each dimension
of `input`. Must be in the same order as the dimension specified with format.
padding: The type of padding algorithm to use.
func FusedResizeAndPadConv2D ¶
func FusedResizeAndPadConv2D(scope *Scope, input tf.Output, size tf.Output, paddings tf.Output, filter tf.Output, mode string, strides []int64, padding string, optional ...FusedResizeAndPadConv2DAttr) (output tf.Output)
Performs a resize and padding as a preprocessing step during a convolution.
It's often possible to do spatial transformations more efficiently as part of the packing stage of a convolution, so this op allows for an optimized implementation where these stages are fused together. This prevents the need to write out the intermediate results as whole tensors, reducing memory pressure, and we can get some latency gains by merging the transformation calculations. The data_format attribute for Conv2D isn't supported by this op, and defaults to 'NHWC' order. Internally this op uses a single per-graph scratch buffer, which means that it will block if multiple versions are being run in parallel. This is because this operator is primarily an optimization to minimize memory usage.
Arguments:
input: 4-D with shape `[batch, in_height, in_width, in_channels]`. size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The
new size for the images.
paddings: A two-column matrix specifying the padding sizes. The number of
rows must be the same as the rank of `input`.
filter: 4-D with shape
`[filter_height, filter_width, in_channels, out_channels]`.
strides: 1-D of length 4. The stride of the sliding window for each dimension
of `input`. Must be in the same order as the dimension specified with format.
padding: The type of padding algorithm to use.
func GRUBlockCell ¶
func GRUBlockCell(scope *Scope, x tf.Output, h_prev tf.Output, w_ru tf.Output, w_c tf.Output, b_ru tf.Output, b_c tf.Output) (r tf.Output, u tf.Output, c tf.Output, h tf.Output)
Computes the GRU cell forward propagation for 1 time step.
Args
x: Input to the GRU cell.
h_prev: State input from the previous GRU cell.
w_ru: Weight matrix for the reset and update gate.
w_c: Weight matrix for the cell connection gate.
b_ru: Bias vector for the reset and update gate.
b_c: Bias vector for the cell connection gate.
Returns
r: Output of the reset gate.
u: Output of the update gate.
c: Output of the cell connection gate.
h: Current state of the GRU cell.
Note on notation of the variables:
Concatenation of a and b is represented by a_b.
Element-wise dot product of a and b is represented by ab.
Element-wise dot product is represented by \circ.
Matrix multiplication is represented by *.
Biases are initialized with:
`b_ru` - constant_initializer(1.0)
`b_c` - constant_initializer(0.0)
This kernel op implements the following mathematical equations:
```
x_h_prev = [x, h_prev]

[r_bar u_bar] = x_h_prev * w_ru + b_ru

r = sigmoid(r_bar)
u = sigmoid(u_bar)

h_prevr = h_prev \circ r

x_h_prevr = [x h_prevr]

c_bar = x_h_prevr * w_c + b_c
c = tanh(c_bar)

h = (1-u) \circ c + u \circ h_prev
```
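To make the shapes in these equations concrete, here is a minimal graph-construction sketch that calls `GRUBlockCell` (signature above) from outside the package. The import path and the constant weight/bias values are illustrative assumptions of this sketch, not part of the documented API; with input size 2 and cell size 2, `w_ru` is `[4, 4]`, `w_c` is `[4, 2]`, `b_ru` has length 4, and `b_c` has length 2.

```go
package main

import (
	"fmt"

	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()

	// One time step, batch size 1, input size 2, cell size 2 (values are arbitrary).
	x := op.Const(s, [][]float32{{1, 2}})
	hPrev := op.Const(s, [][]float32{{0, 0}})

	// w_ru: [input_size+cell_size, 2*cell_size], w_c: [input_size+cell_size, cell_size].
	wRu := op.Const(s, [][]float32{
		{0.1, 0.2, 0.3, 0.4},
		{0.1, 0.2, 0.3, 0.4},
		{0.1, 0.2, 0.3, 0.4},
		{0.1, 0.2, 0.3, 0.4},
	})
	wC := op.Const(s, [][]float32{{0.1, 0.2}, {0.1, 0.2}, {0.1, 0.2}, {0.1, 0.2}})
	bRu := op.Const(s, []float32{1, 1, 1, 1}) // constant_initializer(1.0)
	bC := op.Const(s, []float32{0, 0})        // constant_initializer(0.0)

	_, _, _, h := op.GRUBlockCell(s, x, hPrev, wRu, wC, bRu, bC)
	if s.Err() != nil {
		panic(s.Err())
	}
	fmt.Println(h.Shape()) // h has shape [batch_size, cell_size] = [1, 2]
}
```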
func GRUBlockCellGrad ¶
func GRUBlockCellGrad(scope *Scope, x tf.Output, h_prev tf.Output, w_ru tf.Output, w_c tf.Output, b_ru tf.Output, b_c tf.Output, r tf.Output, u tf.Output, c tf.Output, d_h tf.Output) (d_x tf.Output, d_h_prev tf.Output, d_c_bar tf.Output, d_r_bar_u_bar tf.Output)
Computes the GRU cell back-propagation for 1 time step.
Args
x: Input to the GRU cell.
h_prev: State input from the previous GRU cell.
w_ru: Weight matrix for the reset and update gate.
w_c: Weight matrix for the cell connection gate.
b_ru: Bias vector for the reset and update gate.
b_c: Bias vector for the cell connection gate.
r: Output of the reset gate.
u: Output of the update gate.
c: Output of the cell connection gate.
d_h: Gradients of h_new wrt the objective function.
Returns
d_x: Gradients of x wrt the objective function.
d_h_prev: Gradients of h wrt the objective function.
d_c_bar: Gradients of c_bar wrt the objective function.
d_r_bar_u_bar: Gradients of r_bar & u_bar wrt the objective function.
This kernel op implements the following mathematical equations:
Note on notation of the variables:
Concatenation of a and b is represented by a_b.
Element-wise dot product of a and b is represented by ab.
Element-wise dot product is represented by \circ.
Matrix multiplication is represented by *.
Additional notes for clarity:
`w_ru` can be segmented into 4 different matrices.
```
w_ru = [w_r_x      w_u_x
        w_r_h_prev w_u_h_prev]
```
Similarly, `w_c` can be segmented into 2 different matrices.
```
w_c = [w_c_x w_c_h_prevr]
```
Same goes for biases.
```
b_ru = [b_ru_x b_ru_h]
b_c = [b_c_x b_c_h]
```
Another note on notation:
```
d_x = d_x_component_1 + d_x_component_2

where d_x_component_1 = d_r_bar * w_r_x^T + d_u_bar * w_r_x^T
and d_x_component_2 = d_c_bar * w_c_x^T

d_h_prev = d_h_prev_component_1 + d_h_prevr \circ r + d_h \circ u
where d_h_prev_component_1 = d_r_bar * w_r_h_prev^T + d_u_bar * w_r_h_prev^T
```
Mathematics behind the Gradients below:
```
d_c_bar = d_h \circ (1-u) \circ (1-c \circ c)
d_u_bar = d_h \circ (h-c) \circ u \circ (1-u)

d_r_bar_u_bar = [d_r_bar d_u_bar]

[d_x_component_1 d_h_prev_component_1] = d_r_bar_u_bar * w_ru^T

[d_x_component_2 d_h_prevr] = d_c_bar * w_c^T

d_x = d_x_component_1 + d_x_component_2

d_h_prev = d_h_prev_component_1 + d_h_prevr \circ r + d_h \circ u
```
Below calculation is performed in the python wrapper for the Gradients (not in the gradient kernel.)
```
d_w_ru = x_h_prev^T * d_r_bar_u_bar

d_w_c = x_h_prevr^T * d_c_bar

d_b_ru = sum of d_r_bar_u_bar along axis = 0

d_b_c = sum of d_c_bar along axis = 0
```
func Gather ¶
func Gather(scope *Scope, params tf.Output, indices tf.Output, optional ...GatherAttr) (output tf.Output)
Gather slices from `params` according to `indices`.
`indices` must be an integer tensor of any dimension (usually 0-D or 1-D). Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
```python
# Scalar indices
output[:, ..., :] = params[indices, :, ... :]

# Vector indices
output[i, :, ..., :] = params[indices[i], :, ... :]

# Higher rank indices
output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
```
If `indices` is a permutation and `len(indices) == params.shape[0]` then this operation will permute `params` accordingly.
`validate_indices`: DEPRECATED. If this operation is assigned to CPU, values in `indices` are always validated to be within range. If assigned to GPU, out-of-bound indices result in safe but unspecified behavior, which may include raising an error.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> <img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt> </div>
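As a rough usage sketch of the signature above from the Go bindings, the example below gathers three elements from a 1-D constant and runs the graph. The import paths and session plumbing (`Scope.Finalize`, `tf.NewSession`, `Session.Run`) are the standard TensorFlow Go binding ones and are assumed here; the values are purely illustrative.

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	params := op.Const(s, []float32{10, 20, 30, 40})
	indices := op.Const(s, []int32{3, 0, 1})
	gathered := op.Gather(s, params, indices)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	out, err := sess.Run(nil, []tf.Output{gathered}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // expected: [40 10 20]
}
```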
func GatherNd ¶
func GatherNd(scope *Scope, params tf.Output, indices tf.Output, optional ...GatherNdAttr) (output tf.Output)
Gather slices from `params` into a Tensor with shape specified by `indices`.
`indices` is a K-dimensional integer tensor, best thought of as a (K-1)-dimensional tensor of indices into `params`, where each element defines a slice of `params`:
output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]]
Whereas in `tf.gather` `indices` defines slices into the `axis` dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the first `N` dimensions of `params`, where `N = indices.shape[-1]`.
The last dimension of `indices` can be at most the rank of `params`:
indices.shape[-1] <= params.rank
The last dimension of `indices` corresponds to elements (if `indices.shape[-1] == params.rank`) or slices (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]` of `params`. The output tensor has shape
indices.shape[:-1] + params.shape[indices.shape[-1]:]
If `indices` contains any out-of-bound indices, depending on `bad_indices_policy`, the op will either return an error or ignore the out-of-bound indices. `bad_indices_policy` can be one of the following values:
- "" or "DEFAULT": raises on CPU and ignore on GPU. This is because historically on CPU and GPU we handle errors in different ways, and for backward compatibility we keep the default behavior.
- "ERROR": raises error; GPU does not support this value.
- "IGNORE": ignore error and set the corresponding output to 0; supported on both CPU and GPU.
Some examples below.
Simple indexing into a matrix:
```python
indices = [[0, 0], [1, 1]]
params = [['a', 'b'], ['c', 'd']]
output = ['a', 'd']
```
Slice indexing into a matrix:
```python
indices = [[1], [0]]
params = [['a', 'b'], ['c', 'd']]
output = [['c', 'd'], ['a', 'b']]
```
Indexing into a 3-tensor:
```python
indices = [[1]]
params = [[['a0', 'b0'], ['c0', 'd0']],
          [['a1', 'b1'], ['c1', 'd1']]]
output = [[['a1', 'b1'], ['c1', 'd1']]]

indices = [[0, 1], [1, 0]]
params = [[['a0', 'b0'], ['c0', 'd0']],
          [['a1', 'b1'], ['c1', 'd1']]]
output = [['c0', 'd0'], ['a1', 'b1']]

indices = [[0, 0, 1], [1, 0, 1]]
params = [[['a0', 'b0'], ['c0', 'd0']],
          [['a1', 'b1'], ['c1', 'd1']]]
output = ['b0', 'b1']
```
Batched indexing into a matrix:
```python
indices = [[[0, 0]], [[0, 1]]]
params = [['a', 'b'], ['c', 'd']]
output = [['a'], ['b']]
```
Batched slice indexing into a matrix:
```python
indices = [[[1]], [[0]]]
params = [['a', 'b'], ['c', 'd']]
output = [[['c', 'd']], [['a', 'b']]]
```
Batched indexing into a 3-tensor:
```python
indices = [[[1]], [[0]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
          [['a1', 'b1'], ['c1', 'd1']]]
output = [[[['a1', 'b1'], ['c1', 'd1']]],
          [[['a0', 'b0'], ['c0', 'd0']]]]

indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
          [['a1', 'b1'], ['c1', 'd1']]]
output = [[['c0', 'd0'], ['a1', 'b1']],
          [['a0', 'b0'], ['c1', 'd1']]]

indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
          [['a1', 'b1'], ['c1', 'd1']]]
output = [['b0', 'b1'], ['d0', 'c1']]
```
See also `tf.gather` and `tf.batch_gather`.
Arguments:
params: The tensor from which to gather values. indices: Index tensor.
Returns Values from `params` gathered from indices given by `indices`, with shape `indices.shape[:-1] + params.shape[indices.shape[-1]:]`.
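A minimal Go sketch of the "simple indexing into a matrix" case above, using numeric values instead of strings; the import paths and session plumbing are assumed to be the standard Go binding ones.

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// params is a 2x2 matrix; indices picks the diagonal elements.
	params := op.Const(s, [][]int32{{1, 2}, {3, 4}})
	indices := op.Const(s, [][]int32{{0, 0}, {1, 1}})
	out := op.GatherNd(s, params, indices)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	res, err := sess.Run(nil, []tf.Output{out}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(res[0].Value()) // expected: [1 4]
}
```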
func GatherV2 ¶
func GatherV2(scope *Scope, params tf.Output, indices tf.Output, axis tf.Output, optional ...GatherV2Attr) (output tf.Output)
Gather slices from `params` axis `axis` according to `indices`.
`indices` must be an integer tensor of any dimension (usually 0-D or 1-D). Produces an output tensor with shape `params.shape[:axis] + indices.shape[batch_dims:] + params.shape[axis + 1:]` where:
```python
# Scalar indices (output is rank(params) - 1).
output[a_0, ..., a_n, b_0, ..., b_n] = params[a_0, ..., a_n, indices, b_0, ..., b_n]

# Vector indices (output is rank(params)).
output[a_0, ..., a_n, i, b_0, ..., b_n] = params[a_0, ..., a_n, indices[i], b_0, ..., b_n]

# Higher rank indices (output is rank(params) + rank(indices) - 1).
output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] = params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
```
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> <img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt> </div>
Note that on CPU, if an out of bound index is found, an error is returned. On GPU, if an out of bound index is found, a 0 is stored in the corresponding output value.
Note that on TPU, if any dimension of `params` is of size 0 then the output will be the expected shape filled with zeros. On CPU and GPU an error will be returned.
See also `tf.batch_gather` and `tf.gather_nd`.
Arguments:
params: The tensor from which to gather values. Must be at least rank
`axis + 1`.
indices: Index tensor. Must be in range `[0, params.shape[axis])`. axis: The axis in `params` to gather `indices` from. Defaults to the first
dimension. Supports negative indexes.
Returns Values from `params` gathered from indices given by `indices`, with shape `params.shape[:axis] + indices.shape + params.shape[axis + 1:]`.
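A graph-construction sketch of `GatherV2` gathering along `axis = 1`, using the signature above; only the graph is built and the inferred shape inspected. The import path is the usual one for this package, which is an assumption of this sketch.

```go
package main

import (
	"fmt"

	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// 2x3 matrix; gather columns 2 and 0 along axis 1, giving rows [3 1] and [6 4].
	params := op.Const(s, [][]float32{{1, 2, 3}, {4, 5, 6}})
	indices := op.Const(s, []int32{2, 0})
	axis := op.Const(s, int32(1))
	out := op.GatherV2(s, params, indices, axis)
	if s.Err() != nil {
		panic(s.Err())
	}
	// Result shape: params.shape[:axis] + indices.shape + params.shape[axis+1:] = [2, 2].
	fmt.Println(out.Shape())
}
```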
func GenerateBoundingBoxProposals ¶
func GenerateBoundingBoxProposals(scope *Scope, scores tf.Output, bbox_deltas tf.Output, image_info tf.Output, anchors tf.Output, nms_threshold tf.Output, pre_nms_topn tf.Output, min_size tf.Output, optional ...GenerateBoundingBoxProposalsAttr) (rois tf.Output, roi_probabilities tf.Output)
This op produces Regions of Interest from the given bounding boxes (bbox_deltas) encoded wrt anchors according to eq. 2 in arXiv:1506.01497.
The op selects the top `pre_nms_topn` scoring boxes, decodes them with respect to the anchors, applies non-maximal suppression on overlapping boxes with an intersection-over-union (IoU) value higher than `nms_threshold`, and discards boxes whose shorter side is less than `min_size`.
Inputs:
`scores`: A 4D tensor of shape [Batch, Height, Width, Num Anchors] containing the scores per anchor at a given position.
`bbox_deltas`: A tensor of shape [Batch, Height, Width, 4 x Num Anchors] of boxes encoded with respect to each anchor.
`anchors`: A 1D tensor of shape [4 x Num Anchors], representing the anchors.
Outputs:
`rois`: Output RoIs, a 3D tensor of shape [Batch, post_nms_topn, 4], padded with 0 if fewer than post_nms_topn candidates are found.
`roi_probabilities`: Probability scores of each RoI in `rois`, a 2D tensor of shape [Batch, post_nms_topn], padded with 0 if needed, sorted by scores.
Arguments:
scores: A 4-D float tensor of shape `[num_images, height, width, num_anchors]` containing scores of the boxes for given anchors, can be unsorted.
bbox_deltas: A 4-D float tensor of shape `[num_images, height, width, 4 x num_anchors]` encoding boxes with respect to each anchor. Coordinates are given in the form [dy, dx, dh, dw].
image_info: A 2-D float tensor of shape `[num_images, 5]` containing image information Height, Width, Scale.
anchors: A 2-D float tensor of shape `[num_anchors, 4]` describing the anchor boxes. Boxes are formatted in the form [y1, x1, y2, x2].
nms_threshold: A scalar float tensor for non-maximal-suppression threshold.
pre_nms_topn: A scalar int tensor for the number of top scoring boxes to be used as input.
min_size: A scalar float tensor. Any box that has a smaller size than min_size will be discarded.
Returns:
rois: A 3-D float tensor of shape `[num_images,post_nms_topn,4]` representing the selected
region of interest boxes. Sorted in descending order in scores.
roi_probabilities: A 2-D float tensor of shape `[num_images, post_nms_topn]` representing the score of the
region of interest box in `rois` tensor at the same index.
func GenerateVocabRemapping ¶
func GenerateVocabRemapping(scope *Scope, new_vocab_file tf.Output, old_vocab_file tf.Output, new_vocab_offset int64, num_new_vocab int64, optional ...GenerateVocabRemappingAttr) (remapping tf.Output, num_present tf.Output)
Given a path to new and old vocabulary files, returns a remapping Tensor of
length `num_new_vocab`, where `remapping[i]` contains the row number in the old vocabulary that corresponds to row `i` in the new vocabulary (starting at line `new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i` in the new vocabulary is not in the old vocabulary. The old vocabulary is constrained to the first `old_vocab_size` entries if `old_vocab_size` is not the default value of -1.
`new_vocab_offset` enables use in the partitioned variable case, and should generally be set through examining partitioning info. Each file should be a text file, with each line containing a single entity within the vocabulary.
For example, with `new_vocab_file` a text file containing each of the following elements on a single line: `[f0, f1, f2, f3]`, old_vocab_file = [f1, f0, f3], `num_new_vocab = 3, new_vocab_offset = 1`, the returned remapping would be `[0, -1, 2]`.
The op also returns a count of how many entries in the new vocabulary were present in the old vocabulary, which is used to calculate the number of values to initialize in a weight matrix remapping.
This functionality can be used to remap both row vocabularies (typically, features) and column vocabularies (typically, classes) from TensorFlow checkpoints. Note that the partitioning logic relies on contiguous vocabularies corresponding to div-partitioned variables. Moreover, the underlying remapping uses an IndexTable (as opposed to an inexact CuckooTable), so client code should use the corresponding index_table_from_file() as the FeatureColumn framework does (as opposed to tf.feature_to_id(), which uses a CuckooTable).
Arguments:
new_vocab_file: Path to the new vocab file.
old_vocab_file: Path to the old vocab file.
new_vocab_offset: How many entries into the new vocab file to start reading.
num_new_vocab: Number of entries in the new vocab file to remap.
Returns:
remapping: A Tensor of length num_new_vocab where the element at index i
is equal to the old ID that maps to the new ID i. This element is -1 for any new ID that is not found in the old vocabulary.
num_present: Number of new vocab entries found in old vocab.
func GetElementAtIndex ¶
func GetElementAtIndex(scope *Scope, dataset tf.Output, index tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output)
Gets the element at the specified index in a dataset.
func GetOptions ¶
Returns the `tf.data.Options` attached to `input_dataset`.
Arguments:
input_dataset: A variant tensor representing the input dataset.
func GetSessionHandle ¶
Store the input tensor in the state of the current session.
Arguments:
value: The tensor to be stored.
Returns The handle for the tensor stored in the session state, represented as a string.
func GetSessionHandleV2 ¶
Store the input tensor in the state of the current session.
Arguments:
value: The tensor to be stored.
Returns The handle for the tensor stored in the session state, represented as a ResourceHandle object.
func GetSessionTensor ¶
Get the value of the tensor specified by its handle.
Arguments:
handle: The handle for a tensor stored in the session state. dtype: The type of the output value.
Returns The tensor for the given handle.
func GetTpuTaskId ¶ added in v0.8.2
An op that returns the TPU task ID from the TPU topology.
Returns The TPU task ID from TPU topology.
func Gradients ¶
Gradients adds gradients computation ops to the graph according to scope.
Arguments:
y: output of the function to derive.
x: inputs of the function for which partial derivatives are computed.
dx: if not null, the partial derivatives of some loss function L w.r.t. y.
Returns the partial derivatives.
func Greater ¶
Returns the truth value of (x > y) element-wise.
*NOTE*: `Greater` supports broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
Example:
```python
x = tf.constant([5, 4, 6])
y = tf.constant([5, 2, 5])
tf.math.greater(x, y) ==> [False, True, True]

x = tf.constant([5, 4, 6])
y = tf.constant([5])
tf.math.greater(x, y) ==> [False, False, True]
```
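The first example above, sketched with the Go bindings. `Greater`'s exact Go signature is not listed in this excerpt; it is assumed to follow the usual binary-op form `Greater(scope, x, y)`, and the session plumbing is the standard one.

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	x := op.Const(s, []int32{5, 4, 6})
	y := op.Const(s, []int32{5, 2, 5})
	z := op.Greater(s, x, y)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	res, err := sess.Run(nil, []tf.Output{z}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(res[0].Value()) // expected: [false true true]
}
```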
func GreaterEqual ¶
Returns the truth value of (x >= y) element-wise.
*NOTE*: `GreaterEqual` supports broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
Example:
```python
x = tf.constant([5, 4, 6, 7])
y = tf.constant([5, 2, 5, 10])
tf.math.greater_equal(x, y) ==> [True, True, True, False]

x = tf.constant([5, 4, 6, 7])
y = tf.constant([5])
tf.math.greater_equal(x, y) ==> [True, False, True, True]
```
func GuaranteeConst ¶
Gives a guarantee to the TF runtime that the input tensor is a constant.
The runtime is then free to make optimizations based on this.
Only accepts value typed tensors as inputs and rejects resource variable handles as input.
Returns the input tensor without modification.
func HSVToRGB ¶
Convert one or more images from HSV to RGB.
Outputs a tensor of the same shape as the `images` tensor, containing the RGB value of the pixels. The output is only well defined if the values in `images` are in `[0,1]`.
See `rgb_to_hsv` for a description of the HSV encoding.
Arguments:
images: 1-D or higher rank. HSV data to convert. Last dimension must be size 3.
Returns `images` converted to RGB.
func HashTableV2 ¶
func HashTableV2(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, optional ...HashTableV2Attr) (table_handle tf.Output)
Creates a non-initialized hash table.
This op creates a hash table, specifying the type of its keys and values. Before using the table you will have to initialize it. After initialization the table will be immutable.
Arguments:
key_dtype: Type of the table keys. value_dtype: Type of the table values.
Returns Handle to a table.
func HistogramFixedWidth ¶
func HistogramFixedWidth(scope *Scope, values tf.Output, value_range tf.Output, nbins tf.Output, optional ...HistogramFixedWidthAttr) (out tf.Output)
Return histogram of values.
Given the tensor `values`, this operation returns a rank 1 histogram counting the number of entries in `values` that fall into every bin. The bins are equal width and determined by the arguments `value_range` and `nbins`.
```python
# Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
nbins = 5
value_range = [0.0, 5.0]
new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]

with tf.get_default_session() as sess:
  hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)
  variables.global_variables_initializer().run()
  sess.run(hist) => [2, 1, 1, 0, 2]
```
Arguments:
values: Numeric `Tensor`. value_range: Shape [2] `Tensor` of same `dtype` as `values`.
values <= value_range[0] will be mapped to hist[0], values >= value_range[1] will be mapped to hist[-1].
nbins: Scalar `int32 Tensor`. Number of histogram bins.
Returns A 1-D `Tensor` holding histogram of values.
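A Go sketch of the same histogram, using the signature above; the import paths and session plumbing are the standard Go binding ones (assumed here), and the expected output mirrors the Python example.

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	values := op.Const(s, []float32{-1.0, 0.0, 1.5, 2.0, 5.0, 15})
	valueRange := op.Const(s, []float32{0.0, 5.0})
	nbins := op.Const(s, int32(5))
	hist := op.HistogramFixedWidth(s, values, valueRange, nbins)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	res, err := sess.Run(nil, []tf.Output{hist}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(res[0].Value()) // expected: [2 1 1 0 2]
}
```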
func HistogramSummary ¶
Outputs a `Summary` protocol buffer with a histogram.
The generated [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) has one summary value containing a histogram for `values`.
This op reports an `InvalidArgument` error if any value is not finite.
Arguments:
tag: Scalar. Tag to use for the `Summary.Value`. values: Any shape. Values to use to build the histogram.
Returns Scalar. Serialized `Summary` protocol buffer.
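A graph-construction sketch for this op. `HistogramSummary`'s Go signature is not shown in this excerpt and is assumed to be `HistogramSummary(scope, tag, values)`; the tag and values below are illustrative.

```go
package main

import (
	"fmt"

	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	tag := op.Const(s, "activations") // scalar string tag (illustrative name)
	values := op.Const(s, []float32{0.1, 0.5, 0.5, 2.0})
	summary := op.HistogramSummary(s, tag, values)
	if s.Err() != nil {
		panic(s.Err())
	}
	// The op produces a scalar string: a serialized Summary protocol buffer.
	fmt.Println(summary.Shape())
}
```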
func HostConst ¶
Returns a constant tensor on the host. Only for writing C++ tests.
Arguments:
value: Attr `value` is the tensor to return.
func IFFT ¶
Inverse fast Fourier transform.
Computes the inverse 1-dimensional discrete Fourier transform over the inner-most dimension of `input`.
Arguments:
input: A complex tensor.
Returns A complex tensor of the same shape as `input`. The inner-most
dimension of `input` is replaced with its inverse 1D Fourier transform.
@compatibility(numpy) Equivalent to np.fft.ifft @end_compatibility
func IFFT2D ¶
Inverse 2D fast Fourier transform.
Computes the inverse 2-dimensional discrete Fourier transform over the inner-most 2 dimensions of `input`.
Arguments:
input: A complex tensor.
Returns A complex tensor of the same shape as `input`. The inner-most 2
dimensions of `input` are replaced with their inverse 2D Fourier transform.
@compatibility(numpy) Equivalent to np.fft.ifft2 @end_compatibility
func IFFT3D ¶
Inverse 3D fast Fourier transform.
Computes the inverse 3-dimensional discrete Fourier transform over the inner-most 3 dimensions of `input`.
Arguments:
input: A complex tensor.
Returns A complex tensor of the same shape as `input`. The inner-most 3
dimensions of `input` are replaced with their inverse 3D Fourier transform.
@compatibility(numpy) Equivalent to np.fft.ifftn with 3 dimensions. @end_compatibility
func IFFTND ¶ added in v0.7.0
ND inverse fast Fourier transform.
Computes the n-dimensional inverse discrete Fourier transform over designated dimensions of `input`. The designated dimensions of `input` are assumed to be the result of `IFFTND`.
If fft_length[i]<shape(input)[i], the input is cropped. If fft_length[i]>shape(input)[i], the input is padded with zeros. If fft_length is not given, the default shape(input) is used.
The axes are the dimensions on which the transform is performed. The default is to perform the transform on all axes.
Arguments:
input: A complex tensor.
fft_length: An int32 tensor. The FFT length for each dimension.
axes: An int32 tensor with the same shape as fft_length. Axes to perform the transform on.
Returns A complex tensor of the same shape as `input`. The designated dimensions of `input` are replaced with their inverse Fourier transforms.
@compatibility(numpy) Equivalent to np.fft.fftn. @end_compatibility
func IRFFT ¶
func IRFFT(scope *Scope, input tf.Output, fft_length tf.Output, optional ...IRFFTAttr) (output tf.Output)
Inverse real-valued fast Fourier transform.
Computes the inverse 1-dimensional discrete Fourier transform of a real-valued signal over the inner-most dimension of `input`.
The inner-most dimension of `input` is assumed to be the result of `RFFT`: the `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If `fft_length` is not provided, it is computed from the size of the inner-most dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to compute `input` is odd, it should be provided since it cannot be inferred properly.
Along the axis `IRFFT` is computed on, if `fft_length / 2 + 1` is smaller than the corresponding dimension of `input`, the dimension is cropped. If it is larger, the dimension is padded with zeros.
Arguments:
input: A complex tensor. fft_length: An int32 tensor of shape [1]. The FFT length.
Returns A float32 tensor of the same rank as `input`. The inner-most
dimension of `input` is replaced with the `fft_length` samples of its inverse 1D Fourier transform.
@compatibility(numpy) Equivalent to np.fft.irfft @end_compatibility
func IRFFT2D ¶
func IRFFT2D(scope *Scope, input tf.Output, fft_length tf.Output, optional ...IRFFT2DAttr) (output tf.Output)
Inverse 2D real-valued fast Fourier transform.
Computes the inverse 2-dimensional discrete Fourier transform of a real-valued signal over the inner-most 2 dimensions of `input`.
The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`: The inner-most dimension contains the `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If `fft_length` is not provided, it is computed from the size of the inner-most 2 dimensions of `input`. If the FFT length used to compute `input` is odd, it should be provided since it cannot be inferred properly.
Along each axis `IRFFT2D` is computed on, if `fft_length` (or `fft_length / 2 + 1` for the inner-most dimension) is smaller than the corresponding dimension of `input`, the dimension is cropped. If it is larger, the dimension is padded with zeros.
Arguments:
input: A complex tensor. fft_length: An int32 tensor of shape [2]. The FFT length for each dimension.
Returns A float32 tensor of the same rank as `input`. The inner-most 2
dimensions of `input` are replaced with the `fft_length` samples of their inverse 2D Fourier transform.
@compatibility(numpy) Equivalent to np.fft.irfft2 @end_compatibility
func IRFFT3D ¶
func IRFFT3D(scope *Scope, input tf.Output, fft_length tf.Output, optional ...IRFFT3DAttr) (output tf.Output)
Inverse 3D real-valued fast Fourier transform.
Computes the inverse 3-dimensional discrete Fourier transform of a real-valued signal over the inner-most 3 dimensions of `input`.
The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`: The inner-most dimension contains the `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If `fft_length` is not provided, it is computed from the size of the inner-most 3 dimensions of `input`. If the FFT length used to compute `input` is odd, it should be provided since it cannot be inferred properly.
Along each axis `IRFFT3D` is computed on, if `fft_length` (or `fft_length / 2 + 1` for the inner-most dimension) is smaller than the corresponding dimension of `input`, the dimension is cropped. If it is larger, the dimension is padded with zeros.
Arguments:
input: A complex tensor. fft_length: An int32 tensor of shape [3]. The FFT length for each dimension.
Returns A float32 tensor of the same rank as `input`. The inner-most 3
dimensions of `input` are replaced with the `fft_length` samples of their inverse 3D real Fourier transform.
@compatibility(numpy) Equivalent to np.irfftn with 3 dimensions. @end_compatibility
func IRFFTND ¶ added in v0.7.0
func IRFFTND(scope *Scope, input tf.Output, fft_length tf.Output, axes tf.Output, optional ...IRFFTNDAttr) (output tf.Output)
ND inverse real fast Fourier transform.
Computes the n-dimensional inverse real discrete Fourier transform over designated dimensions of `input`. The designated dimensions of `input` are assumed to be the result of `IRFFTND`. The inner-most dimension contains the `fft_length / 2 + 1` unique components of the DFT of a real-valued signal.
If fft_length[i]<shape(input)[i], the input is cropped. If fft_length[i]>shape(input)[i], the input is padded with zeros. If fft_length is not given, the default shape(input) is used.
The axes are the dimensions on which the transform is performed. The default is to perform the transform on all axes.
Arguments:
input: A complex tensor.
fft_length: An int32 tensor. The FFT length for each dimension.
axes: An int32 tensor with the same shape as fft_length. Axes to perform the transform on.
Returns A complex tensor of the same shape as `input`. The designated dimensions of `input` are replaced with their inverse real Fourier transforms.
@compatibility(numpy) Equivalent to np.fft.irfftn. @end_compatibility
func IdentityN ¶
Returns a list of tensors with the same shapes and contents as the input
tensors.
This op can be used to override the gradient for complicated functions. For example, suppose y = f(x) and we wish to apply a custom function g for backprop such that dx = g(dy). In Python,
```python
with tf.get_default_graph().gradient_override_map(
    {'IdentityN': 'OverrideGradientWithG'}):
  y, _ = identity_n([f(x), x])

@tf.RegisterGradient('OverrideGradientWithG')
def ApplyG(op, dy, _):
  return [None, g(dy)]  # Do not backprop to f(x).
```
func IdentityReaderV2 ¶
func IdentityReaderV2(scope *Scope, optional ...IdentityReaderV2Attr) (reader_handle tf.Output)
A Reader that outputs the queued work as both the key and value.
To use, enqueue strings in a Queue. ReaderRead will take the front work string and output (work, work).
Returns The handle to reference the Reader.
func Igamma ¶
Compute the lower regularized incomplete Gamma function `P(a, x)`.
The lower regularized incomplete Gamma function is defined as:
\\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\)
where
\\(gamma(a, x) = \\int_{0}^{x} t^{a-1} exp(-t) dt\\)
is the lower incomplete Gamma function.
Note, above `Q(a, x)` (`Igammac`) is the upper regularized incomplete Gamma function.
func IgammaGradA ¶
Computes the gradient of `igamma(a, x)` wrt `a`.
func Igammac ¶
Compute the upper regularized incomplete Gamma function `Q(a, x)`.
The upper regularized incomplete Gamma function is defined as:
\\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\)
where
\\(Gamma(a, x) = \int_{x}^{\infty} t^{a-1} exp(-t) dt\\)
is the upper incomplete Gamma function.
Note, above `P(a, x)` (`Igamma`) is the lower regularized incomplete Gamma function.
func IgnoreErrorsDataset ¶
func IgnoreErrorsDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...IgnoreErrorsDatasetAttr) (handle tf.Output)
Creates a dataset that contains the elements of `input_dataset` ignoring errors.
func Imag ¶
Returns the imaginary part of a complex number.
Given a tensor `input` of complex numbers, this operation returns a tensor of type `float` that is the imaginary part of each element in `input`. All elements in `input` must be complex numbers of the form \\(a + bj\\), where *a* is the real part and *b* is the imaginary part returned by this operation.
For example:
```
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.imag(input) ==> [4.75, 5.75]
```
func ImageProjectiveTransformV2 ¶
func ImageProjectiveTransformV2(scope *Scope, images tf.Output, transforms tf.Output, output_shape tf.Output, interpolation string, optional ...ImageProjectiveTransformV2Attr) (transformed_images tf.Output)
Applies the given transform to each of the images.
If one row of `transforms` is `[a0, a1, a2, b0, b1, b2, c0, c1]`, then it maps the *output* point `(x, y)` to a transformed *input* point `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where `k = c0 x + c1 y + 1`. If the transformed point lies outside of the input image, the output pixel is set to 0.
Arguments:
images: 4-D with shape `[batch, height, width, channels]`. transforms: 2-D Tensor, `[batch, 8]` or `[1, 8]` matrix, where each row corresponds to a 3 x 3
projective transformation matrix, with the last entry assumed to be 1. If there is one row, the same transformation will be applied to all images.
output_shape: 1-D Tensor [new_height, new_width]. interpolation: Interpolation method, "NEAREST" or "BILINEAR".
Returns 4-D with shape `[batch, new_height, new_width, channels]`.
func ImageProjectiveTransformV3 ¶
func ImageProjectiveTransformV3(scope *Scope, images tf.Output, transforms tf.Output, output_shape tf.Output, fill_value tf.Output, interpolation string, optional ...ImageProjectiveTransformV3Attr) (transformed_images tf.Output)
Applies the given transform to each of the images.
If one row of `transforms` is `[a0, a1, a2, b0, b1, b2, c0, c1]`, then it maps the *output* point `(x, y)` to a transformed *input* point `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where `k = c0 x + c1 y + 1`. If the transformed point lies outside of the input image, the output pixel is set to fill_value.
Arguments:
images: 4-D with shape `[batch, height, width, channels]`. transforms: 2-D Tensor, `[batch, 8]` or `[1, 8]` matrix, where each row corresponds to a 3 x 3
projective transformation matrix, with the last entry assumed to be 1. If there is one row, the same transformation will be applied to all images.
output_shape: 1-D Tensor [new_height, new_width]. fill_value: float, the value to be filled when fill_mode is "constant". interpolation: Interpolation method, "NEAREST" or "BILINEAR".
Returns 4-D with shape `[batch, new_height, new_width, channels]`.
func ImageSummary ¶
func ImageSummary(scope *Scope, tag tf.Output, tensor tf.Output, optional ...ImageSummaryAttr) (summary tf.Output)
Outputs a `Summary` protocol buffer with images.
The summary has up to `max_images` summary values containing images. The images are built from `tensor` which must be 4-D with shape `[batch_size, height, width, channels]` and where `channels` can be:
* 1: `tensor` is interpreted as Grayscale.
* 3: `tensor` is interpreted as RGB.
* 4: `tensor` is interpreted as RGBA.
The images have the same number of channels as the input tensor. For float input, the values are normalized one image at a time to fit in the range `[0, 255]`. `uint8` values are unchanged. The op uses two different normalization algorithms:
If the input values are all positive, they are rescaled so the largest one is 255.
If any input value is negative, the values are shifted so input value 0.0 is at 127. They are then rescaled so that either the smallest value is 0, or the largest one is 255.
The `tag` argument is a scalar `Tensor` of type `string`. It is used to build the `tag` of the summary values:
- If `max_images` is 1, the summary value tag is '*tag*/image'.
- If `max_images` is greater than 1, the summary value tags are generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
The `bad_color` argument is the color to use in the generated images for non-finite input values. It is a `uint8` 1-D tensor of length `channels`. Each element must be in the range `[0, 255]` (It represents the value of a pixel in the output image). Non-finite values in the input tensor are replaced by this tensor in the output image. The default value is the color red.
Arguments:
tag: Scalar. Used to build the `tag` attribute of the summary values. tensor: 4-D of shape `[batch_size, height, width, channels]` where
`channels` is 1, 3, or 4.
Returns Scalar. Serialized `Summary` protocol buffer.
func ImmutableConst ¶
func ImmutableConst(scope *Scope, dtype tf.DataType, shape tf.Shape, memory_region_name string) (tensor tf.Output)
Returns immutable tensor from memory region.
The current implementation memmaps the tensor from a file.
Arguments:
dtype: Type of the returned tensor. shape: Shape of the returned tensor. memory_region_name: Name of readonly memory region used by the tensor, see
NewReadOnlyMemoryRegionFromFile in tensorflow::Env.
func InTopK ¶
Says whether the targets are in the top `K` predictions.
This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the prediction for the target class is among the top `k` predictions among all predictions for example `i`. Note that the behavior of `InTopK` differs from the `TopK` op in its handling of ties; if multiple classes have the same prediction value and straddle the top-`k` boundary, all of those classes are considered to be in the top `k`.
More formally, let
\\(predictions_i\\) be the predictions for all classes for example `i`, \\(targets_i\\) be the target class for example `i`, \\(out_i\\) be the output for example `i`,
$$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
Arguments:
predictions: A `batch_size` x `classes` tensor. targets: A `batch_size` vector of class ids. k: Number of top elements to look at for computing precision.
Returns Computed Precision at `k` as a `bool Tensor`.
func InTopKV2 ¶
func InTopKV2(scope *Scope, predictions tf.Output, targets tf.Output, k tf.Output) (precision tf.Output)
Says whether the targets are in the top `K` predictions.
This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the prediction for the target class is among the top `k` predictions among all predictions for example `i`. Note that the behavior of `InTopK` differs from the `TopK` op in its handling of ties; if multiple classes have the same prediction value and straddle the top-`k` boundary, all of those classes are considered to be in the top `k`.
More formally, let
\\(predictions_i\\) be the predictions for all classes for example `i`, \\(targets_i\\) be the target class for example `i`, \\(out_i\\) be the output for example `i`,
$$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
Arguments:
predictions: A `batch_size` x `classes` tensor. targets: A `batch_size` vector of class ids. k: Number of top elements to look at for computing precision.
Returns Computed precision at `k` as a `bool Tensor`.
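A small end-to-end sketch of `InTopKV2` with two examples and `k = 1`, using the signature above; `targets` and `k` share the same integer type. The import paths and session plumbing are assumed to be the standard Go binding ones, and the data is illustrative.

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// Two examples, three classes each.
	predictions := op.Const(s, [][]float32{{0.1, 0.8, 0.1}, {0.3, 0.3, 0.4}})
	targets := op.Const(s, []int32{1, 0}) // true class per example
	k := op.Const(s, int32(1))
	precision := op.InTopKV2(s, predictions, targets, k)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	res, err := sess.Run(nil, []tf.Output{precision}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(res[0].Value()) // expected: [true false]
}
```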
func InfeedDequeue ¶
A placeholder op for a value that will be fed into the computation.
Arguments:
dtype: The type of elements in the tensor. shape: The shape of the tensor.
Returns A tensor that will be provided using the infeed mechanism.
func InfeedDequeueTuple ¶
func InfeedDequeueTuple(scope *Scope, dtypes []tf.DataType, shapes []tf.Shape) (outputs []tf.Output)
Fetches multiple values from infeed as an XLA tuple.
Arguments:
dtypes: The element types of each element in `outputs`. shapes: The shapes of each tensor in `outputs`.
Returns A list of tensors that will be provided using the infeed mechanism.
func InfeedEnqueue ¶
An op which feeds a single Tensor value into the computation.
Arguments:
input: A tensor that will be provided using the infeed mechanism.
Returns the created operation.
func InfeedEnqueuePrelinearizedBuffer ¶
func InfeedEnqueuePrelinearizedBuffer(scope *Scope, input tf.Output, optional ...InfeedEnqueuePrelinearizedBufferAttr) (o *tf.Operation)
An op which enqueues prelinearized buffer into TPU infeed.
Arguments:
input: A variant tensor representing linearized output.
Returns the created operation.
func InfeedEnqueueTuple ¶
func InfeedEnqueueTuple(scope *Scope, inputs []tf.Output, shapes []tf.Shape, optional ...InfeedEnqueueTupleAttr) (o *tf.Operation)
Feeds multiple Tensor values into the computation as an XLA tuple.
Arguments:
inputs: A list of tensors that will be provided using the infeed mechanism. shapes: The shapes of each tensor in `inputs`.
Returns the created operation.
func InitializeTableFromTextFileV2 ¶
func InitializeTableFromTextFileV2(scope *Scope, table_handle tf.Output, filename tf.Output, key_index int64, value_index int64, optional ...InitializeTableFromTextFileV2Attr) (o *tf.Operation)
Initializes a table from a text file.
It inserts one key-value pair into the table for each line of the file. The key and value are extracted from the whole line content, from elements of the line split on `delimiter`, or from the line number (starting from zero). Where to extract the key and value from a line is specified by `key_index` and `value_index`.
- A value of -1 means use the line number (starting from zero), expects `int64`.
- A value of -2 means use the whole line content, expects `string`.
- A value >= 0 means use the index (starting at zero) of the split line based on `delimiter`.
Arguments:
table_handle: Handle to a table which will be initialized.
filename: Filename of a vocabulary text file.
key_index: Column index in a line to get the table `key` values from.
value_index: Column index that represents information of a line to get the table `value` values from.
Returns the created operation.
func InitializeTableV2 ¶
func InitializeTableV2(scope *Scope, table_handle tf.Output, keys tf.Output, values tf.Output) (o *tf.Operation)
Table initializer that takes two tensors for keys and values respectively.
Arguments:
table_handle: Handle to a table which will be initialized. keys: Keys of type Tkey. values: Values of type Tval.
Returns the created operation.
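A sketch that combines `HashTableV2` (documented above) with `InitializeTableV2`: the table is created uninitialized, and the returned `*tf.Operation` is run as a target to fill it. The import paths, dtypes, and key/value data are assumptions of this sketch.

```go
package main

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// Create an uninitialized string->int64 table, then an op that fills it.
	table := op.HashTableV2(s, tf.String, tf.Int64)
	keys := op.Const(s, []string{"apple", "banana"})
	values := op.Const(s, []int64{1, 2})
	initOp := op.InitializeTableV2(s, table, keys, values)
	if s.Err() != nil {
		panic(s.Err())
	}

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	// Run the initializer as a target; after this the table is immutable.
	if _, err := sess.Run(nil, nil, []*tf.Operation{initOp}); err != nil {
		panic(err)
	}
}
```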
func InplaceAdd ¶
Adds v into specified rows of x.
Computes y = x; y[i, :] += v; return y.
Arguments:
x: A `Tensor` of type T. i: A vector. Indices into the left-most dimension of `x`. v: A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
Returns A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.
func InplaceSub ¶
Subtracts `v` from specified rows of `x`. Computes y = x; y[i, :] -= v; return y.
Arguments:
x: A `Tensor` of type T. i: A vector. Indices into the left-most dimension of `x`. v: A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
Returns A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.
func InplaceUpdate ¶
Updates specified rows 'i' with values 'v'.
Computes `x[i, :] = v; return x`.
Originally this function is mutative however for compilation we make this operation create / operate on a copy of `x`.
Arguments:
x: A tensor of type `T`. i: A vector. Indices into the left-most dimension of `x`. v: A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
Returns A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.
func InvGrad ¶
Computes the gradient for the inverse of `x` wrt its input.
Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy` is the corresponding input gradient.
func Invert ¶
Invert (flip) each bit of supported types; for example, type `uint8` value 01010101 becomes 10101010.
Flip each bit of supported types. For example, type `int8` (decimal 2) binary 00000010 becomes (decimal -3) binary 11111101. This operation is performed on each element of the tensor argument `x`.
Example:
```python
import tensorflow as tf
from tensorflow.python.ops import bitwise_ops

# flip 2 (00000010) to -3 (11111101)
tf.assert_equal(-3, bitwise_ops.invert(2))

dtype_list = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64,
              dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64]

inputs = [0, 5, 3, 14]
for dtype in dtype_list:
  # Because of issues with negative numbers, let's test this indirectly.
  # 1. invert(a) and a = 0
  # 2. invert(a) or a = invert(0)
  input_tensor = tf.constant([0, 5, 3, 14], dtype=dtype)
  not_a_and_a, not_a_or_a, not_0 = [
      bitwise_ops.bitwise_and(input_tensor, bitwise_ops.invert(input_tensor)),
      bitwise_ops.bitwise_or(input_tensor, bitwise_ops.invert(input_tensor)),
      bitwise_ops.invert(tf.constant(0, dtype=dtype))]

  expected = tf.constant([0, 0, 0, 0], dtype=tf.float32)
  tf.assert_equal(tf.cast(not_a_and_a, tf.float32), expected)

  expected = tf.cast([not_0] * 4, tf.float32)
  tf.assert_equal(tf.cast(not_a_or_a, tf.float32), expected)

  # For unsigned dtypes let's also check the result directly.
  if dtype.is_unsigned:
    inverted = bitwise_ops.invert(input_tensor)
    expected = tf.constant([dtype.max - x for x in inputs], dtype=tf.float32)
    tf.assert_equal(tf.cast(inverted, tf.float32), tf.cast(expected, tf.float32))
```
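A Go sketch of the first part of the example (flipping 2 to -3); `Invert`'s Go signature is not listed in this excerpt and is assumed to be `Invert(scope, x)`, with the standard import paths and session plumbing.

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	x := op.Const(s, []int32{2, 5})
	y := op.Invert(s, x) // bitwise NOT of each element

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	res, err := sess.Run(nil, []tf.Output{y}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(res[0].Value()) // expected: [-3 -6]
}
```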
func InvertPermutation ¶
Computes the inverse permutation of a tensor.
This operation computes the inverse of an index permutation. It takes a 1-D integer tensor `x`, which represents the indices of a zero-based array, and swaps each value with its index position. In other words, for an output tensor `y` and an input tensor `x`, this operation computes the following:
`y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`
The values must include 0. There can be no duplicate values or negative values.
For example:
```
# tensor `x` is [3, 4, 0, 2, 1]
invert_permutation(x) ==> [2, 4, 3, 0, 1]
```
Arguments:
x: 1-D.
Returns 1-D.
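The same example as a Go sketch; `InvertPermutation`'s Go signature is not shown here and is assumed to be `InvertPermutation(scope, x)`, with the usual import paths and session plumbing.

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	x := op.Const(s, []int32{3, 4, 0, 2, 1})
	y := op.InvertPermutation(s, x)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	res, err := sess.Run(nil, []tf.Output{y}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(res[0].Value()) // expected: [2 4 3 0 1]
}
```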
func IsBoostedTreesEnsembleInitialized ¶
func IsBoostedTreesEnsembleInitialized(scope *Scope, tree_ensemble_handle tf.Output) (is_initialized tf.Output)
Checks whether a tree ensemble has been initialized.
Arguments:
tree_ensemble_handle: Handle to the tree ensemble resource.
Returns output boolean on whether it is initialized or not.
func IsBoostedTreesQuantileStreamResourceInitialized ¶
func IsBoostedTreesQuantileStreamResourceInitialized(scope *Scope, quantile_stream_resource_handle tf.Output) (is_initialized tf.Output)
Checks whether a quantile stream has been initialized.
An Op that checks if quantile stream resource is initialized.
Arguments:
quantile_stream_resource_handle: resource; The reference to quantile stream resource handle.
Returns bool; True if the resource is initialized, False otherwise.
func IsFinite ¶
Returns which elements of x are finite.
@compatibility(numpy) Equivalent to np.isfinite @end_compatibility
Example:
```python
x = tf.constant([5.0, 4.8, 6.8, np.inf, np.nan])
tf.math.is_finite(x) ==> [True, True, True, False, False]
```
func IsInf ¶
Returns which elements of x are Inf.
@compatibility(numpy) Equivalent to np.isinf @end_compatibility
Example:
```python
x = tf.constant([5.0, np.inf, 6.8, np.inf])
tf.math.is_inf(x) ==> [False, True, False, True]
```
func IsNan ¶
Returns which elements of x are NaN.
@compatibility(numpy) Equivalent to np.isnan @end_compatibility
Example:
```python
x = tf.constant([5.0, np.nan, 6.8, np.nan, np.inf])
tf.math.is_nan(x) ==> [False, True, False, True, False]
```
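A Go sketch of the example above; `IsNan`'s Go signature is not listed in this excerpt and is assumed to be `IsNan(scope, x)`. NaN and Inf inputs are built with the `math` package, and the session plumbing is the standard one.

```go
package main

import (
	"fmt"
	"math"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	nan := float32(math.NaN())
	inf := float32(math.Inf(1))
	x := op.Const(s, []float32{5.0, nan, 6.8, nan, inf})
	y := op.IsNan(s, x)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	res, err := sess.Run(nil, []tf.Output{y}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(res[0].Value()) // expected: [false true false true false]
}
```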
func IsTPUEmbeddingInitialized ¶
func IsTPUEmbeddingInitialized(scope *Scope, optional ...IsTPUEmbeddingInitializedAttr) (is_tpu_embedding_initialized tf.Output)
Whether TPU Embedding is initialized in a distributed TPU system.
func IsotonicRegression ¶
func IsotonicRegression(scope *Scope, input tf.Output, optional ...IsotonicRegressionAttr) (output tf.Output, segments tf.Output)
Solves a batch of isotonic regression problems.
Arguments:
input: A (batch_size, dim)-tensor holding a batch of inputs.
Returns:
output: A (batch_size, dim)-tensor holding the per-batch element solutions. segments: An int32 (batch_size, dim)-tensor with the segments.
func Iterator ¶
func Iterator(scope *Scope, shared_name string, container string, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
A container for an iterator resource.
Returns A handle to the iterator that can be passed to a "MakeIterator" or "IteratorGetNext" op.
func IteratorFromStringHandle ¶
func IteratorFromStringHandle(scope *Scope, string_handle tf.Output, optional ...IteratorFromStringHandleAttr) (resource_handle tf.Output)
Converts the given string representing a handle to an iterator to a resource.
Arguments:
string_handle: A string representation of the given handle.
Returns A handle to an iterator resource.
func IteratorGetDevice ¶
Returns the name of the device on which `resource` has been placed.
func IteratorGetModelProto ¶ added in v0.8.2
Returns the serialized model proto of an iterator resource.
Arguments:
iterator: A resource from a dataset iterator.
Returns A serialized model proto.
func IteratorGetNext ¶
func IteratorGetNext(scope *Scope, iterator tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output)
Gets the next output from the given iterator.
func IteratorGetNextAsOptional ¶
func IteratorGetNextAsOptional(scope *Scope, iterator tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (optional tf.Output)
Gets the next output from the given iterator as an Optional variant.
func IteratorGetNextSync ¶
func IteratorGetNextSync(scope *Scope, iterator tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output)
Gets the next output from the given iterator.
This operation is a synchronous version of IteratorGetNext. It should only be used in situations where the iterator does not block the calling thread, or where the calling thread is not a member of the thread pool used to execute parallel operations (e.g. in eager mode).
func IteratorToStringHandle ¶
Converts the given `resource_handle` representing an iterator to a string.
Arguments:
resource_handle: A handle to an iterator resource.
Returns A string representation of the given handle.
func KMC2ChainInitialization ¶
Returns the index of a data point that should be added to the seed set.
Entries in distances are assumed to be squared distances of candidate points to the already sampled centers in the seed set. The op constructs one Markov chain of the k-MC^2 algorithm and returns the index of one candidate point to be added as an additional cluster center.
Arguments:
distances: Vector with squared distances to the closest previously sampled cluster center
for each candidate point.
seed: Scalar. Seed for initializing the random number generator.
Returns Scalar with the index of the sampled point.
func KmeansPlusPlusInitialization ¶
func KmeansPlusPlusInitialization(scope *Scope, points tf.Output, num_to_sample tf.Output, seed tf.Output, num_retries_per_sample tf.Output) (samples tf.Output)
Selects num_to_sample rows of input using the KMeans++ criterion.
Rows of points are assumed to be input points. One row is selected at random. Subsequent rows are sampled with probability proportional to the squared L2 distance from the nearest row selected thus far till num_to_sample rows have been sampled.
Arguments:
points: Matrix of shape (n, d). Rows are assumed to be input points.
num_to_sample: Scalar. The number of rows to sample. This value must not be larger than n.
seed: Scalar. Seed for initializing the random number generator.
num_retries_per_sample: Scalar. For each row that is sampled, this parameter specifies the number of additional points to draw from the current distribution before selecting the best. If a negative value is specified, a heuristic is used to sample O(log(num_to_sample)) additional points.
Returns Matrix of shape (num_to_sample, d). The sampled rows.
func KthOrderStatistic ¶
Computes the Kth order statistic of a data set. The current
implementation uses a binary search requiring exactly 32 passes over the input data. The running time is linear with respect to input size. The median-of-medians algorithm is probably faster, but is difficult to implement efficiently in XLA. The implementation imposes a total ordering on floats. The ordering is consistent with the usual partial order. Positive NaNs are greater than positive infinity. Negative NaNs are less than negative infinity. NaNs with distinct payloads are treated as distinct. Subnormal numbers are preserved (not flushed to zero). Positive infinity is greater than all numbers. Negative infinity is less than all numbers. Positive zero is greater than negative zero. There are fewer than k values greater than the Kth order statistic. There are at least k values greater than or equal to the Kth order statistic. The semantics are not the same as top_k_unique.
func L2Loss ¶
L2 Loss.
Computes half the L2 norm of a tensor without the `sqrt`:
output = sum(t ** 2) / 2
Arguments:
t: Typically 2-D, but may have any dimensions.
Returns 0-D.
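A graph-construction sketch; `L2Loss`'s Go signature is not shown in this excerpt and is assumed to be `L2Loss(scope, t)`. For the 2x2 constant below, `sum(t**2)/2 = (1+4+9+16)/2 = 15`.

```go
package main

import (
	"fmt"

	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	t := op.Const(s, [][]float32{{1, 2}, {3, 4}})
	loss := op.L2Loss(s, t) // half the sum of squared entries, here 15
	if s.Err() != nil {
		panic(s.Err())
	}
	fmt.Println(loss.Shape()) // 0-D (scalar)
}
```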
func LMDBDataset ¶
func LMDBDataset(scope *Scope, filenames tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
Creates a dataset that emits the key-value pairs in one or more LMDB files.
The Lightning Memory-Mapped Database Manager, or LMDB, is an embedded binary key-value database. This dataset can read the contents of LMDB database files, the names of which generally have the `.mdb` suffix.
Each output element consists of a key-value pair represented as a pair of scalar string `Tensor`s, where the first `Tensor` contains the key and the second `Tensor` contains the value.
LMDB uses different file formats on big- and little-endian machines. `LMDBDataset` can only read files in the format of the host machine.
Arguments:
filenames: A scalar or a vector containing the name(s) of the binary file(s) to be
read.
func LRN ¶
Local Response Normalization.
The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last dimension), and each vector is normalized independently. Within a given vector, each component is divided by the weighted, squared sum of inputs within `depth_radius`. In detail,
sqr_sum[a, b, c, d] =
    sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
output = input / (bias + alpha * sqr_sum) ** beta
For details, see [Krizhevsky et al., ImageNet classification with deep convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).
Arguments:
input: 4-D.
func LRNGrad ¶
func LRNGrad(scope *Scope, input_grads tf.Output, input_image tf.Output, output_image tf.Output, optional ...LRNGradAttr) (output tf.Output)
Gradients for Local Response Normalization.
Arguments:
input_grads: 4-D with shape `[batch, height, width, channels]`. input_image: 4-D with shape `[batch, height, width, channels]`. output_image: 4-D with shape `[batch, height, width, channels]`.
Returns The gradients for LRN.
func LSTMBlockCell ¶
func LSTMBlockCell(scope *Scope, x tf.Output, cs_prev tf.Output, h_prev tf.Output, w tf.Output, wci tf.Output, wcf tf.Output, wco tf.Output, b tf.Output, optional ...LSTMBlockCellAttr) (i tf.Output, cs tf.Output, f tf.Output, o tf.Output, ci tf.Output, co tf.Output, h tf.Output)
Computes the LSTM cell forward propagation for 1 time step.
This implementation uses 1 weight matrix and 1 bias vector, and there's an optional peephole connection.
This kernel op implements the following mathematical equations:
```python
xh = [x, h_prev]
[i, f, ci, o] = xh * w + b
f = f + forget_bias

if not use_peephole:
  wci = wcf = wco = 0

i = sigmoid(cs_prev * wci + i)
f = sigmoid(cs_prev * wcf + f)
ci = tanh(ci)

cs = ci .* i + cs_prev .* f
cs = clip(cs, cell_clip)

o = sigmoid(cs * wco + o)
co = tanh(cs)
h = co .* o
```
Arguments:
x: The input to the LSTM cell, shape (batch_size, num_inputs).
cs_prev: Value of the cell state at previous time step.
h_prev: Output of the previous cell at previous time step.
w: The weight matrix.
wci: The weight matrix for input gate peephole connection.
wcf: The weight matrix for forget gate peephole connection.
wco: The weight matrix for output gate peephole connection.
b: The bias vector.
Returns:
i: The input gate.
cs: The cell state before the tanh.
f: The forget gate.
o: The output gate.
ci: The cell input.
co: The cell after the tanh.
h: The output h vector.
func LSTMBlockCellGrad ¶
func LSTMBlockCellGrad(scope *Scope, x tf.Output, cs_prev tf.Output, h_prev tf.Output, w tf.Output, wci tf.Output, wcf tf.Output, wco tf.Output, b tf.Output, i tf.Output, cs tf.Output, f tf.Output, o tf.Output, ci tf.Output, co tf.Output, cs_grad tf.Output, h_grad tf.Output, use_peephole bool) (cs_prev_grad tf.Output, dicfo tf.Output, wci_grad tf.Output, wcf_grad tf.Output, wco_grad tf.Output)
Computes the LSTM cell backward propagation for 1 timestep.
This implementation is to be used in conjunction of LSTMBlockCell.
Arguments:
x: The input to the LSTM cell, shape (batch_size, num_inputs). cs_prev: The previous cell state. h_prev: The previous h state. w: The weight matrix. wci: The weight matrix for input gate peephole connection. wcf: The weight matrix for forget gate peephole connection. wco: The weight matrix for output gate peephole connection. b: The bias vector. i: The input gate. cs: The cell state before the tanh. f: The forget gate. o: The output gate. ci: The cell input. co: The cell after the tanh. cs_grad: The current gradient of cs. h_grad: The gradient of h vector. use_peephole: Whether the cell uses peephole connections.
Returns:
cs_prev_grad: The gradient of cs to be back-propped.
dicfo: The derivative with respect to [i, cs, f, o].
wci_grad: The gradient for wci to be back-propped.
wcf_grad: The gradient for wcf to be back-propped.
wco_grad: The gradient for wco to be back-propped.
func LatencyStatsDataset ¶
func LatencyStatsDataset(scope *Scope, input_dataset tf.Output, tag tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
Records the latency of producing `input_dataset` elements in a StatsAggregator.
func LeakyReluGrad ¶
func LeakyReluGrad(scope *Scope, gradients tf.Output, features tf.Output, optional ...LeakyReluGradAttr) (backprops tf.Output)
Computes rectified linear gradients for a LeakyRelu operation.
Arguments:
gradients: The backpropagated gradients to the corresponding LeakyRelu operation.
features: The features passed as input to the corresponding LeakyRelu operation, OR the outputs of that operation (both work equivalently).
Returns `gradients * (features > 0) + alpha * gradients * (features <= 0)`.
func LearnedUnigramCandidateSampler ¶
func LearnedUnigramCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...LearnedUnigramCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output)
Generates labels for candidate sampling with a learned unigram distribution.
See explanations of candidate sampling and the data formats at go/candidate-sampling.
For each batch, this op picks a single set of sampled candidate labels.
The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the true labels.
Arguments:
true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label.
num_true: Number of true labels per context.
num_sampled: Number of candidates to randomly sample.
unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities.
range_max: The sampler will sample integers from the interval [0, range_max).
Returns:
sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
sampled_expected_count: A vector of length num_sampled, for each sampled candidate representing the number of times the candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
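A graph-construction sketch using the signature shown above (the sampled IDs are produced when the graph is run in a session):

```go
s := NewScope()
// Two examples, one true class each (num_true = 1); IDs drawn from [0, 10).
trueClasses := Const(s, [][]int64{{0}, {3}})
sampled, trueExpected, sampledExpected := LearnedUnigramCandidateSampler(
	s, trueClasses, 1 /*num_true*/, 4 /*num_sampled*/, true /*unique*/, 10 /*range_max*/)
if s.Err() != nil {
	panic(s.Err())
}
_, _, _ = sampled, trueExpected, sampledExpected
```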
func LeftShift ¶
Elementwise computes the bitwise left-shift of `x` and `y`.
If `y` is negative, or greater than or equal to the width of `x` in bits, the result is implementation defined.
Example:
```python
import tensorflow as tf
from tensorflow.python.ops import bitwise_ops
import numpy as np

dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64]

for dtype in dtype_list:
  lhs = tf.constant([-1, -5, -3, -14], dtype=dtype)
  rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
  left_shift_result = bitwise_ops.left_shift(lhs, rhs)
  print(left_shift_result)

# This will print:
# tf.Tensor([ -32   -5 -128    0], shape=(4,), dtype=int8)
# tf.Tensor([   -32     -5   -384 -28672], shape=(4,), dtype=int16)
# tf.Tensor([   -32     -5   -384 -28672], shape=(4,), dtype=int32)
# tf.Tensor([   -32     -5   -384 -28672], shape=(4,), dtype=int64)

lhs = np.array([-2, 64, 101, 32], dtype=np.int8)
rhs = np.array([-1, -5, -3, -14], dtype=np.int8)
bitwise_ops.left_shift(lhs, rhs)
# <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2,  64, 101,  32], dtype=int8)>
```
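The same computation can be expressed from Go when building a graph with this package; a sketch, assuming the usual two-input element-wise signature `LeftShift(scope, x, y)`:

```go
s := NewScope()
lhs := Const(s, []int8{-1, -5, -3, -14})
rhs := Const(s, []int8{5, 0, 7, 11})
shifted := LeftShift(s, lhs, rhs)
if s.Err() != nil {
	panic(s.Err())
}
_ = shifted // evaluates to [-32 -5 -128 0] when the graph is run
```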
func Less ¶
Returns the truth value of (x < y) element-wise.
*NOTE*: `Less` supports broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
Example:
```python
x = tf.constant([5, 4, 6])
y = tf.constant([5])
tf.math.less(x, y) ==> [False, True, False]

x = tf.constant([5, 4, 6])
y = tf.constant([5, 6, 7])
tf.math.less(x, y) ==> [False, True, True]
```
func LessEqual ¶
Returns the truth value of (x <= y) element-wise.
*NOTE*: `LessEqual` supports broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
Example:
```python
x = tf.constant([5, 4, 6])
y = tf.constant([5])
tf.math.less_equal(x, y) ==> [True, True, False]

x = tf.constant([5, 4, 6])
y = tf.constant([5, 6, 6])
tf.math.less_equal(x, y) ==> [True, True, True]
```
func Lgamma ¶
Computes the log of the absolute value of `Gamma(x)` element-wise.
For positive numbers, this function computes log((input - 1)!) for every element in the tensor. `lgamma(5) = log((5-1)!) = log(4!) = log(24) = 3.1780539`
Example:
```python x = tf.constant([0, 0.5, 1, 4.5, -4, -5.6]) tf.math.lgamma(x) ==> [inf, 0.5723649, 0., 2.4537368, inf, -4.6477685] ```
func LinSpace ¶
Generates values in an interval.
A sequence of `num` evenly-spaced values is generated beginning at `start`. If `num > 1`, the values in the sequence increase by `(stop - start) / (num - 1)`, so that the last one is exactly `stop`.
For example:
``` tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0 11.0 12.0] ```
Arguments:
start: 0-D tensor. First entry in the range. stop: 0-D tensor. Last entry in the range. num: 0-D tensor. Number of values to generate.
Returns 1-D. The generated values.
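A Go sketch mirroring the example above, assuming the generated signature `LinSpace(scope, start, stop, num)` with float `start`/`stop` and an integer `num` tensor:

```go
s := NewScope()
values := LinSpace(s, Const(s, float32(10.0)), Const(s, float32(12.0)), Const(s, int32(3)))
if s.Err() != nil {
	panic(s.Err())
}
_ = values // evaluates to [10 11 12] when the graph is run
```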
func ListDataset ¶ added in v0.2.0
func ListDataset(scope *Scope, tensors []tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ListDatasetAttr) (handle tf.Output)
Creates a dataset that emits each of `tensors` once.
func ListDiff ¶
func ListDiff(scope *Scope, x tf.Output, y tf.Output, optional ...ListDiffAttr) (out tf.Output, idx tf.Output)
Computes the difference between two lists of numbers or strings.
Given a list `x` and a list `y`, this operation returns a list `out` that represents all values that are in `x` but not in `y`. The returned list `out` is sorted in the same order that the numbers appear in `x` (duplicates are preserved). This operation also returns a list `idx` that represents the position of each `out` element in `x`. In other words:
`out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`
For example, given this input:
``` x = [1, 2, 3, 4, 5, 6] y = [1, 3, 5] ```
This operation would return:
``` out ==> [2, 4, 6] idx ==> [1, 3, 5] ```
Arguments:
x: 1-D. Values to keep. y: 1-D. Values to remove.
Returns:
out: 1-D. Values present in `x` but not in `y`. idx: 1-D. Positions of `x` values preserved in `out`.
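A Go sketch of the example above, using the signature shown for ListDiff:

```go
s := NewScope()
x := Const(s, []int32{1, 2, 3, 4, 5, 6})
y := Const(s, []int32{1, 3, 5})
out, idx := ListDiff(s, x, y)
if s.Err() != nil {
	panic(s.Err())
}
_, _ = out, idx // out evaluates to [2 4 6] and idx to [1 3 5] when run
```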
func LoadAllTPUEmbeddingParameters ¶
func LoadAllTPUEmbeddingParameters(scope *Scope, parameters []tf.Output, auxiliary1 []tf.Output, auxiliary2 []tf.Output, auxiliary3 []tf.Output, auxiliary4 []tf.Output, auxiliary5 []tf.Output, auxiliary6 []tf.Output, auxiliary7 []tf.Output, config string, num_shards int64, shard_id int64) (o *tf.Operation)
An op that loads optimization parameters into embedding memory.
An op that loads optimization parameters into embedding memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up the correct embedding table configuration. For example, this op is used to install parameters that are loaded from a checkpoint before a training loop is executed. For Adagrad, auxiliary1 should be the accumulators. For SGD, all of the auxiliary* values should be empty. For FTRL, auxiliary1 should be the accumulators and auxiliary2 should be the linear terms. For ADAM, auxiliary1 should be the momenta and auxiliary2 should be the velocities.
Arguments:
parameters: A list of tensors, one for each embedding table, containing the initial embedding table parameters to use in embedding lookups.
auxiliary1: A list of tensors, one for each embedding table, containing the initial values of the first auxiliary optimization parameter to use in embedding training loop updates. The shape of each entry is ignored (and thus can be empty) for those tables whose optimization algorithms do not have at least one auxiliary parameter.
auxiliary2: A list of tensors, one for each embedding table, containing the initial values of the second auxiliary optimization parameter to use in embedding training loop updates. The shape of each entry is ignored (and thus can be empty) for those tables whose optimization algorithms do not have at least two auxiliary parameters.
auxiliary3: A list of tensors, one for each embedding table, containing the initial values of the third auxiliary optimization parameter to use in embedding training loop updates. The shape of each entry is ignored (and thus can be empty) for those tables whose optimization algorithms do not have at least three auxiliary parameters.
auxiliary4: A list of tensors, one for each embedding table, containing the initial values of the fourth auxiliary optimization parameter to use in embedding training loop updates. The shape of each entry is ignored (and thus can be empty) for those tables whose optimization algorithms do not have at least four auxiliary parameters.
auxiliary5: A list of tensors, one for each embedding table, containing the initial values of the fifth auxiliary optimization parameter to use in embedding training loop updates. The shape of each entry is ignored (and thus can be empty) for those tables whose optimization algorithms do not have at least five auxiliary parameters.
auxiliary6: A list of tensors, one for each embedding table, containing the initial values of the sixth auxiliary optimization parameter to use in embedding training loop updates. The shape of each entry is ignored (and thus can be empty) for those tables whose optimization algorithms do not have at least six auxiliary parameters.
auxiliary7: A list of tensors, one for each embedding table, containing the initial values of the seventh auxiliary optimization parameter to use in embedding training loop updates. The shape of each entry is ignored (and thus can be empty) for those tables whose optimization algorithms do not have at least seven auxiliary parameters.
config: A TPUEmbeddingConfiguration proto describing the table parameters being loaded, serialized to a string.
num_shards: Number of shards into which the embedding tables are divided.
shard_id: Identifier of shard for this operation.
Returns the created operation.
func LoadAndRemapMatrix ¶
func LoadAndRemapMatrix(scope *Scope, ckpt_path tf.Output, old_tensor_name tf.Output, row_remapping tf.Output, col_remapping tf.Output, initializing_values tf.Output, num_rows int64, num_cols int64, optional ...LoadAndRemapMatrixAttr) (output_matrix tf.Output)
Loads a 2-D (matrix) `Tensor` with name `old_tensor_name` from the checkpoint
at `ckpt_path` and potentially reorders its rows and columns using the specified remappings.
Most users should use one of the wrapper initializers (such as `tf.contrib.framework.load_and_remap_matrix_initializer`) instead of this function directly.
The remappings are 1-D tensors with the following properties:
- `row_remapping` must have exactly `num_rows` entries. Row `i` of the output matrix will be initialized from the row corresponding to index `row_remapping[i]` in the old `Tensor` from the checkpoint.
- `col_remapping` must have either 0 entries (indicating that no column reordering is needed) or `num_cols` entries. If specified, column `j` of the output matrix will be initialized from the column corresponding to index `col_remapping[j]` in the old `Tensor` from the checkpoint.
- A value of -1 in either of the remappings signifies a "missing" entry. In that case, values from the `initializing_values` tensor will be used to fill that missing row or column. If `row_remapping` has `r` missing entries and `col_remapping` has `c` missing entries, then the following condition must be true:
`(r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values)`
The remapping tensors can be generated using the GenerateVocabRemapping op.
As an example, with row_remapping = [1, 0, -1], col_remapping = [0, 2, -1], initializing_values = [0.5, -0.5, 0.25, -0.25, 42], and w(i, j) representing the value from row i, column j of the old tensor in the checkpoint, the output matrix will look like the following:
[[w(1, 0), w(1, 2), 0.5],
 [w(0, 0), w(0, 2), -0.5],
 [0.25,    -0.25,   42]]
Arguments:
ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`) from
which the old matrix `Tensor` will be loaded.
old_tensor_name: Name of the 2-D `Tensor` to load from checkpoint. row_remapping: An int `Tensor` of row remappings (generally created by
`generate_vocab_remapping`). Even if no row remapping is needed, this must still be an index-valued Tensor (e.g. [0, 1, 2, ...]), or a shifted index-valued `Tensor` (e.g. [8, 9, 10, ...], for partitioned `Variables`).
col_remapping: An int `Tensor` of column remappings (generally created by
`generate_vocab_remapping`). May be a size-0 `Tensor` if only row remapping is to be done (e.g. column ordering is the same).
initializing_values: A float `Tensor` containing values to fill in for cells
in the output matrix that are not loaded from the checkpoint. Length must be exactly the same as the number of missing / new cells.
num_rows: Number of rows (length of the 1st dimension) in the output matrix. num_cols: Number of columns (length of the 2nd dimension) in the output matrix.
Returns Output matrix containing existing values loaded from the checkpoint, and with any missing values filled in from initializing_values.
func LoadTPUEmbeddingADAMParameters ¶
func LoadTPUEmbeddingADAMParameters(scope *Scope, parameters tf.Output, momenta tf.Output, velocities tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingADAMParametersAttr) (o *tf.Operation)
Load ADAM embedding parameters.
An op that loads optimization parameters into HBM for embedding. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up the correct embedding table configuration. For example, this op is used to install parameters that are loaded from a checkpoint before a training loop is executed.
Arguments:
parameters: Value of parameters used in the ADAM optimization algorithm. momenta: Value of momenta used in the ADAM optimization algorithm. velocities: Value of velocities used in the ADAM optimization algorithm.
Returns the created operation.
func LoadTPUEmbeddingAdadeltaParameters ¶
func LoadTPUEmbeddingAdadeltaParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, updates tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingAdadeltaParametersAttr) (o *tf.Operation)
Load Adadelta embedding parameters.
An op that loads optimization parameters into HBM for embedding. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up the correct embedding table configuration. For example, this op is used to install parameters that are loaded from a checkpoint before a training loop is executed.
Arguments:
parameters: Value of parameters used in the Adadelta optimization algorithm. accumulators: Value of accumulators used in the Adadelta optimization algorithm. updates: Value of updates used in the Adadelta optimization algorithm.
Returns the created operation.
func LoadTPUEmbeddingAdagradMomentumParameters ¶
func LoadTPUEmbeddingAdagradMomentumParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, momenta tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingAdagradMomentumParametersAttr) (o *tf.Operation)
Load Adagrad Momentum embedding parameters.
An op that loads optimization parameters into HBM for embedding. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up the correct embedding table configuration. For example, this op is used to install parameters that are loaded from a checkpoint before a training loop is executed.
Arguments:
parameters: Value of parameters used in the Adagrad Momentum optimization algorithm. accumulators: Value of accumulators used in the Adagrad Momentum optimization algorithm. momenta: Value of momenta used in the Adagrad Momentum optimization algorithm.
Returns the created operation.
func LoadTPUEmbeddingAdagradParameters ¶
func LoadTPUEmbeddingAdagradParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingAdagradParametersAttr) (o *tf.Operation)
Load Adagrad embedding parameters.
An op that loads optimization parameters into HBM for embedding. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up the correct embedding table configuration. For example, this op is used to install parameters that are loaded from a checkpoint before a training loop is executed.
Arguments:
parameters: Value of parameters used in the Adagrad optimization algorithm. accumulators: Value of accumulators used in the Adagrad optimization algorithm.
Returns the created operation.
func LoadTPUEmbeddingCenteredRMSPropParameters ¶
func LoadTPUEmbeddingCenteredRMSPropParameters(scope *Scope, parameters tf.Output, ms tf.Output, mom tf.Output, mg tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingCenteredRMSPropParametersAttr) (o *tf.Operation)
Load centered RMSProp embedding parameters.
An op that loads optimization parameters into HBM for embedding. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up the correct embedding table configuration. For example, this op is used to install parameters that are loaded from a checkpoint before a training loop is executed.
Arguments:
parameters: Value of parameters used in the centered RMSProp optimization algorithm. ms: Value of ms used in the centered RMSProp optimization algorithm. mom: Value of mom used in the centered RMSProp optimization algorithm. mg: Value of mg used in the centered RMSProp optimization algorithm.
Returns the created operation.
func LoadTPUEmbeddingFTRLParameters ¶
func LoadTPUEmbeddingFTRLParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, linears tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingFTRLParametersAttr) (o *tf.Operation)
Load FTRL embedding parameters.
An op that loads optimization parameters into HBM for embedding. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up the correct embedding table configuration. For example, this op is used to install parameters that are loaded from a checkpoint before a training loop is executed.
Arguments:
parameters: Value of parameters used in the FTRL optimization algorithm. accumulators: Value of accumulators used in the FTRL optimization algorithm. linears: Value of linears used in the FTRL optimization algorithm.
Returns the created operation.
func LoadTPUEmbeddingFrequencyEstimatorParameters ¶
func LoadTPUEmbeddingFrequencyEstimatorParameters(scope *Scope, parameters tf.Output, last_hit_step tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingFrequencyEstimatorParametersAttr) (o *tf.Operation)
Load frequency estimator embedding parameters.
An op that loads optimization parameters into HBM for embedding. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up the correct embedding table configuration. For example, this op is used to install parameters that are loaded from a checkpoint before a training loop is executed.
Arguments:
parameters: Value of parameters used in the frequency estimator optimization algorithm. last_hit_step: Value of last_hit_step used in the frequency estimator optimization algorithm.
Returns the created operation.
func LoadTPUEmbeddingMDLAdagradLightParameters ¶
func LoadTPUEmbeddingMDLAdagradLightParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, weights tf.Output, benefits tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingMDLAdagradLightParametersAttr) (o *tf.Operation)
Load MDL Adagrad Light embedding parameters.
An op that loads optimization parameters into HBM for embedding. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up the correct embedding table configuration. For example, this op is used to install parameters that are loaded from a checkpoint before a training loop is executed.
Arguments:
parameters: Value of parameters used in the MDL Adagrad Light optimization algorithm. accumulators: Value of accumulators used in the MDL Adagrad Light optimization algorithm. weights: Value of weights used in the MDL Adagrad Light optimization algorithm. benefits: Value of benefits used in the MDL Adagrad Light optimization algorithm.
Returns the created operation.
func LoadTPUEmbeddingMomentumParameters ¶
func LoadTPUEmbeddingMomentumParameters(scope *Scope, parameters tf.Output, momenta tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingMomentumParametersAttr) (o *tf.Operation)
Load Momentum embedding parameters.
An op that loads optimization parameters into HBM for embedding. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up the correct embedding table configuration. For example, this op is used to install parameters that are loaded from a checkpoint before a training loop is executed.
Arguments:
parameters: Value of parameters used in the Momentum optimization algorithm. momenta: Value of momenta used in the Momentum optimization algorithm.
Returns the created operation.
func LoadTPUEmbeddingProximalAdagradParameters ¶
func LoadTPUEmbeddingProximalAdagradParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingProximalAdagradParametersAttr) (o *tf.Operation)
Load proximal Adagrad embedding parameters.
An op that loads optimization parameters into HBM for embedding. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up the correct embedding table configuration. For example, this op is used to install parameters that are loaded from a checkpoint before a training loop is executed.
Arguments:
parameters: Value of parameters used in the proximal Adagrad optimization algorithm. accumulators: Value of accumulators used in the proximal Adagrad optimization algorithm.
Returns the created operation.
func LoadTPUEmbeddingRMSPropParameters ¶
func LoadTPUEmbeddingRMSPropParameters(scope *Scope, parameters tf.Output, ms tf.Output, mom tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingRMSPropParametersAttr) (o *tf.Operation)
Load RMSProp embedding parameters.
An op that loads optimization parameters into HBM for embedding. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up the correct embedding table configuration. For example, this op is used to install parameters that are loaded from a checkpoint before a training loop is executed.
Arguments:
parameters: Value of parameters used in the RMSProp optimization algorithm. ms: Value of ms used in the RMSProp optimization algorithm. mom: Value of mom used in the RMSProp optimization algorithm.
Returns the created operation.
func LoadTPUEmbeddingStochasticGradientDescentParameters ¶
func LoadTPUEmbeddingStochasticGradientDescentParameters(scope *Scope, parameters tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingStochasticGradientDescentParametersAttr) (o *tf.Operation)
Load SGD embedding parameters.
An op that loads optimization parameters into HBM for embedding. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up the correct embedding table configuration. For example, this op is used to install parameters that are loaded from a checkpoint before a training loop is executed.
Arguments:
parameters: Value of parameters used in the stochastic gradient descent optimization algorithm.
Returns the created operation.
func Log ¶
Computes natural logarithm of x element-wise.
I.e., \\(y = \log_e x\\).
Example:
```python x = tf.constant([0, 0.5, 1, 5]) tf.math.log(x) ==> [-inf, -0.6931472, 0. , 1.609438] ```
func Log1p ¶
Computes natural logarithm of (1 + x) element-wise.
I.e., \\(y = \log_e (1 + x)\\).
Example:
```python x = tf.constant([0, 0.5, 1, 5]) tf.math.log1p(x) ==> [0., 0.4054651, 0.6931472, 1.7917595] ```
func LogMatrixDeterminant ¶
func LogMatrixDeterminant(scope *Scope, input tf.Output) (sign tf.Output, log_abs_determinant tf.Output)
Computes the sign and the log of the absolute value of the determinant of
one or more square matrices.
The input is a tensor of shape `[N, M, M]` whose inner-most 2 dimensions form square matrices. The outputs are two tensors containing the signs and absolute values of the log determinants for all N input submatrices `[..., :, :]` such that `determinant = sign*exp(log_abs_determinant)`. The `log_abs_determinant` is computed as `det(P)*sum(log(diag(LU)))` where `LU` is the `LU` decomposition of the input and `P` is the corresponding permutation matrix.
Arguments:
input: Shape is `[N, M, M]`.
Returns:
sign: The signs of the log determinants of the inputs. Shape is `[N]`. log_abs_determinant: The logs of the absolute values of the determinants
of the N input matrices. Shape is `[N]`.
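A graph-construction sketch for a single 2x2 matrix (N = 1), using the signature shown above:

```go
s := NewScope()
// One 2x2 matrix; its determinant is 1*4 - 2*3 = -2, so sign = -1 and
// log_abs_determinant = log(2).
m := Const(s, [][][]float32{{{1, 2}, {3, 4}}})
sign, logAbsDet := LogMatrixDeterminant(s, m)
if s.Err() != nil {
	panic(s.Err())
}
_, _ = sign, logAbsDet
```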
func LogSoftmax ¶
Computes log softmax activations.
For each batch `i` and class `j` we have
logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
Arguments:
logits: 2-D with shape `[batch_size, num_classes]`.
Returns Same shape as `logits`.
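A sketch, assuming the generated one-input signature `LogSoftmax(scope, logits)`:

```go
s := NewScope()
logits := Const(s, [][]float32{{1, 2, 3}}) // [batch_size=1, num_classes=3]
logProbs := LogSoftmax(s, logits)
if s.Err() != nil {
	panic(s.Err())
}
_ = logProbs // same shape as logits; each row sums to 1 after exp
```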
func LogUniformCandidateSampler ¶
func LogUniformCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...LogUniformCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output)
Generates labels for candidate sampling with a log-uniform distribution.
See explanations of candidate sampling and the data formats at go/candidate-sampling.
For each batch, this op picks a single set of sampled candidate labels.
The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the true labels.
Arguments:
true_classes: A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label.
num_true: Number of true labels per context.
num_sampled: Number of candidates to randomly sample.
unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities.
range_max: The sampler will sample integers from the interval [0, range_max).
Returns:
sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
sampled_expected_count: A vector of length num_sampled, for each sampled candidate representing the number of times the candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
func LogicalAnd ¶
Returns the truth value of x AND y element-wise.
*NOTE*: `LogicalAnd` supports broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func LogicalNot ¶
Returns the truth value of `NOT x` element-wise.
Arguments:
x: A `Tensor` of type `bool`.
Returns A `Tensor` of type `bool` with the same shape as `x`. The logical negation of `x`.
func LogicalOr ¶
Returns the truth value of x OR y element-wise.
*NOTE*: `LogicalOr` supports broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func LookupTableExportV2 ¶
func LookupTableExportV2(scope *Scope, table_handle tf.Output, Tkeys tf.DataType, Tvalues tf.DataType) (keys tf.Output, values tf.Output)
Outputs all keys and values in the table.
Arguments:
table_handle: Handle to the table.
Returns:
keys: Vector of all keys present in the table. values: Tensor of all values in the table. Indexed in parallel with `keys`.
func LookupTableFindV2 ¶
func LookupTableFindV2(scope *Scope, table_handle tf.Output, keys tf.Output, default_value tf.Output) (values tf.Output)
Looks up keys in a table, outputs the corresponding values.
The tensor `keys` must be of the same type as the keys of the table. The output `values` is of the type of the table values.
The scalar `default_value` is the value output for keys not present in the table. It must also be of the same type as the table values.
Arguments:
table_handle: Handle to the table. keys: Any shape. Keys to look up.
Returns Same shape as `keys`. Values found in the table, or `default_values` for missing keys.
func LookupTableImportV2 ¶
func LookupTableImportV2(scope *Scope, table_handle tf.Output, keys tf.Output, values tf.Output) (o *tf.Operation)
Replaces the contents of the table with the specified keys and values.
The tensor `keys` must be of the same type as the keys of the table. The tensor `values` must be of the type of the table values.
Arguments:
table_handle: Handle to the table. keys: Any shape. Keys to look up. values: Values to associate with keys.
Returns the created operation.
func LookupTableInsertV2 ¶
func LookupTableInsertV2(scope *Scope, table_handle tf.Output, keys tf.Output, values tf.Output) (o *tf.Operation)
Updates the table to associate keys with values.
The tensor `keys` must be of the same type as the keys of the table. The tensor `values` must be of the type of the table values.
Arguments:
table_handle: Handle to the table. keys: Any shape. Keys to look up. values: Values to associate with keys.
Returns the created operation.
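A sketch combining a table constructor with LookupTableInsertV2 and LookupTableFindV2. The handle here comes from AnonymousMutableHashTable (listed in the index); note that, when the graph is run, the insert operation must be executed before the lookup for the inserted keys to be visible:

```go
s := NewScope()
table := AnonymousMutableHashTable(s, tf.String, tf.Int64)
insert := LookupTableInsertV2(s, table,
	Const(s, []string{"apple", "banana"}),
	Const(s, []int64{1, 2}))
// -1 is returned for keys that are not present in the table.
values := LookupTableFindV2(s, table,
	Const(s, []string{"banana", "cherry"}),
	Const(s, int64(-1)))
if s.Err() != nil {
	panic(s.Err())
}
_, _ = insert, values
```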
func LookupTableRemoveV2 ¶
Removes keys and their associated values from a table.
The tensor `keys` must be of the same type as the keys of the table. Keys not already in the table are silently ignored.
Arguments:
table_handle: Handle to the table. keys: Any shape. Keys of the elements to remove.
Returns the created operation.
func LookupTableSizeV2 ¶
Computes the number of elements in the given table.
Arguments:
table_handle: Handle to the table.
Returns Scalar that contains number of elements in the table.
func LoopCond ¶
Forwards the input to the output.
This operator represents the loop termination condition used by the "pivot" switches of a loop.
Arguments:
input: A boolean scalar, representing the branch predicate of the Switch op.
Returns The same tensor as `input`.
func LowerBound ¶
func LowerBound(scope *Scope, sorted_inputs tf.Output, values tf.Output, optional ...LowerBoundAttr) (output tf.Output)
Applies lower_bound(sorted_search_values, values) along each row.
Each set of rows with the same index in (sorted_inputs, values) is treated independently. The resulting row is the equivalent of calling `np.searchsorted(sorted_inputs, values, side='left')`.
The result is not a global index to the entire `Tensor`, but rather just the index in the last dimension.
A 2-D example:
sorted_sequence = [[0, 3, 9, 9, 10], [1, 2, 3, 4, 5]]
values = [[2, 4, 9], [0, 2, 6]]
result = LowerBound(sorted_sequence, values)
result == [[1, 2, 2], [0, 1, 5]]
Arguments:
sorted_inputs: 2-D Tensor where each row is ordered.
values: 2-D Tensor with the same number of rows as `sorted_search_values`. Contains the values that will be searched for in `sorted_search_values`.
Returns A `Tensor` with the same shape as `values`. It contains the first scalar index into the last dimension where values can be inserted without changing the ordered property.
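The 2-D example above, written as a graph-construction sketch with the signature shown for LowerBound:

```go
s := NewScope()
sortedInputs := Const(s, [][]int32{{0, 3, 9, 9, 10}, {1, 2, 3, 4, 5}})
values := Const(s, [][]int32{{2, 4, 9}, {0, 2, 6}})
result := LowerBound(s, sortedInputs, values)
if s.Err() != nil {
	panic(s.Err())
}
_ = result // evaluates to [[1 2 2] [0 1 5]] when the graph is run
```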
func Lu ¶
Computes the LU decomposition of one or more square matrices.
The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices.
The input has to be invertible.
The output consists of two tensors LU and P containing the LU decomposition of all input submatrices `[..., :, :]`. LU encodes the lower triangular and upper triangular factors.
For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower triangular part of LU. U is an upper triangular matrix of shape `[M, M]` whose entries correspond to the upper triangular part, including the diagonal, of LU.
P represents a permutation matrix encoded as a list of indices each between `0` and `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to P, then L, U and P satisfy P_mat * input = L * U.
Arguments:
input: A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form matrices of
size `[M, M]`.
Returns:
lu: A tensor of shape `[..., M, M]` whose strictly lower triangular part denotes the
lower triangular factor `L` with unit diagonal, and whose upper triangular part denotes the upper triangular factor `U`.
p: Permutation of the rows encoded as a list of indices in `0..M-1`. Shape is
`[..., M]`. @compatibility(scipy) Similar to `scipy.linalg.lu`, except the triangular factors `L` and `U` are packed into a single tensor, the permutation is applied to `input` instead of the right hand side and the permutation `P` is returned as a list of indices instead of a permutation matrix. @end_compatibility
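A sketch for a single invertible 2x2 matrix, assuming the generated signature `Lu(scope, input, optional ...LuAttr)` returning the packed factors and the permutation:

```go
s := NewScope()
m := Const(s, [][]float32{{4, 3}, {6, 3}}) // invertible: det = 4*3 - 3*6 = -6
lu, p := Lu(s, m)
if s.Err() != nil {
	panic(s.Err())
}
_, _ = lu, p // lu packs L (unit diagonal) and U; p has shape [2]
```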
func MakeIterator ¶
Makes a new iterator from the given `dataset` and stores it in `iterator`.
This operation may be executed multiple times. Each execution will reset the iterator in `iterator` to the first element of `dataset`.
Returns the created operation.
func MakeUnique ¶
Make all elements in the non-Batch dimension unique, but "close" to
their initial value. Never returns a sub-normal number. Never returns zero. The sign of each input element is always identical to the sign of the corresponding output element. Behavior for infinite elements is undefined. Behavior for subnormal elements is undefined.
func MapIncompleteSize ¶
func MapIncompleteSize(scope *Scope, dtypes []tf.DataType, optional ...MapIncompleteSizeAttr) (size tf.Output)
Op returns the number of incomplete elements in the underlying container.
func MapPeek ¶
func MapPeek(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...MapPeekAttr) (values []tf.Output)
Op peeks at the values at the specified key. If the
underlying container does not contain this key, this op will block until it does.
func MapStage ¶
func MapStage(scope *Scope, key tf.Output, indices tf.Output, values []tf.Output, dtypes []tf.DataType, optional ...MapStageAttr) (o *tf.Operation)
Stage (key, values) in the underlying container which behaves like a hashtable.
Arguments:
key: int64
values: a list of tensors
dtypes: A list of data types that inserted values should adhere to.
Returns the created operation.
func MapUnstage ¶
func MapUnstage(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...MapUnstageAttr) (values []tf.Output)
Op removes and returns the values associated with the key
from the underlying container. If the underlying container does not contain this key, the op will block until it does.
func MapUnstageNoKey ¶
func MapUnstageNoKey(scope *Scope, indices tf.Output, dtypes []tf.DataType, optional ...MapUnstageNoKeyAttr) (key tf.Output, values []tf.Output)
Op removes and returns a random (key, value)
from the underlying container. If the underlying container does not contain elements, the op will block until it does.
func MatMul ¶
Multiply the matrix "a" by the matrix "b".
The inputs must be two-dimensional matrices and the inner dimension of "a" (after being transposed if transpose_a is true) must match the outer dimension of "b" (after being transposed if transpose_b is true).
*Note*: The default kernel implementation for MatMul on GPUs uses cublas.
func MatchingFiles ¶
Returns the set of files matching one or more glob patterns.
Note that this routine only supports wildcard characters in the basename portion of the pattern, not in the directory portion. Note also that the order of filenames returned is deterministic.
Arguments:
pattern: Shell wildcard pattern(s). Scalar or vector of type string.
Returns A vector of matching filenames.
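A sketch, assuming the generated one-input signature `MatchingFiles(scope, pattern)`; the glob pattern below is purely illustrative:

```go
s := NewScope()
// Hypothetical pattern; wildcards are only allowed in the basename.
pattern := Const(s, "/tmp/data-*.csv")
filenames := MatchingFiles(s, pattern)
if s.Err() != nil {
	panic(s.Err())
}
_ = filenames // a 1-D string tensor of matching paths when the graph is run
```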
func MatrixBandPart ¶
func MatrixBandPart(scope *Scope, input tf.Output, num_lower tf.Output, num_upper tf.Output) (band tf.Output)
Copy a tensor setting everything outside a central band in each innermost matrix to zero.
The `band` part is computed as follows: Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a tensor with the same shape where
`band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.
The indicator function is

`in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower) && (num_upper < 0 || (n-m) <= num_upper)`.
For example:
```
# if 'input' is [[ 0,  1,  2, 3]
#                [-1,  0,  1, 2]
#                [-2, -1,  0, 1]
#                [-3, -2, -1, 0]],

tf.linalg.band_part(input, 1, -1) ==> [[ 0,  1,  2, 3]
                                       [-1,  0,  1, 2]
                                       [ 0, -1,  0, 1]
                                       [ 0,  0, -1, 0]],

tf.linalg.band_part(input, 2, 1) ==> [[ 0,  1,  0, 0]
                                      [-1,  0,  1, 0]
                                      [-2, -1,  0, 1]
                                      [ 0, -2, -1, 0]]
```
Useful special cases:
```
tf.linalg.band_part(input, 0, -1) ==> Upper triangular part.
tf.linalg.band_part(input, -1, 0) ==> Lower triangular part.
tf.linalg.band_part(input, 0, 0)  ==> Diagonal.
```
Arguments:
input: Rank `k` tensor.
num_lower: 0-D tensor. Number of subdiagonals to keep. If negative, keep entire lower triangle.
num_upper: 0-D tensor. Number of superdiagonals to keep. If negative, keep entire upper triangle.
Returns Rank `k` tensor of the same shape as input. The extracted banded tensor.
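The first example above (`tf.linalg.band_part(input, 1, -1)`), written as a graph-construction sketch with the signature shown for MatrixBandPart:

```go
s := NewScope()
input := Const(s, [][]int32{
	{0, 1, 2, 3},
	{-1, 0, 1, 2},
	{-2, -1, 0, 1},
	{-3, -2, -1, 0},
})
// Keep one subdiagonal and the entire upper triangle.
band := MatrixBandPart(s, input, Const(s, int64(1)), Const(s, int64(-1)))
if s.Err() != nil {
	panic(s.Err())
}
_ = band
```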
func MatrixDeterminant ¶
Computes the determinant of one or more square matrices.
The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices. The output is a tensor containing the determinants for all input submatrices `[..., :, :]`.
Arguments:
input: Shape is `[..., M, M]`.
Returns Shape is `[...]`.
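A sketch, assuming the generated one-input signature `MatrixDeterminant(scope, input)`:

```go
s := NewScope()
m := Const(s, [][]float32{{1, 2}, {3, 4}}) // a single 2x2 matrix
det := MatrixDeterminant(s, m)
if s.Err() != nil {
	panic(s.Err())
}
_ = det // a scalar; evaluates to -2 when the graph is run
```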
func MatrixDiag ¶
Returns a batched diagonal tensor with a given batched diagonal values.
Given a `diagonal`, this operation returns a tensor with the `diagonal` and everything else padded with zeros. The diagonal is computed as follows:
Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a tensor of rank `k+1` with dimensions `[I, J, K, ..., N, N]` where:
`output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.
For example:
``` # 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]]
and diagonal.shape = (2, 4)
tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0]
[0, 2, 0, 0] [0, 0, 3, 0] [0, 0, 0, 4]], [[5, 0, 0, 0] [0, 6, 0, 0] [0, 0, 7, 0] [0, 0, 0, 8]]]
which has shape (2, 4, 4) ```
Arguments:
diagonal: Rank `k`, where `k >= 1`.
Returns Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`.
func MatrixDiagPart ¶
Returns the batched diagonal part of a batched tensor.
This operation returns a tensor with the `diagonal` part of the batched `input`. The `diagonal` part is computed as follows:
Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where:
`diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.
The input must be at least a matrix.
For example:
``` # 'input' is [[[1, 0, 0, 0]
[0, 2, 0, 0] [0, 0, 3, 0] [0, 0, 0, 4]], [[5, 0, 0, 0] [0, 6, 0, 0] [0, 0, 7, 0] [0, 0, 0, 8]]]
and input.shape = (2, 4, 4)
tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]]
which has shape (2, 4) ```
Arguments:
input: Rank `k` tensor where `k >= 2`.
Returns The extracted diagonal(s) having shape `diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`.
func MatrixDiagPartV2 ¶
func MatrixDiagPartV2(scope *Scope, input tf.Output, k tf.Output, padding_value tf.Output) (diagonal tf.Output)
Returns the batched diagonal part of a batched tensor.
Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched `input`.
Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`. Let `max_diag_len` be the maximum length among all diagonals to be extracted, `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` Let `num_diags` be the number of diagonals to extract, `num_diags = k[1] - k[0] + 1`.
If `num_diags == 1`, the output tensor is of rank `r - 1` with shape `[I, J, ..., L, max_diag_len]` and values:
``` diagonal[i, j, ..., l, n]
= input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, padding_value ; otherwise.
``` where `y = max(-k[1], 0)`, `x = max(k[1], 0)`.
Otherwise, the output tensor has rank `r` with dimensions `[I, J, ..., L, num_diags, max_diag_len]` with values:
``` diagonal[i, j, ..., l, m, n]
= input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, padding_value ; otherwise.
``` where `d = k[1] - m`, `y = max(-d, 0)`, and `x = max(d, 0)`.
The input must be at least a matrix.
For example:
``` input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4)
[5, 6, 7, 8], [9, 8, 7, 6]], [[5, 4, 3, 2], [1, 2, 3, 4], [5, 6, 7, 8]]])
# A main diagonal from each batch. tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3)
[5, 2, 7]]
# A superdiagonal from each batch. tf.matrix_diag_part(input, k = 1)
==> [[2, 7, 6], # Output shape: (2, 3) [4, 3, 8]]
# A tridiagonal band from each batch. tf.matrix_diag_part(input, k = (-1, 1))
==> [[[2, 7, 6], # Output shape: (2, 3, 3) [1, 6, 7], [5, 8, 0]], [[4, 3, 8], [5, 2, 7], [1, 6, 0]]]
# Padding value = 9 tf.matrix_diag_part(input, k = (1, 3), padding_value = 9)
==> [[[4, 9, 9], # Output shape: (2, 3, 3) [3, 8, 9], [2, 7, 6]], [[2, 9, 9], [3, 4, 9], [4, 3, 8]]]
```
Arguments:
input: Rank `r` tensor where `r >= 2`. k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
diagonal, and negative value means subdiagonals. `k` can be a single integer (for a single diagonal) or a pair of integers specifying the low and high ends of a matrix band. `k[0]` must not be larger than `k[1]`.
padding_value: The value to fill the area outside the specified diagonal band with.
Default is 0.
Returns The extracted diagonal(s).
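A sketch extracting the superdiagonal of a single 3x4 matrix (k = 1), using the signature shown above; `padding_value` must have the same type as `input`:

```go
s := NewScope()
input := Const(s, [][]int32{
	{1, 2, 3, 4},
	{5, 6, 7, 8},
	{9, 8, 7, 6},
})
diag := MatrixDiagPartV2(s, input, Const(s, int32(1)), Const(s, int32(0)))
if s.Err() != nil {
	panic(s.Err())
}
_ = diag // evaluates to [2 7 6] when the graph is run
```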
func MatrixDiagPartV3 ¶
func MatrixDiagPartV3(scope *Scope, input tf.Output, k tf.Output, padding_value tf.Output, optional ...MatrixDiagPartV3Attr) (diagonal tf.Output)
Returns the batched diagonal part of a batched tensor.
Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched `input`.
Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`. Let `max_diag_len` be the maximum length among all diagonals to be extracted, `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` Let `num_diags` be the number of diagonals to extract, `num_diags = k[1] - k[0] + 1`.
If `num_diags == 1`, the output tensor is of rank `r - 1` with shape `[I, J, ..., L, max_diag_len]` and values:
``` diagonal[i, j, ..., l, n]
= input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, padding_value ; otherwise.
``` where `y = max(-k[1], 0)`, `x = max(k[1], 0)`.
Otherwise, the output tensor has rank `r` with dimensions `[I, J, ..., L, num_diags, max_diag_len]` with values:
``` diagonal[i, j, ..., l, m, n]
= input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, padding_value ; otherwise.
``` where `d = k[1] - m`, `y = max(-d, 0) - offset`, and `x = max(d, 0) - offset`.
`offset` is zero except when the alignment of the diagonal is to the right. ``` offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
and `d >= 0`) or (`align` in {LEFT_RIGHT, RIGHT_RIGHT} and `d <= 0`) 0 ; otherwise
``` where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
The input must be at least a matrix.
For example:
``` input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4)
[5, 6, 7, 8], [9, 8, 7, 6]], [[5, 4, 3, 2], [1, 2, 3, 4], [5, 6, 7, 8]]])
# A main diagonal from each batch. tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3)
[5, 2, 7]]
# A superdiagonal from each batch. tf.matrix_diag_part(input, k = 1)
==> [[2, 7, 6], # Output shape: (2, 3) [4, 3, 8]]
# A band from each batch. tf.matrix_diag_part(input, k = (-1, 2))
==> [[[0, 3, 8], # Output shape: (2, 4, 3) [2, 7, 6], [1, 6, 7], [5, 8, 0]], [[0, 3, 4], [4, 3, 8], [5, 2, 7], [1, 6, 0]]]
# LEFT_RIGHT alignment. tf.matrix_diag_part(input, k = (-1, 2), align="LEFT_RIGHT")
==> [[[3, 8, 0], # Output shape: (2, 4, 3) [2, 7, 6], [1, 6, 7], [0, 5, 8]], [[3, 4, 0], [4, 3, 8], [5, 2, 7], [0, 1, 6]]]
# max_diag_len can be shorter than the main diagonal. tf.matrix_diag_part(input, k = (-2, -1))
==> [[[5, 8], [9, 0]], [[1, 6], [5, 0]]]
# padding_value = 9 tf.matrix_diag_part(input, k = (1, 3), padding_value = 9)
==> [[[9, 9, 4], # Output shape: (2, 3, 3) [9, 3, 8], [2, 7, 6]], [[9, 9, 2], [9, 3, 4], [4, 3, 8]]]
```
Arguments:
input: Rank `r` tensor where `r >= 2`. k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
diagonal, and negative value means subdiagonals. `k` can be a single integer (for a single diagonal) or a pair of integers specifying the low and high ends of a matrix band. `k[0]` must not be larger than `k[1]`.
padding_value: The value to fill the area outside the specified diagonal band with.
Default is 0.
Returns The extracted diagonal(s).
func MatrixDiagV2 ¶
func MatrixDiagV2(scope *Scope, diagonal tf.Output, k tf.Output, num_rows tf.Output, num_cols tf.Output, padding_value tf.Output) (output tf.Output)
Returns a batched diagonal tensor with given batched diagonal values.
Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th diagonals of a matrix, with everything else padded with `padding`. `num_rows` and `num_cols` specify the dimension of the innermost matrix of the output. If both are not specified, the op assumes the innermost matrix is square and infers its size from `k` and the innermost dimension of `diagonal`. If only one of them is specified, the op assumes the unspecified value is the smallest possible based on other criteria.
Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank `r` with shape `[I, J, ..., L, num_rows, num_cols]`.
The second innermost dimension of `diagonal` has double meaning. When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size [I, J, ..., M], and the output tensor is:
``` output[i, j, ..., l, m, n]
= diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper padding_value ; otherwise
```
Otherwise, `M` is treated as the number of diagonals for the matrix in the same batch (`M = k[1]-k[0]+1`), and the output tensor is:
``` output[i, j, ..., l, m, n]
= diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] padding_value ; otherwise
``` where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`.
For example:
``` # The main diagonal. diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4)
[5, 6, 7, 8]])
tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4)
[0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]], [[5, 0, 0, 0], [0, 6, 0, 0], [0, 0, 7, 0], [0, 0, 0, 8]]]
# A superdiagonal (per batch). diagonal = np.array([[1, 2, 3], # Input shape: (2, 3)
[4, 5, 6]])
tf.matrix_diag(diagonal, k = 1)
==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4) [0, 0, 2, 0], [0, 0, 0, 3], [0, 0, 0, 0]], [[0, 4, 0, 0], [0, 0, 5, 0], [0, 0, 0, 6], [0, 0, 0, 0]]]
# A band of diagonals. diagonals = np.array([[[1, 2, 3], # Input shape: (2, 2, 3)
[4, 5, 0]], [[6, 7, 9], [9, 1, 0]]])
tf.matrix_diag(diagonals, k = (-1, 0))
==> [[[1, 0, 0], # Output shape: (2, 3, 3) [4, 2, 0], [0, 5, 3]], [[6, 0, 0], [9, 7, 0], [0, 1, 9]]]
# Rectangular matrix. diagonal = np.array([1, 2]) # Input shape: (2) tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)
==> [[0, 0, 0, 0], # Output shape: (3, 4) [1, 0, 0, 0], [0, 2, 0, 0]]
# Rectangular matrix with inferred num_cols and padding_value = 9. tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)
==> [[9, 9], # Output shape: (3, 2) [1, 9], [9, 2]]
```
Arguments:
diagonal: Rank `r`, where `r >= 1` k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
diagonal, and negative value means subdiagonals. `k` can be a single integer (for a single diagonal) or a pair of integers specifying the low and high ends of a matrix band. `k[0]` must not be larger than `k[1]`.
num_rows: The number of rows of the output matrix. If it is not provided, the op assumes
the output matrix is a square matrix and infers the matrix size from k and the innermost dimension of `diagonal`.
num_cols: The number of columns of the output matrix. If it is not provided, the op
assumes the output matrix is a square matrix and infers the matrix size from k and the innermost dimension of `diagonal`.
padding_value: The number to fill the area outside the specified diagonal band with.
Default is 0.
Returns Has rank `r+1` when `k` is an integer or `k[0] == k[1]`, rank `r` otherwise.
func MatrixDiagV3 ¶
func MatrixDiagV3(scope *Scope, diagonal tf.Output, k tf.Output, num_rows tf.Output, num_cols tf.Output, padding_value tf.Output, optional ...MatrixDiagV3Attr) (output tf.Output)
Returns a batched diagonal tensor with given batched diagonal values.
Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th diagonals of a matrix, with everything else padded with `padding`. `num_rows` and `num_cols` specify the dimension of the innermost matrix of the output. If both are not specified, the op assumes the innermost matrix is square and infers its size from `k` and the innermost dimension of `diagonal`. If only one of them is specified, the op assumes the unspecified value is the smallest possible based on other criteria.
Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank `r` with shape `[I, J, ..., L, num_rows, num_cols]`.
The second innermost dimension of `diagonal` has double meaning. When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size [I, J, ..., M], and the output tensor is:
``` output[i, j, ..., l, m, n]
= diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper padding_value ; otherwise
```
Otherwise, `M` is treated as the number of diagonals for the matrix in the same batch (`M = k[1]-k[0]+1`), and the output tensor is:
``` output[i, j, ..., l, m, n]
= diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] padding_value ; otherwise
``` where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0) + offset`.
`offset` is zero except when the alignment of the diagonal is to the right. ``` offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
and `d >= 0`) or (`align` in {LEFT_RIGHT, RIGHT_RIGHT} and `d <= 0`) 0 ; otherwise
``` where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
For example:
``` # The main diagonal. diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4)
[5, 6, 7, 8]])
tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4)
[0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]], [[5, 0, 0, 0], [0, 6, 0, 0], [0, 0, 7, 0], [0, 0, 0, 8]]]
# A superdiagonal (per batch). diagonal = np.array([[1, 2, 3], # Input shape: (2, 3)
[4, 5, 6]])
tf.matrix_diag(diagonal, k = 1)
==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4) [0, 0, 2, 0], [0, 0, 0, 3], [0, 0, 0, 0]], [[0, 4, 0, 0], [0, 0, 5, 0], [0, 0, 0, 6], [0, 0, 0, 0]]]
# A tridiagonal band (per batch). diagonals = np.array([[[0, 8, 9], # Input shape: (2, 2, 3)
[1, 2, 3], [4, 5, 0]], [[0, 2, 3], [6, 7, 9], [9, 1, 0]]])
tf.matrix_diag(diagonals, k = (-1, 1))
==> [[[1, 8, 0], # Output shape: (2, 3, 3) [4, 2, 9], [0, 5, 3]], [[6, 2, 0], [9, 7, 3], [0, 1, 9]]]
# LEFT_RIGHT alignment. diagonals = np.array([[[8, 9, 0], # Input shape: (2, 2, 3)
[1, 2, 3], [0, 4, 5]], [[2, 3, 0], [6, 7, 9], [0, 9, 1]]])
tf.matrix_diag(diagonals, k = (-1, 1), align="LEFT_RIGHT")
==> [[[1, 8, 0], # Output shape: (2, 3, 3) [4, 2, 9], [0, 5, 3]], [[6, 2, 0], [9, 7, 3], [0, 1, 9]]]
# Rectangular matrix. diagonal = np.array([1, 2]) # Input shape: (2) tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)
==> [[0, 0, 0, 0], # Output shape: (3, 4) [1, 0, 0, 0], [0, 2, 0, 0]]
# Rectangular matrix with inferred num_cols and padding_value = 9. tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)
==> [[9, 9], # Output shape: (3, 2) [1, 9], [9, 2]]
```
Arguments:
diagonal: Rank `r`, where `r >= 1` k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
diagonal, and negative value means subdiagonals. `k` can be a single integer (for a single diagonal) or a pair of integers specifying the low and high ends of a matrix band. `k[0]` must not be larger than `k[1]`.
num_rows: The number of rows of the output matrix. If it is not provided, the op assumes
the output matrix is a square matrix and infers the matrix size from k and the innermost dimension of `diagonal`.
num_cols: The number of columns of the output matrix. If it is not provided, the op
assumes the output matrix is a square matrix and infers the matrix size from k and the innermost dimension of `diagonal`.
padding_value: The number to fill the area outside the specified diagonal band with.
Default is 0.
Returns Has rank `r+1` when `k` is an integer or `k[0] == k[1]`, rank `r` otherwise.
func MatrixExponential ¶
Deprecated, use python implementation tf.linalg.matrix_exponential.
DEPRECATED at GraphDef version 27: Use Python implementation tf.linalg.matrix_exponential instead.
func MatrixInverse ¶
Computes the inverse of one or more square invertible matrices or their adjoints (conjugate transposes).
The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices. The output is a tensor of the same shape as the input containing the inverse for all input submatrices `[..., :, :]`.
The op uses LU decomposition with partial pivoting to compute the inverses.
If a matrix is not invertible there is no guarantee what the op does. It may detect the condition and raise an exception or it may simply return a garbage result.
Arguments:
input: Shape is `[..., M, M]`.
Returns Shape is `[..., M, M]`.
@compatibility(numpy) Equivalent to np.linalg.inv @end_compatibility
func MatrixLogarithm ¶
Computes the matrix logarithm of one or more square matrices:
\\(log(exp(A)) = A\\)
This op is only defined for complex matrices. If A is positive-definite and real, then casting to a complex matrix, taking the logarithm and casting back to a real matrix will give the correct result.
This function computes the matrix logarithm using the Schur-Parlett algorithm. Details of the algorithm can be found in Section 11.6.2 of: Nicholas J. Higham, Functions of Matrices: Theory and Computation, SIAM 2008. ISBN 978-0-898716-46-7.
The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices. The output is a tensor of the same shape as the input containing the logarithm for all input submatrices `[..., :, :]`.
Arguments:
input: Shape is `[..., M, M]`.
Returns Shape is `[..., M, M]`.
@compatibility(scipy) Equivalent to scipy.linalg.logm @end_compatibility
func MatrixSetDiag ¶
Returns a batched matrix tensor with new batched diagonal values.
Given `input` and `diagonal`, this operation returns a tensor with the same shape and values as `input`, except for the main diagonal of the innermost matrices. These will be overwritten by the values in `diagonal`.
The output is computed as follows:
Assume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has `k` dimensions `[I, J, K, ..., min(M, N)]`. Then the output is a tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:
- `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
- `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.
Arguments:
input: Rank `k+1`, where `k >= 1`. diagonal: Rank `k`, where `k >= 1`.
Returns Rank `k+1`, with `output.shape = input.shape`.
func MatrixSetDiagV2 ¶
func MatrixSetDiagV2(scope *Scope, input tf.Output, diagonal tf.Output, k tf.Output) (output tf.Output)
Returns a batched matrix tensor with new batched diagonal values.
Given `input` and `diagonal`, this operation returns a tensor with the same shape and values as `input`, except for the specified diagonals of the innermost matrices. These will be overwritten by the values in `diagonal`.
`input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`. Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`. `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`. `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`, `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
The output is a tensor of rank `r+1` with dimensions `[I, J, ..., L, M, N]`. If `k` is scalar or `k[0] == k[1]`:
```
output[i, j, ..., l, m, n]
  = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1]
    input[i, j, ..., l, m, n]              ; otherwise
```
Otherwise,
```
output[i, j, ..., l, m, n]
  = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
    input[i, j, ..., l, m, n]                         ; otherwise
```
where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`.
For example:
```
# The main diagonal.
input = np.array([[[7, 7, 7, 7],              # Input shape: (2, 3, 4)
                   [7, 7, 7, 7],
                   [7, 7, 7, 7]],
                  [[7, 7, 7, 7],
                   [7, 7, 7, 7],
                   [7, 7, 7, 7]]])
diagonal = np.array([[1, 2, 3],               # Diagonal shape: (2, 3)
                     [4, 5, 6]])
tf.matrix_set_diag(input, diagonal)
  ==> [[[1, 7, 7, 7],  # Output shape: (2, 3, 4)
        [7, 2, 7, 7],
        [7, 7, 3, 7]],
       [[4, 7, 7, 7],
        [7, 5, 7, 7],
        [7, 7, 6, 7]]]

# A superdiagonal (per batch).
tf.matrix_set_diag(input, diagonal, k = 1)
  ==> [[[7, 1, 7, 7],  # Output shape: (2, 3, 4)
        [7, 7, 2, 7],
        [7, 7, 7, 3]],
       [[7, 4, 7, 7],
        [7, 7, 5, 7],
        [7, 7, 7, 6]]]

# A band of diagonals.
diagonals = np.array([[[1, 2, 3],  # Diagonal shape: (2, 2, 3)
                       [4, 5, 0]],
                      [[6, 1, 2],
                       [3, 4, 0]]])
tf.matrix_set_diag(input, diagonals, k = (-1, 0))
  ==> [[[1, 7, 7, 7],  # Output shape: (2, 3, 4)
        [4, 2, 7, 7],
        [0, 5, 3, 7]],
       [[6, 7, 7, 7],
        [3, 1, 7, 7],
        [7, 4, 2, 7]]]
```
Arguments:
input: Rank `r+1`, where `r >= 1`. diagonal: Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`. `k >= 1`.
k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
diagonal, and negative value means subdiagonals. `k` can be a single integer (for a single diagonal) or a pair of integers specifying the low and high ends of a matrix band. `k[0]` must not be larger than `k[1]`.
Returns Rank `r+1`, with `output.shape = input.shape`.
func MatrixSetDiagV3 ¶
func MatrixSetDiagV3(scope *Scope, input tf.Output, diagonal tf.Output, k tf.Output, optional ...MatrixSetDiagV3Attr) (output tf.Output)
Returns a batched matrix tensor with new batched diagonal values.
Given `input` and `diagonal`, this operation returns a tensor with the same shape and values as `input`, except for the specified diagonals of the innermost matrices. These will be overwritten by the values in `diagonal`.
`input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`. Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`. `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`. `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`, `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
The output is a tensor of rank `r+1` with dimensions `[I, J, ..., L, M, N]`. If `k` is scalar or `k[0] == k[1]`:
```
output[i, j, ..., l, m, n]
  = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1]
    input[i, j, ..., l, m, n]              ; otherwise
```
Otherwise,
```
output[i, j, ..., l, m, n]
  = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
    input[i, j, ..., l, m, n]                         ; otherwise
```
where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0) + offset`.
`offset` is zero except when the alignment of the diagonal is to the right.
```
offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} and `d >= 0`) or
                                         (`align` in {LEFT_RIGHT, RIGHT_RIGHT} and `d <= 0`)
         0                          ; otherwise
```
where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
For example:
```
# The main diagonal.
input = np.array([[[7, 7, 7, 7],              # Input shape: (2, 3, 4)
                   [7, 7, 7, 7],
                   [7, 7, 7, 7]],
                  [[7, 7, 7, 7],
                   [7, 7, 7, 7],
                   [7, 7, 7, 7]]])
diagonal = np.array([[1, 2, 3],               # Diagonal shape: (2, 3)
                     [4, 5, 6]])
tf.matrix_set_diag(input, diagonal)
  ==> [[[1, 7, 7, 7],  # Output shape: (2, 3, 4)
        [7, 2, 7, 7],
        [7, 7, 3, 7]],
       [[4, 7, 7, 7],
        [7, 5, 7, 7],
        [7, 7, 6, 7]]]

# A superdiagonal (per batch).
tf.matrix_set_diag(input, diagonal, k = 1)
  ==> [[[7, 1, 7, 7],  # Output shape: (2, 3, 4)
        [7, 7, 2, 7],
        [7, 7, 7, 3]],
       [[7, 4, 7, 7],
        [7, 7, 5, 7],
        [7, 7, 7, 6]]]

# A band of diagonals.
diagonals = np.array([[[0, 9, 1],  # Diagonal shape: (2, 4, 3)
                       [6, 5, 8],
                       [1, 2, 3],
                       [4, 5, 0]],
                      [[0, 1, 2],
                       [5, 6, 4],
                       [6, 1, 2],
                       [3, 4, 0]]])
tf.matrix_set_diag(input, diagonals, k = (-1, 2))
  ==> [[[1, 6, 9, 7],  # Output shape: (2, 3, 4)
        [4, 2, 5, 1],
        [7, 5, 3, 8]],
       [[6, 5, 1, 7],
        [3, 1, 6, 2],
        [7, 4, 2, 4]]]

# LEFT_RIGHT alignment.
diagonals = np.array([[[9, 1, 0],  # Diagonal shape: (2, 4, 3)
                       [6, 5, 8],
                       [1, 2, 3],
                       [0, 4, 5]],
                      [[1, 2, 0],
                       [5, 6, 4],
                       [6, 1, 2],
                       [0, 3, 4]]])
tf.matrix_set_diag(input, diagonals, k = (-1, 2), align="LEFT_RIGHT")
  ==> [[[1, 6, 9, 7],  # Output shape: (2, 3, 4)
        [4, 2, 5, 1],
        [7, 5, 3, 8]],
       [[6, 5, 1, 7],
        [3, 1, 6, 2],
        [7, 4, 2, 4]]]
```
Arguments:
input: Rank `r+1`, where `r >= 1`. diagonal: Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`. `k >= 1`.
k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
diagonal, and negative value means subdiagonals. `k` can be a single integer (for a single diagonal) or a pair of integers specifying the low and high ends of a matrix band. `k[0]` must not be larger than `k[1]`.
Returns Rank `r+1`, with `output.shape = input.shape`.
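A minimal usage sketch (graph construction only; it uses the same package path as the MatrixInverse sketch above and the values are illustrative):
```go
package main

import "github.com/tensorflow/tensorflow/tensorflow/go/op"

func main() {
	s := op.NewScope()
	input := op.Const(s, [][]float32{{7, 7, 7}, {7, 7, 7}, {7, 7, 7}})
	diag := op.Const(s, []float32{1, 2, 3})
	k := op.Const(s, int32(0)) // main diagonal
	// Produces a 3x3 matrix with [1 2 3] on the main diagonal and 7 elsewhere.
	out := op.MatrixSetDiagV3(s, input, diag, k)
	_ = out
	if s.Err() != nil {
		panic(s.Err())
	}
}
```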
func MatrixSolve ¶
func MatrixSolve(scope *Scope, matrix tf.Output, rhs tf.Output, optional ...MatrixSolveAttr) (output tf.Output)
Solves systems of linear equations.
`Matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices. `Rhs` is a tensor of shape `[..., M, K]`. The `output` is a tensor of shape `[..., M, K]`. If `adjoint` is `False` then each output matrix satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. If `adjoint` is `True` then each output matrix satisfies `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.
Arguments:
matrix: Shape is `[..., M, M]`. rhs: Shape is `[..., M, K]`.
Returns Shape is `[..., M, K]`.
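A minimal usage sketch (graph construction only; package path and values are illustrative):
```go
package main

import "github.com/tensorflow/tensorflow/tensorflow/go/op"

func main() {
	s := op.NewScope()
	matrix := op.Const(s, [][]float32{{3, 1}, {1, 2}})
	rhs := op.Const(s, [][]float32{{9}, {8}})
	// Solves matrix * x = rhs; for these values x is approximately [[2], [3]].
	x := op.MatrixSolve(s, matrix, rhs)
	_ = x
	if s.Err() != nil {
		panic(s.Err())
	}
}
```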
func MatrixSolveLs ¶
func MatrixSolveLs(scope *Scope, matrix tf.Output, rhs tf.Output, l2_regularizer tf.Output, optional ...MatrixSolveLsAttr) (output tf.Output)
Solves one or more linear least-squares problems.
`matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions form real or complex matrices of size `[M, N]`. `Rhs` is a tensor of the same type as `matrix` and shape `[..., M, K]`. The output is a tensor of shape `[..., N, K]` where each output matrix solves each of the equations `matrix[..., :, :]` * `output[..., :, :]` = `rhs[..., :, :]` in the least squares sense.
We use the following notation for (complex) matrix and right-hand sides in the batch:
`matrix`=\\(A \in \mathbb{C}^{m \times n}\\), `rhs`=\\(B \in \mathbb{C}^{m \times k}\\), `output`=\\(X \in \mathbb{C}^{n \times k}\\), `l2_regularizer`=\\(\lambda \in \mathbb{R}\\).
If `fast` is `True`, then the solution is computed by solving the normal equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then \\(X = (A^H A + \lambda I)^{-1} A^H B\\), which solves the least-squares problem \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k} } ||A Z - B||_F^2 + \lambda ||Z||_F^2\\). If \\(m \lt n\\) then `output` is computed as \\(X = A^H (A A^H + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is the minimum-norm solution to the under-determined linear system, i.e. \\(X = \mathrm{argmin}_{Z \in \mathbb{C}^{n \times k} } ||Z||_F^2 \\), subject to \\(A Z = B\\). Notice that the fast path is only numerically stable when \\(A\\) is numerically full rank and has a condition number \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach} } }\\) or \\(\lambda\\) is sufficiently large.
If `fast` is `False` an algorithm based on the numerically robust complete orthogonal decomposition is used. This computes the minimum-norm least-squares solution, even when \\(A\\) is rank deficient. This path is typically 6-7 times slower than the fast path. If `fast` is `False` then `l2_regularizer` is ignored.
Arguments:
matrix: Shape is `[..., M, N]`. rhs: Shape is `[..., M, K]`. l2_regularizer: Scalar tensor.
@compatibility(numpy) Equivalent to np.linalg.lstsq @end_compatibility
Returns Shape is `[..., N, K]`.
func MatrixSquareRoot ¶
Computes the matrix square root of one or more square matrices:
matmul(sqrtm(A), sqrtm(A)) = A
The input matrix should be invertible. If the input matrix is real, it should have no eigenvalues which are real and negative (pairs of complex conjugate eigenvalues are allowed).
The matrix square root is computed by first reducing the matrix to quasi-triangular form with the real Schur decomposition. The square root of the quasi-triangular matrix is then computed directly. Details of the algorithm can be found in: Nicholas J. Higham, "Computing real square roots of a real matrix", Linear Algebra Appl., 1987.
The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices. The output is a tensor of the same shape as the input containing the matrix square root for all input submatrices `[..., :, :]`.
Arguments:
input: Shape is `[..., M, M]`.
Returns Shape is `[..., M, M]`.
@compatibility(scipy) Equivalent to scipy.linalg.sqrtm @end_compatibility
func MatrixTriangularSolve ¶
func MatrixTriangularSolve(scope *Scope, matrix tf.Output, rhs tf.Output, optional ...MatrixTriangularSolveAttr) (output tf.Output)
Solves systems of linear equations with upper or lower triangular matrices by backsubstitution.
`matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices. If `lower` is `True` then the strictly upper triangular part of each inner-most matrix is assumed to be zero and not accessed. If `lower` is False then the strictly lower triangular part of each inner-most matrix is assumed to be zero and not accessed. `rhs` is a tensor of shape `[..., M, N]`.
The output is a tensor of shape `[..., M, N]`. If `adjoint` is `False` then the innermost matrices in `output` satisfy matrix equations `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. If `adjoint` is `True` then the innermost matrices in `output` satisfy matrix equations `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.
Note, the batch shapes for the inputs only need to broadcast.
Example:
```python
a = tf.constant([[3,  0,  0,  0],
                 [2,  1,  0,  0],
                 [1,  0,  1,  0],
                 [1,  1,  1,  1]], dtype=tf.float32)

b = tf.constant([[4],
                 [2],
                 [4],
                 [2]], dtype=tf.float32)

x = tf.linalg.triangular_solve(a, b, lower=True)
x
# <tf.Tensor: shape=(4, 1), dtype=float32, numpy=
# array([[ 1.3333334 ],
#        [-0.66666675],
#        [ 2.6666665 ],
#        [-1.3333331 ]], dtype=float32)>

# in python3 one can use `a@x`
tf.matmul(a, x)
# <tf.Tensor: shape=(4, 1), dtype=float32, numpy=
# array([[4.       ],
#        [2.       ],
#        [4.       ],
#        [1.9999999]], dtype=float32)>
```
Arguments:
matrix: Shape is `[..., M, M]`. rhs: Shape is `[..., M, K]`.
Returns Shape is `[..., M, K]`.
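A Go sketch mirroring the Python example above (graph construction only; `lower` is left at its default and the values are copied from that example):
```go
package main

import "github.com/tensorflow/tensorflow/tensorflow/go/op"

func main() {
	s := op.NewScope()
	a := op.Const(s, [][]float32{
		{3, 0, 0, 0},
		{2, 1, 0, 0},
		{1, 0, 1, 0},
		{1, 1, 1, 1},
	})
	b := op.Const(s, [][]float32{{4}, {2}, {4}, {2}})
	// With the default lower-triangular setting this solves a * x = b by forward substitution.
	x := op.MatrixTriangularSolve(s, a, b)
	_ = x
	if s.Err() != nil {
		panic(s.Err())
	}
}
```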
func Max ¶
Computes the maximum of elements across dimensions of a tensor.
Reduces `input` along the dimensions given in `axis`. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keep_dims` is true, the reduced dimensions are retained with length 1.
Arguments:
input: The tensor to reduce. axis: The dimensions to reduce. Must be in the range
`[-rank(input), rank(input))`.
Returns The reduced tensor.
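A minimal usage sketch (graph construction only; it assumes `axis` is passed as a `tf.Output`, consistent with the Arguments above, and keep_dims is left at its default):
```go
package main

import "github.com/tensorflow/tensorflow/tensorflow/go/op"

func main() {
	s := op.NewScope()
	t := op.Const(s, [][]float32{{1, 5, 3}, {4, 2, 6}})
	axis := op.Const(s, int32(1)) // reduce across columns
	// Yields [5 6]; the reduced dimension is dropped because keep_dims defaults to false.
	m := op.Max(s, t, axis)
	_ = m
	if s.Err() != nil {
		panic(s.Err())
	}
}
```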
func MaxIntraOpParallelismDataset ¶
func MaxIntraOpParallelismDataset(scope *Scope, input_dataset tf.Output, max_intra_op_parallelism tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
Creates a dataset that overrides the maximum intra-op parallelism.
Arguments:
max_intra_op_parallelism: Identifies the maximum intra-op parallelism to use.
func MaxPool ¶
func MaxPool(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolAttr) (output tf.Output)
Performs max pooling on the input.
Arguments:
input: 4-D input to pool over. ksize: The size of the window for each dimension of the input tensor. strides: The stride of the sliding window for each dimension of the
input tensor.
padding: The type of padding algorithm to use.
Returns The max pooled output tensor.
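A minimal usage sketch (graph construction only; values are illustrative):
```go
package main

import "github.com/tensorflow/tensorflow/tensorflow/go/op"

func main() {
	s := op.NewScope()
	// A 1x4x4x1 input (batch, height, width, channels) in the default NHWC layout.
	img := op.Const(s, [][][][]float32{
		{
			{{1}, {2}, {3}, {4}},
			{{5}, {6}, {7}, {8}},
			{{9}, {10}, {11}, {12}},
			{{13}, {14}, {15}, {16}},
		},
	})
	// 2x2 window, stride 2, no padding: output shape [1, 2, 2, 1] with values 6, 8, 14, 16.
	pooled := op.MaxPool(s, img, []int64{1, 2, 2, 1}, []int64{1, 2, 2, 1}, "VALID")
	_ = pooled
	if s.Err() != nil {
		panic(s.Err())
	}
}
```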
func MaxPool3D ¶
func MaxPool3D(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPool3DAttr) (output tf.Output)
Performs 3D max pooling on the input.
Arguments:
input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over. ksize: 1-D tensor of length 5. The size of the window for each dimension of
the input tensor. Must have `ksize[0] = ksize[4] = 1`.
strides: 1-D tensor of length 5. The stride of the sliding window for each
dimension of `input`. Must have `strides[0] = strides[4] = 1`.
padding: The type of padding algorithm to use.
Returns The max pooled output tensor.
func MaxPool3DGrad ¶
func MaxPool3DGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPool3DGradAttr) (output tf.Output)
Computes gradients of 3D max pooling function.
Arguments:
orig_input: The original input tensor. orig_output: The original output tensor. grad: Output backprop of shape `[batch, depth, rows, cols, channels]`. ksize: 1-D tensor of length 5. The size of the window for each dimension of
the input tensor. Must have `ksize[0] = ksize[4] = 1`.
strides: 1-D tensor of length 5. The stride of the sliding window for each
dimension of `input`. Must have `strides[0] = strides[4] = 1`.
padding: The type of padding algorithm to use.
func MaxPool3DGradGrad ¶
func MaxPool3DGradGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPool3DGradGradAttr) (output tf.Output)
Computes second-order gradients of the maxpooling function.
Arguments:
orig_input: The original input tensor. orig_output: The original output tensor. grad: Output backprop of shape `[batch, depth, rows, cols, channels]`. ksize: 1-D tensor of length 5. The size of the window for each dimension of
the input tensor. Must have `ksize[0] = ksize[4] = 1`.
strides: 1-D tensor of length 5. The stride of the sliding window for each
dimension of `input`. Must have `strides[0] = strides[4] = 1`.
padding: The type of padding algorithm to use.
Returns Gradients of gradients w.r.t. the input to `max_pool`.
func MaxPoolGrad ¶
func MaxPoolGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolGradAttr) (output tf.Output)
Computes gradients of the maxpooling function.
Arguments:
orig_input: The original input tensor. orig_output: The original output tensor. grad: 4-D. Gradients w.r.t. the output of `max_pool`. ksize: The size of the window for each dimension of the input tensor. strides: The stride of the sliding window for each dimension of the
input tensor.
padding: The type of padding algorithm to use.
Returns Gradients w.r.t. the input to `max_pool`.
func MaxPoolGradGrad ¶
func MaxPoolGradGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolGradGradAttr) (output tf.Output)
Computes second-order gradients of the maxpooling function.
Arguments:
orig_input: The original input tensor. orig_output: The original output tensor. grad: 4-D. Gradients of gradients w.r.t. the input of `max_pool`. ksize: The size of the window for each dimension of the input tensor. strides: The stride of the sliding window for each dimension of the
input tensor.
padding: The type of padding algorithm to use.
Returns Gradients of gradients w.r.t. the input to `max_pool`.
func MaxPoolGradGradV2 ¶
func MaxPoolGradGradV2(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize tf.Output, strides tf.Output, padding string, optional ...MaxPoolGradGradV2Attr) (output tf.Output)
Computes second-order gradients of the maxpooling function.
Arguments:
orig_input: The original input tensor. orig_output: The original output tensor. grad: 4-D. Gradients of gradients w.r.t. the input of `max_pool`. ksize: The size of the window for each dimension of the input tensor. strides: The stride of the sliding window for each dimension of the
input tensor.
padding: The type of padding algorithm to use.
Returns Gradients of gradients w.r.t. the input to `max_pool`.
func MaxPoolGradGradWithArgmax ¶
func MaxPoolGradGradWithArgmax(scope *Scope, input tf.Output, grad tf.Output, argmax tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolGradGradWithArgmaxAttr) (output tf.Output)
Computes second-order gradients of the maxpooling function.
Arguments:
input: The original input. grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the
input of `max_pool`.
argmax: The indices of the maximum values chosen for each output of `max_pool`. ksize: The size of the window for each dimension of the input tensor. strides: The stride of the sliding window for each dimension of the
input tensor.
padding: The type of padding algorithm to use.
Returns Gradients of gradients w.r.t. the input of `max_pool`.
func MaxPoolGradV2 ¶
func MaxPoolGradV2(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize tf.Output, strides tf.Output, padding string, optional ...MaxPoolGradV2Attr) (output tf.Output)
Computes gradients of the maxpooling function.
Arguments:
orig_input: The original input tensor. orig_output: The original output tensor. grad: 4-D. Gradients w.r.t. the output of `max_pool`. ksize: The size of the window for each dimension of the input tensor. strides: The stride of the sliding window for each dimension of the
input tensor.
padding: The type of padding algorithm to use.
Returns Gradients w.r.t. the input to `max_pool`.
func MaxPoolGradWithArgmax ¶
func MaxPoolGradWithArgmax(scope *Scope, input tf.Output, grad tf.Output, argmax tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolGradWithArgmaxAttr) (output tf.Output)
Computes gradients of the maxpooling function.
Arguments:
input: The original input. grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the
output of `max_pool`.
argmax: The indices of the maximum values chosen for each output of `max_pool`. ksize: The size of the window for each dimension of the input tensor. strides: The stride of the sliding window for each dimension of the
input tensor.
padding: The type of padding algorithm to use.
Returns Gradients w.r.t. the input of `max_pool`.
func MaxPoolV2 ¶
func MaxPoolV2(scope *Scope, input tf.Output, ksize tf.Output, strides tf.Output, padding string, optional ...MaxPoolV2Attr) (output tf.Output)
Performs max pooling on the input.
Arguments:
input: 4-D input to pool over. ksize: The size of the window for each dimension of the input tensor. strides: The stride of the sliding window for each dimension of the
input tensor.
padding: The type of padding algorithm to use.
Returns The max pooled output tensor.
func MaxPoolWithArgmax ¶
func MaxPoolWithArgmax(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolWithArgmaxAttr) (output tf.Output, argmax tf.Output)
Performs max pooling on the input and outputs both max values and indices.
The indices in `argmax` are flattened, so that a maximum value at position `[b, y, x, c]` becomes flattened index: `(y * width + x) * channels + c` if `include_batch_in_index` is False; `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True.
The indices returned are always in `[0, height) x [0, width)` before flattening, even if padding is involved and the mathematically correct answer is outside (either negative or too large). This is a bug, but fixing it is difficult to do in a safe backwards compatible way, especially due to flattening.
Arguments:
input: 4-D with shape `[batch, height, width, channels]`. Input to pool over. ksize: The size of the window for each dimension of the input tensor. strides: The stride of the sliding window for each dimension of the
input tensor.
padding: The type of padding algorithm to use.
Returns:
output: The max pooled output tensor. argmax: 4-D. The flattened indices of the max values chosen for each output.
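A minimal usage sketch (graph construction only) illustrating the flattened-index convention described above:
```go
package main

import "github.com/tensorflow/tensorflow/tensorflow/go/op"

func main() {
	s := op.NewScope()
	// A 1x2x2x1 input; the maximum (9) sits at [b=0, y=0, x=1, c=0].
	img := op.Const(s, [][][][]float32{
		{
			{{1}, {9}},
			{{3}, {4}},
		},
	})
	out, argmax := op.MaxPoolWithArgmax(s, img, []int64{1, 2, 2, 1}, []int64{1, 2, 2, 1}, "VALID")
	// With include_batch_in_index at its default (false), the flattened index of the
	// maximum is (y*width + x)*channels + c = (0*2 + 1)*1 + 0 = 1.
	_, _ = out, argmax
	if s.Err() != nil {
		panic(s.Err())
	}
}
```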
func Maximum ¶
Returns the max of x and y (i.e. x > y ? x : y) element-wise.
*NOTE*: `Maximum` supports broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func Mean ¶
Computes the mean of elements across dimensions of a tensor.
Reduces `input` along the dimensions given in `axis`. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keep_dims` is true, the reduced dimensions are retained with length 1.
Arguments:
input: The tensor to reduce. axis: The dimensions to reduce. Must be in the range
`[-rank(input), rank(input))`.
Returns The reduced tensor.
func Merge ¶
Forwards the value of an available tensor from `inputs` to `output`.
`Merge` waits for at least one of the tensors in `inputs` to become available. It is usually combined with `Switch` to implement branching.
`Merge` forwards the first tensor to become available to `output`, and sets `value_index` to its index in `inputs`.
Arguments:
inputs: The input tensors, exactly one of which will become available.
Returns:
output: Will be set to the available input tensor. value_index: The index of the chosen input tensor in `inputs`.
func MergeDedupData ¶ added in v0.4.0
func MergeDedupData(scope *Scope, integer_tensor tf.Output, float_tensor tf.Output, tuple_mask string, optional ...MergeDedupDataAttr) (output tf.Output)
An op that merges elements of integer and float tensors into deduplication data as an XLA tuple.
This op merges the outputs of SplitDedupDataOp, which are two 1-D tensors, one integer and one floating point. Guided by tuple_mask, this op merges the values of these two tensors into an XLA tuple, which should be the same as the input to SplitDedupDataOp.
Arguments:
integer_tensor: A 1-D integer tensor, includes integer elements of deduplication data tuple. float_tensor: A 1-D float tensor, includes float elements of deduplication data tuple. tuple_mask: A serialized TensorProto string of output tuple mask. This mask is a 2-D tensor,
with the first column as the tuple element type, and the second column as the span of this type. For example, for an output tuple of (1, 2, 0.1, 3), its mask is [[0, 2], [1, 1], [0, 1]]. We expect only two types of elements: integer(0) and float(1).
Returns An XLA tuple merging integer and float elements as deduplication data tuple.
func MergeSummary ¶
Merges summaries.
This op creates a [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) protocol buffer that contains the union of all the values in the input summaries.
When the Op is run, it reports an `InvalidArgument` error if multiple values in the summaries to merge use the same tag.
Arguments:
inputs: Can be of any shape. Each must contain serialized `Summary` protocol
buffers.
Returns Scalar. Serialized `Summary` protocol buffer.
func MergeV2Checkpoints ¶
func MergeV2Checkpoints(scope *Scope, checkpoint_prefixes tf.Output, destination_prefix tf.Output, optional ...MergeV2CheckpointsAttr) (o *tf.Operation)
V2 format specific: merges the metadata files of sharded checkpoints. The
result is one logical checkpoint, with one physical metadata file and renamed data files.
Intended for "grouping" multiple checkpoints in a sharded checkpoint setup.
If delete_old_dirs is true, attempts to delete recursively the dirname of each path in the input checkpoint_prefixes. This is useful when those paths are non user-facing temporary locations.
If allow_missing_files is true, merges the checkpoint prefixes as long as at least one file exists. Otherwise, if no files exist, an error will be thrown. The default value for allow_missing_files is false.
Arguments:
checkpoint_prefixes: prefixes of V2 checkpoints to merge. destination_prefix: scalar. The desired final prefix. Allowed to be the same
as one of the checkpoint_prefixes.
Returns the created operation.
func Mfcc ¶
func Mfcc(scope *Scope, spectrogram tf.Output, sample_rate tf.Output, optional ...MfccAttr) (output tf.Output)
Transforms a spectrogram into a form that's useful for speech recognition.
Mel Frequency Cepstral Coefficients are a way of representing audio data that's been effective as an input feature for machine learning. They are created by taking the spectrum of a spectrogram (a 'cepstrum'), and discarding some of the higher frequencies that are less significant to the human ear. They have a long history in the speech recognition world, and https://en.wikipedia.org/wiki/Mel-frequency_cepstrum is a good resource to learn more.
Arguments:
spectrogram: Typically produced by the Spectrogram op, with magnitude_squared
set to true.
sample_rate: How many samples per second the source audio used.
func Min ¶
Computes the minimum of elements across dimensions of a tensor.
Reduces `input` along the dimensions given in `axis`. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keep_dims` is true, the reduced dimensions are retained with length 1.
Arguments:
input: The tensor to reduce. axis: The dimensions to reduce. Must be in the range
`[-rank(input), rank(input))`.
Returns The reduced tensor.
func Minimum ¶
Returns the min of x and y (i.e. x < y ? x : y) element-wise.
*NOTE*: `Minimum` supports broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func MirrorPad ¶
Pads a tensor with mirrored values.
This operation pads an `input` with mirrored values according to the `paddings` you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates how many values to add before the contents of `input` in that dimension, and `paddings[D, 1]` indicates how many values to add after the contents of `input` in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true (if false, respectively).
The padded size of each dimension D of the output is:
`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
For example:
```
# 't' is [[1, 2, 3], [4, 5, 6]].
# 'paddings' is [[1, 1], [2, 2]].
# 'mode' is SYMMETRIC.
# rank of 't' is 2.
pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
                      [2, 1, 1, 2, 3, 3, 2]
                      [5, 4, 4, 5, 6, 6, 5]
                      [5, 4, 4, 5, 6, 6, 5]]
```
Arguments:
input: The input tensor to be padded. paddings: A two-column matrix specifying the padding sizes. The number of
rows must be the same as the rank of `input`.
mode: Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions
do not include the borders, while in symmetric mode the padded regions do include the borders. For example, if `input` is `[1, 2, 3]` and `paddings` is `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and it is `[1, 2, 3, 3, 2]` in symmetric mode.
Returns The padded tensor.
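A minimal usage sketch (graph construction only; it assumes MirrorPad takes (scope, input, paddings, mode string), mirroring the MirrorPadGrad signature below, and reuses the `[1, 2, 3]` example from the mode description):
```go
package main

import "github.com/tensorflow/tensorflow/tensorflow/go/op"

func main() {
	s := op.NewScope()
	t := op.Const(s, []float32{1, 2, 3})
	paddings := op.Const(s, [][]int32{{0, 2}}) // pad 0 before and 2 after the single dimension
	// In REFLECT mode this yields [1 2 3 2 1], as described above.
	out := op.MirrorPad(s, t, paddings, "REFLECT")
	_ = out
	if s.Err() != nil {
		panic(s.Err())
	}
}
```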
func MirrorPadGrad ¶
func MirrorPadGrad(scope *Scope, input tf.Output, paddings tf.Output, mode string) (output tf.Output)
Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor.
This operation folds the padded areas of `input` by `MirrorPad` according to the `paddings` you specify. `paddings` must be the same as `paddings` argument given to the corresponding `MirrorPad` op.
The folded size of each dimension D of the output is:
`input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`
For example:
```
# 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
# 'paddings' is [[0, 1], [0, 1]].
# 'mode' is SYMMETRIC.
# rank of 't' is 2.
pad(t, paddings) ==> [[ 1,  5]
                      [11, 28]]
```
Arguments:
input: The input tensor to be folded. paddings: A two-column matrix specifying the padding sizes. The number of
rows must be the same as the rank of `input`.
mode: The mode used in the `MirrorPad` op.
Returns The folded tensor.
func MlirPassthroughOp ¶
func MlirPassthroughOp(scope *Scope, inputs []tf.Output, mlir_module string, Toutputs []tf.DataType) (outputs []tf.Output)
Wraps an arbitrary MLIR computation expressed as a module with a main() function.
This operation does not have an associated kernel and is not intended to be executed in a regular TensorFlow session. Instead it is intended to be used for testing or for special cases where a user intends to pass a custom MLIR computation through a TensorFlow graph with the intent of having custom tooling process it downstream (when targeting a different environment, like TensorFlow Lite for example). The MLIR module is expected to have a main() function that will be used as an entry point. The inputs to the operation will be passed as arguments to the main() function and the returned values of the main() function are mapped to the outputs. Example usage:
```
import tensorflow as tf
from tensorflow.compiler.mlir.tensorflow.gen_mlir_passthrough_op import mlir_passthrough_op

mlir_module = '''python
func @main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10x10xf32> {
   %add = "magic.op"(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10x10xf32>
   return %ret : tensor<10x10xf32>
}
'''

@tf.function
def foo(x, y):
  return mlir_passthrough_op([x, y], mlir_module, Toutputs=[tf.float32])

graph_def = foo.get_concrete_function(tf.TensorSpec([10], tf.float32), tf.TensorSpec([10], tf.float32)).graph.as_graph_def()
```
func Mod ¶
Returns element-wise remainder of division. This emulates C semantics in that
the result here is consistent with a truncating divide. E.g. `tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`.
*NOTE*: `Mod` supports broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func ModelDataset ¶
func ModelDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ModelDatasetAttr) (handle tf.Output)
Identity transformation that models performance.
Identity transformation that models performance.
Arguments:
input_dataset: A variant tensor representing the input dataset.
func Mul ¶
Returns x * y element-wise.
*NOTE*: `Multiply` supports broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func MulNoNan ¶
Returns x * y element-wise. Returns zero if y is zero, even if x is infinite or NaN.
*NOTE*: `MulNoNan` supports broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func MultiDeviceIterator ¶
func MultiDeviceIterator(scope *Scope, devices []string, shared_name string, container string, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
Creates a MultiDeviceIterator resource.
Arguments:
devices: A list of devices the iterator works across. shared_name: If non-empty, this resource will be shared under the given name
across multiple sessions.
container: If non-empty, this resource is placed in the given container.
Otherwise, a default container is used.
output_types: The type list for the return values. output_shapes: The list of shapes being produced.
Returns Handle to the resource created.
func MultiDeviceIteratorFromStringHandle ¶
func MultiDeviceIteratorFromStringHandle(scope *Scope, string_handle tf.Output, optional ...MultiDeviceIteratorFromStringHandleAttr) (multi_device_iterator tf.Output)
Generates a MultiDeviceIterator resource from its provided string handle.
Arguments:
string_handle: String representing the resource.
Returns A MultiDeviceIterator resource.
func MultiDeviceIteratorGetNextFromShard ¶
func MultiDeviceIteratorGetNextFromShard(scope *Scope, multi_device_iterator tf.Output, shard_num tf.Output, incarnation_id tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output)
Gets next element for the provided shard number.
Arguments:
multi_device_iterator: A MultiDeviceIterator resource. shard_num: Integer representing which shard to fetch data for. incarnation_id: Which incarnation of the MultiDeviceIterator is running. output_types: The type list for the return values. output_shapes: The list of shapes being produced.
Returns Result of the get_next on the dataset.
func MultiDeviceIteratorInit ¶
func MultiDeviceIteratorInit(scope *Scope, dataset tf.Output, multi_device_iterator tf.Output, max_buffer_size tf.Output) (incarnation_id tf.Output)
Initializes the multi device iterator with the given dataset.
Arguments:
dataset: Dataset to be iterated upon. multi_device_iterator: A MultiDeviceIteratorResource. max_buffer_size: The maximum size of the host side per device buffer to keep.
Returns An int64 indicating which incarnation of the MultiDeviceIterator is running.
func MultiDeviceIteratorToStringHandle ¶
func MultiDeviceIteratorToStringHandle(scope *Scope, multi_device_iterator tf.Output) (string_handle tf.Output)
Produces a string handle for the given MultiDeviceIterator.
Arguments:
multi_device_iterator: A MultiDeviceIterator resource.
Returns A string representing the resource.
func Multinomial ¶
func Multinomial(scope *Scope, logits tf.Output, num_samples tf.Output, optional ...MultinomialAttr) (output tf.Output)
Draws samples from a multinomial distribution.
Arguments:
logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]`
represents the unnormalized log probabilities for all classes.
num_samples: 0-D. Number of independent samples to draw for each row slice.
Returns 2-D Tensor with shape `[batch_size, num_samples]`. Each slice `[i, :]` contains the drawn class labels with range `[0, num_classes)`.
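A minimal usage sketch (graph construction only; values are illustrative):
```go
package main

import "github.com/tensorflow/tensorflow/tensorflow/go/op"

func main() {
	s := op.NewScope()
	// Unnormalized log-probabilities for a batch of one distribution over 3 classes.
	logits := op.Const(s, [][]float32{{-1.0, 0.0, 1.0}})
	numSamples := op.Const(s, int32(5))
	// Output has shape [1, 5] with class labels drawn from [0, 3).
	samples := op.Multinomial(s, logits, numSamples)
	_ = samples
	if s.Err() != nil {
		panic(s.Err())
	}
}
```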
func MutableDenseHashTableV2 ¶
func MutableDenseHashTableV2(scope *Scope, empty_key tf.Output, deleted_key tf.Output, value_dtype tf.DataType, optional ...MutableDenseHashTableV2Attr) (table_handle tf.Output)
Creates an empty hash table that uses tensors as the backing store.
It uses "open addressing" with quadratic reprobing to resolve collisions.
This op creates a mutable hash table, specifying the type of its keys and values. Each value must be a scalar. Data can be inserted into the table using the insert operations. It does not support the initialization operation.
Arguments:
empty_key: The key used to represent empty key buckets internally. Must not
be used in insert or lookup operations.
value_dtype: Type of the table values.
Returns Handle to a table.
func MutableHashTableOfTensorsV2 ¶
func MutableHashTableOfTensorsV2(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, optional ...MutableHashTableOfTensorsV2Attr) (table_handle tf.Output)
Creates an empty hash table.
This op creates a mutable hash table, specifying the type of its keys and values. Each value must be a vector. Data can be inserted into the table using the insert operations. It does not support the initialization operation.
Arguments:
key_dtype: Type of the table keys. value_dtype: Type of the table values.
Returns Handle to a table.
func MutableHashTableV2 ¶
func MutableHashTableV2(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, optional ...MutableHashTableV2Attr) (table_handle tf.Output)
Creates an empty hash table.
This op creates a mutable hash table, specifying the type of its keys and values. Each value must be a scalar. Data can be inserted into the table using the insert operations. It does not support the initialization operation.
Arguments:
key_dtype: Type of the table keys. value_dtype: Type of the table values.
Returns Handle to a table.
func MutexLock ¶
Locks a mutex resource. The output is the lock. So long as the lock tensor
is alive, any other request to use `MutexLock` with this mutex will wait.
This is particularly useful for creating a critical section when used in conjunction with `MutexLockIdentity`:
```python
mutex = mutex_v2(
    shared_name=handle_name, container=container, name=name)

def execute_in_critical_section(fn, *args, **kwargs):
  lock = gen_resource_variable_ops.mutex_lock(mutex)

  with ops.control_dependencies([lock]):
    r = fn(*args, **kwargs)

  with ops.control_dependencies(nest.flatten(r)):
    with ops.colocate_with(mutex):
      ensure_lock_exists = mutex_lock_identity(lock)

    # Make sure that if any element of r is accessed, all of
    # them are executed together.
    r = nest.map_structure(tf.identity, r)

  with ops.control_dependencies([ensure_lock_exists]):
    return nest.map_structure(tf.identity, r)
```
While `fn` is running in the critical section, no other functions which wish to use this critical section may run.
Often the use case is that two executions of the same graph, in parallel, wish to run `fn`; and we wish to ensure that only one of them executes at a time. This is especially important if `fn` modifies one or more variables at a time.
It is also useful if two separate functions must share a resource, but we wish to ensure the usage is exclusive.
Arguments:
mutex: The mutex resource to lock.
Returns A tensor that keeps a shared pointer to a lock on the mutex; when the Tensor is destroyed, the use count on the shared pointer is decreased by 1. When it reaches 0, the lock is released.
func MutexV2 ¶
func MutexV2(scope *Scope, optional ...MutexV2Attr) (resource tf.Output)
Creates a Mutex resource that can be locked by `MutexLock`.
Returns The mutex resource.
func NcclAllReduce ¶
func NcclAllReduce(scope *Scope, input tf.Output, reduction string, num_devices int64, shared_name string) (data tf.Output)
Outputs a tensor containing the reduction across all input tensors.
Outputs a tensor containing the reduction across all input tensors passed to ops within the same `shared_name`.
The graph should be constructed so if one op runs with shared_name value `c`, then `num_devices` ops will run with shared_name value `c`. Failure to do so will cause the graph execution to fail to complete.
input: the input to the reduction. data: the value of the reduction across all `num_devices` devices. reduction: the reduction operation to perform. num_devices: The number of devices participating in this reduction. shared_name: Identifier that is shared between ops of the same reduction.
func NcclBroadcast ¶
Sends `input` to all devices that are connected to the output.
Sends `input` to all devices that are connected to the output.
The graph should be constructed so that all ops connected to the output have a valid device assignment, and the op itself is assigned one of these devices.
input: The input to the broadcast. output: The same as input. shape: The shape of the input tensor.
func NcclReduce ¶
Reduces `input` from `num_devices` using `reduction` to a single device.
Reduces `input` from `num_devices` using `reduction` to a single device.
The graph should be constructed so that all inputs have a valid device assignment, and the op itself is assigned one of these devices.
input: The input to the reduction. data: the value of the reduction across all `num_devices` devices. reduction: the reduction operation to perform.
func NearestNeighbors ¶
func NearestNeighbors(scope *Scope, points tf.Output, centers tf.Output, k tf.Output) (nearest_center_indices tf.Output, nearest_center_distances tf.Output)
Selects the k nearest centers for each point.
Rows of points are assumed to be input points. Rows of centers are assumed to be the list of candidate centers. For each point, the k centers that have least L2 distance to it are computed.
Arguments:
points: Matrix of shape (n, d). Rows are assumed to be input points. centers: Matrix of shape (m, d). Rows are assumed to be centers. k: Number of nearest centers to return for each point. If k is larger than m, then
only m centers are returned.
Returns:
nearest_center_indices: Matrix of shape (n, min(m, k)). Each row contains the indices of the centers
closest to the corresponding point, ordered by increasing distance.
nearest_center_distances: Matrix of shape (n, min(m, k)). Each row contains the squared L2 distance to the
corresponding center in nearest_center_indices.
func NextAfter ¶
Returns the next representable value of `x1` in the direction of `x2`, element-wise.
This operation returns the same result as the C++ std::nextafter function.
It can also return a subnormal number.
@compatibility(cpp) Equivalent to C++ std::nextafter function. @end_compatibility
func NextIteration ¶
Makes its input available to the next iteration.
Arguments:
data: The tensor to be made available to the next iteration.
Returns The same tensor as `data`.
func NoOp ¶
Does nothing. Only useful as a placeholder for control edges.
Returns the created operation.
func NonDeterministicInts ¶
func NonDeterministicInts(scope *Scope, shape tf.Output, optional ...NonDeterministicIntsAttr) (output tf.Output)
Non-deterministically generates some integers.
This op may use some OS-provided source of non-determinism (e.g. an RNG), so each execution will give different results.
Arguments:
shape: The shape of the output tensor.
Returns Non-deterministic integer values with specified shape.
func NonMaxSuppression ¶
func NonMaxSuppression(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, optional ...NonMaxSuppressionAttr) (selected_indices tf.Output)
Greedily selects a subset of bounding boxes in descending order of score,
pruning away boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes are supplied as [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm is agnostic to where the origin is in the coordinate system. Note that this algorithm is invariant to orthogonal transformations and translations of the coordinate system; thus translations or reflections of the coordinate system result in the same boxes being selected by the algorithm. The output of this operation is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.gather` operation. For example:
selected_indices = tf.image.non_max_suppression(
    boxes, scores, max_output_size, iou_threshold)
selected_boxes = tf.gather(boxes, selected_indices)
Arguments:
boxes: A 2-D float tensor of shape `[num_boxes, 4]`. scores: A 1-D float tensor of shape `[num_boxes]` representing a single
score corresponding to each box (each row of boxes).
max_output_size: A scalar integer tensor representing the maximum number of
boxes to be selected by non max suppression.
Returns A 1-D integer tensor of shape `[M]` representing the selected indices from the boxes tensor, where `M <= max_output_size`.
func NonMaxSuppressionV2 ¶
func NonMaxSuppressionV2(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, iou_threshold tf.Output) (selected_indices tf.Output)
Greedily selects a subset of bounding boxes in descending order of score,
pruning away boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes are supplied as [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm is agnostic to where the origin is in the coordinate system. Note that this algorithm is invariant to orthogonal transformations and translations of the coordinate system; thus translations or reflections of the coordinate system result in the same boxes being selected by the algorithm.
The output of this operation is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.gather` operation. For example:
selected_indices = tf.image.non_max_suppression_v2(
    boxes, scores, max_output_size, iou_threshold)
selected_boxes = tf.gather(boxes, selected_indices)
Arguments:
boxes: A 2-D float tensor of shape `[num_boxes, 4]`. scores: A 1-D float tensor of shape `[num_boxes]` representing a single
score corresponding to each box (each row of boxes).
max_output_size: A scalar integer tensor representing the maximum number of
boxes to be selected by non max suppression.
iou_threshold: A 0-D float tensor representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
Returns A 1-D integer tensor of shape `[M]` representing the selected indices from the boxes tensor, where `M <= max_output_size`.
func NonMaxSuppressionV3 ¶
func NonMaxSuppressionV3(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, iou_threshold tf.Output, score_threshold tf.Output) (selected_indices tf.Output)
Greedily selects a subset of bounding boxes in descending order of score,
pruning away boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes with score less than `score_threshold` are removed. Bounding boxes are supplied as [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm is agnostic to where the origin is in the coordinate system and more generally is invariant to orthogonal transformations and translations of the coordinate system; thus translations or reflections of the coordinate system result in the same boxes being selected by the algorithm. The output of this operation is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.gather` operation. For example:
selected_indices = tf.image.non_max_suppression_v2(
    boxes, scores, max_output_size, iou_threshold, score_threshold)
selected_boxes = tf.gather(boxes, selected_indices)
Arguments:
boxes: A 2-D float tensor of shape `[num_boxes, 4]`. scores: A 1-D float tensor of shape `[num_boxes]` representing a single
score corresponding to each box (each row of boxes).
max_output_size: A scalar integer tensor representing the maximum number of
boxes to be selected by non max suppression.
iou_threshold: A 0-D float tensor representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
boxes based on score.
Returns A 1-D integer tensor of shape `[M]` representing the selected indices from the boxes tensor, where `M <= max_output_size`.
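A minimal usage sketch (graph construction only; box coordinates and thresholds are illustrative):
```go
package main

import "github.com/tensorflow/tensorflow/tensorflow/go/op"

func main() {
	s := op.NewScope()
	boxes := op.Const(s, [][]float32{
		{0, 0, 1, 1},
		{0, 0.05, 1, 1.05}, // heavily overlaps the first box
		{0, 2, 1, 3},
	})
	scores := op.Const(s, []float32{0.9, 0.8, 0.7})
	maxOutputSize := op.Const(s, int32(3))
	iouThreshold := op.Const(s, float32(0.5))
	scoreThreshold := op.Const(s, float32(0.0))
	// Selects indices 0 and 2; box 1 is suppressed by box 0 (IOU above 0.5).
	selected := op.NonMaxSuppressionV3(s, boxes, scores, maxOutputSize, iouThreshold, scoreThreshold)
	_ = selected
	if s.Err() != nil {
		panic(s.Err())
	}
}
```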
func NonMaxSuppressionV4 ¶
func NonMaxSuppressionV4(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, iou_threshold tf.Output, score_threshold tf.Output, optional ...NonMaxSuppressionV4Attr) (selected_indices tf.Output, valid_outputs tf.Output)
Greedily selects a subset of bounding boxes in descending order of score,
pruning away boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes with score less than `score_threshold` are removed. Bounding boxes are supplied as [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm is agnostic to where the origin is in the coordinate system and more generally is invariant to orthogonal transformations and translations of the coordinate system; thus translations or reflections of the coordinate system result in the same boxes being selected by the algorithm. The output of this operation is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.gather` operation. For example:
selected_indices = tf.image.non_max_suppression_v2(
    boxes, scores, max_output_size, iou_threshold, score_threshold)
selected_boxes = tf.gather(boxes, selected_indices)
Arguments:
boxes: A 2-D float tensor of shape `[num_boxes, 4]`. scores: A 1-D float tensor of shape `[num_boxes]` representing a single
score corresponding to each box (each row of boxes).
max_output_size: A scalar integer tensor representing the maximum number of
boxes to be selected by non max suppression.
iou_threshold: A 0-D float tensor representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
boxes based on score.
Returns:
selected_indices: A 1-D integer tensor of shape `[M]` representing the selected
indices from the boxes tensor, where `M <= max_output_size`.
valid_outputs: A 0-D integer tensor representing the number of valid elements in
`selected_indices`, with the valid elements appearing first.
func NonMaxSuppressionV5 ¶
func NonMaxSuppressionV5(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, iou_threshold tf.Output, score_threshold tf.Output, soft_nms_sigma tf.Output, optional ...NonMaxSuppressionV5Attr) (selected_indices tf.Output, selected_scores tf.Output, valid_outputs tf.Output)
Greedily selects a subset of bounding boxes in descending order of score,
pruning away boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes with score less than `score_threshold` are removed. Bounding boxes are supplied as [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm is agnostic to where the origin is in the coordinate system and more generally is invariant to orthogonal transformations and translations of the coordinate system; thus translations or reflections of the coordinate system result in the same boxes being selected by the algorithm. The output of this operation is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.gather` operation. For example:
selected_indices = tf.image.non_max_suppression_v2(
    boxes, scores, max_output_size, iou_threshold, score_threshold)
selected_boxes = tf.gather(boxes, selected_indices)
This op also supports a Soft-NMS (with Gaussian weighting) mode (c.f. Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score of other overlapping boxes instead of directly causing them to be pruned. To enable this Soft-NMS mode, set the `soft_nms_sigma` parameter to be larger than 0.
Arguments:
boxes: A 2-D float tensor of shape `[num_boxes, 4]`. scores: A 1-D float tensor of shape `[num_boxes]` representing a single
score corresponding to each box (each row of boxes).
max_output_size: A scalar integer tensor representing the maximum number of
boxes to be selected by non max suppression.
iou_threshold: A 0-D float tensor representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
boxes based on score.
soft_nms_sigma: A 0-D float tensor representing the sigma parameter for Soft NMS; see Bodla et
al (c.f. https://arxiv.org/abs/1704.04503). When `soft_nms_sigma=0.0` (which is default), we fall back to standard (hard) NMS.
Returns:
selected_indices: A 1-D integer tensor of shape `[M]` representing the selected
indices from the boxes tensor, where `M <= max_output_size`.
selected_scores: A 1-D float tensor of shape `[M]` representing the corresponding
scores for each selected box, where `M <= max_output_size`. Scores only differ from corresponding input scores when using Soft NMS (i.e. when `soft_nms_sigma>0`)
valid_outputs: A 0-D integer tensor representing the number of valid elements in
`selected_indices`, with the valid elements appearing first.
func NonMaxSuppressionWithOverlaps ¶
func NonMaxSuppressionWithOverlaps(scope *Scope, overlaps tf.Output, scores tf.Output, max_output_size tf.Output, overlap_threshold tf.Output, score_threshold tf.Output) (selected_indices tf.Output)
Greedily selects a subset of bounding boxes in descending order of score,
pruning away boxes that have high overlaps with previously selected boxes. Bounding boxes with score less than `score_threshold` are removed. N-by-n overlap values are supplied as a square matrix, which allows for defining a custom overlap criterion (e.g. intersection over union, intersection over area, etc.).
The output of this operation is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.gather` operation. For example:
selected_indices = tf.image.non_max_suppression_with_overlaps(
    overlaps, scores, max_output_size, overlap_threshold, score_threshold)
selected_boxes = tf.gather(boxes, selected_indices)
Arguments:
overlaps: A 2-D float tensor of shape `[num_boxes, num_boxes]` representing
the n-by-n box overlap values.
scores: A 1-D float tensor of shape `[num_boxes]` representing a single
score corresponding to each box (each row of boxes).
max_output_size: A scalar integer tensor representing the maximum number of
boxes to be selected by non max suppression.
overlap_threshold: A 0-D float tensor representing the threshold for deciding whether
boxes overlap too much.
score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
boxes based on score.
Returns A 1-D integer tensor of shape `[M]` representing the selected indices from the boxes tensor, where `M <= max_output_size`.
func NotEqual ¶
Returns the truth value of (x != y) element-wise.
*NOTE*: `NotEqual` supports broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func NthElement ¶
func NthElement(scope *Scope, input tf.Output, n tf.Output, optional ...NthElementAttr) (values tf.Output)
Finds values of the `n`-th order statistic for the last dimension.
If the input is a vector (rank-1), finds the entry which is the n-th smallest value in the vector and outputs its value as a scalar tensor.
For matrices (resp. higher rank input), computes the entry which is the n-th smallest value in each row (resp. vector along the last dimension). Thus,
values.shape = input.shape[:-1]
Arguments:
input: 1-D or higher with last dimension at least `n+1`. n: 0-D. Position of sorted vector to select along the last dimension (along
each row for matrices). Valid range of n is `[0, input.shape[-1])`.
Returns The `n`-th order statistic along each last dimensional slice.
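A minimal usage sketch (graph construction only; `reverse` is left at its default so values are taken in ascending order):
```go
package main

import "github.com/tensorflow/tensorflow/tensorflow/go/op"

func main() {
	s := op.NewScope()
	v := op.Const(s, []float32{30, 10, 20, 40})
	n := op.Const(s, int32(1))
	// n = 1 selects the second-smallest entry along the last dimension, here 20.
	val := op.NthElement(s, v, n)
	_ = val
	if s.Err() != nil {
		panic(s.Err())
	}
}
```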
func OneHot ¶
func OneHot(scope *Scope, indices tf.Output, depth tf.Output, on_value tf.Output, off_value tf.Output, optional ...OneHotAttr) (output tf.Output)
Returns a one-hot tensor.
The locations represented by indices in `indices` take value `on_value`, while all other locations take value `off_value`.
If the input `indices` has rank `N`, the output will have rank `N+1`. The new axis is created at dimension `axis` (default: the new axis is appended at the end).
If `indices` is a scalar the output shape will be a vector of length `depth`.
If `indices` is a vector of length `features`, the output shape will be:
```
features x depth if axis == -1
depth x features if axis == 0
```
If `indices` is a matrix (batch) with shape `[batch, features]`, the output shape will be:
```
batch x features x depth if axis == -1
batch x depth x features if axis == 1
depth x batch x features if axis == 0
```
Examples
=========
Suppose that
```
indices = [0, 2, -1, 1]
depth = 3
on_value = 5.0
off_value = 0.0
axis = -1
```
Then output is `[4 x 3]`:
```
output =
  [5.0 0.0 0.0]  // one_hot(0)
  [0.0 0.0 5.0]  // one_hot(2)
  [0.0 0.0 0.0]  // one_hot(-1)
  [0.0 5.0 0.0]  // one_hot(1)
```
Suppose that
```
indices = [0, 2, -1, 1]
depth = 3
on_value = 0.0
off_value = 3.0
axis = 0
```
Then output is `[3 x 4]`:
```
output =
  [0.0 3.0 3.0 3.0]
  [3.0 3.0 3.0 0.0]
  [3.0 3.0 3.0 3.0]
  [3.0 0.0 3.0 3.0]
//  ^                one_hot(0)
//      ^            one_hot(2)
//          ^        one_hot(-1)
//              ^    one_hot(1)
```
Suppose that
```
indices = [[0, 2], [1, -1]]
depth = 3
on_value = 1.0
off_value = 0.0
axis = -1
```
Then output is `[2 x 2 x 3]`:
```
output =
  [
    [1.0, 0.0, 0.0]  // one_hot(0)
    [0.0, 0.0, 1.0]  // one_hot(2)
  ][
    [0.0, 1.0, 0.0]  // one_hot(1)
    [0.0, 0.0, 0.0]  // one_hot(-1)
  ]
```
Arguments:
indices: A tensor of indices.
depth: A scalar defining the depth of the one hot dimension.
on_value: A scalar defining the value to fill in output when `indices[j] = i`.
off_value: A scalar defining the value to fill in output when `indices[j] != i`.
Returns The one-hot tensor.
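A minimal sketch of the first example above in the package example style (indices = [0, 2, -1, 1], depth = 3, on_value = 5.0, off_value = 0.0, default axis = -1); SubScope names are arbitrary.

```go
s := NewScope()
output := OneHot(s,
	Const(s.SubScope("indices"), []int32{0, 2, -1, 1}), // indices
	Const(s.SubScope("depth"), int32(3)),               // depth
	Const(s.SubScope("on"), float32(5.0)),              // on_value
	Const(s.SubScope("off"), float32(0.0)))             // off_value
if s.Err() != nil {
	panic(s.Err())
}
// output has shape [4, 3], matching the `[4 x 3]` table above.
_ = output
```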
func OnesLike ¶
Returns a tensor of ones with the same shape and type as x.
Arguments:
x: a tensor of type T.
Returns a tensor of the same shape and type as x but filled with ones.
func OptimizeDataset ¶
func OptimizeDataset(scope *Scope, input_dataset tf.Output, optimizations tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...OptimizeDatasetAttr) (handle tf.Output)
Creates a dataset by applying optimizations to `input_dataset`.
Arguments:
input_dataset: A variant tensor representing the input dataset.
optimizations: A `tf.string` vector `tf.Tensor` identifying optimizations to use.
func OptimizeDatasetV2 ¶
func OptimizeDatasetV2(scope *Scope, input_dataset tf.Output, optimizations_enabled tf.Output, optimizations_disabled tf.Output, optimizations_default tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...OptimizeDatasetV2Attr) (handle tf.Output)
Creates a dataset by applying related optimizations to `input_dataset`.
Arguments:
input_dataset: A variant tensor representing the input dataset.
optimizations_enabled: A `tf.string` vector `tf.Tensor` identifying user enabled optimizations.
optimizations_disabled: A `tf.string` vector `tf.Tensor` identifying user disabled optimizations.
optimizations_default: A `tf.string` vector `tf.Tensor` identifying optimizations by default.
func OptionalFromValue ¶
Constructs an Optional variant from a tuple of tensors.
func OptionalGetValue ¶
func OptionalGetValue(scope *Scope, optional tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output)
Returns the value stored in an Optional variant or raises an error if none exists.
func OptionalHasValue ¶
Returns true if and only if the given Optional variant has a value.
func OptionalNone ¶
Creates an Optional variant with no value.
func OptionsDataset ¶
func OptionsDataset(scope *Scope, input_dataset tf.Output, serialized_options string, output_types []tf.DataType, output_shapes []tf.Shape, optional ...OptionsDatasetAttr) (handle tf.Output)
Creates a dataset by attaching tf.data.Options to `input_dataset`.
Arguments:
input_dataset: A variant tensor representing the input dataset.
serialized_options: A `tf.string` scalar `tf.Tensor` of serialized `tf.data.Options` protocol buffer.
func OrderedMapClear ¶
func OrderedMapClear(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapClearAttr) (o *tf.Operation)
Op removes all elements in the underlying container.
Returns the created operation.
func OrderedMapIncompleteSize ¶
func OrderedMapIncompleteSize(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapIncompleteSizeAttr) (size tf.Output)
Op returns the number of incomplete elements in the underlying container.
func OrderedMapPeek ¶
func OrderedMapPeek(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...OrderedMapPeekAttr) (values []tf.Output)
Op peeks at the values at the specified key. If the
underlying container does not contain this key, this op will block until it does. This Op is optimized for performance.
func OrderedMapSize ¶
func OrderedMapSize(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapSizeAttr) (size tf.Output)
Op returns the number of elements in the underlying container.
func OrderedMapStage ¶
func OrderedMapStage(scope *Scope, key tf.Output, indices tf.Output, values []tf.Output, dtypes []tf.DataType, optional ...OrderedMapStageAttr) (o *tf.Operation)
Stage (key, values) in the underlying container, which behaves like an ordered
associative container. Elements are ordered by key.
Arguments:
key: int64
values: a list of tensors
dtypes: A list of data types that inserted values should adhere to.
Returns the created operation.
func OrderedMapUnstage ¶
func OrderedMapUnstage(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...OrderedMapUnstageAttr) (values []tf.Output)
Op removes and returns the values associated with the key
from the underlying container. If the underlying container does not contain this key, the op will block until it does.
func OrderedMapUnstageNoKey ¶
func OrderedMapUnstageNoKey(scope *Scope, indices tf.Output, dtypes []tf.DataType, optional ...OrderedMapUnstageNoKeyAttr) (key tf.Output, values []tf.Output)
Op removes and returns the (key, value) element with the smallest
key from the underlying container. If the underlying container does not contain elements, the op will block until it does.
func OutfeedDequeue ¶
func OutfeedDequeue(scope *Scope, dtype tf.DataType, shape tf.Shape, optional ...OutfeedDequeueAttr) (output tf.Output)
Retrieves a single tensor from the computation outfeed.
This operation will block indefinitely until data is available.
Arguments:
dtype: The type of elements in the tensor.
shape: The shape of the tensor.
Returns A tensor that will be read from the device outfeed.
func OutfeedDequeueTuple ¶
func OutfeedDequeueTuple(scope *Scope, dtypes []tf.DataType, shapes []tf.Shape, optional ...OutfeedDequeueTupleAttr) (outputs []tf.Output)
Retrieve multiple values from the computation outfeed.
This operation will block indefinitely until data is available. Output `i` corresponds to XLA tuple element `i`.
Arguments:
dtypes: The element types of each element in `outputs`.
shapes: The shapes of each tensor in `outputs`.
Returns A list of tensors that will be read from the outfeed.
func OutfeedDequeueTupleV2 ¶
func OutfeedDequeueTupleV2(scope *Scope, device_ordinal tf.Output, dtypes []tf.DataType, shapes []tf.Shape) (outputs []tf.Output)
Retrieve multiple values from the computation outfeed. Device ordinal is a tensor allowing dynamic outfeed.
This operation will block indefinitely until data is available. Output `i` corresponds to XLA tuple element `i`.
Arguments:
device_ordinal: An int scalar tensor, representing the TPU device to use. This should be -1 when
the Op is running on a TPU device, and >= 0 when the Op is running on the CPU device.
dtypes: The element types of each element in `outputs`.
shapes: The shapes of each tensor in `outputs`.
Returns A list of tensors that will be read from the outfeed.
func OutfeedDequeueV2 ¶
func OutfeedDequeueV2(scope *Scope, device_ordinal tf.Output, dtype tf.DataType, shape tf.Shape) (output tf.Output)
Retrieves a single tensor from the computation outfeed. Device ordinal is a tensor allowing dynamic outfeed.
This operation will block indefinitely until data is available.
Arguments:
device_ordinal: An int scalar tensor, representing the TPU device to use. This should be -1 when
the Op is running on a TPU device, and >= 0 when the Op is running on the CPU device.
dtype: The type of elements in the tensor.
shape: The shape of the tensor.
Returns A tensor that will be read from the device outfeed.
func OutfeedEnqueue ¶
Enqueue a Tensor on the computation outfeed.
Arguments:
input: A tensor that will be inserted into the outfeed queue.
Returns the created operation.
func OutfeedEnqueueTuple ¶
Enqueue multiple Tensor values on the computation outfeed.
Arguments:
inputs: A list of tensors that will be inserted into the outfeed queue as an
XLA tuple.
Returns the created operation.
func Pack ¶
Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor.
Packs the `N` tensors in `values` into a tensor with rank one higher than each tensor in `values`, by packing them along the `axis` dimension. Given a list of tensors of shape `(A, B, C)`;
if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`. if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`. Etc.
For example:
```
# 'x' is [1, 4]
# 'y' is [2, 5]
# 'z' is [3, 6]
pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
```
This is the opposite of `unpack`.
Arguments:
values: Must be of same shape and type.
Returns The packed tensor.
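A small sketch of the example above in the package example style; the Pack signature is assumed to take the values as a []tf.Output, and the PackAxis attribute name follows the package's usual Func+Attr naming and is an assumption.

```go
s := NewScope()
x := Const(s.SubScope("x"), []int32{1, 4})
y := Const(s.SubScope("y"), []int32{2, 5})
z := Const(s.SubScope("z"), []int32{3, 6})
packed := Pack(s, []tf.Output{x, y, z})                             // [[1, 4], [2, 5], [3, 6]]
packedAxis1 := Pack(s.SubScope("axis1"), []tf.Output{x, y, z}, PackAxis(1)) // [[1, 2, 3], [4, 5, 6]]
if s.Err() != nil {
	panic(s.Err())
}
_, _ = packed, packedAxis1
```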
func Pad ¶
Pads a tensor with zeros.
This operation pads a `input` with zeros according to the `paddings` you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates how many zeros to add before the contents of `input` in that dimension, and `paddings[D, 1]` indicates how many zeros to add after the contents of `input` in that dimension.
The padded size of each dimension D of the output is:
`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
For example:
```
# 't' is [[1, 1], [2, 2]]
# 'paddings' is [[1, 1], [2, 2]]
# rank of 't' is 2
pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
                      [0, 0, 1, 1, 0, 0]
                      [0, 0, 2, 2, 0, 0]
                      [0, 0, 0, 0, 0, 0]]
```
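The same example as a Go sketch in the package example style; the Pad wrapper is assumed to take (input, paddings), and the SubScope names are arbitrary.

```go
s := NewScope()
t := Const(s.SubScope("t"), [][]int32{{1, 1}, {2, 2}})
paddings := Const(s.SubScope("paddings"), [][]int32{{1, 1}, {2, 2}})
padded := Pad(s, t, paddings) // shape [4, 6]: the original block surrounded by zeros
if s.Err() != nil {
	panic(s.Err())
}
_ = padded
```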
func PadV2 ¶
func PadV2(scope *Scope, input tf.Output, paddings tf.Output, constant_values tf.Output) (output tf.Output)
Pads a tensor.
This operation pads `input` according to the `paddings` and `constant_values` you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates how many padding values to add before the contents of `input` in that dimension, and `paddings[D, 1]` indicates how many padding values to add after the contents of `input` in that dimension. `constant_values` is a scalar tensor of the same type as `input` that indicates the value to use for padding `input`.
The padded size of each dimension D of the output is:
`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
For example:
```
# 't' is [[1, 1], [2, 2]]
# 'paddings' is [[1, 1], [2, 2]]
# 'constant_values' is 0
# rank of 't' is 2
pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
                      [0, 0, 1, 1, 0, 0]
                      [0, 0, 2, 2, 0, 0]
                      [0, 0, 0, 0, 0, 0]]
```
func PaddedBatchDataset ¶
func PaddedBatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, padded_shapes []tf.Output, padding_values []tf.Output, output_shapes []tf.Shape, optional ...PaddedBatchDatasetAttr) (handle tf.Output)
Creates a dataset that batches and pads `batch_size` elements from the input.
Arguments:
batch_size: A scalar representing the number of elements to accumulate in a
batch.
padded_shapes: A list of int64 tensors representing the desired padded shapes
of the corresponding output components. These shapes may be partially specified, using `-1` to indicate that a particular dimension should be padded to the maximum size of all batch elements.
padding_values: A list of scalars containing the padding value to use for
each of the outputs.
func PaddedBatchDatasetV2 ¶
func PaddedBatchDatasetV2(scope *Scope, input_dataset tf.Output, batch_size tf.Output, padded_shapes []tf.Output, padding_values []tf.Output, drop_remainder tf.Output, output_shapes []tf.Shape, optional ...PaddedBatchDatasetV2Attr) (handle tf.Output)
Creates a dataset that batches and pads `batch_size` elements from the input.
Arguments:
batch_size: A scalar representing the number of elements to accumulate in a
batch.
padded_shapes: A list of int64 tensors representing the desired padded shapes
of the corresponding output components. These shapes may be partially specified, using `-1` to indicate that a particular dimension should be padded to the maximum size of all batch elements.
padding_values: A list of scalars containing the padding value to use for
each of the outputs.
drop_remainder: A scalar representing whether the last batch should be dropped in case its size
is smaller than desired.
func PaddingFIFOQueueV2 ¶
func PaddingFIFOQueueV2(scope *Scope, component_types []tf.DataType, optional ...PaddingFIFOQueueV2Attr) (handle tf.Output)
A queue that produces elements in first-in first-out order.
Variable-size shapes are allowed by setting the corresponding shape dimensions to 0 in the shape attr. In this case DequeueMany will pad up to the maximum size of any given element in the minibatch. See below for details.
Arguments:
component_types: The type of each component in a value.
Returns The handle to the queue.
func ParallelConcat ¶
Concatenates a list of `N` tensors along the first dimension.
The input tensors are all required to have size 1 in the first dimension.
For example:
```
# 'x' is [[1, 4]]
# 'y' is [[2, 5]]
# 'z' is [[3, 6]]
parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
```
The difference between concat and parallel_concat is that concat requires all of the inputs to be computed before the operation will begin, but doesn't require that the input shapes be known during graph construction. Parallel concat will copy pieces of the input into the output as they become available; in some situations this can provide a performance benefit.
Arguments:
values: Tensors to be concatenated. All must have size 1 in the first dimension
and same shape.
shape: the final shape of the result; should be equal to the shapes of any input
but with the number of input values in the first dimension.
Returns The concatenated tensor.
func ParallelDynamicStitch ¶
Interleave the values from the `data` tensors into a single tensor.
Builds a merged tensor such that ¶
```python
merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
```
For example, if each `indices[m]` is scalar or vector, we have
```python
# Scalar indices:
merged[indices[m], ...] = data[m][...]

# Vector indices:
merged[indices[m][i], ...] = data[m][i, ...]
```
Each `data[i].shape` must start with the corresponding `indices[i].shape`, and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we must have `data[i].shape = indices[i].shape + constant`. In terms of this `constant`, the output shape is
merged.shape = [max(indices)] + constant
Values may be merged in parallel, so if an index appears in both `indices[m][i]` and `indices[n][j]`, the result may be invalid. This differs from the normal DynamicStitch operator that defines the behavior in that case.
For example:
```python
indices[0] = 6
indices[1] = [4, 1]
indices[2] = [[5, 2], [0, 3]]
data[0] = [61, 62]
data[1] = [[41, 42], [11, 12]]
data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], [51, 52], [61, 62]]
```
This method can be used to merge partitions created by `dynamic_partition` as illustrated on the following example:
```python
# Apply function (increments x_i) on elements for which a certain condition
# applies (x_i != -1 in this example).
x = tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
condition_mask = tf.not_equal(x, tf.constant(-1.))
partitioned_data = tf.dynamic_partition(
    x, tf.cast(condition_mask, tf.int32), 2)
partitioned_data[1] = partitioned_data[1] + 1.0
condition_indices = tf.dynamic_partition(
    tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32), 2)
x = tf.dynamic_stitch(condition_indices, partitioned_data)
# Here x = [1.1, -1., 6.2, 5.3, -1, 8.4]; the -1. values remain unchanged.
```
(Illustration: https://www.tensorflow.org/images/DynamicStitch.png)
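A Go sketch of the merge example above in the package example style; the ParallelDynamicStitch wrapper is assumed to take the indices and data lists as []tf.Output, and the SubScope names are arbitrary.

```go
s := NewScope()
indices := []tf.Output{
	Const(s.SubScope("i0"), int32(6)),
	Const(s.SubScope("i1"), []int32{4, 1}),
	Const(s.SubScope("i2"), [][]int32{{5, 2}, {0, 3}}),
}
data := []tf.Output{
	Const(s.SubScope("d0"), []int32{61, 62}),
	Const(s.SubScope("d1"), [][]int32{{41, 42}, {11, 12}}),
	Const(s.SubScope("d2"), [][][]int32{{{51, 52}, {21, 22}}, {{1, 2}, {31, 32}}}),
}
merged := ParallelDynamicStitch(s, indices, data)
if s.Err() != nil {
	panic(s.Err())
}
// merged == [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], [51, 52], [61, 62]]
_ = merged
```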
func ParameterizedTruncatedNormal ¶
func ParameterizedTruncatedNormal(scope *Scope, shape tf.Output, means tf.Output, stdevs tf.Output, minvals tf.Output, maxvals tf.Output, optional ...ParameterizedTruncatedNormalAttr) (output tf.Output)
Outputs random values from a normal distribution. The parameters may each be a
scalar which applies to the entire output, or a vector of length shape[0] which stores the parameters for each batch.
Arguments:
shape: The shape of the output tensor. Batches are indexed by the 0th dimension.
means: The mean parameter of each batch.
stdevs: The standard deviation parameter of each batch. Must be greater than 0.
minvals: The minimum cutoff. May be -infinity.
maxvals: The maximum cutoff. May be +infinity, and must be more than the minval
for each batch.
Returns A matrix of shape num_batches x samples_per_batch, filled with random truncated normal values using the parameters for each row.
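A brief sketch in the package example style: two batches of 1000 samples, each with its own mean, standard deviation, and cutoffs (the numbers and SubScope names are illustrative only).

```go
s := NewScope()
output := ParameterizedTruncatedNormal(s,
	Const(s.SubScope("shape"), []int32{2, 1000}),     // [num_batches, samples_per_batch]
	Const(s.SubScope("means"), []float32{0.0, 5.0}),  // means
	Const(s.SubScope("stdevs"), []float32{1.0, 2.0}), // stdevs
	Const(s.SubScope("minvals"), []float32{-2.0, 0.0}),
	Const(s.SubScope("maxvals"), []float32{2.0, 10.0}))
if s.Err() != nil {
	panic(s.Err())
}
_ = output
```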
func ParseExample ¶
func ParseExample(scope *Scope, serialized tf.Output, names tf.Output, sparse_keys []tf.Output, dense_keys []tf.Output, dense_defaults []tf.Output, sparse_types []tf.DataType, dense_shapes []tf.Shape) (sparse_indices []tf.Output, sparse_values []tf.Output, sparse_shapes []tf.Output, dense_values []tf.Output)
Transforms a vector of brain.Example protos (as strings) into typed tensors.
Arguments:
serialized: A vector containing a batch of binary serialized Example protos.
names: A vector containing the names of the serialized protos.
May contain, for example, table key (descriptive) names for the corresponding serialized protos. These are purely useful for debugging purposes, and the presence of values here has no effect on the output. May also be an empty vector if no names are available. If non-empty, this vector must be the same length as "serialized".
sparse_keys: A list of Nsparse string Tensors (scalars).
The keys expected in the Examples' features associated with sparse values.
dense_keys: A list of Ndense string Tensors (scalars).
The keys expected in the Examples' features associated with dense values.
dense_defaults: A list of Ndense Tensors (some may be empty).
dense_defaults[j] provides default values when the example's feature_map lacks dense_key[j]. If an empty Tensor is provided for dense_defaults[j], then the Feature dense_keys[j] is required. The input type is inferred from dense_defaults[j], even when it's empty. If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined, then the shape of dense_defaults[j] must match that of dense_shapes[j]. If dense_shapes[j] has an undefined major dimension (variable strides dense feature), dense_defaults[j] must contain a single element: the padding element.
sparse_types: A list of Nsparse types; the data types of data in each Feature
given in sparse_keys. Currently the ParseExample supports DT_FLOAT (FloatList), DT_INT64 (Int64List), and DT_STRING (BytesList).
dense_shapes: A list of Ndense shapes; the shapes of data in each Feature
given in dense_keys. The number of elements in the Feature corresponding to dense_key[j] must always equal dense_shapes[j].NumEntries(). If dense_shapes[j] == (D0, D1, ..., DN) then the shape of output Tensor dense_values[j] will be (|serialized|, D0, D1, ..., DN): The dense outputs are just the inputs row-stacked by batch. This works for dense_shapes[j] = (-1, D1, ..., DN). In this case the shape of the output Tensor dense_values[j] will be (|serialized|, M, D1, .., DN), where M is the maximum number of blocks of elements of length D1 * .... * DN, across all minibatch entries in the input. Any minibatch entry with less than M blocks of elements of length D1 * ... * DN will be padded with the corresponding default_value scalar element along the second dimension.
func ParseExampleDataset ¶
func ParseExampleDataset(scope *Scope, input_dataset tf.Output, num_parallel_calls tf.Output, dense_defaults []tf.Output, sparse_keys []string, dense_keys []string, sparse_types []tf.DataType, dense_shapes []tf.Shape, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ParseExampleDatasetAttr) (handle tf.Output)
Transforms `input_dataset` containing `Example` protos as vectors of DT_STRING into a dataset of `Tensor` or `SparseTensor` objects representing the parsed features.
Arguments:
dense_defaults: A dict mapping string keys to `Tensor`s.
The keys of the dict must match the dense_keys of the feature.
sparse_keys: A list of string keys in the examples features.
The results for these keys will be returned as `SparseTensor` objects.
dense_keys: A list of Ndense string Tensors (scalars).
The keys expected in the Examples features associated with dense values.
sparse_types: A list of `DTypes` of the same length as `sparse_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`), and `tf.string` (`BytesList`) are supported.
dense_shapes: List of tuples with the same length as `dense_keys`.
The shape of the data for each dense feature referenced by `dense_keys`. Required for any input tensors identified by `dense_keys`. Must be either fully defined, or may contain an unknown first dimension. An unknown first dimension means the feature is treated as having a variable number of blocks, and the output shape along this dimension is considered unknown at graph build time. Padding is applied for minibatch elements smaller than the maximum number of blocks for the given feature along this dimension.
output_types: The type list for the return values.
output_shapes: The list of shapes being produced.
func ParseExampleDatasetV2 ¶
func ParseExampleDatasetV2(scope *Scope, input_dataset tf.Output, num_parallel_calls tf.Output, dense_defaults []tf.Output, sparse_keys []string, dense_keys []string, sparse_types []tf.DataType, dense_shapes []tf.Shape, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ParseExampleDatasetV2Attr) (handle tf.Output)
Transforms `input_dataset` containing `Example` protos as vectors of DT_STRING into a dataset of `Tensor` or `SparseTensor` objects representing the parsed features.
Arguments:
dense_defaults: A dict mapping string keys to `Tensor`s.
The keys of the dict must match the dense_keys of the feature.
sparse_keys: A list of string keys in the examples features.
The results for these keys will be returned as `SparseTensor` objects.
dense_keys: A list of Ndense string Tensors (scalars).
The keys expected in the Examples features associated with dense values.
sparse_types: A list of `DTypes` of the same length as `sparse_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`), and `tf.string` (`BytesList`) are supported.
dense_shapes: List of tuples with the same length as `dense_keys`.
The shape of the data for each dense feature referenced by `dense_keys`. Required for any input tensors identified by `dense_keys`. Must be either fully defined, or may contain an unknown first dimension. An unknown first dimension means the feature is treated as having a variable number of blocks, and the output shape along this dimension is considered unknown at graph build time. Padding is applied for minibatch elements smaller than the maximum number of blocks for the given feature along this dimension.
output_types: The type list for the return values.
output_shapes: The list of shapes being produced.
func ParseExampleV2 ¶
func ParseExampleV2(scope *Scope, serialized tf.Output, names tf.Output, sparse_keys tf.Output, dense_keys tf.Output, ragged_keys tf.Output, dense_defaults []tf.Output, num_sparse int64, sparse_types []tf.DataType, ragged_value_types []tf.DataType, ragged_split_types []tf.DataType, dense_shapes []tf.Shape) (sparse_indices []tf.Output, sparse_values []tf.Output, sparse_shapes []tf.Output, dense_values []tf.Output, ragged_values []tf.Output, ragged_row_splits []tf.Output)
Transforms a vector of tf.Example protos (as strings) into typed tensors.
Arguments:
serialized: A scalar or vector containing binary serialized Example protos.
names: A tensor containing the names of the serialized protos.
Corresponds 1:1 with the `serialized` tensor. May contain, for example, table key (descriptive) names for the corresponding serialized protos. These are purely useful for debugging purposes, and the presence of values here has no effect on the output. May also be an empty vector if no names are available. If non-empty, this tensor must have the same shape as "serialized".
sparse_keys: Vector of strings.
The keys expected in the Examples' features associated with sparse values.
dense_keys: Vector of strings.
The keys expected in the Examples' features associated with dense values.
ragged_keys: Vector of strings.
The keys expected in the Examples' features associated with ragged values.
dense_defaults: A list of Tensors (some may be empty). Corresponds 1:1 with `dense_keys`.
dense_defaults[j] provides default values when the example's feature_map lacks dense_key[j]. If an empty Tensor is provided for dense_defaults[j], then the Feature dense_keys[j] is required. The input type is inferred from dense_defaults[j], even when it's empty. If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined, then the shape of dense_defaults[j] must match that of dense_shapes[j]. If dense_shapes[j] has an undefined major dimension (variable strides dense feature), dense_defaults[j] must contain a single element: the padding element.
num_sparse: The number of sparse keys.
sparse_types: A list of `num_sparse` types; the data types of data in each Feature
given in sparse_keys. Currently the ParseExample supports DT_FLOAT (FloatList), DT_INT64 (Int64List), and DT_STRING (BytesList).
ragged_value_types: A list of `num_ragged` types; the data types of data in each Feature
given in ragged_keys (where `num_ragged = sparse_keys.size()`). Currently the ParseExample supports DT_FLOAT (FloatList), DT_INT64 (Int64List), and DT_STRING (BytesList).
ragged_split_types: A list of `num_ragged` types; the data types of row_splits in each Feature
given in ragged_keys (where `num_ragged = sparse_keys.size()`). May be DT_INT32 or DT_INT64.
dense_shapes: A list of `num_dense` shapes; the shapes of data in each Feature
given in dense_keys (where `num_dense = dense_keys.size()`). The number of elements in the Feature corresponding to dense_key[j] must always equal dense_shapes[j].NumEntries(). If dense_shapes[j] == (D0, D1, ..., DN) then the shape of output Tensor dense_values[j] will be (|serialized|, D0, D1, ..., DN): The dense outputs are just the inputs row-stacked by batch. This works for dense_shapes[j] = (-1, D1, ..., DN). In this case the shape of the output Tensor dense_values[j] will be (|serialized|, M, D1, .., DN), where M is the maximum number of blocks of elements of length D1 * .... * DN, across all minibatch entries in the input. Any minibatch entry with less than M blocks of elements of length D1 * ... * DN will be padded with the corresponding default_value scalar element along the second dimension.
func ParseSequenceExample ¶
func ParseSequenceExample(scope *Scope, serialized tf.Output, debug_name tf.Output, context_dense_defaults []tf.Output, feature_list_dense_missing_assumed_empty []string, context_sparse_keys []string, context_dense_keys []string, feature_list_sparse_keys []string, feature_list_dense_keys []string, optional ...ParseSequenceExampleAttr) (context_sparse_indices []tf.Output, context_sparse_values []tf.Output, context_sparse_shapes []tf.Output, context_dense_values []tf.Output, feature_list_sparse_indices []tf.Output, feature_list_sparse_values []tf.Output, feature_list_sparse_shapes []tf.Output, feature_list_dense_values []tf.Output, feature_list_dense_lengths []tf.Output)
Transforms a vector of brain.SequenceExample protos (as strings) into typed tensors.
Arguments:
serialized: A vector containing binary serialized SequenceExample protos.
debug_name: A vector containing the names of the serialized protos.
May contain, for example, table key (descriptive) name for the corresponding serialized proto. This is purely useful for debugging purposes, and the presence of values here has no effect on the output. May also be an empty vector if no name is available.
context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty).
context_dense_defaults[j] provides default values when the SequenceExample's context map lacks context_dense_key[j]. If an empty Tensor is provided for context_dense_defaults[j], then the Feature context_dense_keys[j] is required. The input type is inferred from context_dense_defaults[j], even when it's empty. If context_dense_defaults[j] is not empty, its shape must match context_dense_shapes[j].
feature_list_dense_missing_assumed_empty: A vector listing the
FeatureList keys which may be missing from the SequenceExamples. If the associated FeatureList is missing, it is treated as empty. By default, any FeatureList not listed in this vector must exist in the SequenceExamples.
context_sparse_keys: A list of Ncontext_sparse string Tensors (scalars).
The keys expected in the Examples' features associated with context_sparse values.
context_dense_keys: A list of Ncontext_dense string Tensors (scalars).
The keys expected in the SequenceExamples' context features associated with dense values.
feature_list_sparse_keys: A list of Nfeature_list_sparse string Tensors
(scalars). The keys expected in the FeatureLists associated with sparse values.
feature_list_dense_keys: A list of Nfeature_list_dense string Tensors (scalars).
The keys expected in the SequenceExamples' feature_lists associated with lists of dense values.
func ParseSequenceExampleV2 ¶
func ParseSequenceExampleV2(scope *Scope, serialized tf.Output, debug_name tf.Output, context_sparse_keys tf.Output, context_dense_keys tf.Output, context_ragged_keys tf.Output, feature_list_sparse_keys tf.Output, feature_list_dense_keys tf.Output, feature_list_ragged_keys tf.Output, feature_list_dense_missing_assumed_empty tf.Output, context_dense_defaults []tf.Output, optional ...ParseSequenceExampleV2Attr) (context_sparse_indices []tf.Output, context_sparse_values []tf.Output, context_sparse_shapes []tf.Output, context_dense_values []tf.Output, context_ragged_values []tf.Output, context_ragged_row_splits []tf.Output, feature_list_sparse_indices []tf.Output, feature_list_sparse_values []tf.Output, feature_list_sparse_shapes []tf.Output, feature_list_dense_values []tf.Output, feature_list_dense_lengths []tf.Output, feature_list_ragged_values []tf.Output, feature_list_ragged_outer_splits []tf.Output, feature_list_ragged_inner_splits []tf.Output)
Transforms a vector of tf.io.SequenceExample protos (as strings) into typed tensors.
Arguments:
serialized: A scalar or vector containing binary serialized SequenceExample protos.
debug_name: A scalar or vector containing the names of the serialized protos.
May contain, for example, table key (descriptive) name for the corresponding serialized proto. This is purely useful for debugging purposes, and the presence of values here has no effect on the output. May also be an empty vector if no name is available.
context_sparse_keys: The keys expected in the Examples' features associated with context_sparse
values.
context_dense_keys: The keys expected in the SequenceExamples' context features associated with
dense values.
context_ragged_keys: The keys expected in the Examples' features associated with context_ragged
values.
feature_list_sparse_keys: The keys expected in the FeatureLists associated with sparse values.
feature_list_dense_keys: The keys expected in the SequenceExamples' feature_lists associated
with lists of dense values.
feature_list_ragged_keys: The keys expected in the FeatureLists associated with ragged values.
feature_list_dense_missing_assumed_empty: A vector corresponding 1:1 with feature_list_dense_keys, indicating which
features may be missing from the SequenceExamples. If the associated FeatureList is missing, it is treated as empty.
context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty).
context_dense_defaults[j] provides default values when the SequenceExample's context map lacks context_dense_key[j]. If an empty Tensor is provided for context_dense_defaults[j], then the Feature context_dense_keys[j] is required. The input type is inferred from context_dense_defaults[j], even when it's empty. If context_dense_defaults[j] is not empty, its shape must match context_dense_shapes[j].
func ParseSingleExample ¶
func ParseSingleExample(scope *Scope, serialized tf.Output, dense_defaults []tf.Output, num_sparse int64, sparse_keys []string, dense_keys []string, sparse_types []tf.DataType, dense_shapes []tf.Shape) (sparse_indices []tf.Output, sparse_values []tf.Output, sparse_shapes []tf.Output, dense_values []tf.Output)
Transforms a tf.Example proto (as a string) into typed tensors.
Arguments:
serialized: A vector containing a batch of binary serialized Example protos.
dense_defaults: A list of Tensors (some may be empty), whose length matches
the length of `dense_keys`. dense_defaults[j] provides default values when the example's feature_map lacks dense_key[j]. If an empty Tensor is provided for dense_defaults[j], then the Feature dense_keys[j] is required. The input type is inferred from dense_defaults[j], even when it's empty. If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined, then the shape of dense_defaults[j] must match that of dense_shapes[j]. If dense_shapes[j] has an undefined major dimension (variable strides dense feature), dense_defaults[j] must contain a single element: the padding element.
num_sparse: The number of sparse features to be parsed from the example. This
must match the lengths of `sparse_keys` and `sparse_types`.
sparse_keys: A list of `num_sparse` strings.
The keys expected in the Examples' features associated with sparse values.
dense_keys: The keys expected in the Examples' features associated with dense
values.
sparse_types: A list of `num_sparse` types; the data types of data in each
Feature given in sparse_keys. Currently the ParseSingleExample op supports DT_FLOAT (FloatList), DT_INT64 (Int64List), and DT_STRING (BytesList).
dense_shapes: The shapes of data in each Feature given in dense_keys.
The length of this list must match the length of `dense_keys`. The number of elements in the Feature corresponding to dense_key[j] must always equal dense_shapes[j].NumEntries(). If dense_shapes[j] == (D0, D1, ..., DN) then the shape of output Tensor dense_values[j] will be (D0, D1, ..., DN): In the case dense_shapes[j] = (-1, D1, ..., DN), the shape of the output Tensor dense_values[j] will be (M, D1, .., DN), where M is the number of blocks of elements of length D1 * .... * DN, in the input.
func ParseSingleSequenceExample ¶
func ParseSingleSequenceExample(scope *Scope, serialized tf.Output, feature_list_dense_missing_assumed_empty tf.Output, context_sparse_keys []tf.Output, context_dense_keys []tf.Output, feature_list_sparse_keys []tf.Output, feature_list_dense_keys []tf.Output, context_dense_defaults []tf.Output, debug_name tf.Output, optional ...ParseSingleSequenceExampleAttr) (context_sparse_indices []tf.Output, context_sparse_values []tf.Output, context_sparse_shapes []tf.Output, context_dense_values []tf.Output, feature_list_sparse_indices []tf.Output, feature_list_sparse_values []tf.Output, feature_list_sparse_shapes []tf.Output, feature_list_dense_values []tf.Output)
Transforms a scalar brain.SequenceExample proto (as a string) into typed tensors.
Arguments:
serialized: A scalar containing a binary serialized SequenceExample proto.
feature_list_dense_missing_assumed_empty: A vector listing the
FeatureList keys which may be missing from the SequenceExample. If the associated FeatureList is missing, it is treated as empty. By default, any FeatureList not listed in this vector must exist in the SequenceExample.
context_sparse_keys: A list of Ncontext_sparse string Tensors (scalars).
The keys expected in the Examples' features associated with context_sparse values.
context_dense_keys: A list of Ncontext_dense string Tensors (scalars).
The keys expected in the SequenceExamples' context features associated with dense values.
feature_list_sparse_keys: A list of Nfeature_list_sparse string Tensors
(scalars). The keys expected in the FeatureLists associated with sparse values.
feature_list_dense_keys: A list of Nfeature_list_dense string Tensors (scalars).
The keys expected in the SequenceExamples' feature_lists associated with lists of dense values.
context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty).
context_dense_defaults[j] provides default values when the SequenceExample's context map lacks context_dense_key[j]. If an empty Tensor is provided for context_dense_defaults[j], then the Feature context_dense_keys[j] is required. The input type is inferred from context_dense_defaults[j], even when it's empty. If context_dense_defaults[j] is not empty, its shape must match context_dense_shapes[j].
debug_name: A scalar containing the name of the serialized proto.
May contain, for example, table key (descriptive) name for the corresponding serialized proto. This is purely useful for debugging purposes, and the presence of values here has no effect on the output. May also be an empty scalar if no name is available.
func ParseTensor ¶
Transforms a serialized tensorflow.TensorProto proto into a Tensor.
Arguments:
serialized: A scalar string containing a serialized TensorProto proto.
out_type: The type of the serialized tensor. The provided type must match the
type of the serialized tensor and no implicit conversion will take place.
Returns A Tensor of type `out_type`.
func Placeholder ¶
A placeholder op for a value that will be fed into the computation.
N.B. This operation will fail with an error if it is executed. It is intended as a way to represent a value that will always be fed, and to provide attrs that enable the fed value to be checked at runtime.
Arguments:
dtype: The type of elements in the tensor.
Returns A placeholder tensor that must be replaced using the feed mechanism.
func PlaceholderV2 ¶
A placeholder op for a value that will be fed into the computation.
DEPRECATED at GraphDef version 23: Placeholder now behaves the same as PlaceholderV2.
N.B. This operation will fail with an error if it is executed. It is intended as a way to represent a value that will always be fed, and to provide attrs that enable the fed value to be checked at runtime.
Arguments:
dtype: The type of elements in the tensor.
shape: The shape of the tensor. The shape can be any partially-specified
shape. To be unconstrained, pass in a shape with unknown rank.
Returns A placeholder tensor that must be replaced using the feed mechanism.
func PlaceholderWithDefault ¶
A placeholder op that passes through `input` when its output is not fed.
Arguments:
input: The default value to produce when `output` is not fed.
shape: The (possibly partial) shape of the tensor.
Returns A placeholder tensor that defaults to `input` if it is not fed.
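A minimal sketch in the package example style; the PlaceholderWithDefault wrapper is assumed to take the default value and a tf.Shape, and the shape is assumed to be built with tf.MakeShape from the core tensorflow package.

```go
s := NewScope()
def := Const(s, []float32{1, 2, 3})
// The placeholder evaluates to def ([1, 2, 3]) whenever no value is fed for it.
output := PlaceholderWithDefault(s, def, tf.MakeShape(3))
if s.Err() != nil {
	panic(s.Err())
}
_ = output
```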
func Polygamma ¶
Compute the polygamma function \\(\psi^{(n)}(x)\\).
The polygamma function is defined as:
\\(\psi^{(a)}(x) = \frac{d^a}{dx^a} \psi(x)\\)
where \\(\psi(x)\\) is the digamma function. The polygamma function is defined only for non-negative integer orders \\(a\\).
func PopulationCount ¶
Computes element-wise population count (a.k.a. popcount, bitsum, bitcount).
For each entry in `x`, calculates the number of `1` (on) bits in the binary representation of that entry.
**NOTE**: It is more efficient to first `tf.bitcast` your tensors into `int32` or `int64` and perform the bitcount on the result, than to feed in 8- or 16-bit inputs and then aggregate the resulting counts.
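A short sketch in the package example style, assuming the usual unary (scope, x) signature for this op; the int32 input follows the note above.

```go
s := NewScope()
x := Const(s, []int32{0, 1, 3, 255})
y := PopulationCount(s, x) // element-wise bit counts: [0, 1, 2, 8]
if s.Err() != nil {
	panic(s.Err())
}
_ = y
```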
func Pow ¶
Computes the power of one value to another.
Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for corresponding elements in `x` and `y`. For example:
```
# tensor 'x' is [[2, 2], [3, 3]]
# tensor 'y' is [[8, 16], [2, 3]]
tf.pow(x, y) ==> [[256, 65536], [9, 27]]
```
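The same example as a Go sketch in the package example style; the Pow wrapper is assumed to take (x, y), and the SubScope names are arbitrary.

```go
s := NewScope()
x := Const(s.SubScope("x"), [][]int32{{2, 2}, {3, 3}})
y := Const(s.SubScope("y"), [][]int32{{8, 16}, {2, 3}})
z := Pow(s, x, y) // [[256, 65536], [9, 27]]
if s.Err() != nil {
	panic(s.Err())
}
_ = z
```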
func PrefetchDataset ¶
func PrefetchDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...PrefetchDatasetAttr) (handle tf.Output)
Creates a dataset that asynchronously prefetches elements from `input_dataset`.
Arguments:
buffer_size: The maximum number of elements to buffer in an iterator over
this dataset.
func Prelinearize ¶
An op which linearizes one Tensor value to an opaque variant tensor.
Arguments:
input: A tensor that will be linearized.
func PrelinearizeTuple ¶
func PrelinearizeTuple(scope *Scope, inputs []tf.Output, shapes []tf.Shape, optional ...PrelinearizeTupleAttr) (output tf.Output)
An op which linearizes multiple Tensor values to an opaque variant tensor.
Arguments:
inputs: A list of tensors that will be provided using the infeed mechanism.
shapes: The shapes of each tensor in `inputs`.
func PreventGradient ¶
func PreventGradient(scope *Scope, input tf.Output, optional ...PreventGradientAttr) (output tf.Output)
An identity op that triggers an error if a gradient is requested.
When executed in a graph, this op outputs its input tensor as-is.
When building ops to compute gradients, the TensorFlow gradient system will return an error when trying to lookup the gradient of this op, because no gradient must ever be registered for this function. This op exists to prevent subtle bugs from silently returning unimplemented gradients in some corner cases.
Arguments:
input: any tensor.
Returns the same input tensor.
func Print ¶
func Print(scope *Scope, input tf.Output, data []tf.Output, optional ...PrintAttr) (output tf.Output)
Prints a list of tensors.
Passes `input` through to `output` and prints `data` when evaluating.
Arguments:
input: The tensor passed to `output`.
data: A list of tensors to print out when op is evaluated.
Returns The unmodified `input` tensor
func PrintV2 ¶
Prints a string scalar.
Prints a string scalar to the desired output_stream.
Arguments:
input: The string scalar to print.
Returns the created operation.
func PriorityQueueV2 ¶
func PriorityQueueV2(scope *Scope, shapes []tf.Shape, optional ...PriorityQueueV2Attr) (handle tf.Output)
A queue that produces elements sorted by the first component value.
Note that the PriorityQueue requires the first component of any element to be a scalar int64, in addition to the other elements declared by component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue and DequeueMany) on a PriorityQueue will all require (resp. output) one extra entry in their input (resp. output) lists.
Arguments:
shapes: The shape of each component in a value. The length of this attr must
be either 0 or the same as the length of component_types. If the length of this attr is 0, the shapes of queue elements are not constrained, and only one element may be dequeued at a time.
Returns The handle to the queue.
func PrivateThreadPoolDataset ¶
func PrivateThreadPoolDataset(scope *Scope, input_dataset tf.Output, num_threads tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
Creates a dataset that uses a custom thread pool to compute `input_dataset`.
Arguments:
num_threads: Identifies the number of threads to use for the private threadpool.
func Prod ¶
Computes the product of elements across dimensions of a tensor.
Reduces `input` along the dimensions given in `axis`. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keep_dims` is true, the reduced dimensions are retained with length 1.
Arguments:
input: The tensor to reduce.
axis: The dimensions to reduce. Must be in the range
`[-rank(input), rank(input))`.
Returns The reduced tensor.
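A minimal sketch in the package example style, assuming the usual (scope, input, axis, optional attrs) signature: reduce a 2x3 matrix over axis 0.

```go
s := NewScope()
input := Const(s.SubScope("input"), [][]int32{{1, 2, 3}, {4, 5, 6}})
prod := Prod(s, input, Const(s.SubScope("axis"), int32(0))) // column-wise products: [4, 10, 18]
if s.Err() != nil {
	panic(s.Err())
}
_ = prod
```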
func Qr ¶
Computes the QR decompositions of one or more matrices.
Computes the QR decomposition of each inner matrix in `tensor` such that `tensor[..., :, :] = q[..., :, :] * r[..., :, :]`.
Currently, the gradient for the QR decomposition is well-defined only when the first `P` columns of the inner matrix are linearly independent, where `P` is the minimum of `M` and `N`, the 2 inner-most dimensions of `tensor`.
```python
# a is a tensor.
# q is a tensor of orthonormal matrices.
# r is a tensor of upper triangular matrices.
q, r = qr(a)
q_full, r_full = qr(a, full_matrices=True)
```
Arguments:
input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.
Returns:
q: Orthonormal basis for range of `a`. If `full_matrices` is `False` then
shape is `[..., M, P]`; if `full_matrices` is `True` then shape is `[..., M, M]`.
r: Triangular factor. If `full_matrices` is `False` then shape is
`[..., P, N]`. If `full_matrices` is `True` then shape is `[..., M, N]`.
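A Go sketch of the Python snippet above in the package example style; the Qr wrapper is assumed to return (q, r), and the QrFullMatrices attribute name follows the package's usual Func+Attr naming and is an assumption.

```go
s := NewScope()
a := Placeholder(s, tf.Float) // shape [..., M, N]
q, r := Qr(s, a)
qFull, rFull := Qr(s.SubScope("full"), a, QrFullMatrices(true))
if s.Err() != nil {
	panic(s.Err())
}
_, _, _, _ = q, r, qFull, rFull
```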
func QuantizeAndDequantize ¶
func QuantizeAndDequantize(scope *Scope, input tf.Output, optional ...QuantizeAndDequantizeAttr) (output tf.Output)
Use QuantizeAndDequantizeV2 instead.
DEPRECATED at GraphDef version 22: Replaced by QuantizeAndDequantizeV2
func QuantizeAndDequantizeV2 ¶
func QuantizeAndDequantizeV2(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, optional ...QuantizeAndDequantizeV2Attr) (output tf.Output)
Quantizes then dequantizes a tensor.
This op simulates the precision loss from the quantized forward pass by:
- Quantizing the tensor to fixed point numbers, which should match the target quantization method when it is used in inference.
- Dequantizing it back to floating point numbers for the following ops, most likely matmul.
There are different ways to quantize. This version uses only scaling, so 0.0 maps to 0.
From the specified 'num_bits' in the quantized output type, it determines minimum and maximum representable quantized values.
e.g.
* [-128, 127] for signed, num_bits = 8, or
* [0, 255] for unsigned, num_bits = 8.
If range_given == False, the initial input_min, input_max will be determined automatically as the minimum and maximum values in the input tensor, otherwise the specified values of input_min, input_max are used.
Note: If the input_min, input_max are specified, they do not need to equal the actual minimum and maximum values in the tensor. e.g. in some cases it may be beneficial to specify these values such that the low probability extremes of the input distribution are clipped.
This op determines the maximum scale_factor that would map the initial [input_min, input_max] range to a range that lies within the representable quantized range.
It determines the scale from one of input_min and input_max, then updates the other one to maximize the representable range.
e.g.
- if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0, 5.0]: it would use a scale_factor of -128 / -10.0 = 12.8. In this case, it would update input_max to be 127 / 12.8 = 9.921875.
- if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0, 10.0]: it would use a scale_factor of 127 / 10.0 = 12.7. In this case, it would update input_min to be -128.0 / 12.7 = -10.07874.
- if the output is unsigned, input_min is forced to be 0, and only the specified input_max is used.
After determining the scale_factor and updating the input range, it applies the following to each value in the 'input' tensor.
output = round(clamp(value, input_min, input_max) * scale_factor) / scale_factor.
The above round function rounds the value based on the given round_mode.
Arguments:
input: Tensor to quantize and then dequantize.
input_min: If `range_given == True`, this specifies the minimum input value that needs to
be represented, otherwise it is determined from the min value of the `input` tensor.
input_max: If `range_given == True`, this specifies the maximum input value that needs to
be represented, otherwise it is determined from the max value of the `input` tensor.
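A minimal sketch in the package example style using the signed example above ([input_min, input_max] = [-10, 5], 8 bits); the attribute names (...RangeGiven, ...NumBits) follow the package's usual Func+Attr naming and are assumptions.

```go
s := NewScope()
input := Placeholder(s, tf.Float)
output := QuantizeAndDequantizeV2(s, input,
	Const(s.SubScope("min"), float32(-10.0)), // input_min
	Const(s.SubScope("max"), float32(5.0)),   // input_max
	QuantizeAndDequantizeV2RangeGiven(true),  // assumed attribute name
	QuantizeAndDequantizeV2NumBits(8))        // assumed attribute name
if s.Err() != nil {
	panic(s.Err())
}
_ = output
```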
func QuantizeAndDequantizeV3 ¶
func QuantizeAndDequantizeV3(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, num_bits tf.Output, optional ...QuantizeAndDequantizeV3Attr) (output tf.Output)
Quantizes then dequantizes a tensor.
This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a tensor, so its value can change during training.
func QuantizeAndDequantizeV4 ¶
func QuantizeAndDequantizeV4(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, optional ...QuantizeAndDequantizeV4Attr) (output tf.Output)
Quantizes then dequantizes a tensor.
This is almost identical to QuantizeAndDequantizeV2, except that it returns a gradient of 1 for inputs that are within the quantization range, or 0 otherwise.
Arguments:
input: Tensor to quantize and then dequantize.
input_min: If `range_given == True`, this specifies the minimum input value that needs to
be represented, otherwise it is determined from the min value of the `input` tensor.
input_max: If `range_given == True`, this specifies the maximum input value that needs to
be represented, otherwise it is determined from the max value of the `input` tensor.
func QuantizeAndDequantizeV4Grad ¶
func QuantizeAndDequantizeV4Grad(scope *Scope, gradients tf.Output, input tf.Output, input_min tf.Output, input_max tf.Output, optional ...QuantizeAndDequantizeV4GradAttr) (input_backprop tf.Output, input_min_backprop tf.Output, input_max_backprop tf.Output)
Returns the gradient of `QuantizeAndDequantizeV4`.
Returns a gradient of 1 for inputs that are within the quantization range, or 0 otherwise.
func QuantizeDownAndShrinkRange ¶
func QuantizeDownAndShrinkRange(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, out_type tf.DataType) (output tf.Output, output_min tf.Output, output_max tf.Output)
Convert the quantized 'input' tensor into a lower-precision 'output', using the
actual distribution of the values to maximize the usage of the lower bit depth and adjusting the output min and max ranges accordingly.
[input_min, input_max] are scalar floats that specify the range for the float interpretation of the 'input' data. For example, if input_min is -1.0f and input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
This operator tries to squeeze as much precision as possible into an output with a lower bit depth by calculating the actual min and max values found in the data. For example, maybe that quint16 input has no values lower than 16,384 and none higher than 49,152. That means only half the range is actually needed, all the float interpretations are between -0.5f and 0.5f, so if we want to compress the data into a quint8 output, we can use that range rather than the theoretical -1.0f to 1.0f that is suggested by the input min and max.
In practice, this is most useful for taking output from operations like QuantizedMatMul that can produce higher bit-depth outputs than their inputs and may have large potential output ranges, but in practice have a distribution of input values that only uses a small fraction of the possible range. By feeding that output into this operator, we can reduce it from 32 bits down to 8 with minimal loss of accuracy.
Arguments:
input_min: The float value that the minimum quantized input value represents.
input_max: The float value that the maximum quantized input value represents.
out_type: The type of the output. Should be a lower bit depth than Tinput.
Returns:
output
output_min: The float value that the minimum quantized output value represents.
output_max: The float value that the maximum quantized output value represents.
func QuantizeV2 ¶
func QuantizeV2(scope *Scope, input tf.Output, min_range tf.Output, max_range tf.Output, T tf.DataType, optional ...QuantizeV2Attr) (output tf.Output, output_min tf.Output, output_max tf.Output)
Quantize the 'input' tensor of type float to 'output' tensor of type 'T'.
[min_range, max_range] are scalar floats that specify the range for the 'input' data. The 'mode' attribute controls exactly which calculations are used to convert the float values to their quantized equivalents. The 'round_mode' attribute controls which rounding tie-breaking algorithm is used when rounding float values to their quantized equivalents.
In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
```
out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)
if T == qint8: out[i] -= (range(T) + 1) / 2.0
```
here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
*MIN_COMBINED Mode Example*
Assume the input is type float and has a possible range of [0.0, 6.0] and the output type is quint8 ([0, 255]). The min_range and max_range values should be specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each value of the input by 255/6 and cast to quint8.
If the output type was qint8 ([-128, 127]), the operation will additionally subtract 128 from each value prior to casting, so that the range of values aligns with the range of qint8.
If the mode is 'MIN_FIRST', then this approach is used:
```
num_discrete_values = 1 << (# of bits in T)
range_adjust = num_discrete_values / (num_discrete_values - 1)
range = (range_max - range_min) * range_adjust
range_scale = num_discrete_values / range
quantized = round(input * range_scale) - round(range_min * range_scale) +
            numeric_limits<T>::min()
quantized = max(quantized, numeric_limits<T>::min())
quantized = min(quantized, numeric_limits<T>::max())
```
The biggest difference between this and MIN_COMBINED is that the minimum range is rounded first, before it's subtracted from the rounded value. With MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing and dequantizing will introduce a larger and larger error.
*SCALED mode Example*
`SCALED` mode matches the quantization approach used in `QuantizeAndDequantize{V2|V3}`.
If the mode is `SCALED`, the quantization is performed by multiplying each input value by a scaling_factor. The scaling_factor is determined from `min_range` and `max_range` to be as large as possible such that the range from `min_range` to `max_range` is representable within values of type T.
```c++
const int min_T = std::numeric_limits<T>::min();
const int max_T = std::numeric_limits<T>::max();
const float max_float = std::numeric_limits<float>::max();
const float scale_factor_from_min_side =
    (min_T * min_range > 0) ? min_T / min_range : max_float;
const float scale_factor_from_max_side =
    (max_T * max_range > 0) ? max_T / max_range : max_float;
const float scale_factor = std::min(scale_factor_from_min_side,
                                    scale_factor_from_max_side);
```
We next use the scale_factor to adjust min_range and max_range as follows:
```c++
min_range = min_T / scale_factor;
max_range = max_T / scale_factor;
```
e.g. if T = qint8, and initially min_range = -10, and max_range = 9, we would compare -128/-10.0 = 12.8 to 127/9.0 = 14.11, and set scaling_factor = 12.8. In this case, min_range would remain -10, but max_range would be adjusted to 127 / 12.8 = 9.921875.
So we will quantize input values in the range (-10, 9.921875) to (-128, 127).
The input tensor can now be quantized by clipping values to the range `min_range` to `max_range`, then multiplying by scale_factor as follows:
```c++
result = round(min(max_range, max(min_range, input)) * scale_factor)
```
The adjusted `min_range` and `max_range` are returned as outputs 2 and 3 of this operation. These outputs should be used as the range for any further calculations.
*narrow_range (bool) attribute*
If true, we do not use the minimum quantized value; i.e. for an int8 quantized output, values are restricted to the range -127..127 instead of the full -128..127 range. This is provided for compatibility with certain inference backends. (Only applies to SCALED mode.)
*axis (int) attribute*
An optional `axis` attribute can specify a dimension index of the input tensor, such that quantization ranges will be calculated and applied separately for each slice of the tensor along that dimension. This is useful for per-channel quantization.
If `axis` is specified, min_range and max_range must be 1-D tensors whose size matches the `axis` dimension of the input and output tensors; if `axis` is not specified, per-tensor quantization is performed as normal.
*ensure_minimum_range (float) attribute*
Ensures the minimum quantization range is at least this value. The legacy default value for this is 0.01, but it is strongly suggested to set it to 0 for new uses.
Arguments:
min_range: The minimum value of the quantization range. This value may be adjusted by the
op depending on other parameters. The adjusted value is written to `output_min`. If the `axis` attribute is specified, this must be a 1-D tensor whose size matches the `axis` dimension of the input and output tensors.
max_range: The maximum value of the quantization range. This value may be adjusted by the
op depending on other parameters. The adjusted value is written to `output_max`. If the `axis` attribute is specified, this must be a 1-D tensor whose size matches the `axis` dimension of the input and output tensors.
Returns:
output: The quantized data produced from the float input.
output_min: The final quantization range minimum, used to clip input values before scaling
and rounding them to quantized values. If the `axis` attribute is specified, this will be a 1-D tensor whose size matches the `axis` dimension of the input and output tensors.
output_max: The final quantization range maximum, used to clip input values before scaling
and rounding them to quantized values. If the `axis` attribute is specified, this will be a 1-D tensor whose size matches the `axis` dimension of the input and output tensors.
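A brief sketch of SCALED-mode quantization in the package example style, mirroring the qint8 example above (min_range = -10, max_range = 9); the QuantizeV2Mode attribute name follows the package's usual Func+Attr naming, and tf.Qint8 is assumed to be exposed by the core tensorflow package.

```go
s := NewScope()
input := Placeholder(s, tf.Float)
out, outMin, outMax := QuantizeV2(s, input,
	Const(s.SubScope("min_range"), float32(-10.0)),
	Const(s.SubScope("max_range"), float32(9.0)),
	tf.Qint8,                // T: quantized output type (assumed constant name)
	QuantizeV2Mode("SCALED")) // assumed attribute name
if s.Err() != nil {
	panic(s.Err())
}
// outMin/outMax carry the adjusted range (roughly -10 and 9.921875 here).
_, _, _ = out, outMin, outMax
```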
func QuantizedAdd ¶
func QuantizedAdd(scope *Scope, x tf.Output, y tf.Output, min_x tf.Output, max_x tf.Output, min_y tf.Output, max_y tf.Output, optional ...QuantizedAddAttr) (z tf.Output, min_z tf.Output, max_z tf.Output)
Returns x + y element-wise, working on quantized buffers.
Arguments:
min_x: The float value that the lowest quantized `x` value represents. max_x: The float value that the highest quantized `x` value represents. min_y: The float value that the lowest quantized `y` value represents. max_y: The float value that the highest quantized `y` value represents.
Returns:
z min_z: The float value that the lowest quantized output value represents. max_z: The float value that the highest quantized output value represents.
*NOTE*: `QuantizedAdd` supports limited forms of broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func QuantizedAvgPool ¶
func QuantizedAvgPool(scope *Scope, input tf.Output, min_input tf.Output, max_input tf.Output, ksize []int64, strides []int64, padding string) (output tf.Output, min_output tf.Output, max_output tf.Output)
Produces the average pool of the input tensor for quantized types.
Arguments:
input: 4-D with shape `[batch, height, width, channels]`. min_input: The float value that the lowest quantized input value represents. max_input: The float value that the highest quantized input value represents. ksize: The size of the window for each dimension of the input tensor.
The length must be 4 to match the number of dimensions of the input.
strides: The stride of the sliding window for each dimension of the input
tensor. The length must be 4 to match the number of dimensions of the input.
padding: The type of padding algorithm to use.
Returns:
output min_output: The float value that the lowest quantized output value represents. max_output: The float value that the highest quantized output value represents.
func QuantizedBatchNormWithGlobalNormalization ¶
func QuantizedBatchNormWithGlobalNormalization(scope *Scope, t tf.Output, t_min tf.Output, t_max tf.Output, m tf.Output, m_min tf.Output, m_max tf.Output, v tf.Output, v_min tf.Output, v_max tf.Output, beta tf.Output, beta_min tf.Output, beta_max tf.Output, gamma tf.Output, gamma_min tf.Output, gamma_max tf.Output, out_type tf.DataType, variance_epsilon float32, scale_after_normalization bool) (result tf.Output, result_min tf.Output, result_max tf.Output)
Quantized Batch normalization.
This op is deprecated and will be removed in the future. Prefer `tf.nn.batch_normalization`.
Arguments:
t: A 4D input Tensor. t_min: The value represented by the lowest quantized input. t_max: The value represented by the highest quantized input. m: A 1D mean Tensor with size matching the last dimension of t.
This is the first output from tf.nn.moments, or a saved moving average thereof.
m_min: The value represented by the lowest quantized mean. m_max: The value represented by the highest quantized mean. v: A 1D variance Tensor with size matching the last dimension of t.
This is the second output from tf.nn.moments, or a saved moving average thereof.
v_min: The value represented by the lowest quantized variance. v_max: The value represented by the highest quantized variance. beta: A 1D beta Tensor with size matching the last dimension of t.
An offset to be added to the normalized tensor.
beta_min: The value represented by the lowest quantized offset. beta_max: The value represented by the highest quantized offset. gamma: A 1D gamma Tensor with size matching the last dimension of t.
If "scale_after_normalization" is true, this tensor will be multiplied with the normalized tensor.
gamma_min: The value represented by the lowest quantized gamma. gamma_max: The value represented by the highest quantized gamma. variance_epsilon: A small float number to avoid dividing by 0. scale_after_normalization: A bool indicating whether the resulted tensor
needs to be multiplied with gamma.
func QuantizedBiasAdd ¶
func QuantizedBiasAdd(scope *Scope, input tf.Output, bias tf.Output, min_input tf.Output, max_input tf.Output, min_bias tf.Output, max_bias tf.Output, out_type tf.DataType) (output tf.Output, min_out tf.Output, max_out tf.Output)
Adds Tensor 'bias' to Tensor 'input' for Quantized types.
Broadcasts the values of bias on dimensions 0..N-2 of 'input'.
Arguments:
bias: A 1D bias Tensor with size matching the last dimension of 'input'. min_input: The float value that the lowest quantized input value represents. max_input: The float value that the highest quantized input value represents. min_bias: The float value that the lowest quantized bias value represents. max_bias: The float value that the highest quantized bias value represents.
Returns:
output min_out: The float value that the lowest quantized output value represents. max_out: The float value that the highest quantized output value represents.
func QuantizedConcat ¶
func QuantizedConcat(scope *Scope, concat_dim tf.Output, values []tf.Output, input_mins []tf.Output, input_maxes []tf.Output) (output tf.Output, output_min tf.Output, output_max tf.Output)
Concatenates quantized tensors along one dimension.
Arguments:
concat_dim: 0-D. The dimension along which to concatenate. Must be in the
range [0, rank(values)).
values: The `N` Tensors to concatenate. Their ranks and types must match,
and their sizes must match in all dimensions except `concat_dim`.
input_mins: The minimum scalar values for each of the input tensors. input_maxes: The maximum scalar values for each of the input tensors.
Returns:
output: A `Tensor` with the concatenation of values stacked along the
`concat_dim` dimension. This tensor's shape matches that of `values` except in `concat_dim` where it has the sum of the sizes.
output_min: The float value that the minimum quantized output value represents. output_max: The float value that the maximum quantized output value represents.
func QuantizedConv2D ¶
func QuantizedConv2D(scope *Scope, input tf.Output, filter tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, strides []int64, padding string, optional ...QuantizedConv2DAttr) (output tf.Output, min_output tf.Output, max_output tf.Output)
Computes a 2D convolution given quantized 4D input and filter tensors.
The inputs are quantized tensors where the lowest value represents the real number of the associated minimum, and the highest represents the maximum. This means that you can only interpret the quantized output in the same way, by taking the returned minimum and maximum values into account.
Arguments:
filter: filter's input_depth dimension must match input's depth dimensions. min_input: The float value that the lowest quantized input value represents. max_input: The float value that the highest quantized input value represents. min_filter: The float value that the lowest quantized filter value represents. max_filter: The float value that the highest quantized filter value represents. strides: The stride of the sliding window for each dimension of the input
tensor.
padding: The type of padding algorithm to use.
Returns:
output min_output: The float value that the lowest quantized output value represents. max_output: The float value that the highest quantized output value represents.
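As a usage illustration, the following hedged Go sketch wires a QuantizedConv2D node with unit strides and SAME padding. The quantized input and filter are fed at run time; tf.Quint8 is assumed here as an acceptable element type, and the min/max constants describe the float range the quantized values represent. Only graph construction is shown.

```go
s := NewScope()
input := Placeholder(s, tf.Quint8)  // [batch, height, width, channels], tf.Quint8 assumed
filter := Placeholder(s, tf.Quint8) // [filter_h, filter_w, in_channels, out_channels]
minIn, maxIn := Const(s, float32(0)), Const(s, float32(1))
minF, maxF := Const(s, float32(-0.5)), Const(s, float32(0.5))
out, minOut, maxOut := QuantizedConv2D(s, input, filter, minIn, maxIn, minF, maxF,
	[]int64{1, 1, 1, 1}, "SAME")
if s.Err() != nil {
	panic(s.Err())
}
_, _, _ = out, minOut, maxOut
```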
func QuantizedConv2DPerChannel ¶
func QuantizedConv2DPerChannel(scope *Scope, input tf.Output, filter tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, strides []int64, padding string, optional ...QuantizedConv2DPerChannelAttr) (output tf.Output, min_output tf.Output, max_output tf.Output)
Computes QuantizedConv2D per channel.
Arguments:
input: The original input tensor. filter: The original filter tensor. min_input: The minimum value of the input tensor. max_input: The maximum value of the input tensor. min_filter: The minimum value of the filter tensor. max_filter: The maximum value of the filter tensor. strides: List of stride values.
Returns:
output: The output tensor. min_output: The minimum value of the final output tensor. max_output: The maximum value of the final output tensor.
func QuantizedDepthwiseConv2D ¶
func QuantizedDepthwiseConv2D(scope *Scope, input tf.Output, filter tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, strides []int64, padding string, optional ...QuantizedDepthwiseConv2DAttr) (output tf.Output, min_output tf.Output, max_output tf.Output)
Computes quantized depthwise Conv2D.
Arguments:
input: The original input tensor. filter: The original filter tensor. min_input: The float value that the minimum quantized input value represents. max_input: The float value that the maximum quantized input value represents. min_filter: The float value that the minimum quantized filter value represents. max_filter: The float value that the maximum quantized filter value represents. strides: List of stride values.
Returns:
output: The output tensor. min_output: The float value that the minimum quantized output value represents. max_output: The float value that the maximum quantized output value represents.
func QuantizedDepthwiseConv2DWithBias ¶
func QuantizedDepthwiseConv2DWithBias(scope *Scope, input tf.Output, filter tf.Output, bias tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, strides []int64, padding string, optional ...QuantizedDepthwiseConv2DWithBiasAttr) (output tf.Output, min_output tf.Output, max_output tf.Output)
Computes quantized depthwise Conv2D with Bias.
Arguments:
input: The original input tensor. filter: The original filter tensor. bias: The original bias tensor. min_input: The float value that the minimum quantized input value represents. max_input: The float value that the maximum quantized input value represents. min_filter: The float value that the minimum quantized filter value represents. max_filter: The float value that the maximum quantized filter value represents. strides: List of stride values.
Returns:
output: The output tensor. min_output: The float value that the minimum quantized output value represents. max_output: The float value that the maximum quantized output value represents.
func QuantizedDepthwiseConv2DWithBiasAndRelu ¶
func QuantizedDepthwiseConv2DWithBiasAndRelu(scope *Scope, input tf.Output, filter tf.Output, bias tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, strides []int64, padding string, optional ...QuantizedDepthwiseConv2DWithBiasAndReluAttr) (output tf.Output, min_output tf.Output, max_output tf.Output)
Computes quantized depthwise Conv2D with Bias and Relu.
Arguments:
input: The original input tensor. filter: The original filter tensor. bias: The original bias tensor. min_input: The float value that the minimum quantized input value represents. max_input: The float value that the maximum quantized input value represents. min_filter: The float value that the minimum quantized filter value represents. max_filter: The float value that the maximum quantized filter value represents. strides: List of stride values.
Returns:
output: The output tensor. min_output: The float value that the minimum quantized output value represents. max_output: The float value that the maximum quantized output value represents.
func QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize ¶
func QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize(scope *Scope, input tf.Output, filter tf.Output, bias tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, min_freezed_output tf.Output, max_freezed_output tf.Output, strides []int64, padding string, optional ...QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr) (output tf.Output, min_output tf.Output, max_output tf.Output)
Computes quantized depthwise Conv2D with Bias, Relu and Requantize.
Arguments:
input: The original input tensor. filter: The original filter tensor. bias: The original bias tensor. min_input: The float value that the minimum quantized input value represents. max_input: The float value that the maximum quantized input value represents. min_filter: The float value that the minimum quantized filter value represents. max_filter: The float value that the maximum quantized filter value represents. min_freezed_output: The minimum float value of the output tensor. max_freezed_output: The maximum float value of the output tensor. strides: List of stride values.
Returns:
output: The output tensor. min_output: The float value that the minimum quantized output value represents. max_output: The float value that the maximum quantized output value represents.
func QuantizedInstanceNorm ¶
func QuantizedInstanceNorm(scope *Scope, x tf.Output, x_min tf.Output, x_max tf.Output, optional ...QuantizedInstanceNormAttr) (y tf.Output, y_min tf.Output, y_max tf.Output)
Quantized Instance normalization.
Arguments:
x: A 4D input Tensor. x_min: The value represented by the lowest quantized input. x_max: The value represented by the highest quantized input.
Returns:
y: A 4D Tensor. y_min: The value represented by the lowest quantized output. y_max: The value represented by the highest quantized output.
func QuantizedMatMul ¶
func QuantizedMatMul(scope *Scope, a tf.Output, b tf.Output, min_a tf.Output, max_a tf.Output, min_b tf.Output, max_b tf.Output, optional ...QuantizedMatMulAttr) (out tf.Output, min_out tf.Output, max_out tf.Output)
Perform a quantized matrix multiplication of `a` by the matrix `b`.
The inputs must be two-dimensional matrices and the inner dimension of `a` (after being transposed if `transpose_a` is non-zero) must match the outer dimension of `b` (after being transposed if `transpose_b` is non-zero).
Arguments:
a: Must be a two-dimensional tensor. b: Must be a two-dimensional tensor. min_a: The float value that the lowest quantized `a` value represents. max_a: The float value that the highest quantized `a` value represents. min_b: The float value that the lowest quantized `b` value represents. max_b: The float value that the highest quantized `b` value represents.
Returns:
out min_out: The float value that the lowest quantized output value represents. max_out: The float value that the highest quantized output value represents.
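A hedged Go sketch wiring a QuantizedMatMul node, assuming the quantized matrices themselves are fed at run time (tf.Quint8 is assumed here as an acceptable element type) and that the min/max constants give the float range the quantized values represent. Only graph construction is shown.

```go
s := NewScope()
a := Placeholder(s, tf.Quint8) // quantized [m, k] matrix, tf.Quint8 assumed
b := Placeholder(s, tf.Quint8) // quantized [k, n] matrix
minA, maxA := Const(s, float32(-1.0)), Const(s, float32(1.0))
minB, maxB := Const(s, float32(-1.0)), Const(s, float32(1.0))
out, minOut, maxOut := QuantizedMatMul(s, a, b, minA, maxA, minB, maxB)
if s.Err() != nil {
	panic(s.Err())
}
_, _, _ = out, minOut, maxOut
```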
func QuantizedMatMulWithBias ¶
func QuantizedMatMulWithBias(scope *Scope, a tf.Output, b tf.Output, bias tf.Output, min_a tf.Output, max_a tf.Output, min_b tf.Output, max_b tf.Output, optional ...QuantizedMatMulWithBiasAttr) (out tf.Output, min_out tf.Output, max_out tf.Output)
Performs a quantized matrix multiplication of `a` by the matrix `b` with bias add.
The inputs must be two-dimensional matrices and a 1D bias vector. The inner dimension of `a` (after being transposed if `transpose_a` is non-zero) must match the outer dimension of `b` (after being transposed if `transpose_b` is non-zero). A broadcast add with the bias values is then applied to the matrix multiplication result; the bias size must match the inner dimension of `b`.
Arguments:
a: A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`. b: A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`. bias: A 1D bias tensor with size matching the inner dimension of `b` (after being
transposed if `transpose_b` is non-zero).
min_a: The float value that the lowest quantized `a` value represents. max_a: The float value that the highest quantized `a` value represents. min_b: The float value that the lowest quantized `b` value represents. max_b: The float value that the highest quantized `b` value represents.
Returns:
out min_out: The float value that the lowest quantized output value represents. max_out: The float value that the highest quantized output value represents.
func QuantizedMatMulWithBiasAndRelu ¶
func QuantizedMatMulWithBiasAndRelu(scope *Scope, a tf.Output, b tf.Output, bias tf.Output, min_a tf.Output, max_a tf.Output, min_b tf.Output, max_b tf.Output, optional ...QuantizedMatMulWithBiasAndReluAttr) (out tf.Output, min_out tf.Output, max_out tf.Output)
Perform a quantized matrix multiplication of `a` by the matrix `b` with bias add and relu fusion.
The inputs must be two-dimensional matrices and a 1D bias vector. The inner dimension of `a` (after being transposed if `transpose_a` is non-zero) must match the outer dimension of `b` (after being transposed if `transpose_b` is non-zero). A broadcast add with the bias values is then applied to the matrix multiplication result; the bias size must match the inner dimension of `b`. A relu activation is then applied to produce a non-negative result.
Arguments:
a: A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`. b: A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`. bias: A 1D bias tensor with size matching the inner dimension of `b` (after being
transposed if `transpose_b` is non-zero).
min_a: The float value that the lowest quantized `a` value represents. max_a: The float value that the highest quantized `a` value represents. min_b: The float value that the lowest quantized `b` value represents. max_b: The float value that the highest quantized `b` value represents.
Returns:
out min_out: The float value that the lowest quantized output value represents. max_out: The float value that the highest quantized output value represents.
func QuantizedMatMulWithBiasAndReluAndRequantize ¶
func QuantizedMatMulWithBiasAndReluAndRequantize(scope *Scope, a tf.Output, b tf.Output, bias tf.Output, min_a tf.Output, max_a tf.Output, min_b tf.Output, max_b tf.Output, min_freezed_output tf.Output, max_freezed_output tf.Output, optional ...QuantizedMatMulWithBiasAndReluAndRequantizeAttr) (out tf.Output, min_out tf.Output, max_out tf.Output)
Perform a quantized matrix multiplication of `a` by the matrix `b` with bias add and relu and requantize fusion.
The inputs must be two-dimensional matrices and a 1D bias vector. The inner dimension of `a` (after being transposed if `transpose_a` is non-zero) must match the outer dimension of `b` (after being transposed if `transpose_b` is non-zero). A broadcast add with the bias values is then applied to the matrix multiplication result; the bias size must match the inner dimension of `b`. A relu activation is then applied to produce a non-negative result, followed by a requantize operation to produce the final uint8 result.
Arguments:
a: A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`. b: A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`. bias: A 1D bias tensor with size matching the inner dimension of `b` (after being
transposed if `transpose_b` is non-zero).
min_a: The float value that the lowest quantized `a` value represents. max_a: The float value that the highest quantized `a` value represents. min_b: The float value that the lowest quantized `b` value represents. max_b: The float value that the highest quantized `b` value represents. min_freezed_output: The minimum float value of the output tensor after requantization. max_freezed_output: The maximum float value of the output tensor after requantization.
Returns:
out min_out: The float value that the lowest quantized output value represents. max_out: The float value that the highest quantized output value represents.
func QuantizedMaxPool ¶
func QuantizedMaxPool(scope *Scope, input tf.Output, min_input tf.Output, max_input tf.Output, ksize []int64, strides []int64, padding string) (output tf.Output, min_output tf.Output, max_output tf.Output)
Produces the max pool of the input tensor for quantized types.
Arguments:
input: The 4D (batch x rows x cols x depth) Tensor to MaxReduce over. min_input: The float value that the lowest quantized input value represents. max_input: The float value that the highest quantized input value represents. ksize: The size of the window for each dimension of the input tensor.
The length must be 4 to match the number of dimensions of the input.
strides: The stride of the sliding window for each dimension of the input
tensor. The length must be 4 to match the number of dimensions of the input.
padding: The type of padding algorithm to use.
Returns:
output min_output: The float value that the lowest quantized output value represents. max_output: The float value that the highest quantized output value represents.
func QuantizedMul ¶
func QuantizedMul(scope *Scope, x tf.Output, y tf.Output, min_x tf.Output, max_x tf.Output, min_y tf.Output, max_y tf.Output, optional ...QuantizedMulAttr) (z tf.Output, min_z tf.Output, max_z tf.Output)
Returns x * y element-wise, working on quantized buffers.
Arguments:
min_x: The float value that the lowest quantized `x` value represents. max_x: The float value that the highest quantized `x` value represents. min_y: The float value that the lowest quantized `y` value represents. max_y: The float value that the highest quantized `y` value represents.
Returns:
z min_z: The float value that the lowest quantized output value represents. max_z: The float value that the highest quantized output value represents.
*NOTE*: `QuantizedMul` supports limited forms of broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func QuantizedRelu ¶
func QuantizedRelu(scope *Scope, features tf.Output, min_features tf.Output, max_features tf.Output, optional ...QuantizedReluAttr) (activations tf.Output, min_activations tf.Output, max_activations tf.Output)
Computes Quantized Rectified Linear: `max(features, 0)`
Arguments:
min_features: The float value that the lowest quantized value represents. max_features: The float value that the highest quantized value represents.
Returns:
activations: Has the same output shape as "features". min_activations: The float value that the lowest quantized value represents. max_activations: The float value that the highest quantized value represents.
func QuantizedRelu6 ¶
func QuantizedRelu6(scope *Scope, features tf.Output, min_features tf.Output, max_features tf.Output, optional ...QuantizedRelu6Attr) (activations tf.Output, min_activations tf.Output, max_activations tf.Output)
Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)`
Arguments:
min_features: The float value that the lowest quantized value represents. max_features: The float value that the highest quantized value represents.
Returns:
activations: Has the same output shape as "features". min_activations: The float value that the lowest quantized value represents. max_activations: The float value that the highest quantized value represents.
func QuantizedReluX ¶
func QuantizedReluX(scope *Scope, features tf.Output, max_value tf.Output, min_features tf.Output, max_features tf.Output, optional ...QuantizedReluXAttr) (activations tf.Output, min_activations tf.Output, max_activations tf.Output)
Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)`
Arguments:
min_features: The float value that the lowest quantized value represents. max_features: The float value that the highest quantized value represents.
Returns:
activations: Has the same output shape as "features". min_activations: The float value that the lowest quantized value represents. max_activations: The float value that the highest quantized value represents.
func QuantizedReshape ¶
func QuantizedReshape(scope *Scope, tensor tf.Output, shape tf.Output, input_min tf.Output, input_max tf.Output) (output tf.Output, output_min tf.Output, output_max tf.Output)
Reshapes a quantized tensor as per the Reshape op.
Arguments:
shape: Defines the shape of the output tensor. input_min: The minimum value of the input. input_max: The maximum value of the input.
Returns:
output output_min: This value is copied from input_min. output_max: This value is copied from input_max.
func QuantizedResizeBilinear ¶
func QuantizedResizeBilinear(scope *Scope, images tf.Output, size tf.Output, min tf.Output, max tf.Output, optional ...QuantizedResizeBilinearAttr) (resized_images tf.Output, out_min tf.Output, out_max tf.Output)
Resize quantized `images` to `size` using quantized bilinear interpolation.
Input images and output images must be quantized types.
Arguments:
images: 4-D with shape `[batch, height, width, channels]`. size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The
new size for the images.
Returns:
resized_images: 4-D with shape
`[batch, new_height, new_width, channels]`.
out_min out_max
func QueueCloseV2 ¶
Closes the given queue.
This operation signals that no more elements will be enqueued in the given queue. Subsequent Enqueue(Many) operations will fail. Subsequent Dequeue(Many) operations will continue to succeed if sufficient elements remain in the queue. Subsequent Dequeue(Many) operations that would block will fail immediately.
Arguments:
handle: The handle to a queue.
Returns the created operation.
func QueueDequeueManyV2 ¶
func QueueDequeueManyV2(scope *Scope, handle tf.Output, n tf.Output, component_types []tf.DataType, optional ...QueueDequeueManyV2Attr) (components []tf.Output)
Dequeues `n` tuples of one or more tensors from the given queue.
If the queue is closed and there are fewer than `n` elements, then an OutOfRange error is returned.
This operation concatenates queue-element component tensors along the 0th dimension to make a single component tensor. All of the components in the dequeued tuple will have size `n` in the 0th dimension.
This operation has `k` outputs, where `k` is the number of components in the tuples stored in the given queue, and output `i` is the ith component of the dequeued tuple.
N.B. If the queue is empty, this operation will block until `n` elements have been dequeued (or 'timeout_ms' elapses, if specified).
Arguments:
handle: The handle to a queue. n: The number of tuples to dequeue. component_types: The type of each component in a tuple.
Returns One or more tensors that were dequeued as a tuple.
func QueueDequeueUpToV2 ¶
func QueueDequeueUpToV2(scope *Scope, handle tf.Output, n tf.Output, component_types []tf.DataType, optional ...QueueDequeueUpToV2Attr) (components []tf.Output)
Dequeues `n` tuples of one or more tensors from the given queue.
This operation is not supported by all queues. If a queue does not support DequeueUpTo, then an Unimplemented error is returned.
If the queue is closed and there are more than 0 but fewer than `n` elements remaining, then instead of returning an OutOfRange error like QueueDequeueMany, fewer than `n` elements are returned immediately. If the queue is closed and there are 0 elements left in the queue, then an OutOfRange error is returned just like in QueueDequeueMany. Otherwise the behavior is identical to QueueDequeueMany:
This operation concatenates queue-element component tensors along the 0th dimension to make a single component tensor. All of the components in the dequeued tuple will have size n in the 0th dimension.
This operation has `k` outputs, where `k` is the number of components in the tuples stored in the given queue, and output `i` is the ith component of the dequeued tuple.
Arguments:
handle: The handle to a queue. n: The number of tuples to dequeue. component_types: The type of each component in a tuple.
Returns One or more tensors that were dequeued as a tuple.
func QueueDequeueV2 ¶
func QueueDequeueV2(scope *Scope, handle tf.Output, component_types []tf.DataType, optional ...QueueDequeueV2Attr) (components []tf.Output)
Dequeues a tuple of one or more tensors from the given queue.
This operation has k outputs, where k is the number of components in the tuples stored in the given queue, and output i is the ith component of the dequeued tuple.
N.B. If the queue is empty, this operation will block until an element has been dequeued (or 'timeout_ms' elapses, if specified).
Arguments:
handle: The handle to a queue. component_types: The type of each component in a tuple.
Returns One or more tensors that were dequeued as a tuple.
func QueueEnqueueManyV2 ¶
func QueueEnqueueManyV2(scope *Scope, handle tf.Output, components []tf.Output, optional ...QueueEnqueueManyV2Attr) (o *tf.Operation)
Enqueues zero or more tuples of one or more tensors in the given queue.
This operation slices each component tensor along the 0th dimension to make multiple queue elements. All of the tuple components must have the same size in the 0th dimension.
The components input has k elements, which correspond to the components of tuples stored in the given queue.
N.B. If the queue is full, this operation will block until the given elements have been enqueued (or 'timeout_ms' elapses, if specified).
Arguments:
handle: The handle to a queue. components: One or more tensors from which the enqueued tensors should
be taken.
Returns the created operation.
func QueueEnqueueV2 ¶
func QueueEnqueueV2(scope *Scope, handle tf.Output, components []tf.Output, optional ...QueueEnqueueV2Attr) (o *tf.Operation)
Enqueues a tuple of one or more tensors in the given queue.
The components input has k elements, which correspond to the components of tuples stored in the given queue.
N.B. If the queue is full, this operation will block until the given element has been enqueued (or 'timeout_ms' elapses, if specified).
Arguments:
handle: The handle to a queue. components: One or more tensors from which the enqueued tensors should be taken.
Returns the created operation.
func QueueIsClosedV2 ¶
Returns true if queue is closed.
This operation returns true if the queue is closed and false if the queue is open.
Arguments:
handle: The handle to a queue.
func QueueSizeV2 ¶
Computes the number of elements in the given queue.
Arguments:
handle: The handle to a queue.
Returns The number of elements in the given queue.
func RFFT ¶
func RFFT(scope *Scope, input tf.Output, fft_length tf.Output, optional ...RFFTAttr) (output tf.Output)
Real-valued fast Fourier transform.
Computes the 1-dimensional discrete Fourier transform of a real-valued signal over the inner-most dimension of `input`.
Since the DFT of a real signal is Hermitian-symmetric, `RFFT` only returns the `fft_length / 2 + 1` unique components of the FFT: the zero-frequency term, followed by the `fft_length / 2` positive-frequency terms.
Along the axis `RFFT` is computed on, if `fft_length` is smaller than the corresponding dimension of `input`, the dimension is cropped. If it is larger, the dimension is padded with zeros.
Arguments:
input: A float32 tensor. fft_length: An int32 tensor of shape [1]. The FFT length.
Returns A complex64 tensor of the same rank as `input`. The inner-most
dimension of `input` is replaced with the `fft_length / 2 + 1` unique frequency components of its 1D Fourier transform.
@compatibility(numpy) Equivalent to np.fft.rfft @end_compatibility
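A hedged Go sketch of a 1-D real FFT: with fft_length = 8, the inner-most dimension of the complex64 output has 8/2 + 1 = 5 elements. Only graph construction is shown; the signal would be fed via Session.Run.

```go
s := NewScope()
signal := Placeholder(s, tf.Float) // real-valued signal, inner-most dimension 8
fftLen := Const(s, []int32{8})
spectrum := RFFT(s, signal, fftLen) // complex64, inner-most dimension 8/2 + 1 = 5
if s.Err() != nil {
	panic(s.Err())
}
_ = spectrum
```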
func RFFT2D ¶
func RFFT2D(scope *Scope, input tf.Output, fft_length tf.Output, optional ...RFFT2DAttr) (output tf.Output)
2D real-valued fast Fourier transform.
Computes the 2-dimensional discrete Fourier transform of a real-valued signal over the inner-most 2 dimensions of `input`.
Since the DFT of a real signal is Hermitian-symmetric, `RFFT2D` only returns the `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension of `output`: the zero-frequency term, followed by the `fft_length / 2` positive-frequency terms.
Along each axis `RFFT2D` is computed on, if `fft_length` is smaller than the corresponding dimension of `input`, the dimension is cropped. If it is larger, the dimension is padded with zeros.
Arguments:
input: A float32 tensor. fft_length: An int32 tensor of shape [2]. The FFT length for each dimension.
Returns A complex64 tensor of the same rank as `input`. The inner-most 2
dimensions of `input` are replaced with their 2D Fourier transform. The inner-most dimension contains `fft_length / 2 + 1` unique frequency components.
@compatibility(numpy) Equivalent to np.fft.rfft2 @end_compatibility
func RFFT3D ¶
func RFFT3D(scope *Scope, input tf.Output, fft_length tf.Output, optional ...RFFT3DAttr) (output tf.Output)
3D real-valued fast Fourier transform.
Computes the 3-dimensional discrete Fourier transform of a real-valued signal over the inner-most 3 dimensions of `input`.
Since the DFT of a real signal is Hermitian-symmetric, `RFFT3D` only returns the `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension of `output`: the zero-frequency term, followed by the `fft_length / 2` positive-frequency terms.
Along each axis `RFFT3D` is computed on, if `fft_length` is smaller than the corresponding dimension of `input`, the dimension is cropped. If it is larger, the dimension is padded with zeros.
Arguments:
input: A float32 tensor. fft_length: An int32 tensor of shape [3]. The FFT length for each dimension.
Returns A complex64 tensor of the same rank as `input`. The inner-most 3
dimensions of `input` are replaced with their 3D Fourier transform. The inner-most dimension contains `fft_length / 2 + 1` unique frequency components.
@compatibility(numpy) Equivalent to np.fft.rfftn with 3 dimensions. @end_compatibility
func RFFTND ¶ added in v0.7.0
func RFFTND(scope *Scope, input tf.Output, fft_length tf.Output, axes tf.Output, optional ...RFFTNDAttr) (output tf.Output)
ND fast real Fourier transform.
Computes the n-dimensional real discrete Fourier transform over designated dimensions of `input`. The length of the last transformed axis of the output will be fft_length[-1]//2+1.
If fft_length[i]<shape(input)[i], the input is cropped. If fft_length[i]>shape(input)[i], the input is padded with zeros. If fft_length is not given, the default shape(input) is used.
Axes mean the dimensions to perform the transform on. Default is to perform on all axes.
Arguments:
input: A real-valued tensor. fft_length: An int32 tensor. The FFT length for each dimension. axes: An int32 tensor with the same shape as fft_length. Axes to perform the transform.
Returns A complex tensor of the same shape as `input`. The designated dimensions of `input` are replaced with their real Fourier transforms.
@compatibility(numpy) Equivalent to np.fft.rfftn. @end_compatibility
func RGBToHSV ¶
Converts one or more images from RGB to HSV.
Outputs a tensor of the same shape as the `images` tensor, containing the HSV value of the pixels. The output is only well defined if the values in `images` are in `[0,1]`.
`output[..., 0]` contains hue, `output[..., 1]` contains saturation, and `output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0 corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue.
Usage Example:
>>> blue_image = tf.stack([
...    tf.zeros([5,5]),
...    tf.zeros([5,5]),
...    tf.ones([5,5])],
...    axis=-1)
>>> blue_hsv_image = tf.image.rgb_to_hsv(blue_image)
>>> blue_hsv_image[0,0].numpy()
array([0.6666667, 1. , 1. ], dtype=float32)
Arguments:
images: 1-D or higher rank. RGB data to convert. Last dimension must be size 3.
Returns `images` converted to HSV.
func RaggedBincount ¶
func RaggedBincount(scope *Scope, splits tf.Output, values tf.Output, size tf.Output, weights tf.Output, optional ...RaggedBincountAttr) (output tf.Output)
Counts the number of occurrences of each value in an integer array.
Outputs a vector with length `size` and the same dtype as `weights`. If `weights` are empty, then index `i` stores the number of times the value `i` is counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of the value in `weights` at each index where the corresponding value in `arr` is `i`.
Values in `arr` outside of the range [0, size) are ignored.
Arguments:
splits: 1D int64 `Tensor`. values: 2D int `Tensor`. size: non-negative int scalar `Tensor`. weights: is an int32, int64, float32, or float64 `Tensor` with the same
shape as `input`, or a length-0 `Tensor`, in which case it acts as all weights equal to 1.
Returns 1D `Tensor` with length equal to `size` or 2D `Tensor` with [batch_size, `size`]. The counts or summed weights for each value in the range [0, size).
func RaggedCountSparseOutput ¶
func RaggedCountSparseOutput(scope *Scope, splits tf.Output, values tf.Output, weights tf.Output, binary_output bool, optional ...RaggedCountSparseOutputAttr) (output_indices tf.Output, output_values tf.Output, output_dense_shape tf.Output)
Performs sparse-output bin counting for a ragged tensor input.
Counts the number of times each value occurs in the input.
Arguments:
splits: Tensor containing the row splits of the ragged tensor to count. values: Tensor containing values of the sparse tensor to count. weights: A Tensor of the same shape as indices containing per-index weight values.
May also be the empty tensor if no weights are used.
binary_output: Whether to output the number of occurrences of each value or 1.
Returns:
output_indices: Indices tensor for the resulting sparse tensor object. output_values: Values tensor for the resulting sparse tensor object. output_dense_shape: Shape tensor for the resulting sparse tensor object.
func RaggedCross ¶
func RaggedCross(scope *Scope, ragged_values []tf.Output, ragged_row_splits []tf.Output, sparse_indices []tf.Output, sparse_values []tf.Output, sparse_shape []tf.Output, dense_inputs []tf.Output, input_order string, hashed_output bool, num_buckets int64, hash_key int64, out_values_type tf.DataType, out_row_splits_type tf.DataType) (output_values tf.Output, output_row_splits tf.Output)
Generates a feature cross from a list of tensors, and returns it as a RaggedTensor. See `tf.ragged.cross` for more details.
Arguments:
ragged_values: The values tensor for each RaggedTensor input. ragged_row_splits: The row_splits tensor for each RaggedTensor input. sparse_indices: The indices tensor for each SparseTensor input. sparse_values: The values tensor for each SparseTensor input. sparse_shape: The dense_shape tensor for each SparseTensor input. dense_inputs: The tf.Tensor inputs. input_order: String specifying the tensor type for each input. The `i`th character in
this string specifies the type of the `i`th input, and is one of: 'R' (ragged), 'D' (dense), or 'S' (sparse). This attr is used to ensure that the crossed values are combined in the order of the inputs from the call to tf.ragged.cross.
Returns:
output_values: The `values` for the returned `RaggedTensor`. output_row_splits: The `row_splits` for the returned `RaggedTensor`.
func RaggedGather ¶
func RaggedGather(scope *Scope, params_nested_splits []tf.Output, params_dense_values tf.Output, indices tf.Output, OUTPUT_RAGGED_RANK int64) (output_nested_splits []tf.Output, output_dense_values tf.Output)
Gather ragged slices from `params` axis `0` according to `indices`.
Outputs a `RaggedTensor` output composed from `output_dense_values` and `output_nested_splits`, such that:
```python
output.shape = indices.shape + params.shape[1:]
output.ragged_rank = indices.shape.ndims + params.ragged_rank
output[i...j, d0...dn] = params[indices[i...j], d0...dn]
```
where
- `params = ragged.from_nested_row_splits(params_dense_values, params_nested_splits)` provides the values that should be gathered.
- `indices` is a dense tensor with dtype `int32` or `int64`, indicating which values should be gathered.
- `output = ragged.from_nested_row_splits(output_dense_values, output_nested_splits)` is the output tensor.
(Note: This c++ op is used to implement the higher-level python `tf.ragged.gather` op, which also supports ragged indices.)
Arguments:
params_nested_splits: The `nested_row_splits` tensors that define the row-partitioning for the
`params` RaggedTensor input.
params_dense_values: The `flat_values` for the `params` RaggedTensor. There was a terminology change
at the python level from dense_values to flat_values, so dense_values is the deprecated name.
indices: Indices in the outermost dimension of `params` of the values that should be
gathered.
OUTPUT_RAGGED_RANK: The ragged rank of the output RaggedTensor. `output_nested_splits` will contain
this number of `row_splits` tensors. This value should equal `indices.shape.ndims + params.ragged_rank - 1`.
Returns:
output_nested_splits: The `nested_row_splits` tensors that define the row-partitioning for the
returned RaggedTensor.
output_dense_values: The `flat_values` for the returned RaggedTensor.
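A hedged Go sketch of the gather described above, using a single ragged dimension. The row splits and values below encode the ragged rows [[1, 2], [], [3, 4, 5]], and OUTPUT_RAGGED_RANK is 1 (= indices.shape.ndims + params.ragged_rank - 1). Only graph construction is shown.

```go
s := NewScope()
paramsSplits := Const(s, []int64{0, 2, 2, 5})      // rows: [1 2], [], [3 4 5]
paramsValues := Const(s, []float32{1, 2, 3, 4, 5})
indices := Const(s, []int32{2, 0})                 // gather rows 2 and 0
outSplits, outValues := RaggedGather(s, []tf.Output{paramsSplits}, paramsValues, indices, 1)
if s.Err() != nil {
	panic(s.Err())
}
_, _ = outSplits, outValues // encodes the ragged rows [[3, 4, 5], [1, 2]]
```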
func RaggedRange ¶
func RaggedRange(scope *Scope, starts tf.Output, limits tf.Output, deltas tf.Output, optional ...RaggedRangeAttr) (rt_nested_splits tf.Output, rt_dense_values tf.Output)
Returns a `RaggedTensor` containing the specified sequences of numbers.
Returns a `RaggedTensor` `result` composed from `rt_dense_values` and `rt_nested_splits`, such that `result[i] = range(starts[i], limits[i], deltas[i])`.
```python
(rt_nested_splits, rt_dense_values) = ragged_range(
    starts=[2, 5, 8], limits=[3, 5, 12], deltas=1)
result = tf.ragged.from_row_splits(rt_dense_values, rt_nested_splits)
print(result)
# <tf.RaggedTensor [[2], [], [8, 9, 10, 11]]>
```
The input tensors `starts`, `limits`, and `deltas` may be scalars or vectors. The vector inputs must all have the same size. Scalar inputs are broadcast to match the size of the vector inputs.
Arguments:
starts: The starts of each range. limits: The limits of each range. deltas: The deltas of each range.
Returns:
rt_nested_splits: The `row_splits` for the returned `RaggedTensor`. rt_dense_values: The `flat_values` for the returned `RaggedTensor`.
func RaggedTensorFromVariant ¶
func RaggedTensorFromVariant(scope *Scope, encoded_ragged tf.Output, input_ragged_rank int64, output_ragged_rank int64, Tvalues tf.DataType, optional ...RaggedTensorFromVariantAttr) (output_nested_splits []tf.Output, output_dense_values tf.Output)
Decodes a `variant` Tensor into a `RaggedTensor`.
Decodes the given `variant` Tensor and returns a `RaggedTensor`. The input could be a scalar, meaning it encodes a single `RaggedTensor` with ragged_rank `output_ragged_rank`. It could also have an arbitrary rank, in which case each element is decoded into a `RaggedTensor` with ragged_rank `input_ragged_rank` and these are then stacked according to the input shape to output a single `RaggedTensor` with ragged_rank `output_ragged_rank`. Each `variant` element in the input Tensor is decoded by retrieving from the element a 1-D `variant` Tensor with `input_ragged_rank + 1` Tensors, corresponding to the splits and values of the decoded `RaggedTensor`. If `input_ragged_rank` is -1, then it is inferred as `output_ragged_rank` - `rank(encoded_ragged)`. See `RaggedTensorToVariant` for the corresponding encoding logic.
Arguments:
encoded_ragged: A `variant` Tensor containing encoded `RaggedTensor`s. input_ragged_rank: The ragged rank of each encoded `RaggedTensor` component in the input. If set to
-1, this is inferred as `output_ragged_rank` - `rank(encoded_ragged)`
output_ragged_rank: The expected ragged rank of the output `RaggedTensor`. The following must hold:
`output_ragged_rank = rank(encoded_ragged) + input_ragged_rank`.
Returns:
output_nested_splits: A list of one or more Tensors representing the splits of the output
`RaggedTensor`.
output_dense_values: A Tensor representing the values of the output `RaggedTensor`.
func RaggedTensorToSparse ¶
func RaggedTensorToSparse(scope *Scope, rt_nested_splits []tf.Output, rt_dense_values tf.Output) (sparse_indices tf.Output, sparse_values tf.Output, sparse_dense_shape tf.Output)
Converts a `RaggedTensor` into a `SparseTensor` with the same values.
input = ragged.from_nested_row_splits(rt_dense_values, rt_nested_splits)
output = SparseTensor(indices=sparse_indices, values=sparse_values, dense_shape=sparse_dense_shape)
Arguments:
rt_nested_splits: The `row_splits` for the `RaggedTensor`. rt_dense_values: The `flat_values` for the `RaggedTensor`.
Returns:
sparse_indices: The indices for the `SparseTensor`. sparse_values: The values of the `SparseTensor`. sparse_dense_shape: `sparse_dense_shape` is a tight bounding box of the input `RaggedTensor`.
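For illustration, a hedged Go sketch that converts the two-row ragged tensor [[1, 2], [3]] into its sparse form; only graph construction is shown, and the comments describe the expected run-time result.

```go
s := NewScope()
rowSplits := Const(s, []int64{0, 2, 3})    // rows: [1 2], [3]
flatValues := Const(s, []float32{1, 2, 3})
indices, values, denseShape := RaggedTensorToSparse(s, []tf.Output{rowSplits}, flatValues)
if s.Err() != nil {
	panic(s.Err())
}
_, _, _ = indices, values, denseShape // dense shape would evaluate to [2, 2]
```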
func RaggedTensorToTensor ¶
func RaggedTensorToTensor(scope *Scope, shape tf.Output, values tf.Output, default_value tf.Output, row_partition_tensors []tf.Output, row_partition_types []string) (result tf.Output)
Create a dense tensor from a ragged tensor, possibly altering its shape.
The `ragged_to_dense` op creates a dense tensor from a list of row partition tensors, a value vector, and default values. If the shape is unspecified, the minimal shape required to contain all the elements in the ragged tensor (the natural shape) will be used. If some dimensions are left unspecified, then the size of the natural shape is used in that dimension.
The default_value will be broadcast to the output shape. After that, the values from the ragged tensor overwrite the default values. Note that the default_value must have fewer dimensions than the values tensor.
The row partition tensors are in the order of the dimensions. At present, the types can be:
- "ROW_SPLITS": the row_splits tensor from the ragged tensor.
- "VALUE_ROWIDS": the value_rowids tensor from the ragged tensor.
- "FIRST_DIM_SIZE": if value_rowids is used for the first dimension, then it is preceded by "FIRST_DIM_SIZE".
Arguments:
shape: The desired shape of the output tensor. If left unspecified (empty),
the minimal shape required to contain all the elements in the ragged tensor (the natural shape) will be used. If some dimensions are left unspecified, then the size of the natural shape is used in that dimension.
Note that dense dimensions cannot be modified by the shape argument. Trying to change the size of a dense dimension will cause the op to fail. Examples:
- natural shape: [4, 5, 6]; shape: -1; output shape: [4, 5, 6]
- natural shape: [4, 5, 6]; shape: [3, -1, 2]; output shape: [3, 5, 2]
- natural shape: [4, 5, 6]; shape: [3, 7, 2]; output shape: [3, 7, 2]
values: A 1D tensor representing the values of the ragged tensor. default_value: The default_value when the shape is larger than the ragged tensor. The
default_value is broadcast until it is the shape of the output tensor, and then overwritten by values in the ragged tensor. The default value must be compatible with this broadcast operation, and must have fewer dimensions than the value tensor.
row_partition_types: The types of the row partition tensors. At present, these can be: - "ROW_SPLITS": the row_splits tensor from the ragged tensor. - "VALUE_ROWIDS": the value_rowids tensor from the ragged tensor. - "FIRST_DIM_SIZE": if value_rowids is used for the first dimension, then it is preceded by "FIRST_DIM_SIZE".
The tensors are in the order of the dimensions.
Returns The resulting dense tensor.
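A hedged Go sketch of the densification above: the ragged rows [[1, 2], [3]] are written into a [2, 2] dense tensor, with 0 as the default value for the missing element. Only graph construction is shown.

```go
s := NewScope()
shape := Const(s, []int64{2, 2})
values := Const(s, []int32{1, 2, 3})    // flat values of the ragged tensor
defaultValue := Const(s, int32(0))      // fills positions not covered by values
rowSplits := Const(s, []int64{0, 2, 3}) // rows: [1 2], [3]
dense := RaggedTensorToTensor(s, shape, values, defaultValue,
	[]tf.Output{rowSplits}, []string{"ROW_SPLITS"})
if s.Err() != nil {
	panic(s.Err())
}
_ = dense // would evaluate to [[1, 2], [3, 0]]
```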
func RaggedTensorToVariant ¶
func RaggedTensorToVariant(scope *Scope, rt_nested_splits []tf.Output, rt_dense_values tf.Output, batched_input bool) (encoded_ragged tf.Output)
Encodes a `RaggedTensor` into a `variant` Tensor.
Encodes the given `RaggedTensor` and returns a `variant` Tensor. If `batched_input` is True, then input `RaggedTensor` is unbatched along the zero-th dimension, each component `RaggedTensor` is encoded into a scalar `variant` Tensor, and these are stacked to return a 1-D `variant` Tensor. If `batched_input` is False, then the input `RaggedTensor` is encoded as is and a scalar `variant` Tensor is returned. A `RaggedTensor` is encoded by first creating a 1-D `variant` Tensor with `ragged_rank + 1` elements, containing the splits and values Tensors of the `RaggedTensor`. Then the 1-D `variant` Tensor is wrapped in a scalar `variant` Tensor. See `RaggedTensorFromVariant` for the corresponding decoding logic.
Arguments:
rt_nested_splits: A list of one or more Tensors representing the splits of the input
`RaggedTensor`.
rt_dense_values: A Tensor representing the values of the input `RaggedTensor`. batched_input: A `bool` denoting whether the input is a batched `RaggedTensor`.
Returns A `variant` Tensor containing the encoded `RaggedTensor`.
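A hedged Go round-trip sketch: encode an unbatched ragged tensor to a scalar `variant` and decode it again with RaggedTensorFromVariant (described earlier). With a scalar encoding, output_ragged_rank = rank(encoded_ragged) + input_ragged_rank = 0 + 1 = 1. Only graph construction is shown.

```go
s := NewScope()
splits := Const(s, []int64{0, 2, 3}) // rows: [1 2], [3]
values := Const(s, []float32{1, 2, 3})
encoded := RaggedTensorToVariant(s, []tf.Output{splits}, values, false) // scalar variant
decodedSplits, decodedValues := RaggedTensorFromVariant(s, encoded, 1, 1, tf.Float)
if s.Err() != nil {
	panic(s.Err())
}
_, _ = decodedSplits, decodedValues
```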
func RaggedTensorToVariantGradient ¶
func RaggedTensorToVariantGradient(scope *Scope, encoded_ragged_grad tf.Output, row_splits tf.Output, dense_values_shape tf.Output, Tvalues tf.DataType) (dense_values_grad tf.Output)
Helper used to compute the gradient for `RaggedTensorToVariant`.
Computes the gradient for the dense_values input to the RaggedTensorToVariant op, given the variant-encoded ragged gradients of the outputs, along with the outer row-splits and the shape of the dense-values that were provided as inputs to the RaggedTensorToVariant op.
Arguments:
encoded_ragged_grad: A `variant` Tensor containing encoded `RaggedTensor` gradients. row_splits: Outermost row-splits that were used as input to the RaggedTensorToVariant op. dense_values_shape: Shape of the dense_values that was used as an input to the
RaggedTensorToVariant op.
Returns Gradient for the dense_values of the RaggedTensorToVariant op.
func RandomCrop ¶
func RandomCrop(scope *Scope, image tf.Output, size tf.Output, optional ...RandomCropAttr) (output tf.Output)
Randomly crop `image`.
DEPRECATED at GraphDef version 8: Random crop is now pure Python
`size` is a 1-D int64 tensor with 2 elements representing the crop height and width. The values must be non-negative.
This Op picks a random location in `image` and crops a `height` by `width` rectangle from that location. The random location is picked so the cropped area will fit inside the original image.
Arguments:
image: 3-D of shape `[height, width, channels]`. size: 1-D of length 2 containing: `crop_height`, `crop_width`.
Returns 3-D of shape `[crop_height, crop_width, channels]`.
func RandomDataset ¶
func RandomDataset(scope *Scope, seed tf.Output, seed2 tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...RandomDatasetAttr) (handle tf.Output)
Creates a Dataset that returns pseudorandom numbers.
Creates a Dataset that returns a stream of uniformly distributed pseudorandom 64-bit signed integers.
In the TensorFlow Python API, you can instantiate this dataset via the class `tf.data.experimental.RandomDataset`.
Instances of this dataset are also created as a result of the `hoist_random_uniform` static optimization. Whether this optimization is performed is determined by the `experimental_optimization.hoist_random_uniform` option of `tf.data.Options`.
Arguments:
seed: A scalar seed for the random number generator. If either seed or
seed2 is set to be non-zero, the random number generator is seeded by the given seed. Otherwise, a random seed is used.
seed2: A second scalar seed to avoid seed collision.
func RandomDatasetV2 ¶ added in v0.4.0
func RandomDatasetV2(scope *Scope, seed tf.Output, seed2 tf.Output, seed_generator tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...RandomDatasetV2Attr) (handle tf.Output)
Creates a Dataset that returns pseudorandom numbers.
Creates a Dataset that returns a stream of uniformly distributed pseudorandom 64-bit signed integers. It accepts a boolean attribute that determines whether the random number generators are re-applied at each epoch. The default value is True, which means that the seeds are applied and the same sequence of random numbers is generated at each epoch. If set to False, the seeds are not re-applied and a different sequence of random numbers is generated at each epoch.
In the TensorFlow Python API, you can instantiate this dataset via the class `tf.data.experimental.RandomDatasetV2`.
Arguments:
seed: A scalar seed for the random number generator. If either seed or
seed2 is set to be non-zero, the random number generator is seeded by the given seed. Otherwise, a random seed is used.
seed2: A second scalar seed to avoid seed collision. seed_generator: A resource for the random number seed generator.
func RandomGamma ¶
func RandomGamma(scope *Scope, shape tf.Output, alpha tf.Output, optional ...RandomGammaAttr) (output tf.Output)
Outputs random values from the Gamma distribution(s) described by alpha.
This op uses the algorithm by Marsaglia et al. to acquire samples via transformation-rejection from pairs of uniform and normal random variables. See http://dl.acm.org/citation.cfm?id=358414
Arguments:
shape: 1-D integer tensor. Shape of independent samples to draw from each
distribution described by the shape parameters given in alpha.
alpha: A tensor in which each scalar is a "shape" parameter describing the
associated gamma distribution.
Returns A tensor with shape `shape + shape(alpha)`. Each slice `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for `alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha.
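A hedged Go sketch: draw 10 independent samples from each of two Gamma distributions with shape parameters 2 and 5. Per the shape rule above, the result has shape [10, 2]. Only graph construction is shown.

```go
s := NewScope()
shape := Const(s, []int32{10})          // 10 samples per distribution
alpha := Const(s, []float32{2, 5})      // two "shape" parameters
samples := RandomGamma(s, shape, alpha) // output shape: [10, 2]
if s.Err() != nil {
	panic(s.Err())
}
_ = samples
```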
func RandomGammaGrad ¶
Computes the derivative of a Gamma random sample w.r.t. `alpha`.
func RandomIndexShuffle ¶
func RandomIndexShuffle(scope *Scope, index tf.Output, seed tf.Output, max_index tf.Output, optional ...RandomIndexShuffleAttr) (output tf.Output)
Outputs the position of `value` in a permutation of [0, ..., max_index].
Output values are a bijection of the `index` for any combination of `seed` and `max_index`.
If multiple inputs are vectors (matrix in case of seed) then the size of the first dimension must match.
The outputs are deterministic.
Arguments:
index: A scalar tensor or a vector of dtype `dtype`. The index (or indices) to be shuffled. Must be within [0, max_index]. seed: A tensor of dtype `Tseed` and shape [3] or [n, 3]. The random seed. max_index: A scalar tensor or vector of dtype `dtype`. The upper bound(s) of the interval (inclusive).
Returns A scalar tensor of dtype `dtype`, within [0, max_index]. The randomly shuffled index.
func RandomPoisson ¶
func RandomPoisson(scope *Scope, shape tf.Output, rate tf.Output, optional ...RandomPoissonAttr) (output tf.Output)
Use RandomPoissonV2 instead.
DEPRECATED at GraphDef version 25: Replaced by RandomPoissonV2
func RandomPoissonV2 ¶
func RandomPoissonV2(scope *Scope, shape tf.Output, rate tf.Output, optional ...RandomPoissonV2Attr) (output tf.Output)
Outputs random values from the Poisson distribution(s) described by rate.
This op uses two algorithms, depending on rate. If rate >= 10, then the algorithm by Hormann is used to acquire samples via transformation-rejection. See http://www.sciencedirect.com/science/article/pii/0167668793909974.
Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform random variables. See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer Programming, Volume 2. Addison Wesley
Arguments:
shape: 1-D integer tensor. Shape of independent samples to draw from each
distribution described by the shape parameters given in rate.
rate: A tensor in which each scalar is a "rate" parameter describing the
associated poisson distribution.
Returns A tensor with shape `shape + shape(rate)`. Each slice `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for `rate[i0, i1, ...iN]`.
func RandomShuffle ¶
Randomly shuffles a tensor along its first dimension.
The tensor is shuffled along dimension 0, such that each `value[j]` is mapped to one and only one `output[i]`. For example, a mapping that might occur for a 3x2 tensor is:
```
[[1, 2],       [[5, 6],
 [3, 4],  ==>   [1, 2],
 [5, 6]]        [3, 4]]
```
Arguments:
value: The tensor to be shuffled.
Returns A tensor of same shape and type as `value`, shuffled along its first dimension.
func RandomShuffleQueueV2 ¶
func RandomShuffleQueueV2(scope *Scope, component_types []tf.DataType, optional ...RandomShuffleQueueV2Attr) (handle tf.Output)
A queue that randomizes the order of elements.
Arguments:
component_types: The type of each component in a value.
Returns The handle to the queue.
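Tying this queue together with the enqueue/dequeue ops described earlier, the following hedged Go sketch builds (but does not run) a graph that enqueues four scalar floats and dequeues two of them in random order. A real program would set appropriate capacity/seed attributes and would run the enqueue operation before the dequeue via a tensorflow.Session, as in the package example.

```go
s := NewScope()
queue := RandomShuffleQueueV2(s, []tf.DataType{tf.Float})
batch := Const(s, []float32{1, 2, 3, 4})
enqueue := QueueEnqueueManyV2(s, queue, []tf.Output{batch}) // enqueue 4 elements
n := Const(s, int32(2))
dequeued := QueueDequeueManyV2(s, queue, n, []tf.DataType{tf.Float}) // dequeue 2, shuffled
if s.Err() != nil {
	panic(s.Err())
}
_, _ = enqueue, dequeued
```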
func RandomStandardNormal ¶
func RandomStandardNormal(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...RandomStandardNormalAttr) (output tf.Output)
Outputs random values from a normal distribution.
The generated values will have mean 0 and standard deviation 1.
Arguments:
shape: The shape of the output tensor. dtype: The type of the output.
Returns A tensor of the specified shape filled with random normal values.
func RandomUniform ¶
func RandomUniform(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...RandomUniformAttr) (output tf.Output)
Outputs random values from a uniform distribution.
The generated values follow a uniform distribution in the range `[0, 1)`. The lower bound 0 is included in the range, while the upper bound 1 is excluded.
Arguments:
shape: The shape of the output tensor. dtype: The type of the output.
Returns A tensor of the specified shape filled with uniform random values.
func RandomUniformInt ¶
func RandomUniformInt(scope *Scope, shape tf.Output, minval tf.Output, maxval tf.Output, optional ...RandomUniformIntAttr) (output tf.Output)
Outputs random integers from a uniform distribution.
The generated values are uniform integers in the range `[minval, maxval)`. The lower bound `minval` is included in the range, while the upper bound `maxval` is excluded.
The random integers are slightly biased unless `maxval - minval` is an exact power of two. The bias is small for values of `maxval - minval` significantly smaller than the range of the output (either `2^32` or `2^64`).
Arguments:
shape: The shape of the output tensor. minval: 0-D. Inclusive lower bound on the generated integers. maxval: 0-D. Exclusive upper bound on the generated integers.
Returns A tensor of the specified shape filled with uniform random integers.
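A hedged Go sketch: a [2, 3] tensor of random integers drawn uniformly from [0, 10). Since 10 is not a power of two, the note above about slight bias applies. Only graph construction is shown.

```go
s := NewScope()
shape := Const(s, []int32{2, 3})
minval := Const(s, int64(0))  // inclusive lower bound
maxval := Const(s, int64(10)) // exclusive upper bound
samples := RandomUniformInt(s, shape, minval, maxval)
if s.Err() != nil {
	panic(s.Err())
}
_ = samples
```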
func Range ¶
Creates a sequence of numbers.
This operation creates a sequence of numbers that begins at `start` and extends by increments of `delta` up to but not including `limit`.
For example:
```
# 'start' is 3
# 'limit' is 18
# 'delta' is 3
tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
```
Arguments:
start: 0-D (scalar). First entry in the sequence. limit: 0-D (scalar). Upper limit of sequence, exclusive. delta: 0-D (scalar). Optional. Default is 1. Number that increments `start`.
Returns 1-D.
func RangeDataset ¶
func RangeDataset(scope *Scope, start tf.Output, stop tf.Output, step tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...RangeDatasetAttr) (handle tf.Output)
Creates a dataset with a range of values. Corresponds to python's xrange.
Arguments:
start: corresponds to start in python's xrange(). stop: corresponds to stop in python's xrange(). step: corresponds to step in python's xrange().
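A minimal Go sketch of constructing such a dataset; `tf.ScalarShape()` expresses the scalar element shape, and the concrete bounds are illustrative:
```
s := NewScope()
handle := RangeDataset(s,
	Const(s, int64(0)),  // start
	Const(s, int64(10)), // stop
	Const(s, int64(2)),  // step
	[]tf.DataType{tf.Int64},
	[]tf.Shape{tf.ScalarShape()})
if s.Err() != nil {
	panic(s.Err())
}
_ = handle // a dataset yielding 0, 2, 4, 6, 8; consume it via an iterator op
```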
func Rank ¶
Returns the rank of a tensor.
This operation returns an integer representing the rank of `input`.
For example:
```
# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
# shape of tensor 't' is [2, 2, 3]
rank(t) ==> 3
```
**Note**: The rank of a tensor is not the same as the rank of a matrix. The rank of a tensor is the number of indices required to uniquely select each element of the tensor. Rank is also known as "order", "degree", or "ndims."
func ReadVariableOp ¶
Reads the value of a variable.
The tensor returned by this operation is immutable.
The value returned by this operation is guaranteed to be influenced by all the writes on which this operation depends directly or indirectly, and to not be influenced by any of the writes which depend directly or indirectly on this operation.
Arguments:
resource: handle to the resource in which to store the variable. dtype: the dtype of the value.
func ReadVariableXlaSplitND ¶
func ReadVariableXlaSplitND(scope *Scope, resource tf.Output, T tf.DataType, N int64, num_splits []int64, optional ...ReadVariableXlaSplitNDAttr) (outputs []tf.Output)
Splits resource variable input tensor across all dimensions.
An op which splits the resource variable input tensor based on the given num_splits attribute, pads slices optionally, and returns the slices. Slices are returned in row-major order.
This op may be generated via the TPU bridge.
For example, with `input` tensor:
```
[[0, 1, 2],
 [3, 4, 5],
 [6, 7, 8]]
```
`num_splits`:
```
[2, 2]
```
and `paddings`:
```
[1, 1]
```
the expected `outputs` is:
```
[[0, 1],
 [3, 4]]

[[2, 0],
 [5, 0]]

[[6, 7],
 [0, 0]]

[[8, 0],
 [0, 0]]
```
Arguments:
resource: Resource variable of input tensor to split across all dimensions. num_splits: Number of ways to split per dimension. Shape dimensions must be evenly
divisible.
Returns Output slices based on input and num_splits defined, in row-major order.
func ReaderNumRecordsProducedV2 ¶
Returns the number of records this Reader has produced.
This is the same as the number of ReaderRead executions that have succeeded.
Arguments:
reader_handle: Handle to a Reader.
func ReaderNumWorkUnitsCompletedV2 ¶
func ReaderNumWorkUnitsCompletedV2(scope *Scope, reader_handle tf.Output) (units_completed tf.Output)
Returns the number of work units this Reader has finished processing.
Arguments:
reader_handle: Handle to a Reader.
func ReaderReadUpToV2 ¶
func ReaderReadUpToV2(scope *Scope, reader_handle tf.Output, queue_handle tf.Output, num_records tf.Output) (keys tf.Output, values tf.Output)
Returns up to `num_records` (key, value) pairs produced by a Reader.
Will dequeue from the input queue if necessary (e.g. when the Reader needs to start reading from a new file since it has finished with the previous file). It may return fewer than `num_records` even before the last batch.
Arguments:
reader_handle: Handle to a `Reader`. queue_handle: Handle to a `Queue`, with string work items. num_records: number of records to read from `Reader`.
Returns:
keys: A 1-D tensor. values: A 1-D tensor.
func ReaderReadV2 ¶
func ReaderReadV2(scope *Scope, reader_handle tf.Output, queue_handle tf.Output) (key tf.Output, value tf.Output)
Returns the next record (key, value pair) produced by a Reader.
Will dequeue from the input queue if necessary (e.g. when the Reader needs to start reading from a new file since it has finished with the previous file).
Arguments:
reader_handle: Handle to a Reader. queue_handle: Handle to a Queue, with string work items.
Returns:
key: A scalar. value: A scalar.
func ReaderResetV2 ¶
Restore a Reader to its initial clean state.
Arguments:
reader_handle: Handle to a Reader.
Returns the created operation.
func ReaderRestoreStateV2 ¶
Restore a reader to a previously saved state.
Not all Readers support being restored, so this can produce an Unimplemented error.
Arguments:
reader_handle: Handle to a Reader. state: Result of a ReaderSerializeState of a Reader with type
matching reader_handle.
Returns the created operation.
func ReaderSerializeStateV2 ¶
Produce a string tensor that encodes the state of a Reader.
Not all Readers support being serialized, so this can produce an Unimplemented error.
Arguments:
reader_handle: Handle to a Reader.
func Real ¶
Returns the real part of a complex number.
Given a tensor `input` of complex numbers, this operation returns a tensor of type `float` that is the real part of each element in `input`. All elements in `input` must be complex numbers of the form \\(a + bj\\), where *a* is the real part returned by this operation and *b* is the imaginary part.
For example:
```
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.real(input) ==> [-2.25, 3.25]
```
func RealDiv ¶
Returns x / y element-wise for real types.
If `x` and `y` are reals, this will return the floating-point division.
*NOTE*: `RealDiv` supports broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func RebatchDataset ¶
func RebatchDataset(scope *Scope, input_dataset tf.Output, num_replicas tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...RebatchDatasetAttr) (handle tf.Output)
Creates a dataset that changes the batch size.
Creates a dataset that changes the batch size of the dataset to current batch size // num_replicas.
Arguments:
input_dataset: A variant tensor representing the input dataset. num_replicas: A scalar representing the number of replicas to distribute this batch across. As
a result of this transformation the current batch size would end up being divided by this parameter.
func RebatchDatasetV2 ¶
func RebatchDatasetV2(scope *Scope, input_dataset tf.Output, batch_sizes tf.Output, drop_remainder tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
Creates a dataset that changes the batch size.
Creates a dataset that rebatches elements from `input_dataset` into new batch sizes.
Arguments:
input_dataset: A variant tensor representing the input dataset. batch_sizes: A vector of integers representing the size of batches to produce. These values
are cycled through in order.
func ReciprocalGrad ¶
Computes the gradient for the inverse of `x` wrt its input.
Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy` is the corresponding input gradient.
func RecordInput ¶
func RecordInput(scope *Scope, file_pattern string, optional ...RecordInputAttr) (records tf.Output)
Emits randomized records.
Arguments:
file_pattern: Glob pattern for the data files.
Returns A tensor of shape [batch_size].
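A minimal Go sketch; the glob is a placeholder path, and the `RecordInputBatchSize` helper is assumed to follow the package's usual `<Op><Attr>` naming:
```
s := NewScope()
records := RecordInput(s, "/data/train-*.tfrecord", RecordInputBatchSize(32))
if s.Err() != nil {
	panic(s.Err())
}
_ = records // a [batch_size] string tensor of serialized records per Session run
```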
func Recv ¶
func Recv(scope *Scope, tensor_type tf.DataType, tensor_name string, send_device string, send_device_incarnation int64, recv_device string, optional ...RecvAttr) (tensor tf.Output)
Receives the named tensor from send_device on recv_device.
Arguments:
tensor_name: The name of the tensor to receive. send_device: The name of the device sending the tensor. send_device_incarnation: The current incarnation of send_device. recv_device: The name of the device receiving the tensor.
Returns The tensor to receive.
func RecvTPUEmbeddingActivations ¶
func RecvTPUEmbeddingActivations(scope *Scope, num_outputs int64, config string) (outputs []tf.Output)
An op that receives embedding activations on the TPU.
The TPU system performs the embedding lookups and aggregations specified by the arguments to TPUEmbeddingEnqueue(Integer/Sparse/SparseTensor)Batch. The results of these aggregations are visible to the Tensorflow Graph as the outputs of a RecvTPUEmbeddingActivations op. This op returns a list containing one Tensor of activations per table specified in the model. There can be at most one RecvTPUEmbeddingActivations op in the TPU graph.
Arguments:
num_outputs: The number of output activation tensors, equal to the number of
embedding tables in the model.
config: Serialized TPUEmbeddingConfiguration proto.
Returns A TensorList of embedding activations containing one Tensor per embedding table in the model.
func ReduceJoin ¶
func ReduceJoin(scope *Scope, inputs tf.Output, reduction_indices tf.Output, optional ...ReduceJoinAttr) (output tf.Output)
Joins a string Tensor across the given dimensions.
Computes the string join across dimensions in the given string Tensor of shape `[\\(d_0, d_1, ..., d_{n-1}\\)]`. Returns a new Tensor created by joining the input strings with the given separator (default: empty string). Negative indices are counted backwards from the end, with `-1` being equivalent to `n - 1`. If indices are not specified, joins across all dimensions beginning from `n - 1` through `0`.
For example:
```python
# tensor `a` is [["a", "b"], ["c", "d"]]
tf.reduce_join(a, 0) ==> ["ac", "bd"]
tf.reduce_join(a, 1) ==> ["ab", "cd"]
tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"]
tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"]
tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]]
tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]]
tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"]
tf.reduce_join(a, [0, 1]) ==> "acbd"
tf.reduce_join(a, [1, 0]) ==> "abcd"
tf.reduce_join(a, []) ==> [["a", "b"], ["c", "d"]]
tf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==> "abcd"
```
Arguments:
inputs: The input to be joined. All reduced indices must have non-zero size. reduction_indices: The dimensions to reduce over. Dimensions are reduced in the
order specified. Omitting `reduction_indices` is equivalent to passing `[n-1, n-2, ..., 0]`. Negative indices from `-n` to `-1` are supported.
Returns Has shape equal to that of the input with reduced dimensions removed or set to `1` depending on `keep_dims`.
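The same joins can be expressed from Go; a minimal sketch, where the `ReduceJoinSeparator` helper is assumed to follow the package's usual `<Op><Attr>` naming:
```
s := NewScope()
a := Const(s, [][]string{{"a", "b"}, {"c", "d"}})
joined := ReduceJoin(s, a, Const(s, int32(0)), ReduceJoinSeparator("."))
if s.Err() != nil {
	panic(s.Err())
}
_ = joined // evaluates to ["a.c", "b.d"] when the graph is run
```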
func RegexFullMatch ¶
Check if the input matches the regex pattern.
The input is a string tensor of any shape. The pattern is a scalar string tensor which is applied to every element of the input tensor. The boolean values (True or False) of the output tensor indicate if the input matches the regex pattern provided.
The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)
Examples:
>>> tf.strings.regex_full_match(["TF lib", "lib TF"], ".*lib$")
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
>>> tf.strings.regex_full_match(["TF lib", "lib TF"], ".*TF$")
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, True])>
Arguments:
input: A string tensor of the text to be processed. pattern: A scalar string tensor containing the regular expression to match the input.
Returns A bool tensor with the same shape as `input`.
func RegexReplace ¶
func RegexReplace(scope *Scope, input tf.Output, pattern tf.Output, rewrite tf.Output, optional ...RegexReplaceAttr) (output tf.Output)
Replaces matches of the `pattern` regular expression in `input` with the replacement string provided in `rewrite`.
It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)
Arguments:
input: The text to be processed. pattern: The regular expression to be matched in the `input` strings. rewrite: The rewrite string to be substituted for the `pattern` expression where it is
matched in the `input` strings.
Returns The text after applying pattern match and rewrite substitution.
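A minimal Go sketch (the strings and pattern are illustrative):
```
s := NewScope()
input := Const(s, []string{"TensorFlow 2.0", "TensorFlow 1.15"})
pattern := Const(s, `[0-9]+\.[0-9]+`)
rewrite := Const(s, "x.y")
out := RegexReplace(s, input, pattern, rewrite)
if s.Err() != nil {
	panic(s.Err())
}
_ = out // ["TensorFlow x.y", "TensorFlow x.y"] when the graph is run
```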
func RegisterDataset ¶
func RegisterDataset(scope *Scope, dataset tf.Output, address tf.Output, protocol tf.Output, external_state_policy int64, optional ...RegisterDatasetAttr) (dataset_id tf.Output)
Registers a dataset with the tf.data service.
func RegisterDatasetV2 ¶ added in v0.2.0
func RegisterDatasetV2(scope *Scope, dataset tf.Output, address tf.Output, protocol tf.Output, external_state_policy int64, optional ...RegisterDatasetV2Attr) (dataset_id tf.Output)
Registers a dataset with the tf.data service.
func Relu ¶
Computes rectified linear: `max(features, 0)`.
See: https://en.wikipedia.org/wiki/Rectifier_(neural_networks)
Example usage:
>>> tf.nn.relu([-2., 0., 3.]).numpy()
array([0., 0., 3.], dtype=float32)
func Relu6Grad ¶
Computes rectified linear 6 gradients for a Relu6 operation.
Arguments:
gradients: The backpropagated gradients to the corresponding Relu6 operation. features: The features passed as input to the corresponding Relu6 operation, or
its output; using either one produces the same result.
Returns The gradients: `gradients * (features > 0) * (features < 6)`.
func ReluGrad ¶
Computes rectified linear gradients for a Relu operation.
Arguments:
gradients: The backpropagated gradients to the corresponding Relu operation. features: The features passed as input to the corresponding Relu operation, OR
the outputs of that operation (both work equivalently).
Returns `gradients * (features > 0)`.
func RepeatDataset ¶
func RepeatDataset(scope *Scope, input_dataset tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...RepeatDatasetAttr) (handle tf.Output)
Creates a dataset that emits the outputs of `input_dataset` `count` times.
Arguments:
count: A scalar representing the number of times that `input_dataset` should
be repeated. A value of `-1` indicates that it should be repeated infinitely.
func RequantizationRange ¶
func RequantizationRange(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output) (output_min tf.Output, output_max tf.Output)
Computes a range that covers the actual values present in a quantized tensor.
Given a quantized tensor described by `(input, input_min, input_max)`, outputs a range that covers the actual values present in that tensor. This op is typically used to produce the `requested_output_min` and `requested_output_max` for `Requantize`.
Arguments:
input_min: The float value that the minimum quantized input value represents. input_max: The float value that the maximum quantized input value represents.
Returns:
output_min: The computed min output. output_max: the computed max output.
func RequantizationRangePerChannel ¶
func RequantizationRangePerChannel(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, clip_value_max float32) (output_min tf.Output, output_max tf.Output)
Computes requantization range per channel.
Arguments:
input: The original input tensor. input_min: The minimum value of the input tensor input_max: The maximum value of the input tensor. clip_value_max: The maximum value of the output that needs to be clipped.
Example: set this to 6 for Relu6.
Returns:
output_min: The minimum value of the final output tensor output_max: The maximum value of the final output tensor.
func Requantize ¶
func Requantize(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, requested_output_min tf.Output, requested_output_max tf.Output, out_type tf.DataType) (output tf.Output, output_min tf.Output, output_max tf.Output)
Converts the quantized `input` tensor into a lower-precision `output`.
Converts the quantized `input` tensor into a lower-precision `output`, using the output range specified with `requested_output_min` and `requested_output_max`.
`[input_min, input_max]` are scalar floats that specify the range for the float interpretation of the `input` data. For example, if `input_min` is -1.0f and `input_max` is 1.0f, and we are dealing with `quint16` quantized data, then a 0 value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
Arguments:
input_min: The float value that the minimum quantized input value represents. input_max: The float value that the maximum quantized input value represents. requested_output_min: The float value that the minimum quantized output value represents. requested_output_max: The float value that the maximum quantized output value represents. out_type: The type of the output. Should be a lower bit depth than Tinput.
Returns:
output output_min: The requested_output_min value is copied into this output. output_max: The requested_output_max value is copied into this output.
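A sketch of the typical pairing with RequantizationRange described above; `qOut`, `qMin`, `qMax` stand in for the three outputs of an earlier quantized op (not shown), built on the same Scope `s`:
```
// qOut, qMin, qMax: outputs of a preceding quantized op (assumed to exist).
reqMin, reqMax := RequantizationRange(s, qOut, qMin, qMax)
out, outMin, outMax := Requantize(s, qOut, qMin, qMax, reqMin, reqMax, tf.Quint8)
if s.Err() != nil {
	panic(s.Err())
}
_, _, _ = out, outMin, outMax // quint8 tensor plus the float range it represents
```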
func RequantizePerChannel ¶
func RequantizePerChannel(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, requested_output_min tf.Output, requested_output_max tf.Output, optional ...RequantizePerChannelAttr) (output tf.Output, output_min tf.Output, output_max tf.Output)
Requantizes input with min and max values known per channel.
Arguments:
input: The original input tensor. input_min: The minimum value of the input tensor input_max: The maximum value of the input tensor. requested_output_min: The minimum value of the output tensor requested. requested_output_max: The maximum value of the output tensor requested.
Returns:
output: Output tensor. output_min: The minimum value of the final output tensor output_max: The maximum value of the final output tensor.
func Reshape ¶
Reshapes a tensor.
Given `tensor`, this operation returns a tensor that has the same values as `tensor` with shape `shape`.
If one component of 1-D tensor `shape` is the special value -1, the size of that dimension is computed so that the total size remains constant. In particular, a `shape` of `[-1]` flattens into 1-D. At most one component of `shape` may be unknown.
The `shape` must be 1-D and the operation returns a tensor with shape `shape` filled with the values of `tensor`. In this case, the number of elements implied by `shape` must be the same as the number of elements in `tensor`.
It is an error if `shape` is not 1-D.
For example:
```
# tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
# tensor 't' has shape [9]
reshape(t, [3, 3]) ==> [[1, 2, 3],
                        [4, 5, 6],
                        [7, 8, 9]]

# tensor 't' is [[[1, 1], [2, 2]],
#                [[3, 3], [4, 4]]]
# tensor 't' has shape [2, 2, 2]
reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
                        [3, 3, 4, 4]]

# tensor 't' is [[[1, 1, 1],
#                 [2, 2, 2]],
#                [[3, 3, 3],
#                 [4, 4, 4]],
#                [[5, 5, 5],
#                 [6, 6, 6]]]
# tensor 't' has shape [3, 2, 3]
# pass '[-1]' to flatten 't'
reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]

# -1 can also be used to infer the shape

# -1 is inferred to be 9:
reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
                         [4, 4, 4, 5, 5, 5, 6, 6, 6]]

# -1 is inferred to be 2:
reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
                         [4, 4, 4, 5, 5, 5, 6, 6, 6]]

# -1 is inferred to be 3:
reshape(t, [2, -1, 3]) ==> [[[1, 1, 1],
                             [2, 2, 2],
                             [3, 3, 3]],
                            [[4, 4, 4],
                             [5, 5, 5],
                             [6, 6, 6]]]

# tensor 't' is [7]
# shape `[]` reshapes to a scalar
reshape(t, []) ==> 7
```
Arguments:
shape: Defines the shape of the output tensor.
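A minimal Go sketch, assuming the generated form Reshape(scope, tensor, shape):
```
s := NewScope()
t := Const(s, []int32{1, 2, 3, 4, 5, 6})
r := Reshape(s, t, Const(s, []int32{2, -1})) // -1 is inferred to be 3
if s.Err() != nil {
	panic(s.Err())
}
_ = r // [[1, 2, 3], [4, 5, 6]] when the graph is run
```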
func ResizeArea ¶
func ResizeArea(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeAreaAttr) (resized_images tf.Output)
Resize `images` to `size` using area interpolation.
Input images can be of different types but output images are always float.
The range of pixel values for the output image might be slightly different from the range for the input image because of limited numerical precision. To guarantee an output range, for example `[0.0, 1.0]`, apply `tf.clip_by_value` to the output.
Each output pixel is computed by first transforming the pixel's footprint into the input tensor and then averaging the pixels that intersect the footprint. An input pixel's contribution to the average is weighted by the fraction of its area that intersects the footprint. This is the same as OpenCV's INTER_AREA.
Arguments:
images: 4-D with shape `[batch, height, width, channels]`. size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images.
Returns 4-D with shape `[batch, new_height, new_width, channels]`.
func ResizeBicubic ¶
func ResizeBicubic(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeBicubicAttr) (resized_images tf.Output)
Resize `images` to `size` using bicubic interpolation.
Input images can be of different types but output images are always float.
Arguments:
images: 4-D with shape `[batch, height, width, channels]`. size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images.
Returns 4-D with shape `[batch, new_height, new_width, channels]`.
func ResizeBicubicGrad ¶
func ResizeBicubicGrad(scope *Scope, grads tf.Output, original_image tf.Output, optional ...ResizeBicubicGradAttr) (output tf.Output)
Computes the gradient of bicubic interpolation.
Arguments:
grads: 4-D with shape `[batch, height, width, channels]`. original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`,
The image tensor that was resized.
Returns 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients with respect to the input image. Input image must have been float or double.
func ResizeBilinear ¶
func ResizeBilinear(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeBilinearAttr) (resized_images tf.Output)
Resize `images` to `size` using bilinear interpolation.
Input images can be of different types but output images are always float.
Arguments:
images: 4-D with shape `[batch, height, width, channels]`. size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images.
Returns 4-D with shape `[batch, new_height, new_width, channels]`.
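A minimal Go sketch (the target size is illustrative):
```
s := NewScope()
images := Placeholder(s, tf.Float) // expected shape [batch, height, width, channels]
resized := ResizeBilinear(s, images, Const(s, []int32{224, 224}))
if s.Err() != nil {
	panic(s.Err())
}
_ = resized // [batch, 224, 224, channels], always float
```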
func ResizeBilinearGrad ¶
func ResizeBilinearGrad(scope *Scope, grads tf.Output, original_image tf.Output, optional ...ResizeBilinearGradAttr) (output tf.Output)
Computes the gradient of bilinear interpolation.
Arguments:
grads: 4-D with shape `[batch, height, width, channels]`. original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`,
The image tensor that was resized.
Returns 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients with respect to the input image. Input image must have been float or double.
func ResizeNearestNeighbor ¶
func ResizeNearestNeighbor(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeNearestNeighborAttr) (resized_images tf.Output)
Resize `images` to `size` using nearest neighbor interpolation.
Arguments:
images: 4-D with shape `[batch, height, width, channels]`. size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images.
Returns 4-D with shape `[batch, new_height, new_width, channels]`.
func ResizeNearestNeighborGrad ¶
func ResizeNearestNeighborGrad(scope *Scope, grads tf.Output, size tf.Output, optional ...ResizeNearestNeighborGradAttr) (output tf.Output)
Computes the gradient of nearest neighbor interpolation.
Arguments:
grads: 4-D with shape `[batch, height, width, channels]`. size: A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The original input size.
Returns 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients with respect to the input image.
func ResourceAccumulatorApplyGradient ¶
func ResourceAccumulatorApplyGradient(scope *Scope, handle tf.Output, local_step tf.Output, gradient tf.Output) (o *tf.Operation)
Applies a gradient to a given accumulator.
Does not add if local_step is less than the accumulator's global_step.
Arguments:
handle: The handle to an accumulator. local_step: The local_step value at which the gradient was computed. gradient: A tensor of the gradient to be accumulated.
Returns the created operation.
func ResourceAccumulatorNumAccumulated ¶
Returns the number of gradients aggregated in the given accumulators.
Arguments:
handle: The handle to an accumulator.
Returns The number of gradients aggregated in the given accumulator.
func ResourceAccumulatorSetGlobalStep ¶
func ResourceAccumulatorSetGlobalStep(scope *Scope, handle tf.Output, new_global_step tf.Output) (o *tf.Operation)
Updates the accumulator with a new value for global_step.
Logs warning if the accumulator's value is already higher than new_global_step.
Arguments:
handle: The handle to an accumulator. new_global_step: The new global_step value to set.
Returns the created operation.
func ResourceAccumulatorTakeGradient ¶
func ResourceAccumulatorTakeGradient(scope *Scope, handle tf.Output, num_required tf.Output, dtype tf.DataType) (average tf.Output)
Extracts the average gradient in the given ConditionalAccumulator.
The op blocks until sufficient (i.e., more than num_required) gradients have been accumulated. If the accumulator has already aggregated more than num_required gradients, it returns the average of the accumulated gradients. Also automatically increments the recorded global_step in the accumulator by 1, and resets the aggregate to 0.
Arguments:
handle: The handle to an accumulator. num_required: Number of gradients required before we return an aggregate. dtype: The data type of accumulated gradients. Needs to correspond to the type
of the accumulator.
Returns The average of the accumulated gradients.
func ResourceApplyAdaMax ¶
func ResourceApplyAdaMax(scope *Scope, var_ tf.Output, m tf.Output, v tf.Output, beta1_power tf.Output, lr tf.Output, beta1 tf.Output, beta2 tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdaMaxAttr) (o *tf.Operation)
Update '*var' according to the AdaMax algorithm.
m_t <- beta1 * m_{t-1} + (1 - beta1) * g
v_t <- max(beta2 * v_{t-1}, abs(g))
variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon)
Arguments:
var_: Should be from a Variable(). m: Should be from a Variable(). v: Should be from a Variable(). beta1_power: Must be a scalar. lr: Scaling factor. Must be a scalar. beta1: Momentum factor. Must be a scalar. beta2: Momentum factor. Must be a scalar. epsilon: Ridge term. Must be a scalar. grad: The gradient.
Returns the created operation.
func ResourceApplyAdadelta ¶
func ResourceApplyAdadelta(scope *Scope, var_ tf.Output, accum tf.Output, accum_update tf.Output, lr tf.Output, rho tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdadeltaAttr) (o *tf.Operation)
Update '*var' according to the adadelta scheme.
accum = rho() * accum + (1 - rho()) * grad.square();
update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad;
update_accum = rho() * update_accum + (1 - rho()) * update.square();
var -= update;
Arguments:
var_: Should be from a Variable(). accum: Should be from a Variable(). accum_update: Should be from a Variable(). lr: Scaling factor. Must be a scalar. rho: Decay factor. Must be a scalar. epsilon: Constant factor. Must be a scalar. grad: The gradient.
Returns the created operation.
func ResourceApplyAdagrad ¶
func ResourceApplyAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, optional ...ResourceApplyAdagradAttr) (o *tf.Operation)
Update '*var' according to the adagrad scheme.
accum += grad * grad
var -= lr * grad * (1 / sqrt(accum))
Arguments:
var_: Should be from a Variable(). accum: Should be from a Variable(). lr: Scaling factor. Must be a scalar. grad: The gradient.
Returns the created operation.
func ResourceApplyAdagradDA ¶
func ResourceApplyAdagradDA(scope *Scope, var_ tf.Output, gradient_accumulator tf.Output, gradient_squared_accumulator tf.Output, grad tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, global_step tf.Output, optional ...ResourceApplyAdagradDAAttr) (o *tf.Operation)
Update '*var' according to the proximal adagrad scheme.
Arguments:
var_: Should be from a Variable(). gradient_accumulator: Should be from a Variable(). gradient_squared_accumulator: Should be from a Variable(). grad: The gradient. lr: Scaling factor. Must be a scalar. l1: L1 regularization. Must be a scalar. l2: L2 regularization. Must be a scalar. global_step: Training step number. Must be a scalar.
Returns the created operation.
func ResourceApplyAdagradV2 ¶
func ResourceApplyAdagradV2(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdagradV2Attr) (o *tf.Operation)
Update '*var' according to the adagrad scheme.
accum += grad * grad
var -= lr * grad * (1 / (sqrt(accum) + epsilon))
Arguments:
var_: Should be from a Variable(). accum: Should be from a Variable(). lr: Scaling factor. Must be a scalar. epsilon: Constant factor. Must be a scalar. grad: The gradient.
Returns the created operation.
func ResourceApplyAdam ¶
func ResourceApplyAdam(scope *Scope, var_ tf.Output, m tf.Output, v tf.Output, beta1_power tf.Output, beta2_power tf.Output, lr tf.Output, beta1 tf.Output, beta2 tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdamAttr) (o *tf.Operation)
Update '*var' according to the Adam algorithm.
$$\text{lr}_t := \mathrm{lr} \cdot \frac{\sqrt{1 - \beta_2^t}}{1 - \beta_1^t}$$
$$m_t := \beta_1 \cdot m_{t-1} + (1 - \beta_1) \cdot g$$
$$v_t := \beta_2 \cdot v_{t-1} + (1 - \beta_2) \cdot g^2$$
$$\text{var} := \begin{cases} \text{var} - (m_t \beta_1 + g \cdot (1 - \beta_1)) \cdot \text{lr}_t / (\sqrt{v_t} + \epsilon), & \text{if use\_nesterov} \\ \text{var} - m_t \cdot \text{lr}_t / (\sqrt{v_t} + \epsilon), & \text{otherwise} \end{cases}$$
Arguments:
var_: Should be from a Variable(). m: Should be from a Variable(). v: Should be from a Variable(). beta1_power: Must be a scalar. beta2_power: Must be a scalar. lr: Scaling factor. Must be a scalar. beta1: Momentum factor. Must be a scalar. beta2: Momentum factor. Must be a scalar. epsilon: Ridge term. Must be a scalar. grad: The gradient.
Returns the created operation.
func ResourceApplyAdamWithAmsgrad ¶
func ResourceApplyAdamWithAmsgrad(scope *Scope, var_ tf.Output, m tf.Output, v tf.Output, vhat tf.Output, beta1_power tf.Output, beta2_power tf.Output, lr tf.Output, beta1 tf.Output, beta2 tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdamWithAmsgradAttr) (o *tf.Operation)
Update '*var' according to the Adam algorithm.
$$\text{lr}_t := \mathrm{learning\_rate} \cdot \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$
$$m_t := \beta_1 \cdot m_{t-1} + (1 - \beta_1) \cdot g$$
$$v_t := \beta_2 \cdot v_{t-1} + (1 - \beta_2) \cdot g^2$$
$$\hat{v}_t := \max\{\hat{v}_{t-1}, v_t\}$$
$$\text{variable} := \text{variable} - \text{lr}_t \cdot m_t / (\sqrt{\hat{v}_t} + \epsilon)$$
Arguments:
var_: Should be from a Variable(). m: Should be from a Variable(). v: Should be from a Variable(). vhat: Should be from a Variable(). beta1_power: Must be a scalar. beta2_power: Must be a scalar. lr: Scaling factor. Must be a scalar. beta1: Momentum factor. Must be a scalar. beta2: Momentum factor. Must be a scalar. epsilon: Ridge term. Must be a scalar. grad: The gradient.
Returns the created operation.
func ResourceApplyAddSign ¶
func ResourceApplyAddSign(scope *Scope, var_ tf.Output, m tf.Output, lr tf.Output, alpha tf.Output, sign_decay tf.Output, beta tf.Output, grad tf.Output, optional ...ResourceApplyAddSignAttr) (o *tf.Operation)
Update '*var' according to the AddSign update.
m_t <- beta1 * m_{t-1} + (1 - beta1) * g
update <- (alpha + sign_decay * sign(g) * sign(m)) * g
variable <- variable - lr_t * update
Arguments:
var_: Should be from a Variable(). m: Should be from a Variable(). lr: Scaling factor. Must be a scalar. alpha: Must be a scalar. sign_decay: Must be a scalar. beta: Must be a scalar. grad: The gradient.
Returns the created operation.
func ResourceApplyCenteredRMSProp ¶
func ResourceApplyCenteredRMSProp(scope *Scope, var_ tf.Output, mg tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyCenteredRMSPropAttr) (o *tf.Operation)
Update '*var' according to the centered RMSProp algorithm.
The centered RMSProp algorithm uses an estimate of the centered second moment (i.e., the variance) for normalization, as opposed to regular RMSProp, which uses the (uncentered) second moment. This often helps with training, but is slightly more expensive in terms of computation and memory.
Note that in dense implementation of this algorithm, mg, ms, and mom will update even if the grad is zero, but in this sparse implementation, mg, ms, and mom will not update in iterations during which the grad is zero.
mean_square = decay * mean_square + (1-decay) * gradient ** 2
mean_grad = decay * mean_grad + (1-decay) * gradient
Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
mg <- rho * mg_{t-1} + (1-rho) * grad
ms <- rho * ms_{t-1} + (1-rho) * grad * grad
mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
var <- var - mom
Arguments:
var_: Should be from a Variable(). mg: Should be from a Variable(). ms: Should be from a Variable(). mom: Should be from a Variable(). lr: Scaling factor. Must be a scalar. rho: Decay rate. Must be a scalar. momentum: Momentum Scale. Must be a scalar. epsilon: Ridge term. Must be a scalar. grad: The gradient.
Returns the created operation.
func ResourceApplyFtrl ¶
func ResourceApplyFtrl(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, lr_power tf.Output, optional ...ResourceApplyFtrlAttr) (o *tf.Operation)
Update '*var' according to the Ftrl-proximal scheme.
accum_new = accum + grad * grad
linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
accum = accum_new
Arguments:
var_: Should be from a Variable(). accum: Should be from a Variable(). linear: Should be from a Variable(). grad: The gradient. lr: Scaling factor. Must be a scalar. l1: L1 regularization. Must be a scalar. l2: L2 regularization. Must be a scalar. lr_power: Scaling factor. Must be a scalar.
Returns the created operation.
func ResourceApplyFtrlV2 ¶
func ResourceApplyFtrlV2(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, l2_shrinkage tf.Output, lr_power tf.Output, optional ...ResourceApplyFtrlV2Attr) (o *tf.Operation)
Update '*var' according to the Ftrl-proximal scheme.
accum_new = accum + grad * grad
grad_with_shrinkage = grad + 2 * l2_shrinkage * var
linear += grad_with_shrinkage + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
accum = accum_new
Arguments:
var_: Should be from a Variable(). accum: Should be from a Variable(). linear: Should be from a Variable(). grad: The gradient. lr: Scaling factor. Must be a scalar. l1: L1 regularization. Must be a scalar. l2: L2 shrinkage regularization. Must be a scalar. lr_power: Scaling factor. Must be a scalar.
Returns the created operation.
func ResourceApplyGradientDescent ¶
func ResourceApplyGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, delta tf.Output, optional ...ResourceApplyGradientDescentAttr) (o *tf.Operation)
Update '*var' by subtracting 'alpha' * 'delta' from it.
Arguments:
var_: Should be from a Variable(). alpha: Scaling factor. Must be a scalar. delta: The change.
Returns the created operation.
func ResourceApplyKerasMomentum ¶
func ResourceApplyKerasMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, momentum tf.Output, optional ...ResourceApplyKerasMomentumAttr) (o *tf.Operation)
Update '*var' according to the momentum scheme.
Set use_nesterov = True if you want to use Nesterov momentum.
accum = accum * momentum - lr * grad
var += accum
Arguments:
var_: Should be from a Variable(). accum: Should be from a Variable(). lr: Scaling factor. Must be a scalar. grad: The gradient. momentum: Momentum. Must be a scalar.
Returns the created operation.
func ResourceApplyMomentum ¶
func ResourceApplyMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, momentum tf.Output, optional ...ResourceApplyMomentumAttr) (o *tf.Operation)
Update '*var' according to the momentum scheme.
Set use_nesterov = True if you want to use Nesterov momentum.
accum = accum * momentum + grad
var -= lr * accum
Arguments:
var_: Should be from a Variable(). accum: Should be from a Variable(). lr: Scaling factor. Must be a scalar. grad: The gradient. momentum: Momentum. Must be a scalar.
Returns the created operation.
func ResourceApplyPowerSign ¶
func ResourceApplyPowerSign(scope *Scope, var_ tf.Output, m tf.Output, lr tf.Output, logbase tf.Output, sign_decay tf.Output, beta tf.Output, grad tf.Output, optional ...ResourceApplyPowerSignAttr) (o *tf.Operation)
Update '*var' according to the PowerSign update.
m_t <- beta1 * m_{t-1} + (1 - beta1) * g
update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g
variable <- variable - lr_t * update
Arguments:
var_: Should be from a Variable(). m: Should be from a Variable(). lr: Scaling factor. Must be a scalar. logbase: Must be a scalar. sign_decay: Must be a scalar. beta: Must be a scalar. grad: The gradient.
Returns the created operation.
func ResourceApplyProximalAdagrad ¶
func ResourceApplyProximalAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, optional ...ResourceApplyProximalAdagradAttr) (o *tf.Operation)
Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.
accum += grad * grad
prox_v = var - lr * grad * (1 / sqrt(accum))
var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
Arguments:
var_: Should be from a Variable(). accum: Should be from a Variable(). lr: Scaling factor. Must be a scalar. l1: L1 regularization. Must be a scalar. l2: L2 regularization. Must be a scalar. grad: The gradient.
Returns the created operation.
func ResourceApplyProximalGradientDescent ¶
func ResourceApplyProximalGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, l1 tf.Output, l2 tf.Output, delta tf.Output, optional ...ResourceApplyProximalGradientDescentAttr) (o *tf.Operation)
Update '*var' as FOBOS algorithm with fixed learning rate.
prox_v = var - alpha * delta
var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
Arguments:
var_: Should be from a Variable(). alpha: Scaling factor. Must be a scalar. l1: L1 regularization. Must be a scalar. l2: L2 regularization. Must be a scalar. delta: The change.
Returns the created operation.
func ResourceApplyRMSProp ¶
func ResourceApplyRMSProp(scope *Scope, var_ tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyRMSPropAttr) (o *tf.Operation)
Update '*var' according to the RMSProp algorithm.
Note that in dense implementation of this algorithm, ms and mom will update even if the grad is zero, but in this sparse implementation, ms and mom will not update in iterations during which the grad is zero.
mean_square = decay * mean_square + (1-decay) * gradient ** 2
Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
ms <- rho * ms_{t-1} + (1-rho) * grad * grad
mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
var <- var - mom
Arguments:
var_: Should be from a Variable(). ms: Should be from a Variable(). mom: Should be from a Variable(). lr: Scaling factor. Must be a scalar. rho: Decay rate. Must be a scalar. epsilon: Ridge term. Must be a scalar. grad: The gradient.
Returns the created operation.
func ResourceConditionalAccumulator ¶
func ResourceConditionalAccumulator(scope *Scope, dtype tf.DataType, shape tf.Shape, optional ...ResourceConditionalAccumulatorAttr) (handle tf.Output)
A conditional accumulator for aggregating gradients.
The accumulator accepts gradients marked with local_step greater or equal to the most recent global_step known to the accumulator. The average can be extracted from the accumulator, provided sufficient gradients have been accumulated. Extracting the average automatically resets the aggregate to 0, and increments the global_step recorded by the accumulator. This is a resource version of ConditionalAccumulator that will work in TF2.0 with tf.cond version 2.
Arguments:
dtype: The type of the value being accumulated. shape: The shape of the values, can be [], in which case shape is unknown.
Returns The handle to the accumulator.
func ResourceCountUpTo ¶
func ResourceCountUpTo(scope *Scope, resource tf.Output, limit int64, T tf.DataType) (output tf.Output)
Increments variable pointed to by 'resource' until it reaches 'limit'.
Arguments:
resource: Should be from a scalar `Variable` node. limit: If incrementing ref would bring it above limit, instead generates an
'OutOfRange' error.
Returns A copy of the input before increment. If nothing else modifies the input, the values produced will all be distinct.
func ResourceGather ¶
func ResourceGather(scope *Scope, resource tf.Output, indices tf.Output, dtype tf.DataType, optional ...ResourceGatherAttr) (output tf.Output)
Gather slices from the variable pointed to by `resource` according to `indices`.
`indices` must be an integer tensor of any dimension (usually 0-D or 1-D). Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
```python
# Scalar indices
output[:, ..., :] = params[indices, :, ... :]

# Vector indices
output[i, :, ..., :] = params[indices[i], :, ... :]

# Higher rank indices
output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
```
func ResourceScatterAdd ¶
func ResourceScatterAdd(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation)
Adds sparse updates to the variable referenced by `resource`.
This operation computes
# Scalar indices
ref[indices, ...] += updates[...]

# Vector indices (for each i)
ref[indices[i], ...] += updates[i, ...]

# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
Duplicate entries are handled correctly: if multiple `indices` reference the same location, their contributions add.
Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
An illustration of this operation is available at https://www.tensorflow.org/images/ScatterAdd.png.
Arguments:
resource: Should be from a `Variable` node. indices: A tensor of indices into the first dimension of `ref`. updates: A tensor of updated values to add to `ref`.
Returns the created operation.
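A hedged Go sketch; it assumes VarHandleOp and AssignVariableOp (documented elsewhere in this package) to create and initialize the resource variable being scattered into:
```
s := NewScope()
ref := VarHandleOp(s, tf.Float, tf.MakeShape(8))
AssignVariableOp(s, ref, Const(s, make([]float32, 8))) // initialize to zeros
ResourceScatterAdd(s, ref,
	Const(s, []int32{4, 3, 1, 7}),      // indices
	Const(s, []float32{9, 10, 11, 12})) // updates
if s.Err() != nil {
	panic(s.Err())
}
// Both ops return *tf.Operation values; run the assignment before the scatter
// (for example, in separate Session.Run calls or via control dependencies).
```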
func ResourceScatterDiv ¶
func ResourceScatterDiv(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation)
Divides sparse updates into the variable referenced by `resource`.
This operation computes
# Scalar indices
ref[indices, ...] /= updates[...]

# Vector indices (for each i)
ref[indices[i], ...] /= updates[i, ...]

# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
Duplicate entries are handled correctly: if multiple `indices` reference the same location, their contributions multiply.
Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
An illustration of this operation is available at https://www.tensorflow.org/images/ScatterAdd.png.
Arguments:
resource: Should be from a `Variable` node. indices: A tensor of indices into the first dimension of `ref`. updates: A tensor of updated values to add to `ref`.
Returns the created operation.
func ResourceScatterMax ¶
func ResourceScatterMax(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation)
Reduces sparse updates into the variable referenced by `resource` using the `max` operation.
This operation computes
# Scalar indices
ref[indices, ...] = max(ref[indices, ...], updates[...])

# Vector indices (for each i)
ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...])

# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
Duplicate entries are handled correctly: if multiple `indices` reference the same location, their contributions are combined.
Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
An illustration of this operation is available at https://www.tensorflow.org/images/ScatterAdd.png.
Arguments:
resource: Should be from a `Variable` node. indices: A tensor of indices into the first dimension of `ref`. updates: A tensor of updated values to add to `ref`.
Returns the created operation.
func ResourceScatterMin ¶
func ResourceScatterMin(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation)
Reduces sparse updates into the variable referenced by `resource` using the `min` operation.
This operation computes
# Scalar indices
ref[indices, ...] = min(ref[indices, ...], updates[...])

# Vector indices (for each i)
ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...])

# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
Duplicate entries are handled correctly: if multiple `indices` reference the same location, their contributions are combined.
Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
An illustration of this operation is available at https://www.tensorflow.org/images/ScatterAdd.png.
Arguments:
resource: Should be from a `Variable` node. indices: A tensor of indices into the first dimension of `ref`. updates: A tensor of updated values to add to `ref`.
Returns the created operation.
func ResourceScatterMul ¶
func ResourceScatterMul(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation)
Multiplies sparse updates into the variable referenced by `resource`.
This operation computes
# Scalar indices
ref[indices, ...] *= updates[...]

# Vector indices (for each i)
ref[indices[i], ...] *= updates[i, ...]

# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
Duplicate entries are handled correctly: if multiple `indices` reference the same location, their contributions multiply.
Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
An illustration of this operation is available at https://www.tensorflow.org/images/ScatterAdd.png.
Arguments:
resource: Should be from a `Variable` node. indices: A tensor of indices into the first dimension of `ref`. updates: A tensor of updated values to add to `ref`.
Returns the created operation.
func ResourceScatterNdAdd ¶
func ResourceScatterNdAdd(scope *Scope, ref tf.Output, indices tf.Output, updates tf.Output, optional ...ResourceScatterNdAddAttr) (o *tf.Operation)
Applies sparse addition to individual values or slices in a Variable.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]] ```
For example, say we want to add 4 scattered elements to a rank-1 tensor with 8 elements. In Python, that addition would look like this:
```python
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])
add = tf.scatter_nd_add(ref, indices, updates)
with tf.Session() as sess:
  print sess.run(add)
```
The resulting update to ref would look like this:
[1, 13, 3, 14, 14, 6, 7, 20]
See `tf.scatter_nd` for more details about how to make updates to slices.
Arguments:
ref: A resource handle. Must be from a VarHandleOp. indices: A Tensor. Must be one of the following types: int32, int64.
A tensor of indices into ref.
updates: A Tensor. Must have the same type as ref. A tensor of
values to add to ref.
Returns the created operation.
func ResourceScatterNdSub ¶
func ResourceScatterNdSub(scope *Scope, ref tf.Output, indices tf.Output, updates tf.Output, optional ...ResourceScatterNdSubAttr) (o *tf.Operation)
Applies sparse subtraction to individual values or slices in a Variable.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]] ```
For example, say we want to subtract 4 scattered elements from a rank-1 tensor with 8 elements. In Python, that subtraction would look like this:
```python
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])
sub = tf.scatter_nd_sub(ref, indices, updates)
with tf.Session() as sess:
  print sess.run(sub)
```
The resulting update to ref would look like this:
[1, -9, 3, -6, -4, 6, 7, -4]
See `tf.scatter_nd` for more details about how to make updates to slices.
Arguments:
ref: A resource handle. Must be from a VarHandleOp. indices: A Tensor. Must be one of the following types: int32, int64.
A tensor of indices into ref.
updates: A Tensor. Must have the same type as ref. A tensor of
values to add to ref.
Returns the created operation.
func ResourceScatterNdUpdate ¶
func ResourceScatterNdUpdate(scope *Scope, ref tf.Output, indices tf.Output, updates tf.Output, optional ...ResourceScatterNdUpdateAttr) (o *tf.Operation)
Applies sparse `updates` to individual values or slices within a given variable according to `indices`.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. ```
For example, say we want to update 4 scattered elements in a rank-1 tensor with 8 elements. In Python, that update would look like this:
```python
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])
update = tf.scatter_nd_update(ref, indices, updates)
with tf.Session() as sess:
  print sess.run(update)
```
The resulting update to ref would look like this:
[1, 11, 3, 10, 9, 6, 7, 12]
See `tf.scatter_nd` for more details about how to make updates to slices.
Arguments:
ref: A resource handle. Must be from a VarHandleOp. indices: A Tensor. Must be one of the following types: int32, int64.
A tensor of indices into ref.
updates: A Tensor. Must have the same type as ref. A tensor of updated
values to add to ref.
Returns the created operation.
func ResourceScatterSub ¶
func ResourceScatterSub(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation)
Subtracts sparse updates from the variable referenced by `resource`.
This operation computes
# Scalar indices
ref[indices, ...] -= updates[...]

# Vector indices (for each i)
ref[indices[i], ...] -= updates[i, ...]

# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
Duplicate entries are handled correctly: if multiple `indices` reference the same location, their contributions add.
Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
An illustration of this operation is available at https://www.tensorflow.org/images/ScatterAdd.png.
Arguments:
resource: Should be from a `Variable` node. indices: A tensor of indices into the first dimension of `ref`. updates: A tensor of updated values to add to `ref`.
Returns the created operation.
func ResourceScatterUpdate ¶
func ResourceScatterUpdate(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation)
Assigns sparse updates to the variable referenced by `resource`.
This operation computes
# Scalar indices
ref[indices, ...] = updates[...]

# Vector indices (for each i)
ref[indices[i], ...] = updates[i, ...]

# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
Arguments:
resource: Should be from a `Variable` node. indices: A tensor of indices into the first dimension of `ref`. updates: A tensor of updated values to add to `ref`.
Returns the created operation.
func ResourceSparseApplyAdadelta ¶
func ResourceSparseApplyAdadelta(scope *Scope, var_ tf.Output, accum tf.Output, accum_update tf.Output, lr tf.Output, rho tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyAdadeltaAttr) (o *tf.Operation)
var: Should be from a Variable().
Arguments:
accum: Should be from a Variable(). accum_update: Should be from a Variable(). lr: Learning rate. Must be a scalar. rho: Decay factor. Must be a scalar. epsilon: Constant factor. Must be a scalar. grad: The gradient. indices: A vector of indices into the first dimension of var and accum.
Returns the created operation.
func ResourceSparseApplyAdagrad ¶
func ResourceSparseApplyAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyAdagradAttr) (o *tf.Operation)
Update relevant entries in '*var' and '*accum' according to the adagrad scheme.
That is for rows we have grad for, we update var and accum as follows:
accum += grad * grad
var -= lr * grad * (1 / sqrt(accum))
Arguments:
var_: Should be from a Variable(). accum: Should be from a Variable(). lr: Learning rate. Must be a scalar. grad: The gradient. indices: A vector of indices into the first dimension of var and accum.
Returns the created operation.
func ResourceSparseApplyAdagradDA ¶
func ResourceSparseApplyAdagradDA(scope *Scope, var_ tf.Output, gradient_accumulator tf.Output, gradient_squared_accumulator tf.Output, grad tf.Output, indices tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, global_step tf.Output, optional ...ResourceSparseApplyAdagradDAAttr) (o *tf.Operation)
Update entries in '*var' and '*accum' according to the proximal adagrad scheme.
Arguments:
var_: Should be from a Variable(). gradient_accumulator: Should be from a Variable(). gradient_squared_accumulator: Should be from a Variable(). grad: The gradient. indices: A vector of indices into the first dimension of var and accum. lr: Learning rate. Must be a scalar. l1: L1 regularization. Must be a scalar. l2: L2 regularization. Must be a scalar. global_step: Training step number. Must be a scalar.
Returns the created operation.
func ResourceSparseApplyAdagradV2 ¶
func ResourceSparseApplyAdagradV2(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyAdagradV2Attr) (o *tf.Operation)
Update relevant entries in '*var' and '*accum' according to the adagrad scheme.
That is for rows we have grad for, we update var and accum as follows:
accum += grad * grad
var -= lr * grad * (1 / sqrt(accum))
Arguments:
var_: Should be from a Variable(). accum: Should be from a Variable(). lr: Learning rate. Must be a scalar. epsilon: Constant factor. Must be a scalar. grad: The gradient. indices: A vector of indices into the first dimension of var and accum.
Returns the created operation.
func ResourceSparseApplyCenteredRMSProp ¶
func ResourceSparseApplyCenteredRMSProp(scope *Scope, var_ tf.Output, mg tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyCenteredRMSPropAttr) (o *tf.Operation)
Update '*var' according to the centered RMSProp algorithm.
The centered RMSProp algorithm uses an estimate of the centered second moment (i.e., the variance) for normalization, as opposed to regular RMSProp, which uses the (uncentered) second moment. This often helps with training, but is slightly more expensive in terms of computation and memory.
Note that in dense implementation of this algorithm, mg, ms, and mom will update even if the grad is zero, but in this sparse implementation, mg, ms, and mom will not update in iterations during which the grad is zero.
mean_square = decay * mean_square + (1-decay) * gradient ** 2
mean_grad = decay * mean_grad + (1-decay) * gradient
Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)

ms <- rho * ms_{t-1} + (1-rho) * grad * grad
mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
var <- var - mom
Arguments:
var_: Should be from a Variable().
mg: Should be from a Variable().
ms: Should be from a Variable().
mom: Should be from a Variable().
lr: Scaling factor. Must be a scalar.
rho: Decay rate. Must be a scalar.
epsilon: Ridge term. Must be a scalar.
grad: The gradient.
indices: A vector of indices into the first dimension of var, ms and mom.
Returns the created operation.
func ResourceSparseApplyFtrl ¶
func ResourceSparseApplyFtrl(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, indices tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, lr_power tf.Output, optional ...ResourceSparseApplyFtrlAttr) (o *tf.Operation)
Update relevant entries in '*var' according to the Ftrl-proximal scheme.
That is for rows we have grad for, we update var, accum and linear as follows:

accum_new = accum + grad * grad
linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
accum = accum_new
Arguments:
var_: Should be from a Variable().
accum: Should be from a Variable().
linear: Should be from a Variable().
grad: The gradient.
indices: A vector of indices into the first dimension of var and accum.
lr: Scaling factor. Must be a scalar.
l1: L1 regularization. Must be a scalar.
l2: L2 regularization. Must be a scalar.
lr_power: Scaling factor. Must be a scalar.
Returns the created operation.
func ResourceSparseApplyFtrlV2 ¶
func ResourceSparseApplyFtrlV2(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, indices tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, l2_shrinkage tf.Output, lr_power tf.Output, optional ...ResourceSparseApplyFtrlV2Attr) (o *tf.Operation)
Update relevant entries in '*var' according to the Ftrl-proximal scheme.
That is for rows we have grad for, we update var, accum and linear as follows:

grad_with_shrinkage = grad + 2 * l2_shrinkage * var
accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
linear += grad_with_shrinkage + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
accum = accum_new
Arguments:
var_: Should be from a Variable().
accum: Should be from a Variable().
linear: Should be from a Variable().
grad: The gradient.
indices: A vector of indices into the first dimension of var and accum.
lr: Scaling factor. Must be a scalar.
l1: L1 regularization. Must be a scalar.
l2: L2 shrinkage regularization. Must be a scalar.
lr_power: Scaling factor. Must be a scalar.
Returns the created operation.
func ResourceSparseApplyKerasMomentum ¶
func ResourceSparseApplyKerasMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, indices tf.Output, momentum tf.Output, optional ...ResourceSparseApplyKerasMomentumAttr) (o *tf.Operation)
Update relevant entries in '*var' and '*accum' according to the momentum scheme.
Set use_nesterov = True if you want to use Nesterov momentum.
That is for rows we have grad for, we update var and accum as follows:
accum = accum * momentum - lr * grad
var += accum
Arguments:
var_: Should be from a Variable().
accum: Should be from a Variable().
lr: Learning rate. Must be a scalar.
grad: The gradient.
indices: A vector of indices into the first dimension of var and accum.
momentum: Momentum. Must be a scalar.
Returns the created operation.
func ResourceSparseApplyMomentum ¶
func ResourceSparseApplyMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, indices tf.Output, momentum tf.Output, optional ...ResourceSparseApplyMomentumAttr) (o *tf.Operation)
Update relevant entries in '*var' and '*accum' according to the momentum scheme.
Set use_nesterov = True if you want to use Nesterov momentum.
That is for rows we have grad for, we update var and accum as follows:
accum = accum * momentum + grad
var -= lr * accum
Arguments:
var_: Should be from a Variable().
accum: Should be from a Variable().
lr: Learning rate. Must be a scalar.
grad: The gradient.
indices: A vector of indices into the first dimension of var and accum.
momentum: Momentum. Must be a scalar.
Returns the created operation.
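A plain-Go sketch of this per-row momentum rule (illustrative helper, not part of this package; the use_nesterov variant is not shown):

```go
package main

import "fmt"

// sparseMomentum applies the row update documented above,
//
//	accum = accum * momentum + grad
//	var  -= lr * accum
//
// only to the rows listed in indices.
func sparseMomentum(varVals, accum, grad [][]float64, indices []int, lr, momentum float64) {
	for i, row := range indices {
		for j, g := range grad[i] {
			accum[row][j] = accum[row][j]*momentum + g
			varVals[row][j] -= lr * accum[row][j]
		}
	}
}

func main() {
	varVals := [][]float64{{1, 1}, {1, 1}}
	accum := [][]float64{{0, 0}, {0, 0}}
	grad := [][]float64{{0.2, -0.2}}
	sparseMomentum(varVals, accum, grad, []int{1}, 0.1, 0.9)
	fmt.Println(varVals) // row 1 moves; row 0 is untouched
}
```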
func ResourceSparseApplyProximalAdagrad ¶
func ResourceSparseApplyProximalAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyProximalAdagradAttr) (o *tf.Operation)
Sparse update entries in '*var' and '*accum' according to FOBOS algorithm.
That is for rows we have grad for, we update var and accum as follows:

accum += grad * grad
prox_v = var
prox_v -= lr * grad * (1 / sqrt(accum))
var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
Arguments:
var_: Should be from a Variable().
accum: Should be from a Variable().
lr: Learning rate. Must be a scalar.
l1: L1 regularization. Must be a scalar.
l2: L2 regularization. Must be a scalar.
grad: The gradient.
indices: A vector of indices into the first dimension of var and accum.
Returns the created operation.
func ResourceSparseApplyProximalGradientDescent ¶
func ResourceSparseApplyProximalGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyProximalGradientDescentAttr) (o *tf.Operation)
Sparse update '*var' as FOBOS algorithm with fixed learning rate.
That is for rows we have grad for, we update var as follows:

prox_v = var - alpha * grad
var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
Arguments:
var_: Should be from a Variable().
alpha: Scaling factor. Must be a scalar.
l1: L1 regularization. Must be a scalar.
l2: L2 regularization. Must be a scalar.
grad: The gradient.
indices: A vector of indices into the first dimension of var and accum.
Returns the created operation.
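A plain-Go sketch of the FOBOS row update above (illustrative helper, not part of this package):

```go
package main

import (
	"fmt"
	"math"
)

// sparseProximalGD applies the row update documented above,
//
//	prox_v = var - alpha * grad
//	var    = sign(prox_v)/(1 + alpha*l2) * max(|prox_v| - alpha*l1, 0)
//
// only to the rows listed in indices.
func sparseProximalGD(varVals, grad [][]float64, indices []int, alpha, l1, l2 float64) {
	for i, row := range indices {
		for j, g := range grad[i] {
			proxV := varVals[row][j] - alpha*g
			shrunk := math.Max(math.Abs(proxV)-alpha*l1, 0)
			varVals[row][j] = math.Copysign(shrunk, proxV) / (1 + alpha*l2)
		}
	}
}

func main() {
	varVals := [][]float64{{0.5, -0.5}, {0.5, -0.5}}
	grad := [][]float64{{1, 1}}
	sparseProximalGD(varVals, grad, []int{0}, 0.1, 0.01, 0.01)
	fmt.Println(varVals) // only row 0 is updated
}
```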
func ResourceSparseApplyRMSProp ¶
func ResourceSparseApplyRMSProp(scope *Scope, var_ tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyRMSPropAttr) (o *tf.Operation)
Update '*var' according to the RMSProp algorithm.
Note that in dense implementation of this algorithm, ms and mom will update even if the grad is zero, but in this sparse implementation, ms and mom will not update in iterations during which the grad is zero.
mean_square = decay * mean_square + (1-decay) * gradient ** 2
Delta = learning_rate * gradient / sqrt(mean_square + epsilon)

ms <- rho * ms_{t-1} + (1-rho) * grad * grad
mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
var <- var - mom
Arguments:
var_: Should be from a Variable().
ms: Should be from a Variable().
mom: Should be from a Variable().
lr: Scaling factor. Must be a scalar.
rho: Decay rate. Must be a scalar.
epsilon: Ridge term. Must be a scalar.
grad: The gradient.
indices: A vector of indices into the first dimension of var, ms and mom.
Returns the created operation.
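A plain-Go sketch of the sparse RMSProp row update above (illustrative helper, not part of this package):

```go
package main

import (
	"fmt"
	"math"
)

// sparseRMSProp applies the row update documented above,
//
//	ms  = rho*ms + (1-rho)*grad*grad
//	mom = momentum*mom + lr*grad/sqrt(ms + epsilon)
//	var = var - mom
//
// only to the rows listed in indices.
func sparseRMSProp(varVals, ms, mom, grad [][]float64, indices []int, lr, rho, momentum, epsilon float64) {
	for i, row := range indices {
		for j, g := range grad[i] {
			ms[row][j] = rho*ms[row][j] + (1-rho)*g*g
			mom[row][j] = momentum*mom[row][j] + lr*g/math.Sqrt(ms[row][j]+epsilon)
			varVals[row][j] -= mom[row][j]
		}
	}
}

func main() {
	varVals := [][]float64{{1}, {1}}
	ms := [][]float64{{0}, {0}}
	mom := [][]float64{{0}, {0}}
	grad := [][]float64{{0.3}}
	sparseRMSProp(varVals, ms, mom, grad, []int{1}, 0.01, 0.9, 0.9, 1e-10)
	fmt.Println(varVals) // only row 1 changes
}
```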
func ResourceStridedSliceAssign ¶
func ResourceStridedSliceAssign(scope *Scope, ref tf.Output, begin tf.Output, end tf.Output, strides tf.Output, value tf.Output, optional ...ResourceStridedSliceAssignAttr) (o *tf.Operation)
Assign `value` to the sliced l-value reference of `ref`.
The values of `value` are assigned to the positions in the variable `ref` that are selected by the slice parameters. The slice parameters `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.
NOTE this op currently does not support broadcasting and so `value`'s shape must be exactly the shape produced by the slice of `ref`.
Returns the created operation.
func Restore ¶
func Restore(scope *Scope, file_pattern tf.Output, tensor_name tf.Output, dt tf.DataType, optional ...RestoreAttr) (tensor tf.Output)
Restores a tensor from checkpoint files.
Reads a tensor stored in one or several files. If there are several files (for instance because a tensor was saved as slices), `file_pattern` may contain wildcard symbols (`*` and `?`) in the filename portion only, not in the directory portion.
If a `file_pattern` matches several files, `preferred_shard` can be used to hint in which file the requested tensor is likely to be found. This op will first open the file at index `preferred_shard` in the list of matching files and try to restore tensors from that file. Only if some tensors or tensor slices are not found in that first file, then the Op opens all the files. Setting `preferred_shard` to match the value passed as the `shard` input of a matching `Save` Op may speed up Restore. This attribute only affects performance, not correctness. The default value -1 means files are processed in order.
See also `RestoreSlice`.
Arguments:
file_pattern: Must have a single element. The pattern of the files from
which we read the tensor.
tensor_name: Must have a single element. The name of the tensor to be
restored.
dt: The type of the tensor to be restored.
Returns The restored tensor.
func RestoreSlice ¶
func RestoreSlice(scope *Scope, file_pattern tf.Output, tensor_name tf.Output, shape_and_slice tf.Output, dt tf.DataType, optional ...RestoreSliceAttr) (tensor tf.Output)
Restores a tensor from checkpoint files.
This is like `Restore` except that restored tensor can be listed as filling only a slice of a larger tensor. `shape_and_slice` specifies the shape of the larger tensor and the slice that the restored tensor covers.
The `shape_and_slice` input has the same format as the elements of the `shapes_and_slices` input of the `SaveSlices` op.
Arguments:
file_pattern: Must have a single element. The pattern of the files from
which we read the tensor.
tensor_name: Must have a single element. The name of the tensor to be
restored.
shape_and_slice: Scalar. The shapes and slice specifications to use when
restoring a tensor.
dt: The type of the tensor to be restored.
Returns The restored tensor.
func RestoreV2 ¶
func RestoreV2(scope *Scope, prefix tf.Output, tensor_names tf.Output, shape_and_slices tf.Output, dtypes []tf.DataType) (tensors []tf.Output)
Restores tensors from a V2 checkpoint.
For backward compatibility with the V1 format, this Op currently allows restoring from a V1 checkpoint as well:
- This Op first attempts to find the V2 index file pointed to by "prefix", and if found, proceeds to read it as a V2 checkpoint;
- Otherwise the V1 read path is invoked.
Relying on this behavior is not recommended, as the ability to fall back to read V1 might be deprecated and eventually removed.
By default, restores the named tensors in full. If the caller wishes to restore specific slices of stored tensors, "shape_and_slices" should be non-empty strings and correspondingly well-formed.
Callers must ensure all the named tensors are indeed stored in the checkpoint.
Arguments:
prefix: Must have a single element. The prefix of a V2 checkpoint. tensor_names: shape {N}. The names of the tensors to be restored. shape_and_slices: shape {N}. The slice specs of the tensors to be restored.
Empty strings indicate that they are non-partitioned tensors.
dtypes: shape {N}. The list of expected dtype for the tensors. Must match
those stored in the checkpoint.
Returns shape {N}. The restored tensors, whose shapes are read from the checkpoint directly.
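A minimal graph-construction sketch in the style of the package example; the checkpoint prefix, tensor names, and dtypes below are hypothetical:

```go
s := NewScope()
prefix := Const(s, "ckpt/model")      // checkpoint prefix (hypothetical path)
names := Const(s, []string{"w", "b"}) // names of tensors stored in the checkpoint
slices := Const(s, []string{"", ""})  // empty strings: restore each tensor in full
tensors := RestoreV2(s, prefix, names, slices, []tf.DataType{tf.Float, tf.Float})
if s.Err() != nil {
	panic(s.Err())
}
_ = tensors // run in a tensorflow.Session to actually read the checkpoint
```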
func RetrieveAllTPUEmbeddingParameters ¶
func RetrieveAllTPUEmbeddingParameters(scope *Scope, NumTables int64, config string, num_shards int64, shard_id int64) (parameters []tf.Output, auxiliary1 []tf.Output, auxiliary2 []tf.Output, auxiliary3 []tf.Output, auxiliary4 []tf.Output, auxiliary5 []tf.Output, auxiliary6 []tf.Output, auxiliary7 []tf.Output)
An op that retrieves optimization parameters from embedding to host memory.
An op that retrieves optimization parameters from embedding to host memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up the correct embedding table configuration. For example, this op is used to retrieve updated parameters before saving a checkpoint. For Adagrad, auxiliary1 will contain the accumulators after running this op. For SGD, all of the auxiliary* values will be empty (0x0 tensors for that table). For FTRL, auxiliary1 will contain the accumulators and auxiliary2 will contain the linear terms. For ADAM, auxiliary1 will contain the momenta and auxiliary2 will contain the velocities.
Arguments:
NumTables: The number of embedding tables. config: A TPUEmbeddingConfiguration proto describing the
table parameters being loaded, serialized to a string.
num_shards: Number of shards into which the embedding tables are divided. shard_id: Identifier of shard for this operation.
Returns:
parameters: A list of tensors, one for each embedding table, containing the
stored embedding table parameters.
auxiliary1: A list of tensors, one for each embedding table, containing the
first auxiliary optimization parameter stored. Elements are present in the list, but have zero size, for unused optimization parameters (based on the algorithm in use for each table).
auxiliary2: A list of tensors, one for each embedding table, containing the
second auxiliary optimization parameter stored. Elements are present in the list, but have zero size, for unused optimization parameters (based on the algorithm in use for each table).
auxiliary3: A list of tensors, one for each embedding table, containing the
third auxiliary optimization parameter stored. Elements are present in the list, but have zero size, for unused optimization parameters (based on the algorithm in use for each table).
auxiliary4: A list of tensors, one for each embedding table, containing the
fourth auxiliary optimization parameter stored. Elements are present in the list, but have zero size, for unused optimization parameters (based on the algorithm in use for each table).
auxiliary5: A list of tensors, one for each embedding table, containing the
fifth auxiliary optimization parameter stored. Elements are present in the list, but have zero size, for unused optimization parameters (based on the algorithm in use for each table).
auxiliary6: A list of tensors, one for each embedding table, containing the
sixth auxiliary optimization parameter stored. Elements are present in the list, but have zero size, for unused optimization parameters (based on the algorithm in use for each table).
auxiliary7: A list of tensors, one for each embedding table, containing the
seventh auxiliary optimization parameter stored. Elements are present in the list, but have zero size, for unused optimization parameters (based on the algorithm in use for each table).
func RetrieveTPUEmbeddingADAMParameters ¶
func RetrieveTPUEmbeddingADAMParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingADAMParametersAttr) (parameters tf.Output, momenta tf.Output, velocities tf.Output)
Retrieve ADAM embedding parameters.
An op that retrieves optimization parameters from embedding to host memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up the correct embedding table configuration. For example, this op is used to retrieve updated parameters before saving a checkpoint.
Returns:
parameters: Parameter parameters updated by the ADAM optimization algorithm. momenta: Parameter momenta updated by the ADAM optimization algorithm. velocities: Parameter velocities updated by the ADAM optimization algorithm.
func RetrieveTPUEmbeddingAdadeltaParameters ¶
func RetrieveTPUEmbeddingAdadeltaParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingAdadeltaParametersAttr) (parameters tf.Output, accumulators tf.Output, updates tf.Output)
Retrieve Adadelta embedding parameters.
An op that retrieves optimization parameters from embedding to host memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up the correct embedding table configuration. For example, this op is used to retrieve updated parameters before saving a checkpoint.
Returns:
parameters: Parameter parameters updated by the Adadelta optimization algorithm. accumulators: Parameter accumulators updated by the Adadelta optimization algorithm. updates: Parameter updates updated by the Adadelta optimization algorithm.
func RetrieveTPUEmbeddingAdagradMomentumParameters ¶
func RetrieveTPUEmbeddingAdagradMomentumParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingAdagradMomentumParametersAttr) (parameters tf.Output, accumulators tf.Output, momenta tf.Output)
Retrieve Adagrad Momentum embedding parameters.
An op that retrieves optimization parameters from embedding to host memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up the correct embedding table configuration. For example, this op is used to retrieve updated parameters before saving a checkpoint.
Returns:
parameters: Parameter parameters updated by the Adagrad Momentum optimization algorithm. accumulators: Parameter accumulators updated by the Adagrad Momentum optimization algorithm. momenta: Parameter momenta updated by the Adagrad Momentum optimization algorithm.
func RetrieveTPUEmbeddingAdagradParameters ¶
func RetrieveTPUEmbeddingAdagradParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingAdagradParametersAttr) (parameters tf.Output, accumulators tf.Output)
Retrieve Adagrad embedding parameters.
An op that retrieves optimization parameters from embedding to host memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up the correct embedding table configuration. For example, this op is used to retrieve updated parameters before saving a checkpoint.
Returns:
parameters: Parameter parameters updated by the Adagrad optimization algorithm. accumulators: Parameter accumulators updated by the Adagrad optimization algorithm.
func RetrieveTPUEmbeddingCenteredRMSPropParameters ¶
func RetrieveTPUEmbeddingCenteredRMSPropParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingCenteredRMSPropParametersAttr) (parameters tf.Output, ms tf.Output, mom tf.Output, mg tf.Output)
Retrieve centered RMSProp embedding parameters.
An op that retrieves optimization parameters from embedding to host memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up the correct embedding table configuration. For example, this op is used to retrieve updated parameters before saving a checkpoint.
Returns:
parameters: Parameter parameters updated by the centered RMSProp optimization algorithm. ms: Parameter ms updated by the centered RMSProp optimization algorithm. mom: Parameter mom updated by the centered RMSProp optimization algorithm. mg: Parameter mg updated by the centered RMSProp optimization algorithm.
func RetrieveTPUEmbeddingFTRLParameters ¶
func RetrieveTPUEmbeddingFTRLParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingFTRLParametersAttr) (parameters tf.Output, accumulators tf.Output, linears tf.Output)
Retrieve FTRL embedding parameters.
An op that retrieves optimization parameters from embedding to host memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up the correct embedding table configuration. For example, this op is used to retrieve updated parameters before saving a checkpoint.
Returns:
parameters: Parameter parameters updated by the FTRL optimization algorithm. accumulators: Parameter accumulators updated by the FTRL optimization algorithm. linears: Parameter linears updated by the FTRL optimization algorithm.
func RetrieveTPUEmbeddingFrequencyEstimatorParameters ¶
func RetrieveTPUEmbeddingFrequencyEstimatorParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingFrequencyEstimatorParametersAttr) (parameters tf.Output, last_hit_step tf.Output)
Retrieve frequency estimator embedding parameters.
An op that retrieves optimization parameters from embedding to host memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up the correct embedding table configuration. For example, this op is used to retrieve updated parameters before saving a checkpoint.
Returns:
parameters: Parameter parameters updated by the frequency estimator optimization algorithm. last_hit_step: Parameter last_hit_step updated by the frequency estimator optimization
algorithm.
func RetrieveTPUEmbeddingMDLAdagradLightParameters ¶
func RetrieveTPUEmbeddingMDLAdagradLightParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingMDLAdagradLightParametersAttr) (parameters tf.Output, accumulators tf.Output, weights tf.Output, benefits tf.Output)
Retrieve MDL Adagrad Light embedding parameters.
An op that retrieves optimization parameters from embedding to host memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up the correct embedding table configuration. For example, this op is used to retrieve updated parameters before saving a checkpoint.
Returns:
parameters: Parameter parameters updated by the MDL Adagrad Light optimization algorithm. accumulators: Parameter accumulators updated by the MDL Adagrad Light optimization algorithm. weights: Parameter weights updated by the MDL Adagrad Light optimization algorithm. benefits: Parameter benefits updated by the MDL Adagrad Light optimization algorithm.
func RetrieveTPUEmbeddingMomentumParameters ¶
func RetrieveTPUEmbeddingMomentumParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingMomentumParametersAttr) (parameters tf.Output, momenta tf.Output)
Retrieve Momentum embedding parameters.
An op that retrieves optimization parameters from embedding to host memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up the correct embedding table configuration. For example, this op is used to retrieve updated parameters before saving a checkpoint.
Returns:
parameters: Parameter parameters updated by the Momentum optimization algorithm. momenta: Parameter momenta updated by the Momentum optimization algorithm.
func RetrieveTPUEmbeddingProximalAdagradParameters ¶
func RetrieveTPUEmbeddingProximalAdagradParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingProximalAdagradParametersAttr) (parameters tf.Output, accumulators tf.Output)
Retrieve proximal Adagrad embedding parameters.
An op that retrieves optimization parameters from embedding to host memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up the correct embedding table configuration. For example, this op is used to retrieve updated parameters before saving a checkpoint.
Returns:
parameters: Parameter parameters updated by the proximal Adagrad optimization algorithm. accumulators: Parameter accumulators updated by the proximal Adagrad optimization algorithm.
func RetrieveTPUEmbeddingRMSPropParameters ¶
func RetrieveTPUEmbeddingRMSPropParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingRMSPropParametersAttr) (parameters tf.Output, ms tf.Output, mom tf.Output)
Retrieve RMSProp embedding parameters.
An op that retrieves optimization parameters from embedding to host memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up the correct embedding table configuration. For example, this op is used to retrieve updated parameters before saving a checkpoint.
Returns:
parameters: Parameter parameters updated by the RMSProp optimization algorithm. ms: Parameter ms updated by the RMSProp optimization algorithm. mom: Parameter mom updated by the RMSProp optimization algorithm.
func RetrieveTPUEmbeddingStochasticGradientDescentParameters ¶
func RetrieveTPUEmbeddingStochasticGradientDescentParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr) (parameters tf.Output)
Retrieve SGD embedding parameters.
An op that retrieves optimization parameters from embedding to host memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up the correct embedding table configuration. For example, this op is used to retrieve updated parameters before saving a checkpoint.
Returns Parameter parameters updated by the stochastic gradient descent optimization algorithm.
func Reverse ¶
Reverses specific dimensions of a tensor.
Given a `tensor`, and a `bool` tensor `dims` representing the dimensions of `tensor`, this operation reverses each dimension i of `tensor` where `dims[i]` is `True`.
`tensor` can have up to 8 dimensions. The number of dimensions of `tensor` must equal the number of elements in `dims`. In other words:
`rank(tensor) = size(dims)`
For example:
```
# tensor 't' is [[[[ 0,  1,  2,  3],
#                  [ 4,  5,  6,  7],
#                  [ 8,  9, 10, 11]],
#                 [[12, 13, 14, 15],
#                  [16, 17, 18, 19],
#                  [20, 21, 22, 23]]]]
# tensor 't' shape is [1, 2, 3, 4]

# 'dims' is [False, False, False, True]
reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
                        [ 7,  6,  5,  4],
                        [11, 10,  9,  8]],
                       [[15, 14, 13, 12],
                        [19, 18, 17, 16],
                        [23, 22, 21, 20]]]]

# 'dims' is [False, True, False, False]
reverse(t, dims) ==> [[[[12, 13, 14, 15],
                        [16, 17, 18, 19],
                        [20, 21, 22, 23]],
                       [[ 0,  1,  2,  3],
                        [ 4,  5,  6,  7],
                        [ 8,  9, 10, 11]]]]

# 'dims' is [False, False, True, False]
reverse(t, dims) ==> [[[[ 8,  9, 10, 11],
                        [ 4,  5,  6,  7],
                        [ 0,  1,  2,  3]],
                       [[20, 21, 22, 23],
                        [16, 17, 18, 19],
                        [12, 13, 14, 15]]]]
```
Arguments:
tensor: Up to 8-D. dims: 1-D. The dimensions to reverse.
Returns The same shape as `tensor`.
func ReverseSequence ¶
func ReverseSequence(scope *Scope, input tf.Output, seq_lengths tf.Output, seq_dim int64, optional ...ReverseSequenceAttr) (output tf.Output)
Reverses variable length slices.
This op first slices `input` along the dimension `batch_dim`, and for each slice `i`, reverses the first `seq_lengths[i]` elements along the dimension `seq_dim`.
The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`, and `seq_lengths` must be a vector of length `input.dims[batch_dim]`.
The output slice `i` along dimension `batch_dim` is then given by input slice `i`, with the first `seq_lengths[i]` slices along dimension `seq_dim` reversed.
For example:
```
# Given this:
batch_dim = 0
seq_dim = 1
input.dims = (4, 8, ...)
seq_lengths = [7, 2, 3, 5]

# then slices of input are reversed on seq_dim, but only up to seq_lengths:
output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]
output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]
output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]

# while entries past seq_lens are copied through:
output[0, 7:, :, ...] = input[0, 7:, :, ...]
output[1, 2:, :, ...] = input[1, 2:, :, ...]
output[2, 3:, :, ...] = input[2, 3:, :, ...]
output[3, 5:, :, ...] = input[3, 5:, :, ...]
```
In contrast, if:
```
# Given this:
batch_dim = 2
seq_dim = 0
input.dims = (8, ?, 4, ...)
seq_lengths = [7, 2, 3, 5]

# then slices of input are reversed on seq_dim, but only up to seq_lengths:
output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]
output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]
output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]

# while entries past seq_lens are copied through:
output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
output[5:, :, 3, :, ...] = input[5:, :, 3, :, ...]
```
Arguments:
input: The input to reverse. seq_lengths: 1-D with length `input.dims(batch_dim)` and
`max(seq_lengths) <= input.dims(seq_dim)`
seq_dim: The dimension which is partially reversed.
Returns The partially reversed input. It has the same shape as `input`.
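A minimal graph-construction sketch in the style of the package example (input values and sequence lengths are illustrative):

```go
s := NewScope()
input := Const(s, [][]float32{
	{1, 2, 3, 4},
	{5, 6, 7, 8},
})
seqLens := Const(s, []int64{3, 2})              // reverse the first 3 elements of row 0, the first 2 of row 1
output := ReverseSequence(s, input, seqLens, 1) // seq_dim = 1, batch_dim defaults to 0
if s.Err() != nil {
	panic(s.Err())
}
_ = output // row 0 becomes [3 2 1 4], row 1 becomes [6 5 7 8] when run in a Session
```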
func ReverseV2 ¶
Reverses specific dimensions of a tensor.
Given a `tensor` and an `int32` tensor `axis` representing the set of dimensions of `tensor` to reverse, this operation reverses each dimension `i` for which there exists `j` such that `axis[j] == i`.
`tensor` can have up to 8 dimensions. `axis` may contain 0 or more entries. If an index is specified more than once, an InvalidArgument error is raised.
For example:
```
# tensor 't' is [[[[ 0,  1,  2,  3],
#                  [ 4,  5,  6,  7],
#                  [ 8,  9, 10, 11]],
#                 [[12, 13, 14, 15],
#                  [16, 17, 18, 19],
#                  [20, 21, 22, 23]]]]
# tensor 't' shape is [1, 2, 3, 4]

# 'dims' is [3] or 'dims' is [-1]
reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
                        [ 7,  6,  5,  4],
                        [11, 10,  9,  8]],
                       [[15, 14, 13, 12],
                        [19, 18, 17, 16],
                        [23, 22, 21, 20]]]]

# 'dims' is '[1]' (or 'dims' is '[-3]')
reverse(t, dims) ==> [[[[12, 13, 14, 15],
                        [16, 17, 18, 19],
                        [20, 21, 22, 23]],
                       [[ 0,  1,  2,  3],
                        [ 4,  5,  6,  7],
                        [ 8,  9, 10, 11]]]]

# 'dims' is '[2]' (or 'dims' is '[-2]')
reverse(t, dims) ==> [[[[ 8,  9, 10, 11],
                        [ 4,  5,  6,  7],
                        [ 0,  1,  2,  3]],
                       [[20, 21, 22, 23],
                        [16, 17, 18, 19],
                        [12, 13, 14, 15]]]]
```
Arguments:
tensor: Up to 8-D. axis: 1-D. The indices of the dimensions to reverse. Must be in the range
`[-rank(tensor), rank(tensor))`.
Returns The same shape as `tensor`.
func RightShift ¶
Elementwise computes the bitwise right-shift of `x` and `y`.
Performs a logical shift for unsigned integer types, and an arithmetic shift for signed integer types.
If `y` is negative, or greater than or equal to the width of `x` in bits, the result is implementation defined.
Example:
```python
import tensorflow as tf
from tensorflow.python.ops import bitwise_ops
import numpy as np
dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64]

for dtype in dtype_list:
  lhs = tf.constant([-1, -5, -3, -14], dtype=dtype)
  rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
  right_shift_result = bitwise_ops.right_shift(lhs, rhs)
  print(right_shift_result)

# This will print:
# tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int8)
# tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int16)
# tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int32)
# tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int64)

lhs = np.array([-2, 64, 101, 32], dtype=np.int8)
rhs = np.array([-1, -5, -3, -14], dtype=np.int8)
bitwise_ops.right_shift(lhs, rhs)
# <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2,  64, 101,  32], dtype=int8)>
```
func Rint ¶
Returns element-wise integer closest to x.
If the result is midway between two representable values, the even representable is chosen. For example:
```
rint(-1.5) ==> -2.0
rint(0.5000001) ==> 1.0
rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.]
```
func RiscAdd ¶
Returns x + y element-wise.
*NOTE*: `RiscAdd` does not support broadcasting.
Given two input tensors, the `tf.risc_add` operation computes the sum for every element in the tensor.
Both input and output have a range `(-inf, inf)`.
func RiscMax ¶
Returns max(x, y) element-wise.
*NOTE*: `RiscMax` does not support broadcasting.
Given two input tensors, the `tf.risc_max` operation computes the maximum for every element in the tensor.
func RngReadAndSkip ¶
func RngReadAndSkip(scope *Scope, resource tf.Output, alg tf.Output, delta tf.Output) (value tf.Output)
Advance the counter of a counter-based RNG.
The state of the RNG after `rng_read_and_skip(n)` will be the same as that after `uniform([n])` (or any other distribution). The actual increment added to the counter is an unspecified implementation choice.
In the case that the input algorithm is RNG_ALG_AUTO_SELECT, the counter in the state needs to be of size int64[2], the current maximal counter size among algorithms. In this case, this op will manage the counter as if it is a 128-bit integer with layout [lower_64bits, higher_64bits]. If an algorithm needs less than 128 bits for the counter, it should use the left portion of the int64[2]. In this way, the int64[2] is compatible with all current RNG algorithms (Philox, ThreeFry and xla::RandomAlgorithm::RNG_DEFAULT). Downstream RNG ops can thus use this counter with any RNG algorithm.
Arguments:
resource: The handle of the resource variable that stores the state of the RNG. The state consists of the counter followed by the key. alg: The RNG algorithm. delta: The amount of advancement.
Returns The old value of the resource variable, before incrementing. Since state size is algorithm-dependent, this output will be right-padded with zeros to reach shape int64[3] (the current maximal state size among algorithms).
func RngSkip ¶
func RngSkip(scope *Scope, resource tf.Output, algorithm tf.Output, delta tf.Output) (o *tf.Operation)
Advance the counter of a counter-based RNG.
The state of the RNG after `rng_skip(n)` will be the same as that after `stateful_uniform([n])` (or any other distribution). The actual increment added to the counter is an unspecified implementation detail.
Arguments:
resource: The handle of the resource variable that stores the state of the RNG. algorithm: The RNG algorithm. delta: The amount of advancement.
Returns the created operation.
func Roll ¶
Rolls the elements of a tensor along an axis.
The elements are shifted positively (towards larger indices) by the offset of `shift` along the dimension of `axis`. Negative `shift` values will shift elements in the opposite direction. Elements that roll past the last position will wrap around to the first and vice versa. Multiple shifts along multiple axes may be specified.
For example:
```
# 't' is [0, 1, 2, 3, 4]
roll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2]

# shifting along multiple dimensions
# 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
roll(t, shift=[1, -2], axis=[0, 1]) ==> [[7, 8, 9, 5, 6], [2, 3, 4, 0, 1]]

# shifting along the same axis multiple times
# 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]
```
Arguments:
shift: Dimension must be 0-D or 1-D. `shift[i]` specifies the number of places by which
elements are shifted positively (towards larger indices) along the dimension specified by `axis[i]`. Negative shifts will roll the elements in the opposite direction.
axis: Dimension must be 0-D or 1-D. `axis[i]` specifies the dimension in which the shift
`shift[i]` should occur. If the same axis is referenced more than once, the total shift for that axis will be the sum of all the shifts that belong to that axis.
Returns Has the same shape and size as the input. The elements are shifted positively (towards larger indices) by the offsets of `shift` along the dimensions of `axis`.
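A plain-Go sketch of the 1-D case described above (illustrative helper, not part of this package):

```go
package main

import "fmt"

// roll shifts the elements of a 1-D slice by `shift` positions toward larger
// indices, wrapping around, matching the 1-D example above.
func roll(t []int, shift int) []int {
	n := len(t)
	shift = ((shift % n) + n) % n // normalize negative shifts
	out := make([]int, n)
	for i, v := range t {
		out[(i+shift)%n] = v
	}
	return out
}

func main() {
	fmt.Println(roll([]int{0, 1, 2, 3, 4}, 2)) // [3 4 0 1 2]
}
```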
func Round ¶
Rounds the values of a tensor to the nearest integer, element-wise.
Rounds half to even, also known as banker's rounding. If you want to round according to the current system rounding mode, use std::rint.
func RsqrtGrad ¶
Computes the gradient for the rsqrt of `x` wrt its input.
Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy` is the corresponding input gradient.
func SampleDistortedBoundingBox ¶
func SampleDistortedBoundingBox(scope *Scope, image_size tf.Output, bounding_boxes tf.Output, optional ...SampleDistortedBoundingBoxAttr) (begin tf.Output, size tf.Output, bboxes tf.Output)
Generate a single randomly distorted bounding box for an image.
Bounding box annotations are often supplied in addition to ground-truth labels in image recognition or object localization tasks. A common technique for training such a system is to randomly distort an image while preserving its content, i.e. *data augmentation*. This Op outputs a randomly distorted localization of an object, i.e. bounding box, given an `image_size`, `bounding_boxes` and a series of constraints.
The output of this Op is a single bounding box that may be used to crop the original image. The output is returned as 3 tensors: `begin`, `size` and `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize what the bounding box looks like.
Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and height of the underlying image.
For example,
```python
# Generate a single distorted bounding box.
begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
    tf.shape(image),
    bounding_boxes=bounding_boxes)

# Draw the bounding box in an image summary.
image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
                                              bbox_for_draw)
tf.summary.image('images_with_box', image_with_box)

# Employ the bounding box to distort the image.
distorted_image = tf.slice(image, begin, size)
```
Note that if no bounding box information is available, setting `use_image_if_no_bounding_boxes = true` will assume there is a single implicit bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is false and no bounding boxes are supplied, an error is raised.
Arguments:
image_size: 1-D, containing `[height, width, channels]`. bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes
associated with the image.
Returns:
begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to
`tf.slice`.
size: 1-D, containing `[target_height, target_width, -1]`. Provide as input to
`tf.slice`.
bboxes: 3-D with shape `[1, 1, 4]` containing the distorted bounding box.
Provide as input to `tf.image.draw_bounding_boxes`.
func SampleDistortedBoundingBoxV2 ¶
func SampleDistortedBoundingBoxV2(scope *Scope, image_size tf.Output, bounding_boxes tf.Output, min_object_covered tf.Output, optional ...SampleDistortedBoundingBoxV2Attr) (begin tf.Output, size tf.Output, bboxes tf.Output)
Generate a single randomly distorted bounding box for an image.
Bounding box annotations are often supplied in addition to ground-truth labels in image recognition or object localization tasks. A common technique for training such a system is to randomly distort an image while preserving its content, i.e. *data augmentation*. This Op outputs a randomly distorted localization of an object, i.e. bounding box, given an `image_size`, `bounding_boxes` and a series of constraints.
The output of this Op is a single bounding box that may be used to crop the original image. The output is returned as 3 tensors: `begin`, `size` and `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize what the bounding box looks like.
Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and height of the underlying image.
For example,
```python
# Generate a single distorted bounding box.
begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
    tf.shape(image),
    bounding_boxes=bounding_boxes)

# Draw the bounding box in an image summary.
image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
                                              bbox_for_draw)
tf.summary.image('images_with_box', image_with_box)

# Employ the bounding box to distort the image.
distorted_image = tf.slice(image, begin, size)
```
Note that if no bounding box information is available, setting `use_image_if_no_bounding_boxes = true` will assume there is a single implicit bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is false and no bounding boxes are supplied, an error is raised.
Arguments:
image_size: 1-D, containing `[height, width, channels]`. bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes
associated with the image.
min_object_covered: The cropped area of the image must contain at least this
fraction of any bounding box supplied. The value of this parameter should be non-negative. In the case of 0, the cropped area does not need to overlap any of the bounding boxes supplied.
Returns:
begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to
`tf.slice`.
size: 1-D, containing `[target_height, target_width, -1]`. Provide as input to
`tf.slice`.
bboxes: 3-D with shape `[1, 1, 4]` containing the distorted bounding box.
Provide as input to `tf.image.draw_bounding_boxes`.
func SamplingDataset ¶
func SamplingDataset(scope *Scope, input_dataset tf.Output, rate tf.Output, seed tf.Output, seed2 tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
Creates a dataset that takes a Bernoulli sample of the contents of another dataset.
There is no transformation in the `tf.data` Python API for creating this dataset. Instead, it is created as a result of the `filter_with_random_uniform_fusion` static optimization. Whether this optimization is performed is determined by the `experimental_optimization.filter_with_random_uniform_fusion` option of `tf.data.Options`.
Arguments:
rate: A scalar representing the sample rate. Each element of `input_dataset` is
retained with this probability, independent of all other elements.
seed: A scalar representing seed of random number generator. seed2: A scalar representing seed2 of random number generator.
func Save ¶
func Save(scope *Scope, filename tf.Output, tensor_names tf.Output, data []tf.Output) (o *tf.Operation)
Saves the input tensors to disk.
The size of `tensor_names` must match the number of tensors in `data`. `data[i]` is written to `filename` with name `tensor_names[i]`.
See also `SaveSlices`.
Arguments:
filename: Must have a single element. The name of the file to which we write
the tensor.
tensor_names: Shape `[N]`. The names of the tensors to be saved. data: `N` tensors to save.
Returns the created operation.
func SaveSlices ¶
func SaveSlices(scope *Scope, filename tf.Output, tensor_names tf.Output, shapes_and_slices tf.Output, data []tf.Output) (o *tf.Operation)
Saves input tensors slices to disk.
This is like `Save` except that tensors can be listed in the saved file as being a slice of a larger tensor. `shapes_and_slices` specifies the shape of the larger tensor and the slice that this tensor covers. `shapes_and_slices` must have as many elements as `tensor_names`.
Elements of the `shapes_and_slices` input must either be:
- The empty string, in which case the corresponding tensor is saved normally.
- A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the `dimI` are the dimensions of the larger tensor and `slice-spec` specifies what part is covered by the tensor to save.
`slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1` where each `sliceI` is either:
- The string `-` meaning that the slice covers all indices of this dimension
- `start,length` where `start` and `length` are integers. In that case the slice covers `length` indices starting at `start`.
See also `Save`.
Arguments:
filename: Must have a single element. The name of the file to which we write the
tensor.
tensor_names: Shape `[N]`. The names of the tensors to be saved. shapes_and_slices: Shape `[N]`. The shapes and slice specifications to use when
saving the tensors.
data: `N` tensors to save.
Returns the created operation.
func SaveV2 ¶
func SaveV2(scope *Scope, prefix tf.Output, tensor_names tf.Output, shape_and_slices tf.Output, tensors []tf.Output) (o *tf.Operation)
Saves tensors in V2 checkpoint format.
By default, saves the named tensors in full. If the caller wishes to save specific slices of full tensors, "shape_and_slices" should be non-empty strings and correspondingly well-formed.
Arguments:
prefix: Must have a single element. The prefix of the V2 checkpoint to which we
write the tensors.
tensor_names: shape {N}. The names of the tensors to be saved. shape_and_slices: shape {N}. The slice specs of the tensors to be saved.
Empty strings indicate that they are non-partitioned tensors.
tensors: `N` tensors to save.
Returns the created operation.
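A minimal graph-construction sketch in the style of the package example; the checkpoint prefix and tensor names are hypothetical, and two constants stand in for variables:

```go
s := NewScope()
prefix := Const(s, "ckpt/model")      // checkpoint prefix (hypothetical path)
names := Const(s, []string{"w", "b"}) // names under which the tensors are saved
slices := Const(s, []string{"", ""})  // empty strings: save each tensor in full
w := Const(s, []float32{0.1, 0.2})
b := Const(s, []float32{0.0})
saveOp := SaveV2(s, prefix, names, slices, []tf.Output{w, b})
if s.Err() != nil {
	panic(s.Err())
}
_ = saveOp // run the returned *tf.Operation in a tensorflow.Session to write the files
```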
func ScalarSummary ¶
Outputs a `Summary` protocol buffer with scalar values.
The input `tags` and `values` must have the same shape. The generated summary has a summary value for each tag-value pair in `tags` and `values`.
Arguments:
tags: Tags for the summary. values: Same shape as `tags`. Values for the summary.
Returns Scalar. Serialized `Summary` protocol buffer.
func ScatterNd ¶
func ScatterNd(scope *Scope, indices tf.Output, updates tf.Output, shape tf.Output, optional ...ScatterNdAttr) (output tf.Output)
Scatters `updates` into a tensor of shape `shape` according to `indices`.
Scatter sparse `updates` according to individual values at the specified `indices`. This op returns an output tensor with the `shape` you specify. This op is the inverse of the `tf.gather_nd` operator which extracts values or slices from a given tensor.
This operation is similar to `tf.tensor_scatter_nd_add`, except that the tensor is zero-initialized. Calling `tf.scatter_nd(indices, updates, shape)` is identical to calling `tf.tensor_scatter_nd_add(tf.zeros(shape, updates.dtype), indices, updates)`
If `indices` contains duplicates, the associated `updates` are accumulated (summed) into the output tensor.
**WARNING**: For floating-point data types, the output may be nondeterministic. This is because the order in which the updates are applied is nondeterministic and when floating-point numbers are added in different orders the resulting numerical approximation error can be slightly different. However, the output will be deterministic if op determinism is enabled via `tf.config.experimental.enable_op_determinism`.
`indices` is an integer tensor containing indices into the output tensor. The last dimension of `indices` can be at most the rank of `shape`:
indices.shape[-1] <= shape.rank
The last dimension of `indices` corresponds to indices of elements (if `indices.shape[-1] = shape.rank`) or slices (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of `shape`.
`updates` is a tensor with shape:
indices.shape[:-1] + shape[indices.shape[-1]:]
The simplest form of the scatter op is to insert individual elements in a tensor by index. Consider an example where you want to insert 4 scattered elements in a rank-1 tensor with 8 elements.
(Illustration: https://www.tensorflow.org/images/ScatterNd1.png)
In Python, this scatter operation would look like this:
```python
indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])
shape = tf.constant([8])
scatter = tf.scatter_nd(indices, updates, shape)
print(scatter)
```
The resulting tensor would look like this:
[0, 11, 0, 10, 9, 0, 0, 12]
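The same scatter can be built with this package's wrapper; a minimal sketch in the style of the package example:

```go
s := NewScope()
indices := Const(s, [][]int32{{4}, {3}, {1}, {7}})
updates := Const(s, []int32{9, 10, 11, 12})
shape := Const(s, []int32{8})
scatter := ScatterNd(s, indices, updates, shape)
if s.Err() != nil {
	panic(s.Err())
}
_ = scatter // evaluates to [0, 11, 0, 10, 9, 0, 0, 12] when run in a Session
```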
You can also insert entire slices of a higher rank tensor all at once. For example, you can insert two slices in the first dimension of a rank-3 tensor with two matrices of new values.
(Illustration: https://www.tensorflow.org/images/ScatterNd2.png)
In Python, this scatter operation would look like this:
```python
indices = tf.constant([[1], [3]])
updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
                       [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]])
shape = tf.constant([4, 4, 4])
scatter = tf.scatter_nd(indices, updates, shape)
print(scatter)
```
The resulting tensor would look like this:
[[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
 [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
 [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
 [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]]
If `indices` contains any out-of-bound indices, depending on `bad_indices_policy`, the op will either return an error or ignore the out-of-bound indices. `bad_indices_policy` can be one of the following values:
- "" or "DEFAULT": raises an error on CPU and ignores the bad indices on GPU. This is because historically CPU and GPU handled errors in different ways, and the default keeps that behavior for backward compatibility.
- "ERROR": raises an error; GPU does not support this value.
- "IGNORE": ignores the bad indices; supported on both CPU and GPU.
Arguments:
indices: Tensor of indices. updates: Values to scatter into the output tensor. shape: 1-D. The shape of the output tensor.
Returns A new tensor with the given shape and updates applied according to the indices.
func ScatterNdNonAliasingAdd ¶
func ScatterNdNonAliasingAdd(scope *Scope, input tf.Output, indices tf.Output, updates tf.Output, optional ...ScatterNdNonAliasingAddAttr) (output tf.Output)
Applies sparse addition to `input` using individual values or slices
from `updates` according to indices `indices`. The updates are non-aliasing: `input` is only modified in-place if no other operations will use it. Otherwise, a copy of `input` is made. This operation has a gradient with respect to both `input` and `updates`.
`input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be an integer tensor containing indices into `input`. It must have shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to indices into elements (if `K = P`) or `(P-K)`-dimensional slices (if `K < P`) along the `K`th dimension of `input`.
`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
$$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$
For example, say we want to add 4 scattered elements to a rank-1 tensor with 8 elements. In Python, that addition would look like this:
input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])
output = tf.scatter_nd_non_aliasing_add(input, indices, updates)
with tf.Session() as sess:
  print(sess.run(output))
The resulting value `output` would look like this:
[1, 13, 3, 14, 14, 6, 7, 20]
See `tf.scatter_nd` for more details about how to make updates to slices.
Arguments:
input: A Tensor. indices: A Tensor. Must be one of the following types: `int32`, `int64`.
A tensor of indices into `input`.
updates: A Tensor. Must have the same type as `input`. A tensor of updated values
to add to `input`.
Returns A `Tensor` with the same shape as `input`, containing values of `input` updated with `updates`.
func SdcaFprint ¶
Computes fingerprints of the input strings.
Arguments:
input: vector of strings to compute fingerprints on.
Returns a (N,2) shaped matrix where N is the number of elements in the input vector. Each row contains the low and high parts of the fingerprint.
func SdcaOptimizer ¶
func SdcaOptimizer(scope *Scope, sparse_example_indices []tf.Output, sparse_feature_indices []tf.Output, sparse_feature_values []tf.Output, dense_features []tf.Output, example_weights tf.Output, example_labels tf.Output, sparse_indices []tf.Output, sparse_weights []tf.Output, dense_weights []tf.Output, example_state_data tf.Output, loss_type string, l1 float32, l2 float32, num_loss_partitions int64, num_inner_iterations int64, optional ...SdcaOptimizerAttr) (out_example_state_data tf.Output, out_delta_sparse_weights []tf.Output, out_delta_dense_weights []tf.Output)
Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for
linear models with L1 + L2 regularization. As the global optimization objective is strongly convex, the optimizer optimizes the dual objective at each step. The optimizer applies each update one example at a time. Examples are sampled uniformly, and the optimizer is learning-rate free and enjoys a linear convergence rate.
[Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf). Shai Shalev-Shwartz, Tong Zhang. 2012
$$Loss Objective = \sum f_{i} (wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w|$$
[Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508). Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan, Peter Richtarik, Martin Takac. 2015
[Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053). Dominik Csiba, Zheng Qu, Peter Richtarik. 2015
Arguments:
sparse_example_indices: a list of vectors which contain example indices. sparse_feature_indices: a list of vectors which contain feature indices. sparse_feature_values: a list of vectors which contains feature value
associated with each feature group.
dense_features: a list of matrices which contains the dense feature values. example_weights: a vector which contains the weight associated with each
example.
example_labels: a vector which contains the label/target associated with each
example.
sparse_indices: a list of vectors where each value is the indices which has
corresponding weights in sparse_weights. This field may be omitted for the dense approach.
sparse_weights: a list of vectors where each value is the weight associated with
a sparse feature group.
dense_weights: a list of vectors where the values are the weights associated
with a dense feature group.
example_state_data: a list of vectors containing the example state data. loss_type: Type of the primal loss. Currently SdcaSolver supports logistic,
squared and hinge losses.
l1: Symmetric l1 regularization strength. l2: Symmetric l2 regularization strength. num_loss_partitions: Number of partitions of the global loss function. num_inner_iterations: Number of iterations per mini-batch.
Returns:
out_example_state_data: a list of vectors containing the updated example state
data.
out_delta_sparse_weights: a list of vectors where each value is the delta
weights associated with a sparse feature group.
out_delta_dense_weights: a list of vectors where the values are the delta
weights associated with a dense feature group.
func SdcaOptimizerV2 ¶
func SdcaOptimizerV2(scope *Scope, sparse_example_indices []tf.Output, sparse_feature_indices []tf.Output, sparse_feature_values []tf.Output, dense_features []tf.Output, example_weights tf.Output, example_labels tf.Output, sparse_indices []tf.Output, sparse_weights []tf.Output, dense_weights []tf.Output, example_state_data tf.Output, loss_type string, l1 float32, l2 float32, num_loss_partitions int64, num_inner_iterations int64, optional ...SdcaOptimizerV2Attr) (out_example_state_data tf.Output, out_delta_sparse_weights []tf.Output, out_delta_dense_weights []tf.Output)
Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for
linear models with L1 + L2 regularization. As the global optimization objective is strongly convex, the optimizer optimizes the dual objective at each step. The optimizer applies each update one example at a time. Examples are sampled uniformly, and the optimizer is learning-rate free and enjoys a linear convergence rate.
[Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf). Shai Shalev-Shwartz, Tong Zhang. 2012
$$Loss Objective = \sum f_{i} (wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w|$$
[Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508). Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan, Peter Richtarik, Martin Takac. 2015
[Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053). Dominik Csiba, Zheng Qu, Peter Richtarik. 2015
Arguments:
sparse_example_indices: a list of vectors which contain example indices. sparse_feature_indices: a list of vectors which contain feature indices. sparse_feature_values: a list of vectors which contains feature value
associated with each feature group.
dense_features: a list of matrices which contains the dense feature values. example_weights: a vector which contains the weight associated with each
example.
example_labels: a vector which contains the label/target associated with each
example.
sparse_indices: a list of vectors where each value is the indices which has
corresponding weights in sparse_weights. This field may be omitted for the dense approach.
sparse_weights: a list of vectors where each value is the weight associated with
a sparse feature group.
dense_weights: a list of vectors where the values are the weights associated
with a dense feature group.
example_state_data: a list of vectors containing the example state data. loss_type: Type of the primal loss. Currently SdcaSolver supports logistic,
squared and hinge losses.
l1: Symmetric l1 regularization strength. l2: Symmetric l2 regularization strength. num_loss_partitions: Number of partitions of the global loss function. num_inner_iterations: Number of iterations per mini-batch.
Returns:
out_example_state_data: a list of vectors containing the updated example state
data.
out_delta_sparse_weights: a list of vectors where each value is the delta
weights associated with a sparse feature group.
out_delta_dense_weights: a list of vectors where the values are the delta
weights associated with a dense feature group.
func SegmentMax ¶
Computes the maximum along segments of a tensor.
Read [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) for an explanation of segments.
Computes a tensor such that \\(output_i = \max_j(data_j)\\) where `max` is over `j` such that `segment_ids[j] == i`.
If the max is empty for a given segment ID `i`, `output[i] = 0`.
Caution: On CPU, values in `segment_ids` are always validated to be sorted, and an error is thrown for indices that are not increasing. On GPU, this does not throw an error for unsorted indices. On GPU, out-of-order indices result in safe but unspecified behavior, which may include treating out-of-order indices as the same as a smaller following index.
[Illustration: https://www.tensorflow.org/images/SegmentMax.png]
For example:
>>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) >>> tf.math.segment_max(c, tf.constant([0, 0, 1])).numpy() array([[4, 3, 3, 4],
[5, 6, 7, 8]], dtype=int32)
Arguments:
segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
first dimension. Values should be sorted and can be repeated.
Caution: The values are always validated to be sorted on CPU, never validated on GPU.
Returns Has same shape as data, except for dimension 0 which has size `k`, the number of segments.
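For illustration, the following is a minimal Go sketch of the same computation using this package. The exact `SegmentMax(scope, data, segment_ids)` binding is assumed from the Arguments above and from the pattern of the V2 signature below; treat it as a sketch rather than a verbatim example.

```
s := NewScope()
data := Const(s, [][]int32{{1, 2, 3, 4}, {4, 3, 2, 1}, {5, 6, 7, 8}})
ids := Const(s, []int32{0, 0, 1}) // sorted segment ids, one per row of data
out := SegmentMax(s, data, ids)   // assumed binding; expected value: [[4 3 3 4] [5 6 7 8]]
if s.Err() != nil {
	panic(s.Err())
}
graph, err := s.Finalize()
if err != nil {
	panic(err)
}
sess, err := tf.NewSession(graph, nil)
if err != nil {
	panic(err)
}
defer sess.Close()
results, err := sess.Run(nil, []tf.Output{out}, nil)
if err != nil {
	panic(err)
}
fmt.Println(results[0].Value()) // [[4 3 3 4] [5 6 7 8]]
```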
func SegmentMaxV2 ¶ added in v0.5.0
func SegmentMaxV2(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output)
Computes the maximum along segments of a tensor.
Read [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) for an explanation of segments.
Computes a tensor such that \\(output_i = \max_j(data_j)\\) where `max` is over `j` such that `segment_ids[j] == i`.
If the maximum is empty for a given segment ID `i`, it outputs the smallest possible value for the specific numeric type, `output[i] = numeric_limits<T>::lowest()`.
Note that this op is currently only supported with jit_compile=True.
Caution: On CPU, values in `segment_ids` are always validated to be sorted, and an error is thrown for indices that are not increasing. On GPU, this does not throw an error for unsorted indices. On GPU, out-of-order indices result in safe but unspecified behavior, which may include treating out-of-order indices as the same as a smaller following index.
The only difference from SegmentMax is the additional input `num_segments`, which makes it possible to evaluate the output shape at compile time. `num_segments` should be consistent with `segment_ids`: for a 1-D `segment_ids`, max(segment_ids) should equal `num_segments` - 1. With an inconsistent `num_segments` the op still runs; the only difference is that the output takes the size of `num_segments` irrespective of the size of `segment_ids` and `data`. If `num_segments` is less than the expected output size, the last elements are ignored; if it is more, the extra output elements are assigned the smallest possible value for the specific numeric type.
For example:
>>> @tf.function(jit_compile=True) ... def test(c): ... return tf.raw_ops.SegmentMaxV2(data=c, segment_ids=tf.constant([0, 0, 1]), num_segments=2) >>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) >>> test(c).numpy() array([[4, 3, 3, 4],
[5, 6, 7, 8]], dtype=int32)
Arguments:
segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
first dimension. Values should be sorted and can be repeated. The values must be less than `num_segments`.
Caution: The values are always validated to be sorted on CPU, never validated on GPU.
Returns Has same shape as data, except for the first `segment_ids.rank` dimensions, which are replaced with a single dimension which has size `num_segments`.
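A corresponding Go sketch using the V2 signature shown above; `num_segments` is supplied as an extra scalar input so the output shape ([2, 4] here) is known statically. Because the op currently requires jit_compile=True, only graph construction is shown.

```
s := NewScope()
data := Const(s, [][]int32{{1, 2, 3, 4}, {4, 3, 2, 1}, {5, 6, 7, 8}})
ids := Const(s, []int32{0, 0, 1})
numSegments := Const(s, int32(2)) // must be >= max(ids) + 1
out := SegmentMaxV2(s, data, ids, numSegments)
if s.Err() != nil {
	panic(s.Err())
}
_ = out // output shape is [2, 4] regardless of the runtime contents of ids
```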
func SegmentMean ¶
Computes the mean along segments of a tensor.
Read [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) for an explanation of segments.
Computes a tensor such that \\(output_i = \frac{\sum_j data_j}{N}\\) where `mean` is over `j` such that `segment_ids[j] == i` and `N` is the total number of values summed.
If the mean is empty for a given segment ID `i`, `output[i] = 0`.
Caution: On CPU, values in `segment_ids` are always validated to be sorted, and an error is thrown for indices that are not increasing. On GPU, this does not throw an error for unsorted indices. On GPU, out-of-order indices result in safe but unspecified behavior, which may include treating out-of-order indices as a smaller following index when computing the numerator of the mean.
[Illustration: https://www.tensorflow.org/images/SegmentMean.png]
For example:
>>> c = tf.constant([[1.0,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) >>> tf.math.segment_mean(c, tf.constant([0, 0, 1])).numpy() array([[2.5, 2.5, 2.5, 2.5],
[5., 6., 7., 8.]], dtype=float32)
Arguments:
segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
first dimension. Values should be sorted and can be repeated.
Caution: The values are always validated to be sorted on CPU, never validated on GPU.
Returns Has same shape as data, except for dimension 0 which has size `k`, the number of segments.
func SegmentMin ¶
Computes the minimum along segments of a tensor.
Read [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) for an explanation of segments.
Computes a tensor such that \\(output_i = \min_j(data_j)\\) where `min` is over `j` such that `segment_ids[j] == i`.
If the min is empty for a given segment ID `i`, `output[i] = 0`.
Caution: On CPU, values in `segment_ids` are always validated to be sorted, and an error is thrown for indices that are not increasing. On GPU, this does not throw an error for unsorted indices. On GPU, out-of-order indices result in safe but unspecified behavior, which may include treating out-of-order indices as the same as a smaller following index.
[Illustration: https://www.tensorflow.org/images/SegmentMin.png]
For example:
>>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) >>> tf.math.segment_min(c, tf.constant([0, 0, 1])).numpy() array([[1, 2, 2, 1],
[5, 6, 7, 8]], dtype=int32)
Arguments:
segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
first dimension. Values should be sorted and can be repeated.
Caution: The values are always validated to be sorted on CPU, never validated on GPU.
Returns Has same shape as data, except for dimension 0 which has size `k`, the number of segments.
func SegmentMinV2 ¶ added in v0.5.0
func SegmentMinV2(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output)
Computes the minimum along segments of a tensor.
Read [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) for an explanation of segments.
Computes a tensor such that \\(output_i = \min_j(data_j)\\) where `min` is over `j` such that `segment_ids[j] == i`.
If the minimum is empty for a given segment ID `i`, it outputs the largest possible value for the specific numeric type, `output[i] = numeric_limits<T>::max()`.
Note that this op is currently only supported with jit_compile=True.
Caution: On CPU, values in `segment_ids` are always validated to be sorted, and an error is thrown for indices that are not increasing. On GPU, this does not throw an error for unsorted indices. On GPU, out-of-order indices result in safe but unspecified behavior, which may include treating out-of-order indices as the same as a smaller following index.
The only difference from SegmentMin is the additional input `num_segments`, which makes it possible to evaluate the output shape at compile time. `num_segments` should be consistent with `segment_ids`: for a 1-D `segment_ids`, max(segment_ids) should equal `num_segments` - 1. With an inconsistent `num_segments` the op still runs; the only difference is that the output takes the size of `num_segments` irrespective of the size of `segment_ids` and `data`. If `num_segments` is less than the expected output size, the last elements are ignored; if it is more, the extra output elements are assigned the largest possible value for the specific numeric type.
For example:
>>> @tf.function(jit_compile=True) ... def test(c): ... return tf.raw_ops.SegmentMinV2(data=c, segment_ids=tf.constant([0, 0, 1]), num_segments=2) >>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) >>> test(c).numpy() array([[1, 2, 2, 1],
[5, 6, 7, 8]], dtype=int32)
Arguments:
segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
first dimension. Values should be sorted and can be repeated. The values must be less than `num_segments`.
Caution: The values are always validated to be sorted on CPU, never validated on GPU.
Returns Has same shape as data, except for the first `segment_ids.rank` dimensions, which are replaced with a single dimension which has size `num_segments`.
func SegmentProd ¶
Computes the product along segments of a tensor.
Read [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) for an explanation of segments.
Computes a tensor such that \\(output_i = \prod_j data_j\\) where the product is over `j` such that `segment_ids[j] == i`.
If the product is empty for a given segment ID `i`, `output[i] = 1`.
Caution: On CPU, values in `segment_ids` are always validated to be sorted, and an error is thrown for indices that are not increasing. On GPU, this does not throw an error for unsorted indices. On GPU, out-of-order indices result in safe but unspecified behavior, which may include treating out-of-order indices as the same as a smaller following index.
[Illustration: https://www.tensorflow.org/images/SegmentProd.png]
For example:
>>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) >>> tf.math.segment_prod(c, tf.constant([0, 0, 1])).numpy() array([[4, 6, 6, 4],
[5, 6, 7, 8]], dtype=int32)
Arguments:
segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
first dimension. Values should be sorted and can be repeated.
Caution: The values are always validated to be sorted on CPU, never validated on GPU.
Returns Has same shape as data, except for dimension 0 which has size `k`, the number of segments.
func SegmentProdV2 ¶ added in v0.4.0
func SegmentProdV2(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output)
Computes the product along segments of a tensor.
Read [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) for an explanation of segments.
Computes a tensor such that \\(output_i = \prod_j data_j\\) where the product is over `j` such that `segment_ids[j] == i`.
If the product is empty for a given segment ID `i`, `output[i] = 1`.
Note that this op is currently only supported with jit_compile=True.
The only difference from SegmentProd is the additional input `num_segments`, which makes it possible to evaluate the output shape at compile time. `num_segments` should be consistent with `segment_ids`: for a 1-D `segment_ids`, max(segment_ids) should equal `num_segments` - 1. With an inconsistent `num_segments` the op still runs; the only difference is that the output takes the size of `num_segments` irrespective of the size of `segment_ids` and `data`. If `num_segments` is less than the expected output size, the last elements are ignored; if it is more, the extra output elements are assigned 1.
For example:
>>> @tf.function(jit_compile=True) ... def test(c): ... return tf.raw_ops.SegmentProdV2(data=c, segment_ids=tf.constant([0, 0, 1]), num_segments=2) >>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) >>> test(c).numpy() array([[4, 6, 6, 4],
[5, 6, 7, 8]], dtype=int32)
Arguments:
segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
first dimension. Values should be sorted and can be repeated. The values must be less than `num_segments`.
Caution: The values are always validated to be sorted on CPU, never validated on GPU.
Returns Has same shape as data, except for the first `segment_ids.rank` dimensions, which are replaced with a single dimension which has size `num_segments`.
func SegmentSum ¶
Computes the sum along segments of a tensor.
Read [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) for an explanation of segments.
Computes a tensor such that \\(output_i = \sum_j data_j\\) where sum is over `j` such that `segment_ids[j] == i`.
If the sum is empty for a given segment ID `i`, `output[i] = 0`.
Caution: On CPU, values in `segment_ids` are always validated to be sorted, and an error is thrown for indices that are not increasing. On GPU, this does not throw an error for unsorted indices. On GPU, out-of-order indices result in safe but unspecified behavior, which may include treating out-of-order indices as the same as a smaller following index.
[Illustration: https://www.tensorflow.org/images/SegmentSum.png]
For example:
>>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) >>> tf.math.segment_sum(c, tf.constant([0, 0, 1])).numpy() array([[5, 5, 5, 5],
[5, 6, 7, 8]], dtype=int32)
Arguments:
segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
first dimension. Values should be sorted and can be repeated.
Caution: The values are always validated to be sorted on CPU, never validated on GPU.
Returns Has same shape as data, except for dimension 0 which has size `k`, the number of segments.
func SegmentSumV2 ¶ added in v0.4.0
func SegmentSumV2(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output)
Computes the sum along segments of a tensor.
Read [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) for an explanation of segments.
Computes a tensor such that \\(output_i = \sum_j data_j\\) where sum is over `j` such that `segment_ids[j] == i`.
If the sum is empty for a given segment ID `i`, `output[i] = 0`.
Note that this op is currently only supported with jit_compile=True.
Arguments:
segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
first dimension. Values should be sorted and can be repeated. The values must be less than `num_segments`.
Caution: The values are always validated to be sorted on CPU, never validated on GPU.
Returns Has same shape as data, except for the first `segment_ids.rank` dimensions, which are replaced with a single dimension which has size `num_segments`.
func Select ¶
Selects elements from `x` or `y`, depending on `condition`.
The `x` and `y` tensors must have the same shape, and the output will also have that shape.
The `condition` tensor must be a scalar if `x` and `y` are scalars. If `x` and `y` are vectors or higher rank, then `condition` must be either a scalar, a vector with size matching the first dimension of `x`, or must have the same shape as `x`.
The `condition` tensor acts as a mask that chooses, based on the value at each element, whether the corresponding element / row in the output should be taken from `x` (if true) or `y` (if false).
If `condition` is a vector and `x` and `y` are higher rank matrices, then it chooses which row (outer dimension) to copy from `x` and `y`. If `condition` has the same shape as `x` and `y`, then it chooses which element to copy from `x` and `y`.
For example:
```python # 'condition' tensor is [[True, False] # [False, True]] # 't' is [[1, 2], # [3, 4]] # 'e' is [[5, 6], # [7, 8]] select(condition, t, e) # => [[1, 6], [7, 4]]
# 'condition' tensor is [True, False] # 't' is [[1, 2], # [3, 4]] # 'e' is [[5, 6], # [7, 8]] select(condition, t, e) ==> [[1, 2],
[7, 8]]
```
Arguments:
x: A `Tensor` which may have the same shape as `condition`.
If `condition` is rank 1, `x` may have higher rank, but its first dimension must match the size of `condition`.
y: A `Tensor` with the same type and shape as `x`.
Returns A `Tensor` with the same type and shape as `x` and `y`.
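As a rough Go sketch of the row-selection case above, assuming a `Select(scope, condition, x, y)` binding consistent with the Arguments (the signature is not printed in this section):

```
s := NewScope()
cond := Const(s, []bool{true, false}) // one entry per row of x and y
x := Const(s, [][]int32{{1, 2}, {3, 4}})
y := Const(s, [][]int32{{5, 6}, {7, 8}})
out := Select(s, cond, x, y) // expected value: [[1 2] [7 8]]
if s.Err() != nil {
	panic(s.Err())
}
_ = out
```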
func SelfAdjointEig ¶
Computes the Eigen Decomposition of a batch of square self-adjoint matrices.
DEPRECATED at GraphDef version 11: Use SelfAdjointEigV2 instead.
The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices, with the same constraints as the single matrix SelfAdjointEig.
The result is a [..., M+1, M] matrix with [..., 0,:] containing the eigenvalues, and subsequent [...,1:, :] containing the eigenvectors. The eigenvalues are sorted in non-decreasing order.
Arguments:
input: Shape is `[..., M, M]`.
Returns Shape is `[..., M+1, M]`.
func SelfAdjointEigV2 ¶
func SelfAdjointEigV2(scope *Scope, input tf.Output, optional ...SelfAdjointEigV2Attr) (e tf.Output, v tf.Output)
Computes the eigen decomposition of one or more square self-adjoint matrices.
Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The eigenvalues are sorted in non-decreasing order.
```python # a is a tensor. # e is a tensor of eigenvalues. # v is a tensor of eigenvectors. e, v = self_adjoint_eig(a) e = self_adjoint_eig(a, compute_v=False) ```
Arguments:
input: `Tensor` input of shape `[N, N]`.
Returns:
e: Eigenvalues. Shape is `[N]`. v: Eigenvectors. Shape is `[N, N]`.
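A minimal Go sketch using the signature above; the matrix values are illustrative, and the `SelfAdjointEigV2ComputeV` attribute name mentioned in the comment is assumed from this package's naming convention.

```
s := NewScope()
a := Const(s, [][]float32{{2, 1}, {1, 2}})
// e: eigenvalues in non-decreasing order, here [1, 3]; v: the eigenvectors.
// To skip the eigenvector computation, an optional attribute such as
// SelfAdjointEigV2ComputeV(false) would be passed (name assumed).
e, v := SelfAdjointEigV2(s, a)
if s.Err() != nil {
	panic(s.Err())
}
_, _ = e, v
```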
func Selu ¶
Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)`
if `features < 0`, `scale * features` otherwise.
To be used together with `initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')`. For correct dropout, use `tf.contrib.nn.alpha_dropout`.
See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
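Written out piecewise, with the constants fixed by the paper (approximately scale ≈ 1.0507 and alpha ≈ 1.6733):

$$selu(x) = scale \cdot x \quad (x > 0), \qquad selu(x) = scale \cdot \alpha \, (e^{x} - 1) \quad (x \le 0)$$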
func SeluGrad ¶
Computes gradients for the scaled exponential linear (Selu) operation.
Arguments:
gradients: The backpropagated gradients to the corresponding Selu operation. outputs: The outputs of the corresponding Selu operation.
Returns The gradients: `gradients * (outputs + scale * alpha)` if outputs < 0, `scale * gradients` otherwise.
func Send ¶
func Send(scope *Scope, tensor tf.Output, tensor_name string, send_device string, send_device_incarnation int64, recv_device string, optional ...SendAttr) (o *tf.Operation)
Sends the named tensor from send_device to recv_device.
Arguments:
tensor: The tensor to send. tensor_name: The name of the tensor to send. send_device: The name of the device sending the tensor. send_device_incarnation: The current incarnation of send_device. recv_device: The name of the device receiving the tensor.
Returns the created operation.
func SendTPUEmbeddingGradients ¶
func SendTPUEmbeddingGradients(scope *Scope, inputs []tf.Output, learning_rates []tf.Output, config string) (o *tf.Operation)
Performs gradient updates of embedding tables.
Arguments:
inputs: A TensorList of gradients with which to update embedding tables.
This argument has the same length and shapes as the return value of RecvTPUEmbeddingActivations, but contains gradients of the model's loss with respect to the embedding activations. The embedding tables are updated from these gradients via the optimizer specified in the TPU embedding configuration given to tpu.initialize_system.
learning_rates: A TensorList of float32 scalars, one for each dynamic learning
rate tag: see the comments in //third_party/tensorflow/core/protobuf/tpu/optimization_parameters.proto. Multiple tables can share the same dynamic learning rate tag as specified in the configuration. If the learning rates for all tables are constant, this list should be empty.
config: Serialized TPUEmbeddingConfiguration proto.
Returns the created operation.
func SerializeIterator ¶
func SerializeIterator(scope *Scope, resource_handle tf.Output, optional ...SerializeIteratorAttr) (serialized tf.Output)
Converts the given `resource_handle` representing an iterator to a variant tensor.
Arguments:
resource_handle: A handle to an iterator resource.
Returns A variant tensor storing the state of the iterator contained in the resource.
func SerializeManySparse ¶
func SerializeManySparse(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...SerializeManySparseAttr) (serialized_sparse tf.Output)
Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object.
The `SparseTensor` must have rank `R` greater than 1, and the first dimension is treated as the minibatch dimension. Elements of the `SparseTensor` must be sorted in increasing order of this first dimension. The serialized `SparseTensor` objects going into each row of `serialized_sparse` will have rank `R-1`.
The minibatch size `N` is extracted from `sparse_shape[0]`.
Arguments:
sparse_indices: 2-D. The `indices` of the minibatch `SparseTensor`. sparse_values: 1-D. The `values` of the minibatch `SparseTensor`. sparse_shape: 1-D. The `shape` of the minibatch `SparseTensor`.
func SerializeSparse ¶
func SerializeSparse(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...SerializeSparseAttr) (serialized_sparse tf.Output)
Serialize a `SparseTensor` into a `[3]` `Tensor` object.
Arguments:
sparse_indices: 2-D. The `indices` of the `SparseTensor`. sparse_values: 1-D. The `values` of the `SparseTensor`. sparse_shape: 1-D. The `shape` of the `SparseTensor`.
func SerializeTensor ¶
Transforms a Tensor into a serialized TensorProto proto.
Arguments:
tensor: A Tensor of type `T`.
Returns A serialized TensorProto proto of the input tensor.
func SetSize ¶
func SetSize(scope *Scope, set_indices tf.Output, set_values tf.Output, set_shape tf.Output, optional ...SetSizeAttr) (size tf.Output)
Number of unique elements along last dimension of input `set`.
Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`, and `set_shape`. The last dimension contains values in a set; duplicates are allowed but ignored.
If `validate_indices` is `True`, this op validates the order and range of `set` indices. Setting it to `False` while passing invalid arguments results in undefined behavior.
Arguments:
set_indices: 2D `Tensor`, indices of a `SparseTensor`. set_values: 1D `Tensor`, values of a `SparseTensor`. set_shape: 1D `Tensor`, shape of a `SparseTensor`.
Returns For `set` ranked `n`, this is a `Tensor` with rank `n-1`, and the same 1st `n-1` dimensions as `set`. Each value is the number of unique elements in the corresponding `[0...n-1]` dimension of `set`.
func Shape ¶
Returns the shape of a tensor.
This operation returns a 1-D integer tensor representing the shape of `input`.
For example:
``` # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] shape(t) ==> [2, 2, 3] ```
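The same computation in this package's Go API might look like the following sketch (graph construction only):

```
s := NewScope()
t := Const(s, [][][]int32{{{1, 1, 1}, {2, 2, 2}}, {{3, 3, 3}, {4, 4, 4}}})
shape := Shape(s, t) // evaluates to [2 2 3]
if s.Err() != nil {
	panic(s.Err())
}
_ = shape
```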
func ShapeN ¶
Returns shape of tensors.
This operation returns N 1-D integer tensors representing the shape of each `input[i]`.
func ShardDataset ¶
func ShardDataset(scope *Scope, input_dataset tf.Output, num_shards tf.Output, index tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ShardDatasetAttr) (handle tf.Output)
Creates a `Dataset` that includes only 1/`num_shards` of this dataset.
Arguments:
num_shards: An integer representing the number of shards operating in parallel. index: An integer representing the current worker index.
func ShardedFilename ¶
func ShardedFilename(scope *Scope, basename tf.Output, shard tf.Output, num_shards tf.Output) (filename tf.Output)
Generate a sharded filename. The filename is printf-formatted as
`%s-%05d-of-%05d` from `basename`, `shard`, and `num_shards`.
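For instance, the format above yields names like the following (a plain-Go illustration of the naming scheme, not a call to the op; the basename and counts are made up):

```
name := fmt.Sprintf("%s-%05d-of-%05d", "train", 3, 100)
fmt.Println(name) // train-00003-of-00100
```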
func ShardedFilespec ¶
Generate a glob pattern matching all sharded file names.
func ShuffleAndRepeatDataset ¶
func ShuffleAndRepeatDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output, seed tf.Output, seed2 tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ShuffleAndRepeatDatasetAttr) (handle tf.Output)
Creates a dataset that shuffles and repeats elements from `input_dataset`
pseudorandomly.
Arguments:
buffer_size: The number of output elements to buffer in an iterator over
this dataset. Compare with the `min_after_dequeue` attr when creating a `RandomShuffleQueue`.
seed: A scalar seed for the random number generator. If either `seed` or
`seed2` is set to be non-zero, the random number generator is seeded by the given seed. Otherwise, a random seed is used.
seed2: A second scalar seed to avoid seed collision. count: A scalar representing the number of times the underlying dataset
should be repeated. The default is `-1`, which results in infinite repetition.
func ShuffleDataset ¶
func ShuffleDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output, seed tf.Output, seed2 tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ShuffleDatasetAttr) (handle tf.Output)
Creates a dataset that shuffles elements from `input_dataset` pseudorandomly.
Arguments:
buffer_size: The number of output elements to buffer in an iterator over
this dataset. Compare with the `min_after_dequeue` attr when creating a `RandomShuffleQueue`.
seed: A scalar seed for the random number generator. If either `seed` or
`seed2` is set to be non-zero, the random number generator is seeded by the given seed. Otherwise, a random seed is used.
seed2: A second scalar seed to avoid seed collision.
func ShutdownDistributedTPU ¶
Shuts down a running distributed TPU system.
The op returns an error if no system is running.
Returns the created operation.
func ShutdownTPUSystem ¶ added in v0.2.0
An op that shuts down the TPU system.
Returns A boolean that indicates if the shut down process succeeds.
func SigmoidGrad ¶
Computes the gradient of the sigmoid of `x` wrt its input.
Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and `dy` is the corresponding input gradient.
func Sign ¶
Returns an element-wise indication of the sign of a number.
`y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.
For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.
Example usage: >>> tf.math.sign([0., 2., -3.]) <tf.Tensor: shape=(3,), dtype=float32, numpy=array([ 0., 1., -1.], dtype=float32)>
func Sin ¶
Computes sine of x element-wise.
Given an input tensor, this function computes sine of every element in the tensor. Input range is `(-inf, inf)` and output range is `[-1,1]`. ```python x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10, float("inf")]) tf.math.sin(x) ==> [nan -0.4121185 -0.47942555 0.84147096 0.9320391 -0.87329733 -0.54402107 nan] ```
func Sinh ¶
Computes hyperbolic sine of x element-wise.
Given an input tensor, this function computes hyperbolic sine of every element in the tensor. Input range is `[-inf,inf]` and output range is `[-inf,inf]`. ```python x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")]) tf.math.sinh(x) ==> [-inf -4.0515420e+03 -5.2109528e-01 1.1752012e+00 1.5094614e+00 3.6268604e+00 1.1013232e+04 inf] ```
func Size ¶
Returns the size of a tensor.
This operation returns an integer representing the number of elements in `input`.
For example:
``` # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] size(t) ==> 12 ```
func SkipDataset ¶
func SkipDataset(scope *Scope, input_dataset tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...SkipDatasetAttr) (handle tf.Output)
Creates a dataset that skips `count` elements from the `input_dataset`.
Arguments:
count: A scalar representing the number of elements from the `input_dataset`
that should be skipped. If count is -1, skips everything.
func Skipgram ¶
func Skipgram(scope *Scope, filename string, batch_size int64, optional ...SkipgramAttr) (vocab_word tf.Output, vocab_freq tf.Output, words_per_epoch tf.Output, current_epoch tf.Output, total_words_processed tf.Output, examples tf.Output, labels tf.Output)
Parses a text file and creates a batch of examples.
DEPRECATED at GraphDef version 19: Moving word2vec into tensorflow_models/tutorials and deprecating its ops here as a result
Arguments:
filename: The corpus's text file name. batch_size: The size of produced batch.
Returns:
vocab_word: A vector of words in the corpus. vocab_freq: Frequencies of words. Sorted in the non-ascending order. words_per_epoch: Number of words per epoch in the data file. current_epoch: The current epoch number. total_words_processed: The total number of words processed so far. examples: A vector of word ids. labels: A vector of word ids.
func Slice ¶
Return a slice from 'input'.
The output tensor is a tensor with dimensions described by 'size' whose values are extracted from 'input' starting at the offsets in 'begin'.
*Requirements*:
0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n)
Arguments:
begin: begin[i] specifies the offset into the 'i'th dimension of
'input' to slice from.
size: size[i] specifies the number of elements of the 'i'th dimension
of 'input' to slice. If size[i] is -1, all remaining elements in dimension i are included in the slice (i.e. this is equivalent to setting size[i] = input.dim_size(i) - begin[i]).
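A minimal Go sketch, assuming a `Slice(scope, input, begin, size)` binding consistent with the Arguments above (the signature is not printed in this section):

```
s := NewScope()
t := Const(s, [][]int32{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}})
begin := Const(s, []int32{1, 1}) // start at row 1, column 1
size := Const(s, []int32{1, 2})  // take 1 row and 2 columns
out := Slice(s, t, begin, size)  // expected value: [[5 6]]
if s.Err() != nil {
	panic(s.Err())
}
_ = out
```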
func SlidingWindowDataset ¶
func SlidingWindowDataset(scope *Scope, input_dataset tf.Output, window_size tf.Output, window_shift tf.Output, window_stride tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...SlidingWindowDatasetAttr) (handle tf.Output)
Creates a dataset that passes a sliding window over `input_dataset`.
Arguments:
window_size: A scalar representing the number of elements in the
sliding window.
window_shift: A scalar representing the steps moving the sliding window
forward in one iteration. It must be positive.
window_stride: A scalar representing the stride of the input elements of the sliding window.
It must be positive.
func SnapshotDataset ¶
func SnapshotDataset(scope *Scope, input_dataset tf.Output, path tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...SnapshotDatasetAttr) (handle tf.Output)
Creates a dataset that will write to / read from a snapshot.
This dataset attempts to determine whether a valid snapshot exists at the `snapshot_path`, and reads from the snapshot in lieu of using `input_dataset`. If not, it will run the preprocessing pipeline as usual, and write out a snapshot of the data processed for future use.
Arguments:
input_dataset: A variant tensor representing the input dataset. path: The path we should write snapshots to / read snapshots from.
func SobolSample ¶
func SobolSample(scope *Scope, dim tf.Output, num_results tf.Output, skip tf.Output, optional ...SobolSampleAttr) (samples tf.Output)
Generates points from the Sobol sequence.
Creates a Sobol sequence with `num_results` samples. Each sample has dimension `dim`. Skips the first `skip` samples.
Arguments:
dim: Positive scalar `Tensor` representing each sample's dimension. num_results: Positive scalar `Tensor` of dtype int32. The number of Sobol points to return
in the output.
skip: Positive scalar `Tensor` of dtype int32. The number of initial points of the
Sobol sequence to skip.
Returns `Tensor` of samples from Sobol sequence with `shape` [num_results, dim].
func Softmax ¶
Computes softmax activations.
For each batch `i` and class `j` we have
$$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$
Arguments:
logits: 2-D with shape `[batch_size, num_classes]`.
Returns Same shape as `logits`.
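A minimal Go sketch, assuming the usual `Softmax(scope, logits)` binding; the values noted in the comment are approximate.

```
s := NewScope()
logits := Const(s, [][]float32{{1, 2, 3}}) // one batch of three classes
probs := Softmax(s, logits)                // approximately [[0.09 0.24 0.67]]
if s.Err() != nil {
	panic(s.Err())
}
_ = probs
```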
func SoftmaxCrossEntropyWithLogits ¶
func SoftmaxCrossEntropyWithLogits(scope *Scope, features tf.Output, labels tf.Output) (loss tf.Output, backprop tf.Output)
Computes softmax cross entropy cost and gradients to backpropagate.
Inputs are the logits, not probabilities.
Arguments:
features: batch_size x num_classes matrix labels: batch_size x num_classes matrix
The caller must ensure that each batch of labels represents a valid probability distribution.
Returns:
loss: Per example loss (batch_size vector). backprop: backpropagated gradients (batch_size x num_classes matrix).
func SoftplusGrad ¶
Computes softplus gradients for a softplus operation.
Arguments:
gradients: The backpropagated gradients to the corresponding softplus operation. features: The features passed as input to the corresponding softplus operation.
Returns The gradients: `gradients / (1 + exp(-features))`.
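This expression follows directly from the definition of softplus:

$$\frac{d}{dx}\,\log(1 + e^{x}) = \frac{e^{x}}{1 + e^{x}} = \frac{1}{1 + e^{-x}}$$

so the backpropagated gradient is `gradients / (1 + exp(-features))`, as stated.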
func SoftsignGrad ¶
Computes softsign gradients for a softsign operation.
Arguments:
gradients: The backpropagated gradients to the corresponding softsign operation. features: The features passed as input to the corresponding softsign operation.
Returns The gradients: `gradients / (1 + abs(features)) ** 2`.
func SpaceToBatch ¶
func SpaceToBatch(scope *Scope, input tf.Output, paddings tf.Output, block_size int64) (output tf.Output)
SpaceToBatch for 4-D tensors of type T.
This is a legacy version of the more general SpaceToBatchND.
Zero-pads and then rearranges (permutes) blocks of spatial data into batch. More specifically, this op outputs a copy of the input tensor where values from the `height` and `width` dimensions are moved to the `batch` dimension. After the zero-padding, both `height` and `width` of the input must be divisible by the block size.
The attr `block_size` must be greater than one. It indicates the block size.
- Non-overlapping blocks of size `block_size x block_size` in the height and width dimensions are rearranged into the batch dimension at each location.
- The batch of the output tensor is `batch * block_size * block_size`.
- Both height_pad and width_pad must be divisible by block_size.
The shape of the output will be:
[batch*block_size*block_size, height_pad/block_size, width_pad/block_size, depth]
Some examples:
(1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:
``` x = [[[[1], [2]], [[3], [4]]]] ```
The output tensor has shape `[4, 1, 1, 1]` and value:
``` [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] ```
(2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:
``` x = [[[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]]]
```
The output tensor has shape `[4, 1, 1, 3]` and value:
``` [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] ```
(3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:
``` x = [[[[1], [2], [3], [4]],
[[5], [6], [7], [8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]]]
```
The output tensor has shape `[4, 2, 2, 1]` and value:
``` x = [[[[1], [3]], [[9], [11]]],
[[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]]
```
(4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:
``` x = [[[[1], [2], [3], [4]],
[[5], [6], [7], [8]]], [[[9], [10], [11], [12]], [[13], [14], [15], [16]]]]
```
The output tensor has shape `[8, 1, 2, 1]` and value:
``` x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
[[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
```
Among others, this operation is useful for reducing atrous convolution into regular convolution.
Arguments:
input: 4-D with shape `[batch, height, width, depth]`. paddings: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies the padding of the input with zeros across the spatial dimensions as follows: paddings = [[pad_top, pad_bottom], [pad_left, pad_right]] The effective spatial dimensions of the zero-padded input tensor will be: height_pad = pad_top + height + pad_bottom width_pad = pad_left + width + pad_right
func SpaceToBatchND ¶
func SpaceToBatchND(scope *Scope, input tf.Output, block_shape tf.Output, paddings tf.Output) (output tf.Output)
SpaceToBatch for N-D tensors of type T.
This operation divides "spatial" dimensions `[1, ..., M]` of the input into a grid of blocks of shape `block_shape`, and interleaves these blocks with the "batch" dimension (0) such that in the output, the spatial dimensions `[1, ..., M]` correspond to the position within the grid, and the batch dimension combines both the position within a spatial block and the original batch position. Prior to division into blocks, the spatial dimensions of the input are optionally zero padded according to `paddings`. See below for a precise description.
This operation is equivalent to the following steps:
1. Zero-pad the start and end of dimensions `[1, ..., M]` of the input according to `paddings` to produce `padded` of shape `padded_shape`.
2. Reshape `padded` to `reshaped_padded` of shape:

   [batch] + [padded_shape[1] / block_shape[0], block_shape[0], ..., padded_shape[M] / block_shape[M-1], block_shape[M-1]] + remaining_shape

3. Permute dimensions of `reshaped_padded` to produce `permuted_reshaped_padded` of shape:

   block_shape + [batch] + [padded_shape[1] / block_shape[0], ..., padded_shape[M] / block_shape[M-1]] + remaining_shape

4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch dimension, producing an output tensor of shape:

   [batch * prod(block_shape)] + [padded_shape[1] / block_shape[0], ..., padded_shape[M] / block_shape[M-1]] + remaining_shape
Some examples:
(1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and
`paddings = [[0, 0], [0, 0]]`:
``` x = [[[[1], [2]], [[3], [4]]]] ```
The output tensor has shape `[4, 1, 1, 1]` and value:
``` [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] ```
(2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and
`paddings = [[0, 0], [0, 0]]`:
``` x = [[[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]]]
```
The output tensor has shape `[4, 1, 1, 3]` and value:
``` [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] ```
(3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and
`paddings = [[0, 0], [0, 0]]`:
``` x = [[[[1], [2], [3], [4]],
[[5], [6], [7], [8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]]]
```
The output tensor has shape `[4, 2, 2, 1]` and value:
``` x = [[[[1], [3]], [[9], [11]]],
[[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]]
```
(4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and
paddings = `[[0, 0], [2, 0]]`:
``` x = [[[[1], [2], [3], [4]],
[[5], [6], [7], [8]]], [[[9], [10], [11], [12]], [[13], [14], [15], [16]]]]
```
The output tensor has shape `[8, 1, 3, 1]` and value:
``` x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
[[[0], [2], [4]]], [[[0], [10], [12]]], [[[0], [5], [7]]], [[[0], [13], [15]]], [[[0], [6], [8]]], [[[0], [14], [16]]]]
```
Among others, this operation is useful for reducing atrous convolution into regular convolution.
Arguments:
input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
where spatial_shape has `M` dimensions.
block_shape: 1-D with shape `[M]`, all values must be >= 1. paddings: 2-D with shape `[M, 2]`, all values must be >= 0. `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension `i + 1`, which corresponds to spatial dimension `i`. It is required that `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.
func SpaceToDepth ¶
func SpaceToDepth(scope *Scope, input tf.Output, block_size int64, optional ...SpaceToDepthAttr) (output tf.Output)
SpaceToDepth for tensors of type T.
Rearranges blocks of spatial data into depth. More specifically, this op outputs a copy of the input tensor where values from the `height` and `width` dimensions are moved to the `depth` dimension. The attr `block_size` indicates the input block size.
- Non-overlapping blocks of size `block_size x block_size` are rearranged into depth at each location.
- The depth of the output tensor is `block_size * block_size * input_depth`.
- The Y, X coordinates within each block of the input become the high order component of the output channel index.
- The input tensor's height and width must be divisible by block_size.
The `data_format` attr specifies the layout of the input and output tensors with the following options:
"NHWC": `[ batch, height, width, channels ]` "NCHW": `[ batch, channels, height, width ]` "NCHW_VECT_C": `qint8 [ batch, channels / 4, height, width, 4 ]`
It is useful to consider the operation as transforming a 6-D Tensor. e.g. for data_format = NHWC,
Each element in the input tensor can be specified via 6 coordinates, ordered by decreasing memory layout significance as: n,oY,bY,oX,bX,iC (where n=batch index, oX, oY means X or Y coordinates within the output image, bX, bY means coordinates within the input block, iC means input channels). The output would be a transpose to the following layout: n,oY,oX,bY,bX,iC
This operation is useful for resizing the activations between convolutions (but keeping all data), e.g. instead of pooling. It is also useful for training purely convolutional models.
For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" and block_size = 2:
``` x = [[[[1], [2]],
[[3], [4]]]]
```
This operation will output a tensor of shape `[1, 1, 1, 4]`:
``` [[[[1, 2, 3, 4]]]] ```
Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`, the corresponding output will have a single element (i.e. width and height are both 1) and will have a depth of 4 channels (1 * block_size * block_size). The output element shape is `[1, 1, 4]`.
For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g.
``` x = [[[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]]]
```
This operation, for block_size of 2, will return the following tensor of shape `[1, 1, 1, 12]`
``` [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] ```
Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2:
``` x = [[[[1], [2], [5], [6]],
[[3], [4], [7], [8]], [[9], [10], [13], [14]], [[11], [12], [15], [16]]]]
```
the operator will return the following tensor of shape `[1 2 2 4]`:
``` x = [[[[1, 2, 3, 4],
[5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]]]]
```
Arguments:
block_size: The size of the spatial block.
func SparseAdd ¶
func SparseAdd(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b_indices tf.Output, b_values tf.Output, b_shape tf.Output, thresh tf.Output) (sum_indices tf.Output, sum_values tf.Output, sum_shape tf.Output)
Adds two `SparseTensor` objects to produce another `SparseTensor`.
The input `SparseTensor` objects' indices are assumed ordered in standard lexicographic order. If this is not the case, before this step run `SparseReorder` to restore index ordering.
By default, if two values sum to zero at some index, the output `SparseTensor` would still include that particular location in its index, storing a zero in the corresponding value slot. To override this, callers can specify `thresh`, indicating that if the sum has a magnitude strictly smaller than `thresh`, its corresponding value and index would then not be included. In particular, `thresh == 0` (default) means everything is kept and actual thresholding happens only for a positive value.
In the following shapes, `nnz` is the count after taking `thresh` into account.
Arguments:
a_indices: 2-D. The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix. a_values: 1-D. The `values` of the first `SparseTensor`, size `[nnz]` Vector. a_shape: 1-D. The `shape` of the first `SparseTensor`, size `[ndims]` Vector. b_indices: 2-D. The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix. b_values: 1-D. The `values` of the second `SparseTensor`, size `[nnz]` Vector. b_shape: 1-D. The `shape` of the second `SparseTensor`, size `[ndims]` Vector. thresh: 0-D. The magnitude threshold that determines if an output value/index
pair takes space.
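A minimal Go sketch using the signature above; the indices are int64 and, as required, listed in lexicographic order, and `thresh` is a scalar with the values' real dtype.

```
s := NewScope()
aIdx := Const(s, [][]int64{{0, 0}, {1, 2}})
aVal := Const(s, []float32{1, 2})
aShape := Const(s, []int64{3, 3})
bIdx := Const(s, [][]int64{{0, 0}, {2, 1}})
bVal := Const(s, []float32{3, 4})
bShape := Const(s, []int64{3, 3})
thresh := Const(s, float32(0)) // keep every non-zero sum
sumIdx, sumVal, sumShape := SparseAdd(s, aIdx, aVal, aShape, bIdx, bVal, bShape, thresh)
if s.Err() != nil {
	panic(s.Err())
}
_, _, _ = sumIdx, sumVal, sumShape
// Expected non-empty entries of the sum: [0,0]=4, [1,2]=2, [2,1]=4.
```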
func SparseAddGrad ¶
func SparseAddGrad(scope *Scope, backprop_val_grad tf.Output, a_indices tf.Output, b_indices tf.Output, sum_indices tf.Output) (a_val_grad tf.Output, b_val_grad tf.Output)
The gradient operator for the SparseAdd op.
The SparseAdd op calculates A + B, where A, B, and the sum are all represented as `SparseTensor` objects. This op takes in the upstream gradient w.r.t. non-empty values of the sum, and outputs the gradients w.r.t. the non-empty values of A and B.
Arguments:
backprop_val_grad: 1-D with shape `[nnz(sum)]`. The gradient with respect to
the non-empty values of the sum.
a_indices: 2-D. The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`. b_indices: 2-D. The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`. sum_indices: 2-D. The `indices` of the sum `SparseTensor`, size
`[nnz(sum), ndims]`.
Returns:
a_val_grad: 1-D with shape `[nnz(A)]`. The gradient with respect to the
non-empty values of A.
b_val_grad: 1-D with shape `[nnz(B)]`. The gradient with respect to the
non-empty values of B.
func SparseBincount ¶
func SparseBincount(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output, size tf.Output, weights tf.Output, optional ...SparseBincountAttr) (output tf.Output)
Counts the number of occurrences of each value in an integer array.
Outputs a vector with length `size` and the same dtype as `weights`. If `weights` are empty, then index `i` stores the number of times the value `i` is counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of the value in `weights` at each index where the corresponding value in `arr` is `i`.
Values in `arr` outside of the range [0, size) are ignored.
Arguments:
indices: 2D int64 `Tensor`. values: 1D int `Tensor`. dense_shape: 1D int64 `Tensor`. size: non-negative int scalar `Tensor`. weights: is an int32, int64, float32, or float64 `Tensor` with the same
shape as `input`, or a length-0 `Tensor`, in which case it acts as all weights equal to 1.
Returns 1D `Tensor` with length equal to `size` or 2D `Tensor` with [batch_size, `size`]. The counts or summed weights for each value in the range [0, size).
func SparseConcat ¶
func SparseConcat(scope *Scope, indices []tf.Output, values []tf.Output, shapes []tf.Output, concat_dim int64) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output)
Concatenates a list of `SparseTensor` along the specified dimension.
Concatenation is with respect to the dense versions of these sparse tensors. It is assumed that each input is a `SparseTensor` whose elements are ordered along increasing dimension number.
All inputs' shapes must match, except for the concat dimension. The `indices`, `values`, and `shapes` lists must have the same length.
The output shape is identical to the inputs', except along the concat dimension, where it is the sum of the inputs' sizes along that dimension.
The output elements will be resorted to preserve the sort order along increasing dimension number.
This op runs in `O(M log M)` time, where `M` is the total number of non-empty values across all inputs. This is due to the need for an internal sort in order to concatenate efficiently across an arbitrary dimension.
For example, if `concat_dim = 1` and the inputs are
sp_inputs[0]: shape = [2, 3] [0, 2]: "a" [1, 0]: "b" [1, 1]: "c" sp_inputs[1]: shape = [2, 4] [0, 1]: "d" [0, 2]: "e"
then the output will be
shape = [2, 7] [0, 2]: "a" [0, 4]: "d" [0, 5]: "e" [1, 0]: "b" [1, 1]: "c"
Graphically this is equivalent to doing
[ a] concat [ d e ] = [ a d e ] [b c ] [ ] [b c ]
Arguments:
indices: 2-D. Indices of each input `SparseTensor`. values: 1-D. Non-empty values of each `SparseTensor`. shapes: 1-D. Shapes of each `SparseTensor`. concat_dim: Dimension to concatenate along. Must be in range [-rank, rank),
where rank is the number of dimensions in each input `SparseTensor`.
Returns:
output_indices: 2-D. Indices of the concatenated `SparseTensor`. output_values: 1-D. Non-empty values of the concatenated `SparseTensor`. output_shape: 1-D. Shape of the concatenated `SparseTensor`.
func SparseCountSparseOutput ¶
func SparseCountSparseOutput(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output, weights tf.Output, binary_output bool, optional ...SparseCountSparseOutputAttr) (output_indices tf.Output, output_values tf.Output, output_dense_shape tf.Output)
Performs sparse-output bin counting for a sparse tensor input.
Counts the number of times each value occurs in the input.
Arguments:
indices: Tensor containing the indices of the sparse tensor to count. values: Tensor containing values of the sparse tensor to count. dense_shape: Tensor containing the dense shape of the sparse tensor to count. weights: A Tensor of the same shape as indices containing per-index weight values.
May also be the empty tensor if no weights are used.
binary_output: Whether to output the number of occurrences of each value or 1.
Returns:
output_indices: Indices tensor for the resulting sparse tensor object. output_values: Values tensor for the resulting sparse tensor object. output_dense_shape: Shape tensor for the resulting sparse tensor object.
func SparseCross ¶
func SparseCross(scope *Scope, indices []tf.Output, values []tf.Output, shapes []tf.Output, dense_inputs []tf.Output, hashed_output bool, num_buckets int64, hash_key int64, out_type tf.DataType, internal_type tf.DataType) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output)
Generates sparse cross from a list of sparse and dense tensors.
The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each representing features of one feature column. It outputs a 2D `SparseTensor` with the batchwise crosses of these features.
For example, if the inputs are
inputs[0]: SparseTensor with shape = [2, 2] [0, 0]: "a" [1, 0]: "b" [1, 1]: "c" inputs[1]: SparseTensor with shape = [2, 1] [0, 0]: "d" [1, 0]: "e" inputs[2]: Tensor [["f"], ["g"]]
then the output will be
shape = [2, 2] [0, 0]: "a_X_d_X_f" [1, 0]: "b_X_e_X_g" [1, 1]: "c_X_e_X_g"
if hashed_output=true then the output will be
shape = [2, 2] [0, 0]: FingerprintCat64( Fingerprint64("f"), FingerprintCat64( Fingerprint64("d"), Fingerprint64("a"))) [1, 0]: FingerprintCat64( Fingerprint64("g"), FingerprintCat64( Fingerprint64("e"), Fingerprint64("b"))) [1, 1]: FingerprintCat64( Fingerprint64("g"), FingerprintCat64( Fingerprint64("e"), Fingerprint64("c")))
Arguments:
indices: 2-D. Indices of each input `SparseTensor`. values: 1-D. values of each `SparseTensor`. shapes: 1-D. Shapes of each `SparseTensor`. dense_inputs: 2-D. Columns represented by dense `Tensor`. hashed_output: If true, returns the hash of the cross instead of the string.
This allows us to avoid string manipulations.
num_buckets: It is used if hashed_output is true.
output = hashed_value%num_buckets if num_buckets > 0 else hashed_value.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints.
Returns:
output_indices: 2-D. Indices of the concatenated `SparseTensor`. output_values: 1-D. Non-empty values of the concatenated or hashed
`SparseTensor`.
output_shape: 1-D. Shape of the concatenated `SparseTensor`.
func SparseCrossHashed ¶
func SparseCrossHashed(scope *Scope, indices []tf.Output, values []tf.Output, shapes []tf.Output, dense_inputs []tf.Output, num_buckets tf.Output, strong_hash tf.Output, salt tf.Output) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output)
Generates sparse cross from a list of sparse and dense tensors.
The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each representing features of one feature column. It outputs a 2D `SparseTensor` with the batchwise crosses of these features.
For example, if the inputs are
inputs[0]: SparseTensor with shape = [2, 2] [0, 0]: "a" [1, 0]: "b" [1, 1]: "c" inputs[1]: SparseTensor with shape = [2, 1] [0, 0]: "d" [1, 0]: "e" inputs[2]: Tensor [["f"], ["g"]]
then the output will be
shape = [2, 2] [0, 0]: "a_X_d_X_f" [1, 0]: "b_X_e_X_g" [1, 1]: "c_X_e_X_g"
if hashed_output=true then the output will be
shape = [2, 2] [0, 0]: FingerprintCat64( Fingerprint64("f"), FingerprintCat64( Fingerprint64("d"), Fingerprint64("a"))) [1, 0]: FingerprintCat64( Fingerprint64("g"), FingerprintCat64( Fingerprint64("e"), Fingerprint64("b"))) [1, 1]: FingerprintCat64( Fingerprint64("g"), FingerprintCat64( Fingerprint64("e"), Fingerprint64("c")))
Arguments:
indices: 2-D. Indices of each input `SparseTensor`. values: 1-D. values of each `SparseTensor`. shapes: 1-D. Shapes of each `SparseTensor`. dense_inputs: 2-D. Columns represented by dense `Tensor`. num_buckets: It is used if hashed_output is true.
output = hashed_value%num_buckets if num_buckets > 0 else hashed_value.
strong_hash: boolean, if true, siphash with salt will be used instead of farmhash. salt: Specify the salt that will be used by the siphash function.
Returns:
output_indices: 2-D. Indices of the concatenated `SparseTensor`. output_values: 1-D. Non-empty values of the concatenated or hashed
`SparseTensor`.
output_shape: 1-D. Shape of the concatenated `SparseTensor`.
func SparseCrossV2 ¶
func SparseCrossV2(scope *Scope, indices []tf.Output, values []tf.Output, shapes []tf.Output, dense_inputs []tf.Output, sep tf.Output) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output)
Generates sparse cross from a list of sparse and dense tensors.
The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each representing features of one feature column. It outputs a 2D `SparseTensor` with the batchwise crosses of these features.
For example, if the inputs are
inputs[0]: SparseTensor with shape = [2, 2] [0, 0]: "a" [1, 0]: "b" [1, 1]: "c" inputs[1]: SparseTensor with shape = [2, 1] [0, 0]: "d" [1, 0]: "e" inputs[2]: Tensor [["f"], ["g"]]
then the output will be
shape = [2, 2] [0, 0]: "a_X_d_X_f" [1, 0]: "b_X_e_X_g" [1, 1]: "c_X_e_X_g"
if hashed_output=true then the output will be
shape = [2, 2] [0, 0]: FingerprintCat64( Fingerprint64("f"), FingerprintCat64( Fingerprint64("d"), Fingerprint64("a"))) [1, 0]: FingerprintCat64( Fingerprint64("g"), FingerprintCat64( Fingerprint64("e"), Fingerprint64("b"))) [1, 1]: FingerprintCat64( Fingerprint64("g"), FingerprintCat64( Fingerprint64("e"), Fingerprint64("c")))
Arguments:
indices: 2-D. Indices of each input `SparseTensor`. values: 1-D. values of each `SparseTensor`. shapes: 1-D. Shapes of each `SparseTensor`. dense_inputs: 2-D. Columns represented by dense `Tensor`. sep: string used when joining a list of string inputs, can be used as separator later.
Returns:
output_indices: 2-D. Indices of the concatenated `SparseTensor`. output_values: 1-D. Non-empty values of the concatenated or hashed
`SparseTensor`.
output_shape: 1-D. Shape of the concatenated `SparseTensor`.
func SparseDenseCwiseAdd ¶
func SparseDenseCwiseAdd(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output, dense tf.Output) (output tf.Output)
Adds up a SparseTensor and a dense Tensor, using these special rules:
(1) Broadcasts the dense side to have the same shape as the sparse side, if
eligible;
(2) Then, only the dense values pointed to by the indices of the SparseTensor
participate in the cwise addition.
By these rules, the result is a logical SparseTensor with exactly the same indices and shape, but possibly with different non-zero values. The output of this Op is the resultant non-zero values.
Arguments:
sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
SparseTensor, possibly not in canonical ordering.
sp_values: 1-D. `N` non-empty values corresponding to `sp_indices`. sp_shape: 1-D. Shape of the input SparseTensor. dense: `R`-D. The dense Tensor operand.
Returns 1-D. The `N` values that are operated on.
func SparseDenseCwiseDiv ¶
func SparseDenseCwiseDiv(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output, dense tf.Output) (output tf.Output)
Component-wise divides a SparseTensor by a dense Tensor.
*Limitation*: this Op only broadcasts the dense side to the sparse side, but not the other direction.
Arguments:
sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
SparseTensor, possibly not in canonical ordering.
sp_values: 1-D. `N` non-empty values corresponding to `sp_indices`. sp_shape: 1-D. Shape of the input SparseTensor. dense: `R`-D. The dense Tensor operand.
Returns 1-D. The `N` values that are operated on.
func SparseDenseCwiseMul ¶
func SparseDenseCwiseMul(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output, dense tf.Output) (output tf.Output)
Component-wise multiplies a SparseTensor by a dense Tensor.
The output locations corresponding to the implicitly zero elements in the sparse tensor will be zero (i.e., will not take up storage space), regardless of the contents of the dense tensor (even if it is +/-INF, and despite the fact that INF*0 == NaN).
*Limitation*: this Op only broadcasts the dense side to the sparse side, but not the other direction.
Arguments:
sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
SparseTensor, possibly not in canonical ordering.
sp_values: 1-D. `N` non-empty values corresponding to `sp_indices`. sp_shape: 1-D. Shape of the input SparseTensor. dense: `R`-D. The dense Tensor operand.
Returns 1-D. The `N` values that are operated on.
func SparseFillEmptyRows ¶
func SparseFillEmptyRows(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output, default_value tf.Output) (output_indices tf.Output, output_values tf.Output, empty_row_indicator tf.Output, reverse_index_map tf.Output)
Fills empty rows in the input 2-D `SparseTensor` with a default value.
The input `SparseTensor` is represented via the tuple of inputs (`indices`, `values`, `dense_shape`). The output `SparseTensor` has the same `dense_shape` but with indices `output_indices` and values `output_values`.
This op inserts a single entry for every row that doesn't have any values. The index is created as `[row, 0, ..., 0]` and the inserted value is `default_value`.
For example, suppose `sp_input` has shape `[5, 6]` and non-empty values:
[0, 1]: a
[0, 3]: b
[2, 0]: c
[3, 1]: d
Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values:
[0, 1]: a
[0, 3]: b
[1, 0]: default_value
[2, 0]: c
[3, 1]: d
[4, 0]: default_value
The output `SparseTensor` will be in row-major order and will have the same shape as the input.
This op also returns an indicator vector shaped `[dense_shape[0]]` such that
empty_row_indicator[i] = True iff row i was an empty row.
And a reverse index map vector shaped `[indices.shape[0]]` that is used during backpropagation,
reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :]
Arguments:
indices: 2-D. the indices of the sparse tensor. values: 1-D. the values of the sparse tensor. dense_shape: 1-D. the shape of the sparse tensor. default_value: 0-D. default value to insert into location `[row, 0, ..., 0]` for rows missing from the input sparse tensor.
Returns:
output_indices: 2-D. the indices of the filled sparse tensor.
output_values: 1-D. the values of the filled sparse tensor.
empty_row_indicator: 1-D. whether the dense row was missing in the input sparse tensor.
reverse_index_map: 1-D. a map from the input indices to the output indices.
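A minimal, hedged Go sketch that fills the empty rows of a small [4, 2] SparseTensor and inspects the indicator vector (import paths and example values are assumed for illustration):

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// A [4, 2] SparseTensor with entries only in rows 0 and 2.
	indices := op.Const(s, [][]int64{{0, 0}, {2, 1}})
	values := op.Const(s, []float32{1, 3})
	denseShape := op.Const(s, []int64{4, 2})
	defaultValue := op.Const(s, float32(0))

	_, outValues, emptyRowIndicator, _ := op.SparseFillEmptyRows(
		s, indices, values, denseShape, defaultValue)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()
	out, err := sess.Run(nil, []tf.Output{outValues, emptyRowIndicator}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // [1 0 3 0]
	fmt.Println(out[1].Value()) // [false true false true]
}
```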
func SparseFillEmptyRowsGrad ¶
func SparseFillEmptyRowsGrad(scope *Scope, reverse_index_map tf.Output, grad_values tf.Output) (d_values tf.Output, d_default_value tf.Output)
The gradient of SparseFillEmptyRows.
Takes vectors reverse_index_map, shaped `[N]`, and grad_values, shaped `[N_full]`, where `N_full >= N` and copies data into either `d_values` or `d_default_value`. Here `d_values` is shaped `[N]` and `d_default_value` is a scalar.
d_values[j] = grad_values[reverse_index_map[j]]
d_default_value = sum_{k : 0 .. N_full - 1} (grad_values[k] * 1{k not in reverse_index_map})
Arguments:
reverse_index_map: 1-D. The reverse index map from SparseFillEmptyRows. grad_values: 1-D. The gradients from backprop.
Returns:
d_values: 1-D. The backprop into values. d_default_value: 0-D. The backprop into default_value.
func SparseMatMul ¶
func SparseMatMul(scope *Scope, a tf.Output, b tf.Output, optional ...SparseMatMulAttr) (product tf.Output)
Multiply matrix "a" by matrix "b".
The inputs must be two-dimensional matrices and the inner dimension of "a" must match the outer dimension of "b". Both "a" and "b" must be `Tensor`s not `SparseTensor`s. This op is optimized for the case where at least one of "a" or "b" is sparse, in the sense that they have a large proportion of zero values. The breakeven for using this versus a dense matrix multiply on one platform was 30% zero values in the sparse matrix.
The gradient computation of this operation will only take advantage of sparsity in the input gradient when that gradient comes from a Relu.
func SparseMatrixAdd ¶
func SparseMatrixAdd(scope *Scope, a tf.Output, b tf.Output, alpha tf.Output, beta tf.Output) (c tf.Output)
Sparse addition of two CSR matrices, C = alpha * A + beta * B.
The gradients of SparseMatrixAdd outputs with respect to alpha and beta are not currently defined (TensorFlow will return zeros for these entries).
Arguments:
a: A CSRSparseMatrix. b: A CSRSparseMatrix. alpha: A constant scalar. beta: A constant scalar.
Returns A CSRSparseMatrix.
func SparseMatrixMatMul ¶
func SparseMatrixMatMul(scope *Scope, a tf.Output, b tf.Output, optional ...SparseMatrixMatMulAttr) (output tf.Output)
Matrix-multiplies a sparse matrix with a dense matrix.
Returns a dense matrix. For inputs A and B, where A is CSR and B is dense, this op returns a dense C.
If transpose_output is `false`, returns: ```
C = A . B
```
If transpose_output is `true`, returns: ```
C = transpose(A . B) = transpose(B) . transpose(A)
```
where the transposition is performed along the two innermost (matrix) dimensions.
If conjugate_output is `true`, returns: ```
C = conjugate(A . B) = conjugate(A) . conjugate(B)
```
If both conjugate_output and transpose_output are `true`, returns: ```
C = conjugate(transpose(A . B)) = conjugate(transpose(B)) . conjugate(transpose(A))
```
Arguments:
a: A CSRSparseMatrix. b: A dense tensor.
Returns A dense output tensor.
func SparseMatrixMul ¶
Element-wise multiplication of a sparse matrix with a dense tensor.
Returns a sparse matrix.
The dense tensor `b` may be a scalar; otherwise `a` must be a rank-3 `SparseMatrix`, in which case `b` must be shaped `[batch_size, 1, 1]` and the multiply operation broadcasts.
**NOTE** even if `b` is zero, the sparsity structure of the output does not change.
Arguments:
a: A CSRSparseMatrix. b: A dense tensor.
Returns A dense output tensor.
func SparseMatrixNNZ ¶
Returns the number of nonzeroes of `sparse_matrix`.
Arguments:
sparse_matrix: A CSRSparseMatrix.
Returns The number of nonzeroes of `sparse_matrix`.
func SparseMatrixOrderingAMD ¶
Computes the Approximate Minimum Degree (AMD) ordering of `input`.
Computes the Approximate Minimum Degree (AMD) ordering for a sparse matrix.
The returned permutation may be used to permute the rows and columns of the given sparse matrix. Applying this permutation typically results in the permuted sparse matrix's sparse Cholesky (or other) decomposition having less fill-in than the decomposition of the original matrix.
The input sparse matrix may have rank 2 or rank 3. The output Tensor, representing the ordering(s), then has rank 1 or 2 respectively, with the same batch shape as the input.
Each component of the input sparse matrix must represent a square symmetric matrix; only the lower triangular part of the matrix is read. The values of the sparse matrix do not affect the returned permutation; only the sparsity pattern of the sparse matrix is used. Hence, a single AMD ordering may be reused for the Cholesky decompositions of sparse matrices with the same sparsity pattern but with possibly different values.
Each batch component of the output permutation represents a permutation of `N` elements, where the input sparse matrix components each have `N` rows. That is, the component contains each of the integers `{0, .. N-1}` exactly once. The `i`th element represents the row index that the `i`th row maps to.
Usage example:
```python
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops

a_indices = np.array([[0, 0], [1, 1], [2, 1], [2, 2], [3, 3]])
a_values = np.array([1.0, 2.0, 1.0, 3.0, 4.0], np.float32)
a_dense_shape = [4, 4]

with tf.Session() as sess:
  # Define (COO format) SparseTensor over Numpy array.
  a_st = tf.sparse.SparseTensor(a_indices, a_values, a_dense_shape)
  # Convert SparseTensors to CSR SparseMatrix.
  a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
      a_st.indices, a_st.values, a_st.dense_shape)
  # Obtain the AMD Ordering for the CSR SparseMatrix.
  ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(a_sm)
  ordering_amd_value = sess.run(ordering_amd)
```
`ordering_amd_value` stores the AMD ordering: `[1 2 3 0]`.
Arguments:
input: A `CSRSparseMatrix`.
Returns The Approximate Minimum Degree (AMD) ordering of `input`.
func SparseMatrixSoftmax ¶
Calculates the softmax of a CSRSparseMatrix.
Calculate the softmax of the innermost dimensions of a SparseMatrix.
Missing values are treated as `-inf` (i.e., logits of zero probability); and the output has the same sparsity structure as the input (though missing values in the output may now be treated as having probability zero).
Arguments:
logits: A CSRSparseMatrix.
Returns A CSRSparseMatrix.
func SparseMatrixSoftmaxGrad ¶
func SparseMatrixSoftmaxGrad(scope *Scope, softmax tf.Output, grad_softmax tf.Output, type_ tf.DataType) (gradient tf.Output)
Calculates the gradient of the SparseMatrixSoftmax op.
Arguments:
softmax: A CSRSparseMatrix. grad_softmax: The gradient of `softmax`.
Returns The output gradient.
func SparseMatrixSparseCholesky ¶
func SparseMatrixSparseCholesky(scope *Scope, input tf.Output, permutation tf.Output, type_ tf.DataType) (output tf.Output)
Computes the sparse Cholesky decomposition of `input`.
Computes the Sparse Cholesky decomposition of a sparse matrix, with the given fill-in reducing permutation.
The input sparse matrix and the fill-in reducing permutation `permutation` must have compatible shapes. If the sparse matrix has rank 3, with batch dimension `B`, then `permutation` must have rank 2 with the same batch dimension `B`. There is no support for broadcasting.
Furthermore, each component vector of `permutation` must be of length `N`, containing each of the integers {0, 1, ..., N - 1} exactly once, where `N` is the number of rows of each component of the sparse matrix.
Each component of the input sparse matrix must represent a symmetric positive definite (SPD) matrix; although only the lower triangular part of the matrix is read. If any individual component is not SPD, then an InvalidArgument error is thrown.
The returned sparse matrix has the same dense shape as the input sparse matrix. For each component `A` of the input sparse matrix, the corresponding output sparse matrix represents `L`, the lower triangular Cholesky factor satisfying the following identity:
```
A = L * Lt
```
where Lt denotes the transpose of L (or its conjugate transpose, if `type` is `complex64` or `complex128`).
The `type` parameter denotes the type of the matrix elements. The supported types are: `float32`, `float64`, `complex64` and `complex128`.
Usage example:
```python
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops

a_indices = np.array([[0, 0], [1, 1], [2, 1], [2, 2], [3, 3]])
a_values = np.array([1.0, 2.0, 1.0, 3.0, 4.0], np.float32)
a_dense_shape = [4, 4]

with tf.Session() as sess:
  # Define (COO format) SparseTensor over Numpy array.
  a_st = tf.sparse.SparseTensor(a_indices, a_values, a_dense_shape)
  # Convert SparseTensors to CSR SparseMatrix.
  a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
      a_st.indices, a_st.values, a_st.dense_shape)
  # Obtain the Sparse Cholesky factor using AMD Ordering for reducing zero
  # fill-in (number of structural non-zeros in the sparse Cholesky factor).
  ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(a_sm)
  cholesky_sparse_matrices = (
      sparse_csr_matrix_ops.sparse_matrix_sparse_cholesky(
          a_sm, ordering_amd, type=tf.float32))
  # Convert the CSRSparseMatrix Cholesky factor to a dense Tensor.
  dense_cholesky = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
      cholesky_sparse_matrices, tf.float32)
  # Evaluate the dense Tensor value.
  dense_cholesky_value = sess.run(dense_cholesky)
```
`dense_cholesky_value` stores the dense Cholesky factor:
```
[[ 1.    0.    0.    0.  ]
 [ 0.    1.41  0.    0.  ]
 [ 0.    0.70  1.58  0.  ]
 [ 0.    0.    0.    2.  ]]
```
Arguments:
input: A `CSRSparseMatrix`. permutation: A fill-in reducing permutation matrix. type: The type of `input`.
Returns The sparse Cholesky decomposition of `input`.
func SparseMatrixSparseMatMul ¶
func SparseMatrixSparseMatMul(scope *Scope, a tf.Output, b tf.Output, type_ tf.DataType, optional ...SparseMatrixSparseMatMulAttr) (c tf.Output)
Sparse-matrix-multiplies two CSR matrices `a` and `b`.
Performs a matrix multiplication of a sparse matrix `a` with a sparse matrix `b`; returns a sparse matrix `a * b`, unless either `a` or `b` is transposed or adjointed.
Each matrix may be transposed or adjointed (conjugated and transposed) according to the Boolean parameters `transpose_a`, `adjoint_a`, `transpose_b` and `adjoint_b`. At most one of `transpose_a` or `adjoint_a` may be True. Similarly, at most one of `transpose_b` or `adjoint_b` may be True.
The inputs must have compatible shapes. That is, the inner dimension of `a` must be equal to the outer dimension of `b`. This requirement is adjusted according to whether either `a` or `b` is transposed or adjointed.
The `type` parameter denotes the type of the matrix elements. Both `a` and `b` must have the same type. The supported types are: `float32`, `float64`, `complex64` and `complex128`.
Both `a` and `b` must have the same rank. Broadcasting is not supported. If they have rank 3, each batch of 2D CSRSparseMatrices within `a` and `b` must have the same dense shape.
The sparse matrix product may have numeric (non-structural) zeros. TODO(anudhyan): Consider adding a boolean attribute to control whether to prune zeros.
Usage example:
```python
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops

a_indices = np.array([[0, 0], [2, 3], [2, 4], [3, 0]])
a_values = np.array([1.0, 5.0, -1.0, -2.0], np.float32)
a_dense_shape = [4, 5]

b_indices = np.array([[0, 0], [3, 0], [3, 1]])
b_values = np.array([2.0, 7.0, 8.0], np.float32)
b_dense_shape = [5, 3]

with tf.Session() as sess:
  # Define (COO format) Sparse Tensors over Numpy arrays.
  a_st = tf.sparse.SparseTensor(a_indices, a_values, a_dense_shape)
  b_st = tf.sparse.SparseTensor(b_indices, b_values, b_dense_shape)
  # Convert SparseTensors to CSR SparseMatrix.
  a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
      a_st.indices, a_st.values, a_st.dense_shape)
  b_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
      b_st.indices, b_st.values, b_st.dense_shape)
  # Compute the CSR SparseMatrix matrix multiplication.
  c_sm = sparse_csr_matrix_ops.sparse_matrix_sparse_mat_mul(
      a=a_sm, b=b_sm, type=tf.float32)
  # Convert the CSR SparseMatrix product to a dense Tensor.
  c_sm_dense = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
      c_sm, tf.float32)
  # Evaluate the dense Tensor value.
  c_sm_dense_value = sess.run(c_sm_dense)
```
`c_sm_dense_value` stores the dense matrix product:
```
[[  2.   0.   0.]
 [  0.   0.   0.]
 [ 35.  40.   0.]
 [ -4.   0.   0.]]
```
Arguments:
a: A `CSRSparseMatrix`. b: A `CSRSparseMatrix` with the same type and rank as `a`. type: The type of both `a` and `b`. transpose_a: If True, `a` is transposed before multiplication. transpose_b: If True, `b` is transposed before multiplication. adjoint_a: If True, `a` is adjointed before multiplication. adjoint_b: If True, `b` is adjointed before multiplication.
Returns A CSRSparseMatrix.
func SparseMatrixTranspose ¶
func SparseMatrixTranspose(scope *Scope, input tf.Output, type_ tf.DataType, optional ...SparseMatrixTransposeAttr) (output tf.Output)
Transposes the inner (matrix) dimensions of a CSRSparseMatrix.
Transposes the inner (matrix) dimensions of a SparseMatrix and optionally conjugates its values.
Arguments:
input: A CSRSparseMatrix.
Returns A CSRSparseMatrix.
func SparseMatrixZeros ¶
func SparseMatrixZeros(scope *Scope, dense_shape tf.Output, type_ tf.DataType) (sparse_matrix tf.Output)
Creates an all-zeros CSRSparseMatrix with shape `dense_shape`.
Arguments:
dense_shape: The desired matrix shape.
Returns An empty CSR matrix with shape `dense_shape`.
func SparseReduceMax ¶
func SparseReduceMax(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceMaxAttr) (output tf.Output)
Computes the max of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to `tf.reduce_max()`. In particular, this Op also returns a dense `Tensor` instead of a sparse one.
Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained with length 1.
If `reduction_axes` has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, which are interpreted according to the indexing rules in Python.
Arguments:
input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
SparseTensor, possibly not in canonical ordering.
input_values: 1-D. `N` non-empty values corresponding to `input_indices`. input_shape: 1-D. Shape of the input SparseTensor. reduction_axes: 1-D. Length-`K` vector containing the reduction axes.
Returns `R-K`-D. The reduced Tensor.
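A minimal Go sketch (assumed imports and example values, not part of the generated documentation) reducing a [2, 3] SparseTensor along axis 1 to a dense per-row maximum:

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// A [2, 3] SparseTensor: row 0 holds {1, 3}, row 1 holds {5}.
	indices := op.Const(s, [][]int64{{0, 0}, {0, 2}, {1, 1}})
	values := op.Const(s, []float32{1, 3, 5})
	shape := op.Const(s, []int64{2, 3})
	axes := op.Const(s, []int32{1}) // reduce across columns

	maxPerRow := op.SparseReduceMax(s, indices, values, shape, axes)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()
	out, err := sess.Run(nil, []tf.Output{maxPerRow}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // [3 5]
}
```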
func SparseReduceMaxSparse ¶
func SparseReduceMaxSparse(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceMaxSparseAttr) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output)
Computes the max of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to `tf.reduce_max()`. In contrast to SparseReduceMax, this Op returns a SparseTensor.
Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained with length 1.
If `reduction_axes` has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, which are interpreted according to the indexing rules in Python.
Arguments:
input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
SparseTensor, possibly not in canonical ordering.
input_values: 1-D. `N` non-empty values corresponding to `input_indices`. input_shape: 1-D. Shape of the input SparseTensor. reduction_axes: 1-D. Length-`K` vector containing the reduction axes.
func SparseReduceSum ¶
func SparseReduceSum(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceSumAttr) (output tf.Output)
Computes the sum of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to `tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor` instead of a sparse one.
Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained with length 1.
If `reduction_axes` has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, which are interpreted according to the indexing rules in Python.
Arguments:
input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
SparseTensor, possibly not in canonical ordering.
input_values: 1-D. `N` non-empty values corresponding to `input_indices`. input_shape: 1-D. Shape of the input SparseTensor. reduction_axes: 1-D. Length-`K` vector containing the reduction axes.
Returns `R-K`-D. The reduced Tensor.
func SparseReduceSumSparse ¶
func SparseReduceSumSparse(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceSumSparseAttr) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output)
Computes the sum of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a SparseTensor.
Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained with length 1.
If `reduction_axes` has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, which are interpreted according to the indexing rules in Python.
Arguments:
input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
SparseTensor, possibly not in canonical ordering.
input_values: 1-D. `N` non-empty values corresponding to `input_indices`. input_shape: 1-D. Shape of the input SparseTensor. reduction_axes: 1-D. Length-`K` vector containing the reduction axes.
func SparseReorder ¶
func SparseReorder(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output) (output_indices tf.Output, output_values tf.Output)
Reorders a SparseTensor into the canonical, row-major ordering.
Note that by convention, all sparse ops preserve the canonical ordering along increasing dimension number. The only time ordering can be violated is during manual manipulation of the indices and values vectors to add entries.
Reordering does not affect the shape of the SparseTensor.
If the tensor has rank `R` and `N` non-empty values, `input_indices` has shape `[N, R]`, input_values has length `N`, and input_shape has length `R`.
Arguments:
input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
SparseTensor, possibly not in canonical ordering.
input_values: 1-D. `N` non-empty values corresponding to `input_indices`. input_shape: 1-D. Shape of the input SparseTensor.
Returns:
output_indices: 2-D. `N x R` matrix with the same indices as input_indices, but
in canonical row-major ordering.
output_values: 1-D. `N` non-empty values corresponding to `output_indices`.
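A small illustrative Go sketch (import paths and example values are assumptions) that restores canonical row-major order:

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// Indices deliberately out of row-major order.
	indices := op.Const(s, [][]int64{{1, 1}, {0, 0}})
	values := op.Const(s, []int32{20, 10})
	shape := op.Const(s, []int64{2, 2})

	outIndices, outValues := op.SparseReorder(s, indices, values, shape)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()
	out, err := sess.Run(nil, []tf.Output{outIndices, outValues}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // [[0 0] [1 1]]
	fmt.Println(out[1].Value()) // [10 20]
}
```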
func SparseReshape ¶
func SparseReshape(scope *Scope, input_indices tf.Output, input_shape tf.Output, new_shape tf.Output) (output_indices tf.Output, output_shape tf.Output)
Reshapes a SparseTensor to represent values in a new dense shape.
This operation has the same semantics as reshape on the represented dense tensor. The `input_indices` are recomputed based on the requested `new_shape`.
If one component of `new_shape` is the special value -1, the size of that dimension is computed so that the total dense size remains constant. At most one component of `new_shape` can be -1. The number of dense elements implied by `new_shape` must be the same as the number of dense elements originally implied by `input_shape`.
Reshaping does not affect the order of values in the SparseTensor.
If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape` has length `R_out`, then `input_indices` has shape `[N, R_in]`, `input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and `output_shape` has length `R_out`.
Arguments:
input_indices: 2-D. `N x R_in` matrix with the indices of non-empty values in a
SparseTensor.
input_shape: 1-D. `R_in` vector with the input SparseTensor's dense shape. new_shape: 1-D. `R_out` vector with the requested new dense shape.
Returns:
output_indices: 2-D. `N x R_out` matrix with the updated indices of non-empty
values in the output SparseTensor.
output_shape: 1-D. `R_out` vector with the full dense shape of the output
SparseTensor. This is the same as `new_shape` but with any -1 dimensions filled in.
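A short, hedged Go sketch (assumed imports and example values) reshaping a [2, 3] SparseTensor to [3, 2] via a -1 dimension:

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// Two non-empty values in a [2, 3] SparseTensor, reshaped to [3, -1].
	indices := op.Const(s, [][]int64{{0, 0}, {1, 2}})
	inputShape := op.Const(s, []int64{2, 3})
	newShape := op.Const(s, []int64{3, -1})

	outIndices, outShape := op.SparseReshape(s, indices, inputShape, newShape)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()
	out, err := sess.Run(nil, []tf.Output{outIndices, outShape}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // [[0 0] [2 1]]
	fmt.Println(out[1].Value()) // [3 2]
}
```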
func SparseSegmentMean ¶
func SparseSegmentMean(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, optional ...SparseSegmentMeanAttr) (output tf.Output)
Computes the mean along sparse segments of a tensor.
See `tf.sparse.segment_sum` for usage examples.
Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first dimension, selecting a subset of dimension 0, specified by `indices`.
Arguments:
indices: A 1-D tensor. Has same rank as `segment_ids`. segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
Returns Has same shape as data, except for dimension 0 which has size `k`, the number of segments.
func SparseSegmentMeanGrad ¶
func SparseSegmentMeanGrad(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, output_dim0 tf.Output) (output tf.Output)
Computes gradients for SparseSegmentMean.
Returns tensor "output" with same shape as grad, except for dimension 0 whose value is output_dim0.
Arguments:
grad: gradient propagated to the SparseSegmentMean op. indices: indices passed to the corresponding SparseSegmentMean op. segment_ids: segment_ids passed to the corresponding SparseSegmentMean op. output_dim0: dimension 0 of "data" passed to SparseSegmentMean op.
func SparseSegmentMeanGradV2 ¶ added in v0.6.0
func SparseSegmentMeanGradV2(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, dense_output_dim0 tf.Output) (output tf.Output, sorted_unique_indices tf.Output)
Computes gradients for SparseSegmentMean.
Returns tensor "output" with same shape as grad, except for dimension 0 whose value is the number of unique indexes in "indices". Also returns vector "sorted_unique_indices" containing the corresponding indexes from "indices".
Arguments:
grad: gradient propagated to the SparseSegmentMean op. indices: indices passed to the corresponding SparseSegmentMean op. segment_ids: segment_ids passed to the corresponding SparseSegmentMean op. dense_output_dim0: dimension 0 of "data" passed to SparseSegmentMean op.
func SparseSegmentMeanWithNumSegments ¶
func SparseSegmentMeanWithNumSegments(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, num_segments tf.Output, optional ...SparseSegmentMeanWithNumSegmentsAttr) (output tf.Output)
Computes the mean along sparse segments of a tensor.
Like `SparseSegmentMean`, but allows missing ids in `segment_ids`. If an id is missing, the `output` tensor at that position will be zeroed.
Read [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) for an explanation of segments.
Arguments:
indices: A 1-D tensor. Has same rank as `segment_ids`. segment_ids: A 1-D tensor. Values should be sorted and can be repeated. num_segments: Should equal the number of distinct segment IDs.
Returns Has same shape as data, except for dimension 0 which has size `num_segments`.
func SparseSegmentSqrtN ¶
func SparseSegmentSqrtN(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, optional ...SparseSegmentSqrtNAttr) (output tf.Output)
Computes the sum along sparse segments of a tensor divided by the sqrt of N.
N is the size of the segment being reduced.
See `tf.sparse.segment_sum` for usage examples.
Arguments:
indices: A 1-D tensor. Has same rank as `segment_ids`. segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
Returns Has same shape as data, except for dimension 0 which has size `k`, the number of segments.
func SparseSegmentSqrtNGrad ¶
func SparseSegmentSqrtNGrad(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, output_dim0 tf.Output) (output tf.Output)
Computes gradients for SparseSegmentSqrtN.
Returns tensor "output" with same shape as grad, except for dimension 0 whose value is output_dim0.
Arguments:
grad: gradient propagated to the SparseSegmentSqrtN op. indices: indices passed to the corresponding SparseSegmentSqrtN op. segment_ids: segment_ids passed to the corresponding SparseSegmentSqrtN op. output_dim0: dimension 0 of "data" passed to SparseSegmentSqrtN op.
func SparseSegmentSqrtNGradV2 ¶ added in v0.6.0
func SparseSegmentSqrtNGradV2(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, dense_output_dim0 tf.Output) (output tf.Output, sorted_unique_indices tf.Output)
Computes gradients for SparseSegmentSqrtN.
Returns tensor "output" with same shape as grad, except for dimension 0 whose value is the number of unique indexes in "indices". Also returns vector "sorted_unique_indices" containing the corresponding indexes from "indices".
Arguments:
grad: gradient propagated to the SparseSegmentSqrtN op. indices: indices passed to the corresponding SparseSegmentSqrtN op. segment_ids: segment_ids passed to the corresponding SparseSegmentSqrtN op. dense_output_dim0: dimension 0 of "data" passed to SparseSegmentSqrtN op.
func SparseSegmentSqrtNWithNumSegments ¶
func SparseSegmentSqrtNWithNumSegments(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, num_segments tf.Output, optional ...SparseSegmentSqrtNWithNumSegmentsAttr) (output tf.Output)
Computes the sum along sparse segments of a tensor divided by the sqrt of N.
N is the size of the segment being reduced.
Like `SparseSegmentSqrtN`, but allows missing ids in `segment_ids`. If an id is missing, the `output` tensor at that position will be zeroed.
Read [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) for an explanation of segments.
Arguments:
indices: A 1-D tensor. Has same rank as `segment_ids`. segment_ids: A 1-D tensor. Values should be sorted and can be repeated. num_segments: Should equal the number of distinct segment IDs.
Returns Has same shape as data, except for dimension 0 which has size `k`, the number of segments.
func SparseSegmentSum ¶
func SparseSegmentSum(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, optional ...SparseSegmentSumAttr) (output tf.Output)
Computes the sum along sparse segments of a tensor.
Read [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) for an explanation of segments.
Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first dimension, selecting a subset of dimension 0, specified by `indices`.
For example:
```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])

# Select two rows, one segment.
tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
# => [[0 0 0 0]]

# Select two rows, two segments.
tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
# => [[ 1  2  3  4]
#     [-1 -2 -3 -4]]

# Select all rows, two segments.
tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
# => [[0 0 0 0]
#     [5 6 7 8]]

# Which is equivalent to:
tf.segment_sum(c, tf.constant([0, 0, 1]))
```
Arguments:
indices: A 1-D tensor. Has same rank as `segment_ids`. segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
Returns Has same shape as data, except for dimension 0 which has size `k`, the number of segments.
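The first case of the Python example above, sketched with the Go bindings (import paths and example values are assumptions for illustration):

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	data := op.Const(s, [][]int32{{1, 2, 3, 4}, {-1, -2, -3, -4}, {5, 6, 7, 8}})
	// Select rows 0 and 1 and sum them into a single segment.
	indices := op.Const(s, []int32{0, 1})
	segmentIDs := op.Const(s, []int32{0, 0})

	sum := op.SparseSegmentSum(s, data, indices, segmentIDs)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()
	out, err := sess.Run(nil, []tf.Output{sum}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // [[0 0 0 0]]
}
```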
func SparseSegmentSumGrad ¶
func SparseSegmentSumGrad(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, output_dim0 tf.Output) (output tf.Output)
Computes gradients for SparseSegmentSum.
Returns tensor "output" with same shape as grad, except for dimension 0 whose value is output_dim0.
Arguments:
grad: gradient propagated to the SparseSegmentSum op. indices: indices passed to the corresponding SparseSegmentSum op. segment_ids: segment_ids passed to the corresponding SparseSegmentSum op. output_dim0: dimension 0 of "data" passed to SparseSegmentSum op.
func SparseSegmentSumGradV2 ¶ added in v0.6.0
func SparseSegmentSumGradV2(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, dense_output_dim0 tf.Output) (output tf.Output, sorted_unique_indices tf.Output)
Computes gradients for SparseSegmentSum.
Returns tensor "output" with same shape as grad, except for dimension 0 whose value is the number of unique indexes in "indices". Also returns vector "sorted_unique_indices" containing the corresponding indexes from "indices".
Arguments:
grad: gradient propagated to the SparseSegmentSum op. indices: indices passed to the corresponding SparseSegmentSum op. segment_ids: segment_ids passed to the corresponding SparseSegmentSum op. dense_output_dim0: dimension 0 of "data" passed to SparseSegmentSum op.
func SparseSegmentSumWithNumSegments ¶
func SparseSegmentSumWithNumSegments(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, num_segments tf.Output, optional ...SparseSegmentSumWithNumSegmentsAttr) (output tf.Output)
Computes the sum along sparse segments of a tensor.
Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. If an id is missing, the `output` tensor at that position will be zeroed.
Read [the section on segmentation](https://tensorflow.org/api_docs/python/tf/sparse#Segmentation) for an explanation of segments.
For example:
```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])

tf.sparse_segment_sum_with_num_segments(
    c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3)
# => [[0 0 0 0]
#     [0 0 0 0]
#     [0 0 0 0]]

tf.sparse_segment_sum_with_num_segments(
    c, tf.constant([0, 1]), tf.constant([0, 2]), num_segments=4)
# => [[ 1  2  3  4]
#     [ 0  0  0  0]
#     [-1 -2 -3 -4]
#     [ 0  0  0  0]]
```
Arguments:
indices: A 1-D tensor. Has same rank as `segment_ids`. segment_ids: A 1-D tensor. Values should be sorted and can be repeated. num_segments: Should equal the number of distinct segment IDs.
Returns Has same shape as data, except for dimension 0 which has size `num_segments`.
func SparseSlice ¶
func SparseSlice(scope *Scope, indices tf.Output, values tf.Output, shape tf.Output, start tf.Output, size tf.Output) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output)
Slice a `SparseTensor` based on the `start` and `size`.
For example, if the input is
input_tensor = shape = [2, 7]
[    a   d e  ]
[b c          ]
Graphically the output tensors are:
sparse_slice([0, 0], [2, 4]) = shape = [2, 4]
[    a  ]
[b c    ]

sparse_slice([0, 4], [2, 3]) = shape = [2, 3]
[ d e  ]
[      ]
Arguments:
indices: 2-D tensor representing the indices of the sparse tensor. values: 1-D tensor representing the values of the sparse tensor. shape: 1-D tensor representing the shape of the sparse tensor. start: 1-D tensor representing the start of the slice. size: 1-D tensor representing the size of the slice.
Returns:
output_indices: A list of 1-D tensors representing the indices of the output sparse tensors.
output_values: A list of 1-D tensors representing the values of the output sparse tensors.
output_shape: A list of 1-D tensors representing the shape of the output sparse tensors.
func SparseSliceGrad ¶
func SparseSliceGrad(scope *Scope, backprop_val_grad tf.Output, input_indices tf.Output, input_start tf.Output, output_indices tf.Output) (val_grad tf.Output)
The gradient operator for the SparseSlice op.
This op takes in the upstream gradient w.r.t. non-empty values of the sliced `SparseTensor`, and outputs the gradients w.r.t. the non-empty values of input `SparseTensor`.
Arguments:
backprop_val_grad: 1-D. The gradient with respect to
the non-empty values of the sliced `SparseTensor`.
input_indices: 2-D. The `indices` of the input `SparseTensor`. input_start: 1-D. tensor represents the start of the slice. output_indices: 2-D. The `indices` of the sliced `SparseTensor`.
Returns 1-D. The gradient with respect to the non-empty values of input `SparseTensor`.
func SparseSoftmax ¶
func SparseSoftmax(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output) (output tf.Output)
Applies softmax to a batched N-D `SparseTensor`.
The inputs represent an N-D SparseTensor with logical shape `[..., B, C]` (where `N >= 2`), and with indices sorted in the canonical lexicographic order.
This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost logical submatrix with shape `[B, C]`, but with the catch that *the implicitly zero elements do not participate*. Specifically, the algorithm is equivalent to the following:
(1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix with shape `[B, C]`, along the size-C dimension;
(2) Masks out the original implicitly-zero locations;
(3) Renormalizes the remaining elements.
Hence, the `SparseTensor` result has exactly the same non-zero indices and shape.
Arguments:
sp_indices: 2-D. `NNZ x R` matrix with the indices of non-empty values in a
SparseTensor, in canonical ordering.
sp_values: 1-D. `NNZ` non-empty values corresponding to `sp_indices`. sp_shape: 1-D. Shape of the input SparseTensor.
Returns 1-D. The `NNZ` values for the result `SparseTensor`.
func SparseSoftmaxCrossEntropyWithLogits ¶
func SparseSoftmaxCrossEntropyWithLogits(scope *Scope, features tf.Output, labels tf.Output) (loss tf.Output, backprop tf.Output)
Computes softmax cross entropy cost and gradients to backpropagate.
Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept a matrix of label probabilities, but rather a single label per row of features. This label is considered to have probability 1.0 for the given row.
Inputs are the logits, not probabilities.
Arguments:
features: batch_size x num_classes matrix labels: batch_size vector with values in [0, num_classes).
This is the label for the given minibatch entry.
Returns:
loss: Per example loss (batch_size vector). backprop: backpropagated gradients (batch_size x num_classes matrix).
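A minimal Go sketch (assumed imports and example logits/labels, for illustration only) fetching the per-example loss:

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// Two examples, three classes each; labels pick class 2 and class 0.
	features := op.Const(s, [][]float32{{1, 2, 3}, {1, 2, 3}})
	labels := op.Const(s, []int64{2, 0})

	loss, _ := op.SparseSoftmaxCrossEntropyWithLogits(s, features, labels)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()
	out, err := sess.Run(nil, []tf.Output{loss}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // approximately [0.4076 2.4076]
}
```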
func SparseSparseMaximum ¶
func SparseSparseMaximum(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b_indices tf.Output, b_values tf.Output, b_shape tf.Output) (output_indices tf.Output, output_values tf.Output)
Returns the element-wise max of two SparseTensors.
Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
Arguments:
a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
SparseTensor, in the canonical lexicographic ordering.
a_values: 1-D. `N` non-empty values corresponding to `a_indices`. a_shape: 1-D. Shape of the input SparseTensor. b_indices: counterpart to `a_indices` for the other operand. b_values: counterpart to `a_values` for the other operand; must be of the same dtype. b_shape: counterpart to `a_shape` for the other operand; the two shapes must be equal.
Returns:
output_indices: 2-D. The indices of the output SparseTensor. output_values: 1-D. The values of the output SparseTensor.
func SparseSparseMinimum ¶
func SparseSparseMinimum(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b_indices tf.Output, b_values tf.Output, b_shape tf.Output) (output_indices tf.Output, output_values tf.Output)
Returns the element-wise min of two SparseTensors.
Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
Arguments:
a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
SparseTensor, in the canonical lexicographic ordering.
a_values: 1-D. `N` non-empty values corresponding to `a_indices`. a_shape: 1-D. Shape of the input SparseTensor. b_indices: counterpart to `a_indices` for the other operand. b_values: counterpart to `a_values` for the other operand; must be of the same dtype. b_shape: counterpart to `a_shape` for the other operand; the two shapes must be equal.
Returns:
output_indices: 2-D. The indices of the output SparseTensor. output_values: 1-D. The values of the output SparseTensor.
func SparseSplit ¶
func SparseSplit(scope *Scope, split_dim tf.Output, indices tf.Output, values tf.Output, shape tf.Output, num_split int64) (output_indices []tf.Output, output_values []tf.Output, output_shape []tf.Output)
Split a `SparseTensor` into `num_split` tensors along one dimension.
If `shape[split_dim]` is not an integer multiple of `num_split`, slices `[0 : shape[split_dim] % num_split]` get one extra dimension. For example, if `split_dim = 1` and `num_split = 2` and the input is
input_tensor = shape = [2, 7]
[    a   d e  ]
[b c          ]
Graphically the output tensors are:
output_tensor[0] = shape = [2, 4]
[    a  ]
[b c    ]

output_tensor[1] = shape = [2, 3]
[ d e  ]
[      ]
Arguments:
split_dim: 0-D. The dimension along which to split. Must be in the range
`[0, rank(shape))`.
indices: 2-D tensor representing the indices of the sparse tensor. values: 1-D tensor representing the values of the sparse tensor. shape: 1-D tensor representing the shape of the sparse tensor. num_split: The number of ways to split.
Returns:
output_indices: A list of 1-D tensors representing the indices of the output sparse tensors.
output_values: A list of 1-D tensors representing the values of the output sparse tensors.
output_shape: A list of 1-D tensors representing the shape of the output sparse tensors.
func SparseTensorDenseAdd ¶
func SparseTensorDenseAdd(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b tf.Output) (output tf.Output)
Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`.
This Op does not require `a_indices` be sorted in standard lexicographic order.
Arguments:
a_indices: 2-D. The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`. a_values: 1-D. The `values` of the `SparseTensor`, with shape `[nnz]`. a_shape: 1-D. The `shape` of the `SparseTensor`, with shape `[ndims]`. b: `ndims`-D Tensor. With shape `a_shape`.
func SparseTensorDenseMatMul ¶
func SparseTensorDenseMatMul(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b tf.Output, optional ...SparseTensorDenseMatMulAttr) (product tf.Output)
Multiply SparseTensor (of rank 2) "A" by dense matrix "B".
No validity checking is performed on the indices of A. However, the following input format is recommended for optimal behavior:
if adjoint_a == false:
A should be sorted in lexicographically increasing order. Use SparseReorder if you're not sure.
if adjoint_a == true:
A should be sorted in order of increasing dimension 1 (i.e., "column major" order instead of "row major" order).
Arguments:
a_indices: 2-D. The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix. a_values: 1-D. The `values` of the `SparseTensor`, size `[nnz]` Vector. a_shape: 1-D. The `shape` of the `SparseTensor`, size `[2]` Vector. b: 2-D. A dense Matrix.
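A hedged Go sketch (assumed imports and example values) multiplying a 2x3 SparseTensor by a dense 3x1 matrix:

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// A: a 2x3 SparseTensor with 1 at [0, 0] and 2 at [1, 2], already in
	// lexicographic order.
	aIndices := op.Const(s, [][]int64{{0, 0}, {1, 2}})
	aValues := op.Const(s, []float32{1, 2})
	aShape := op.Const(s, []int64{2, 3})
	// B: a dense 3x1 matrix.
	b := op.Const(s, [][]float32{{1}, {2}, {3}})

	product := op.SparseTensorDenseMatMul(s, aIndices, aValues, aShape, b)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()
	out, err := sess.Run(nil, []tf.Output{product}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // [[1] [6]]
}
```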
func SparseTensorSliceDataset ¶
func SparseTensorSliceDataset(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output) (handle tf.Output)
Creates a dataset that splits a SparseTensor into elements row-wise.
func SparseTensorToCSRSparseMatrix ¶
func SparseTensorToCSRSparseMatrix(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output) (sparse_matrix tf.Output)
Converts a SparseTensor to a (possibly batched) CSRSparseMatrix.
Arguments:
indices: SparseTensor indices. values: SparseTensor values. dense_shape: SparseTensor dense shape.
Returns A (possibly batched) CSRSparseMatrix.
func SparseToDense ¶
func SparseToDense(scope *Scope, sparse_indices tf.Output, output_shape tf.Output, sparse_values tf.Output, default_value tf.Output, optional ...SparseToDenseAttr) (dense tf.Output)
Converts a sparse representation into a dense tensor.
Builds an array `dense` with shape `output_shape` such that
```
# If sparse_indices is scalar
dense[i] = (i == sparse_indices ? sparse_values : default_value)

# If sparse_indices is a vector, then for each i
dense[sparse_indices[i]] = sparse_values[i]

# If sparse_indices is an n by d matrix, then for each i in [0, n)
dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
```
All other values in `dense` are set to `default_value`. If `sparse_values` is a scalar, all sparse indices are set to this single value.
Indices should be sorted in lexicographic order, and indices must not contain any repeats. If `validate_indices` is true, these properties are checked during execution.
Arguments:
sparse_indices: 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete
index where `sparse_values[i]` will be placed.
output_shape: 1-D. Shape of the dense output tensor. sparse_values: 1-D. Values corresponding to each row of `sparse_indices`,
or a scalar value to be used for all sparse indices.
default_value: Scalar value to set for indices not specified in
`sparse_indices`.
Returns Dense output tensor of shape `output_shape`.
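A minimal Go sketch (import paths and values are illustrative assumptions) densifying a 2x3 sparse representation:

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// Place 5 at [0, 0] and 7 at [1, 2] in a 2x3 dense output; everything
	// else is filled with the default value 0.
	sparseIndices := op.Const(s, [][]int64{{0, 0}, {1, 2}})
	outputShape := op.Const(s, []int64{2, 3})
	sparseValues := op.Const(s, []int32{5, 7})
	defaultValue := op.Const(s, int32(0))

	dense := op.SparseToDense(s, sparseIndices, outputShape, sparseValues, defaultValue)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()
	out, err := sess.Run(nil, []tf.Output{dense}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // [[5 0 0] [0 0 7]]
}
```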
func SparseToSparseSetOperation ¶
func SparseToSparseSetOperation(scope *Scope, set1_indices tf.Output, set1_values tf.Output, set1_shape tf.Output, set2_indices tf.Output, set2_values tf.Output, set2_shape tf.Output, set_operation string, optional ...SparseToSparseSetOperationAttr) (result_indices tf.Output, result_values tf.Output, result_shape tf.Output)
Applies set operation along last dimension of 2 `SparseTensor` inputs.
See SetOperationOp::SetOperationFromContext for values of `set_operation`.
If `validate_indices` is `True`, `SparseToSparseSetOperation` validates the order and range of `set1` and `set2` indices.
Input `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`, and `set1_shape`. For `set1` ranked `n`, 1st `n-1` dimensions must be the same as `set2`. Dimension `n` contains values in a set, duplicates are allowed but ignored.
Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`, and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same as `set1`. Dimension `n` contains values in a set, duplicates are allowed but ignored.
If `validate_indices` is `True`, this op validates the order and range of `set1` and `set2` indices.
Output `result` is a `SparseTensor` represented by `result_indices`, `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` dimension contains the result of `set_operation` applied to the corresponding `[0...n-1]` dimension of `set`.
Arguments:
set1_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
order.
set1_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
order.
set1_shape: 1D `Tensor`, shape of a `SparseTensor`. `set1_shape[0...n-1]` must
be the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the max set size across `0...n-1` dimensions.
set2_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
order.
set2_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
order.
set2_shape: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must
be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the max set size across `0...n-1` dimensions.
Returns:
result_indices: 2D indices of a `SparseTensor`. result_values: 1D values of a `SparseTensor`. result_shape: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]` is the max result set size across all `0...n-1` dimensions.
func Split ¶
Splits a tensor into `num_split` tensors along one dimension.
Arguments:
axis: 0-D. The dimension along which to split. Must be in the range
`[-rank(value), rank(value))`.
value: The tensor to split. num_split: The number of ways to split. Must evenly divide
`value.shape[split_dim]`.
Returns They are identically shaped tensors, whose shape matches that of `value` except along `axis`, where their sizes are `value.shape[split_dim] / num_split`.
func SplitDedupData ¶ added in v0.4.0
func SplitDedupData(scope *Scope, input tf.Output, integer_type tf.DataType, float_type tf.DataType, tuple_mask string, optional ...SplitDedupDataAttr) (integer_tensor tf.Output, float_tensor tf.Output)
An op that splits an input deduplication data XLA tuple into integer and floating point tensors.
Deduplication data is an XLA tuple, which consists of integer and floating point values. This op is to split these values into two groups for two types, and construct each group as one tensor to return.
Arguments:
input: An XLA tuple including integer and float elements as deduplication data tuple. integer_type: integer_tensor type. Allowed types: int32, int64, uint32, uint64. float_type: float_tensor type. Allowed types: half, bfloat16, float. tuple_mask: A serialized TensorProto string of output tuple mask. This mask is a 2-D tensor,
with the first column giving the tuple element type and the second column giving the span of that type. For example, for an output tuple of (1, 2, 0.1, 3), the mask is [[0, 2], [1, 1], [0, 1]]. Only two element types are expected: integer (0) and float (1).
Returns:
integer_tensor: A 1-D integer tensor, includes integer elements of deduplication data tuple. float_tensor: A 1-D float tensor, includes float elements of deduplication data tuple.
func SplitV ¶
func SplitV(scope *Scope, value tf.Output, size_splits tf.Output, axis tf.Output, num_split int64) (output []tf.Output)
Splits a tensor into `num_split` tensors along one dimension.
Arguments:
value: The tensor to split. size_splits: list containing the sizes of each output tensor along the split
dimension. Must sum to the dimension of value along split_dim. Can contain one -1 indicating that dimension is to be inferred.
axis: 0-D. The dimension along which to split. Must be in the range
`[-rank(value), rank(value))`.
Returns Tensors whose shape matches that of `value` except along `axis`, where their sizes are `size_splits[i]`.
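A small Go sketch (assumed imports and example values) splitting a length-7 vector into pieces of sizes 3 and 4:

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	value := op.Const(s, []int32{1, 2, 3, 4, 5, 6, 7})
	sizeSplits := op.Const(s, []int64{3, 4}) // sums to 7
	axis := op.Const(s, int32(0))

	parts := op.SplitV(s, value, sizeSplits, axis, 2)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()
	out, err := sess.Run(nil, []tf.Output{parts[0], parts[1]}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // [1 2 3]
	fmt.Println(out[1].Value()) // [4 5 6 7]
}
```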
func SqlDataset ¶
func SqlDataset(scope *Scope, driver_name tf.Output, data_source_name tf.Output, query tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
Creates a dataset that executes a SQL query and emits rows of the result set.
Arguments:
driver_name: The database type. Currently, the only supported type is 'sqlite'. data_source_name: A connection string to connect to the database. query: A SQL query to execute.
func SqrtGrad ¶
Computes the gradient for the sqrt of `x` wrt its input.
Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy` is the corresponding input gradient.
func SquaredDifference ¶
Returns conj(x - y)(x - y) element-wise.
*NOTE*: `SquaredDifference` supports broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func Squeeze ¶
Removes dimensions of size 1 from the shape of a tensor.
Given a tensor `input`, this operation returns a tensor of the same type with all dimensions of size 1 removed. If you don't want to remove all size 1 dimensions, you can remove specific size 1 dimensions by specifying `axis`.
For example:
```
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
shape(squeeze(t)) ==> [2, 3]
```
Or, to remove specific size 1 dimensions:
```
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
```
Arguments:
input: The `input` to squeeze.
Returns Contains the same data as `input`, but has one or more dimensions of size 1 removed.
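A minimal Go sketch (assumed imports and a made-up input) squeezing a [1, 2, 1] tensor down to shape [2]:

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// A [1, 2, 1] tensor; squeezing removes both size-1 dimensions.
	input := op.Const(s, [][][]float32{{{1}, {2}}})
	squeezed := op.Squeeze(s, input)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()
	out, err := sess.Run(nil, []tf.Output{squeezed}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // [1 2]
}
```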
func StackCloseV2 ¶
Delete the stack from its resource container.
Arguments:
handle: The handle to a stack.
Returns the created operation.
func StackPopV2 ¶
Pop the element at the top of the stack.
Arguments:
handle: The handle to a stack. elem_type: The type of the elem that is popped.
Returns The tensor that is popped from the top of the stack.
func StackPushV2 ¶
func StackPushV2(scope *Scope, handle tf.Output, elem tf.Output, optional ...StackPushV2Attr) (output tf.Output)
Push an element onto the stack.
Arguments:
handle: The handle to a stack. elem: The tensor to be pushed onto the stack.
Returns The same tensor as the input 'elem'.
func StackV2 ¶
func StackV2(scope *Scope, max_size tf.Output, elem_type tf.DataType, optional ...StackV2Attr) (handle tf.Output)
A stack that produces elements in first-in last-out order.
Arguments:
max_size: The maximum size of the stack if non-negative. If negative, the stack
size is unlimited.
elem_type: The type of the elements on the stack.
Returns The handle to the stack.
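A hedged sketch combining StackV2, StackPushV2 and StackPopV2 from the Go bindings; the import paths, the example values, and the use of the scope's WithControlDependencies helper (to order the pop after the push) are assumptions for illustration:

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// A float stack holding at most 10 elements.
	handle := op.StackV2(s, op.Const(s, int32(10)), tf.Float)
	pushed := op.StackPushV2(s, handle, op.Const(s, float32(42)))
	// Force the pop to run after the push via a control dependency.
	popScope := s.WithControlDependencies(pushed.Op)
	popped := op.StackPopV2(popScope, handle, tf.Float)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()
	out, err := sess.Run(nil, []tf.Output{popped}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // 42
}
```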
func Stage ¶
Stage values similar to a lightweight Enqueue.
The basic functionality of this Op is similar to a queue with many fewer capabilities and options. This Op is optimized for performance.
Arguments:
values: a list of tensors
dtypes: A list of data types that inserted values should adhere to.
Returns the created operation.
func StageClear ¶
Op removes all elements in the underlying container.
Returns the created operation.
func StagePeek ¶
func StagePeek(scope *Scope, index tf.Output, dtypes []tf.DataType, optional ...StagePeekAttr) (values []tf.Output)
Op peeks at the values at the specified index. If the underlying container does not contain sufficient elements, this op will block until it does. This Op is optimized for performance.
func StatefulStandardNormal ¶
func StatefulStandardNormal(scope *Scope, resource tf.Output, shape tf.Output, optional ...StatefulStandardNormalAttr) (output tf.Output)
Outputs random values from a normal distribution. This op is deprecated in favor of op 'StatefulStandardNormalV2'
DEPRECATED at GraphDef version 29: Use StatefulStandardNormalV2 instead
The generated values will have mean 0 and standard deviation 1.
Arguments:
resource: The handle of the resource variable that stores the state of the RNG. shape: The shape of the output tensor.
Returns A tensor of the specified shape filled with random normal values.
func StatefulStandardNormalV2 ¶
func StatefulStandardNormalV2(scope *Scope, resource tf.Output, algorithm tf.Output, shape tf.Output, optional ...StatefulStandardNormalV2Attr) (output tf.Output)
Outputs random values from a normal distribution.
The generated values will have mean 0 and standard deviation 1.
Arguments:
resource: The handle of the resource variable that stores the state of the RNG. algorithm: The RNG algorithm. shape: The shape of the output tensor.
Returns A tensor of the specified shape filled with random normal values.
func StatefulTruncatedNormal ¶
func StatefulTruncatedNormal(scope *Scope, resource tf.Output, algorithm tf.Output, shape tf.Output, optional ...StatefulTruncatedNormalAttr) (output tf.Output)
Outputs random values from a truncated normal distribution.
The generated values follow a normal distribution with mean 0 and standard deviation 1, except that values whose magnitude is more than 2 standard deviations from the mean are dropped and re-picked.
Arguments:
resource: The handle of the resource variable that stores the state of the RNG. algorithm: The RNG algorithm. shape: The shape of the output tensor.
Returns Random values with specified shape.
func StatefulUniform ¶
func StatefulUniform(scope *Scope, resource tf.Output, algorithm tf.Output, shape tf.Output, optional ...StatefulUniformAttr) (output tf.Output)
Outputs random values from a uniform distribution.
The generated values follow a uniform distribution in the range `[0, 1)`. The lower bound 0 is included in the range, while the upper bound 1 is excluded.
Arguments:
resource: The handle of the resource variable that stores the state of the RNG. algorithm: The RNG algorithm. shape: The shape of the output tensor.
Returns Random values with specified shape.
func StatefulUniformFullInt ¶
func StatefulUniformFullInt(scope *Scope, resource tf.Output, algorithm tf.Output, shape tf.Output, optional ...StatefulUniformFullIntAttr) (output tf.Output)
Outputs random integers from a uniform distribution.
The generated values are uniform integers covering the whole range of `dtype`.
Arguments:
resource: The handle of the resource variable that stores the state of the RNG. algorithm: The RNG algorithm. shape: The shape of the output tensor.
Returns Random values with specified shape.
func StatefulUniformInt ¶
func StatefulUniformInt(scope *Scope, resource tf.Output, algorithm tf.Output, shape tf.Output, minval tf.Output, maxval tf.Output) (output tf.Output)
Outputs random integers from a uniform distribution.
The generated values are uniform integers in the range `[minval, maxval)`. The lower bound `minval` is included in the range, while the upper bound `maxval` is excluded.
The random integers are slightly biased unless `maxval - minval` is an exact power of two. The bias is small for values of `maxval - minval` significantly smaller than the range of the output (either `2^32` or `2^64`).
Arguments:
resource: The handle of the resource variable that stores the state of the RNG. algorithm: The RNG algorithm. shape: The shape of the output tensor. minval: Minimum value (inclusive, scalar). maxval: Maximum value (exclusive, scalar).
Returns Random values with specified shape.
func StatelessMultinomial ¶
func StatelessMultinomial(scope *Scope, logits tf.Output, num_samples tf.Output, seed tf.Output, optional ...StatelessMultinomialAttr) (output tf.Output)
Draws samples from a multinomial distribution.
Arguments:
logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]`
represents the unnormalized log probabilities for all classes.
num_samples: 0-D. Number of independent samples to draw for each row slice. seed: 2 seeds (shape [2]).
Returns 2-D Tensor with shape `[batch_size, num_samples]`. Each slice `[i, :]` contains the drawn class labels with range `[0, num_classes)`.
func StatelessRandomBinomial ¶
func StatelessRandomBinomial(scope *Scope, shape tf.Output, seed tf.Output, counts tf.Output, probs tf.Output, optional ...StatelessRandomBinomialAttr) (output tf.Output)
Outputs deterministic pseudorandom numbers from a binomial distribution.
Outputs random values from a binomial distribution.
The outputs are a deterministic function of `shape`, `seed`, `counts`, and `probs`.
Arguments:
shape: The shape of the output tensor. seed: 2 seeds (shape [2]). counts: The counts of the binomial distribution. Must be broadcastable with `probs`,
and broadcastable with the rightmost dimensions of `shape`.
probs: The probability of success for the binomial distribution. Must be broadcastable
with `counts` and broadcastable with the rightmost dimensions of `shape`.
Returns Random values with specified shape.
func StatelessRandomGammaV2 ¶
func StatelessRandomGammaV2(scope *Scope, shape tf.Output, seed tf.Output, alpha tf.Output) (output tf.Output)
Outputs deterministic pseudorandom numbers from a gamma distribution.
Outputs random values from a gamma distribution.
The outputs are a deterministic function of `shape`, `seed`, and `alpha`.
Arguments:
shape: The shape of the output tensor. seed: 2 seeds (shape [2]). alpha: The concentration of the gamma distribution. Shape must match the rightmost
dimensions of `shape`.
Returns Random values with specified shape.
func StatelessRandomGammaV3 ¶ added in v0.4.0
func StatelessRandomGammaV3(scope *Scope, shape tf.Output, key tf.Output, counter tf.Output, alg tf.Output, alpha tf.Output) (output tf.Output)
Outputs deterministic pseudorandom numbers from a gamma distribution.
Outputs random values from a gamma distribution.
The outputs are a deterministic function of the inputs.
Arguments:
shape: The shape of the output tensor. key: Key for the counter-based RNG algorithm (shape uint64[1]). counter: Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. alg: The RNG algorithm (shape int32[]). alpha: The concentration of the gamma distribution. Shape must match the rightmost
dimensions of `shape`.
Returns Random values with specified shape.
func StatelessRandomGetAlg ¶
Picks the best counter-based RNG algorithm based on device.
This op picks the best counter-based RNG algorithm based on device.
Returns The RNG algorithm (shape int32[]).
func StatelessRandomGetKeyCounter ¶
Scrambles seed into key and counter, using the best algorithm based on device.
This op scrambles a shape-[2] seed into a key and a counter, both needed by counter-based RNG algorithms. The scrambling uses the best algorithm based on device. The scrambling is opaque but approximately satisfies the property that different seeds result in different key/counter pairs (which will in turn result in different random numbers).
Arguments:
seed: 2 seeds (shape [2]).
Returns:
key: Key for the counter-based RNG algorithm (shape uint64[1]). counter: Counter for the counter-based RNG algorithm. Since counter size is algorithm-dependent, this output will be right-padded with zeros to reach shape uint64[2] (the current maximal counter size among algorithms).
func StatelessRandomGetKeyCounterAlg ¶
func StatelessRandomGetKeyCounterAlg(scope *Scope, seed tf.Output) (key tf.Output, counter tf.Output, alg tf.Output)
Picks the best algorithm based on device, and scrambles seed into key and counter.
This op picks the best counter-based RNG algorithm based on device, and scrambles a shape-[2] seed into a key and a counter, both needed by the counter-based algorithm. The scrambling is opaque but approximately satisfies the property that different seeds result in different key/counter pairs (which will in turn result in different random numbers).
Arguments:
seed: 2 seeds (shape [2]).
Returns:
key: Key for the counter-based RNG algorithm (shape uint64[1]). counter: Counter for the counter-based RNG algorithm. Since counter size is algorithm-dependent, this output will be right-padded with zeros to reach shape uint64[2] (the current maximal counter size among algorithms). alg: The RNG algorithm (shape int32[]).
func StatelessRandomNormal ¶
func StatelessRandomNormal(scope *Scope, shape tf.Output, seed tf.Output, optional ...StatelessRandomNormalAttr) (output tf.Output)
Outputs deterministic pseudorandom values from a normal distribution.
The generated values will have mean 0 and standard deviation 1.
The outputs are a deterministic function of `shape` and `seed`.
Arguments:
shape: The shape of the output tensor. seed: 2 seeds (shape [2]).
Returns Random values with specified shape.
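As a rough illustration (not part of the generated op docs), a seed-based call might look like the following Go sketch, reusing the scope and Const helpers from the package example; the shape and seed values are arbitrary:

```
s := NewScope()
shape := Const(s, []int32{2, 3}) // output shape [2, 3]
seed := Const(s, []int64{7, 42}) // 2 seeds (shape [2])
normal := StatelessRandomNormal(s, shape, seed)
if s.Err() != nil {
	panic(s.Err())
}
_ = normal // feed into further ops in the graph
```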
func StatelessRandomNormalV2 ¶
func StatelessRandomNormalV2(scope *Scope, shape tf.Output, key tf.Output, counter tf.Output, alg tf.Output, optional ...StatelessRandomNormalV2Attr) (output tf.Output)
Outputs deterministic pseudorandom values from a normal distribution.
The generated values will have mean 0 and standard deviation 1.
The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
Arguments:
shape: The shape of the output tensor. key: Key for the counter-based RNG algorithm (shape uint64[1]). counter: Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. alg: The RNG algorithm (shape int32[]).
Returns Random values with specified shape.
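A hedged sketch of the key/counter workflow: scramble a seed with StatelessRandomGetKeyCounterAlg and feed the results into StatelessRandomNormalV2. The concrete shape and seed values below are illustrative only:

```
s := NewScope()
seed := Const(s, []int64{1, 2}) // 2 seeds (shape [2])
key, counter, alg := StatelessRandomGetKeyCounterAlg(s, seed)
shape := Const(s, []int32{4, 4})
normal := StatelessRandomNormalV2(s, shape, key, counter, alg)
if s.Err() != nil {
	panic(s.Err())
}
_ = normal
```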
func StatelessRandomPoisson ¶
func StatelessRandomPoisson(scope *Scope, shape tf.Output, seed tf.Output, lam tf.Output, dtype tf.DataType) (output tf.Output)
Outputs deterministic pseudorandom numbers from a Poisson distribution.
Outputs random values from a Poisson distribution.
The outputs are a deterministic function of `shape`, `seed`, and `lam`.
Arguments:
shape: The shape of the output tensor. seed: 2 seeds (shape [2]). lam: The rate of the Poisson distribution. Shape must match the rightmost dimensions
of `shape`.
dtype: The type of the output.
Returns Random values with specified shape.
func StatelessRandomUniform ¶
func StatelessRandomUniform(scope *Scope, shape tf.Output, seed tf.Output, optional ...StatelessRandomUniformAttr) (output tf.Output)
Outputs deterministic pseudorandom values from a uniform distribution.
The generated values follow a uniform distribution in the range `[0, 1)`. The lower bound 0 is included in the range, while the upper bound 1 is excluded.
The outputs are a deterministic function of `shape` and `seed`.
Arguments:
shape: The shape of the output tensor. seed: 2 seeds (shape [2]).
Returns Random values with specified shape.
func StatelessRandomUniformFullInt ¶
func StatelessRandomUniformFullInt(scope *Scope, shape tf.Output, seed tf.Output, optional ...StatelessRandomUniformFullIntAttr) (output tf.Output)
Outputs deterministic pseudorandom integers from a uniform distribution.
The generated values are uniform integers covering the whole range of `dtype`.
The outputs are a deterministic function of `shape` and `seed`.
Arguments:
shape: The shape of the output tensor. seed: 2 seeds (shape [2]).
Returns Random values with specified shape.
func StatelessRandomUniformFullIntV2 ¶
func StatelessRandomUniformFullIntV2(scope *Scope, shape tf.Output, key tf.Output, counter tf.Output, alg tf.Output, optional ...StatelessRandomUniformFullIntV2Attr) (output tf.Output)
Outputs deterministic pseudorandom integers from a uniform distribution.
The generated values are uniform integers covering the whole range of `dtype`.
The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
Arguments:
shape: The shape of the output tensor. key: Key for the counter-based RNG algorithm (shape uint64[1]). counter: Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. alg: The RNG algorithm (shape int32[]).
Returns Random values with specified shape.
func StatelessRandomUniformInt ¶
func StatelessRandomUniformInt(scope *Scope, shape tf.Output, seed tf.Output, minval tf.Output, maxval tf.Output) (output tf.Output)
Outputs deterministic pseudorandom integers from a uniform distribution.
The generated values follow a uniform distribution in the range `[minval, maxval)`.
The outputs are a deterministic function of `shape`, `seed`, `minval`, and `maxval`.
Arguments:
shape: The shape of the output tensor. seed: 2 seeds (shape [2]). minval: Minimum value (inclusive, scalar). maxval: Maximum value (exclusive, scalar).
Returns Random values with specified shape.
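For example, a minimal sketch (values chosen arbitrarily) drawing integers in `[0, 10)`:

```
s := NewScope()
shape := Const(s, []int32{5})
seed := Const(s, []int64{3, 9})
minval := Const(s, int64(0))  // inclusive lower bound
maxval := Const(s, int64(10)) // exclusive upper bound
ints := StatelessRandomUniformInt(s, shape, seed, minval, maxval)
if s.Err() != nil {
	panic(s.Err())
}
_ = ints
```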
func StatelessRandomUniformIntV2 ¶
func StatelessRandomUniformIntV2(scope *Scope, shape tf.Output, key tf.Output, counter tf.Output, alg tf.Output, minval tf.Output, maxval tf.Output) (output tf.Output)
Outputs deterministic pseudorandom integers from a uniform distribution.
The generated values follow a uniform distribution in the range `[minval, maxval)`.
The outputs are a deterministic function of `shape`, `key`, `counter`, `alg`, `minval` and `maxval`.
Arguments:
shape: The shape of the output tensor. key: Key for the counter-based RNG algorithm (shape uint64[1]). counter: Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. alg: The RNG algorithm (shape int32[]). minval: Minimum value (inclusive, scalar). maxval: Maximum value (exclusive, scalar).
Returns Random values with specified shape.
func StatelessRandomUniformV2 ¶
func StatelessRandomUniformV2(scope *Scope, shape tf.Output, key tf.Output, counter tf.Output, alg tf.Output, optional ...StatelessRandomUniformV2Attr) (output tf.Output)
Outputs deterministic pseudorandom values from a uniform distribution.
The generated values follow a uniform distribution in the range `[0, 1)`. The lower bound 0 is included in the range, while the upper bound 1 is excluded.
The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
Arguments:
shape: The shape of the output tensor. key: Key for the counter-based RNG algorithm (shape uint64[1]). counter: Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. alg: The RNG algorithm (shape int32[]).
Returns Random values with specified shape.
func StatelessSampleDistortedBoundingBox ¶
func StatelessSampleDistortedBoundingBox(scope *Scope, image_size tf.Output, bounding_boxes tf.Output, min_object_covered tf.Output, seed tf.Output, optional ...StatelessSampleDistortedBoundingBoxAttr) (begin tf.Output, size tf.Output, bboxes tf.Output)
Generate a randomly distorted bounding box for an image deterministically.
Bounding box annotations are often supplied in addition to ground-truth labels in image recognition or object localization tasks. A common technique for training such a system is to randomly distort an image while preserving its content, i.e. *data augmentation*. This Op, given the same `seed`, deterministically outputs a randomly distorted localization of an object, i.e. bounding box, given an `image_size`, `bounding_boxes` and a series of constraints.
The output of this Op is a single bounding box that may be used to crop the original image. The output is returned as 3 tensors: `begin`, `size` and `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize what the bounding box looks like.
Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and the height of the underlying image.
The output of this Op is guaranteed to be the same given the same `seed` and is independent of how many times the function is called, and independent of global seed settings (e.g. `tf.random.set_seed`).
Example usage:
>>> image = np.array([[[1], [2], [3]], [[4], [5], [6]], [[7], [8], [9]]])
>>> bbox = tf.constant(
...   [0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
>>> seed = (1, 2)
>>> # Generate a single distorted bounding box.
>>> bbox_begin, bbox_size, bbox_draw = (
...   tf.image.stateless_sample_distorted_bounding_box(
...     tf.shape(image), bounding_boxes=bbox, seed=seed))
>>> # Employ the bounding box to distort the image.
>>> tf.slice(image, bbox_begin, bbox_size)
<tf.Tensor: shape=(2, 2, 1), dtype=int64, numpy=
array([[[1],
        [2]],
       [[4],
        [5]]])>
>>> # Draw the bounding box in an image summary.
>>> colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
>>> tf.image.draw_bounding_boxes(
...   tf.expand_dims(tf.cast(image, tf.float32),0), bbox_draw, colors)
<tf.Tensor: shape=(1, 3, 3, 1), dtype=float32, numpy=
array([[[[1.],
         [1.],
         [3.]],
        [[1.],
         [1.],
         [6.]],
        [[7.],
         [8.],
         [9.]]]], dtype=float32)>
Note that if no bounding box information is available, setting `use_image_if_no_bounding_boxes = true` will assume there is a single implicit bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is false and no bounding boxes are supplied, an error is raised.
Arguments:
image_size: 1-D, containing `[height, width, channels]`. bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes
associated with the image.
min_object_covered: The cropped area of the image must contain at least this
fraction of any bounding box supplied. The value of this parameter should be non-negative. In the case of 0, the cropped area does not need to overlap any of the bounding boxes supplied.
seed: 1-D with shape `[2]`. The seed to the random number generator. Must have dtype
`int32` or `int64`. (When using XLA, only `int32` is allowed.)
Returns:
begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to
`tf.slice`.
size: 1-D, containing `[target_height, target_width, -1]`. Provide as input to
`tf.slice`.
bboxes: 3-D with shape `[1, 1, 4]` containing the distorted bounding box.
Provide as input to `tf.image.draw_bounding_boxes`.
func StatelessShuffle ¶ added in v0.2.0
func StatelessShuffle(scope *Scope, value tf.Output, key tf.Output, counter tf.Output, alg tf.Output) (output tf.Output)
Randomly and deterministically shuffles a tensor along its first dimension.
The tensor is shuffled along dimension 0, such that each `value[j]` is mapped to one and only one `output[i]`. For example, a mapping that might occur for a 3x2 tensor is:
```
[[1, 2],       [[5, 6],
 [3, 4],  ==>   [1, 2],
 [5, 6]]        [3, 4]]
```
The outputs are a deterministic function of `value`, `key`, `counter` and `alg`.
Arguments:
value: The tensor to be shuffled. key: Key for the counter-based RNG algorithm (shape uint64[1]). counter: Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. alg: The RNG algorithm (shape int32[]).
Returns A tensor of same shape and type as `value`, shuffled along its first dimension.
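A minimal sketch (illustrative values, and assuming the key/counter/alg come from StatelessRandomGetKeyCounterAlg as documented above):

```
s := NewScope()
value := Const(s, [][]int32{{1, 2}, {3, 4}, {5, 6}}) // shuffled along dimension 0
seed := Const(s, []int64{11, 13})
key, counter, alg := StatelessRandomGetKeyCounterAlg(s, seed)
shuffled := StatelessShuffle(s, value, key, counter, alg)
if s.Err() != nil {
	panic(s.Err())
}
_ = shuffled
```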
func StatelessTruncatedNormal ¶
func StatelessTruncatedNormal(scope *Scope, shape tf.Output, seed tf.Output, optional ...StatelessTruncatedNormalAttr) (output tf.Output)
Outputs deterministic pseudorandom values from a truncated normal distribution.
The generated values follow a normal distribution with mean 0 and standard deviation 1, except that values whose magnitude is more than 2 standard deviations from the mean are dropped and re-picked.
The outputs are a deterministic function of `shape` and `seed`.
Arguments:
shape: The shape of the output tensor. seed: 2 seeds (shape [2]).
Returns Random values with specified shape.
func StatelessTruncatedNormalV2 ¶
func StatelessTruncatedNormalV2(scope *Scope, shape tf.Output, key tf.Output, counter tf.Output, alg tf.Output, optional ...StatelessTruncatedNormalV2Attr) (output tf.Output)
Outputs deterministic pseudorandom values from a truncated normal distribution.
The generated values follow a normal distribution with mean 0 and standard deviation 1, except that values whose magnitude is more than 2 standard deviations from the mean are dropped and re-picked.
The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
Arguments:
shape: The shape of the output tensor. key: Key for the counter-based RNG algorithm (shape uint64[1]). counter: Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. alg: The RNG algorithm (shape int32[]).
Returns Random values with specified shape.
func StaticRegexFullMatch ¶
Check if the input matches the regex pattern.
The input is a string tensor of any shape. The pattern is the regular expression to be matched with every element of the input tensor. The boolean values (True or False) of the output tensor indicate if the input matches the regex pattern provided.
The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)
Arguments:
input: A string tensor of the text to be processed. pattern: The regular expression to match the input.
Returns A bool tensor with the same shape as `input`.
func StaticRegexReplace ¶
func StaticRegexReplace(scope *Scope, input tf.Output, pattern string, rewrite string, optional ...StaticRegexReplaceAttr) (output tf.Output)
Replaces the match of pattern in input with rewrite.
It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)
Arguments:
input: The text to be processed. pattern: The regular expression to match the input. rewrite: The rewrite to be applied to the matched expression.
Returns The text after applying pattern and rewrite.
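A minimal sketch (pattern and inputs are made up for illustration) that replaces digit runs with "N":

```
s := NewScope()
input := Const(s, []string{"user-001", "user-002"})
out := StaticRegexReplace(s, input, "[0-9]+", "N")
if s.Err() != nil {
	panic(s.Err())
}
_ = out // => ["user-N", "user-N"]
```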
func StatsAggregatorHandle ¶
func StatsAggregatorHandle(scope *Scope, optional ...StatsAggregatorHandleAttr) (handle tf.Output)
Creates a statistics manager resource.
func StatsAggregatorSetSummaryWriter ¶
func StatsAggregatorSetSummaryWriter(scope *Scope, stats_aggregator tf.Output, summary tf.Output) (o *tf.Operation)
Set a summary_writer_interface to record statistics using the given stats_aggregator.
Returns the created operation.
func StatsAggregatorSummary ¶
Produces a summary of any statistics recorded by the given statistics manager.
func StochasticCastToInt ¶ added in v0.5.0
func StochasticCastToInt(scope *Scope, input tf.Output, key tf.Output, counter tf.Output, alg tf.Output, Tout tf.DataType) (output tf.Output)
Stochastically cast a given tensor from floats to ints.
The values are cast with a deterministic pseudo-random tensor from a uniform distribution generated from the user-given key, counter and algorithm. Values will saturate if out of the specified integer type range, and will become zero if inputs are NaN.
The outputs are a deterministic function of `input`, `key`, `counter`, `alg`.
Arguments:
input: The operand to stochastically cast to int. key: Key for the counter-based RNG algorithm (shape uint64[1]). counter: Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used. alg: The RNG algorithm (shape int32[]). Tout: The type of the output.
Returns The cast result with the same shape as the input.
func StopGradient ¶
Stops gradient computation.
When executed in a graph, this op outputs its input tensor as-is.
When building ops to compute gradients, this op prevents the contribution of its inputs from being taken into account. Normally, the gradient generator adds ops to a graph to compute the derivatives of a specified 'loss' by recursively finding out inputs that contributed to its computation. If you insert this op in the graph, its inputs are masked from the gradient generator. They are not taken into account for computing gradients.
This is useful any time you want to compute a value with TensorFlow but need to pretend that the value was a constant. For example, the softmax function for a vector x can be written as
```python
def softmax(x):
  numerator = tf.exp(x)
  denominator = tf.reduce_sum(numerator)
  return numerator / denominator
```
This however is susceptible to overflow if the values in x are large. An alternative more stable way is to subtract the maximum of x from each of the values.
```python
def stable_softmax(x):
  z = x - tf.reduce_max(x)
  numerator = tf.exp(z)
  denominator = tf.reduce_sum(numerator)
  return numerator / denominator
```
However, when we backprop through the softmax to x, we don't want to backprop through the `tf.reduce_max(x)` calculation (if the max values are not unique then the gradient could flow to the wrong input); we want to treat it as a constant. Therefore, we should write this out as
```python
def stable_softmax(x):
  z = x - tf.stop_gradient(tf.reduce_max(x))
  numerator = tf.exp(z)
  denominator = tf.reduce_sum(numerator)
  return numerator / denominator
```
Some other examples include:
- The *EM* algorithm where the *M-step* should not involve backpropagation through the output of the *E-step*.
- Contrastive divergence training of Boltzmann machines where, when differentiating the energy function, the training must not backpropagate through the graph that generated the samples from the model.
- Adversarial training, where no backprop should happen through the adversarial example generation process.
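The stable-softmax idea above can also be sketched with this package's ops. This is a rough, unofficial translation of the Python, and it assumes the usual generated signatures for Max, Exp, Sum, Sub and Div plus the MaxKeepDims/SumKeepDims attribute helpers:

```
s := NewScope()
x := Placeholder(s, tf.Float)
axis := Const(s, int32(-1))
// Subtract a gradient-stopped max before exponentiating, as in stable_softmax above.
z := Sub(s, x, StopGradient(s, Max(s, x, axis, MaxKeepDims(true))))
numerator := Exp(s, z)
denominator := Sum(s, numerator, axis, SumKeepDims(true))
softmax := Div(s, numerator, denominator)
if s.Err() != nil {
	panic(s.Err())
}
_ = softmax
```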
func StridedSlice ¶
func StridedSlice(scope *Scope, input tf.Output, begin tf.Output, end tf.Output, strides tf.Output, optional ...StridedSliceAttr) (output tf.Output)
Return a strided slice from `input`.
Note, most python users will want to use the Python `Tensor.__getitem__` or `Variable.__getitem__` rather than this op directly.
The goal of this op is to produce a new tensor with a subset of the elements from the `n` dimensional `input` tensor. The subset is chosen using a sequence of `m` sparse range specifications encoded into the arguments of this function. Note, in some cases `m` could be equal to `n`, but this need not be the case. Each range specification entry can be one of the following:
An ellipsis (...). Ellipses are used to imply zero or more dimensions of full-dimension selection and are produced using `ellipsis_mask`. For example, `foo[...]` is the identity slice.
A new axis. This is used to insert a new shape=1 dimension and is produced using `new_axis_mask`. For example, `foo[:, ...]` where `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor.
A range `begin:end:stride`. This is used to specify how much to choose from a given dimension. `stride` can be any integer but 0. `begin` is an integer which represents the index of the first value to select while `end` represents the index of the last value to select. The number of values selected in each dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`. `begin` and `end` can be negative where `-1` is the last element, `-2` is the second to last. `begin_mask` controls whether to replace the explicitly given `begin` with an implicit effective value of `0` if `stride > 0` and `-1` if `stride < 0`. `end_mask` is analogous but produces the number required to create the largest open interval. For example, given a shape `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do not assume this is equivalent to `foo[0:-1]` which has an effective `begin` and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the first dimension of a tensor while dropping the last two (in the original order elements). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`.
A single index. This is used to keep only elements that have a given index. For example, `foo[2, :]` on a shape `(5,6)` tensor produces a shape `(6,)` tensor. This is encoded in `begin` and `end` and `shrink_axis_mask`.
Each conceptual range specification is encoded in the op's argument. This encoding is best understand by considering a non-trivial example. In particular, `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as
```
begin = [1, 2, x, x, 0, x]  # x denotes don't care (usually 0)
end = [2, 4, x, x, -3, x]
strides = [1, 1, x, x, -1, 1]
begin_mask = 1<<4 | 1<<5 = 48
end_mask = 1<<5 = 32
ellipsis_mask = 1<<3 = 8
new_axis_mask = 1<<2 = 4
shrink_axis_mask = 1<<0 = 1
```
In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of the slice becomes (2, 1, 5, 5, 2, 5). Let us walk step by step through each argument specification.
1. The first argument in the example slice is turned into `begin = 1` and `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we also set the appropriate bit in `shrink_axis_mask`.
2. `2:4` contributes 2, 4, 1 to begin, end, and stride. All masks have zero bits contributed.
3. None is a synonym for `tf.newaxis`. This means insert a dimension of size 1 in the final shape. Dummy values are contributed to begin, end and stride, while the new_axis_mask bit is set.
4. `...` grabs the full ranges from as many dimensions as needed to fully specify a slice for every dimension of the input shape.
5. `:-3:-1` shows the use of negative indices. A negative index `i` associated with a dimension that has shape `s` is converted to a positive index `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion is done internally so begin, end and strides receive x, -3, and -1. The appropriate begin_mask bit is set to indicate the start range is the full range (ignoring the x).
6. `:` indicates that the entire contents of the corresponding dimension is selected. This is equivalent to `::` or `0::1`. begin, end, and strides receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and `end_mask` are also set.
*Requirements*:
`0 != strides[i] for i in [0, m)` `ellipsis_mask must be a power of two (only one ellipsis)`
Arguments:
begin: `begin[k]` specifies the offset into the `k`th range specification.
The exact dimension this corresponds to will be determined by context. Out-of-bounds values will be silently clamped. If the `k`th bit of `begin_mask` is set, then `begin[k]` is ignored and the full range of the appropriate dimension is used instead. Negative values cause indexing to start from the highest element, e.g. if `foo==[1,2,3]` then `foo[-1]==3`.
end: `end[i]` is like `begin` with the exception that `end_mask` is
used to determine full ranges.
strides: `strides[i]` specifies the increment in the `i`th specification
after extracting a given element. Negative indices will reverse the original order. Out-of-range values are clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0`
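A minimal sketch (example values only) that takes rows 1:3 of a 3x3 matrix, leaving all masks at their zero defaults:

```
s := NewScope()
input := Const(s, [][]int32{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}})
begin := Const(s, []int32{1, 0})
end := Const(s, []int32{3, 3})
strides := Const(s, []int32{1, 1})
slice := StridedSlice(s, input, begin, end, strides) // roughly input[1:3, 0:3]
if s.Err() != nil {
	panic(s.Err())
}
_ = slice
```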
func StridedSliceGrad ¶
func StridedSliceGrad(scope *Scope, shape tf.Output, begin tf.Output, end tf.Output, strides tf.Output, dy tf.Output, optional ...StridedSliceGradAttr) (output tf.Output)
Returns the gradient of `StridedSlice`.
Since `StridedSlice` cuts out pieces of its `input` which is size `shape`, its gradient will have the same shape (which is passed here as `shape`). The gradient will be zero in any element that the slice does not select.
Arguments are the same as `StridedSlice` with the exception that `dy` is the input gradient to be propagated and `shape` is the shape of `StridedSlice`'s `input`.
func StringFormat ¶
func StringFormat(scope *Scope, inputs []tf.Output, optional ...StringFormatAttr) (output tf.Output)
Formats a string template using a list of tensors.
Formats a string template using a list of tensors, pretty-printing tensor summaries.
Arguments:
inputs: The list of tensors to format into the placeholder string.
Returns The resulting string scalar.
func StringJoin ¶
Joins the strings in the given list of string tensors into one tensor,
using the given separator (the default is an empty separator).
Examples:
>>> s = ["hello", "world", "tensorflow"] >>> tf.strings.join(s, " ") <tf.Tensor: shape=(), dtype=string, numpy=b'hello world tensorflow'>
Arguments:
inputs: A list of string tensors. The tensors must all have the same shape,
or be scalars. Scalars may be mixed in; these will be broadcast to the shape of non-scalar inputs.
func StringLength ¶
String lengths of `input`.
Computes the length of each string given in the input tensor.
>>> strings = tf.constant(['Hello','TensorFlow', '\U0001F642'])
>>> tf.strings.length(strings).numpy() # default counts bytes
array([ 5, 10, 4], dtype=int32)
>>> tf.strings.length(strings, unit="UTF8_CHAR").numpy()
array([ 5, 10, 1], dtype=int32)
Arguments:
input: The strings for which to compute the length for each element.
Returns Integer tensor that has the same shape as `input`. The output contains the element-wise string lengths of `input`.
func StringLower ¶
Converts all uppercase characters into their respective lowercase replacements.
Example:
>>> tf.strings.lower("CamelCase string and ALL CAPS")
<tf.Tensor: shape=(), dtype=string, numpy=b'camelcase string and all caps'>
Arguments:
input: The input to be lower-cased.
func StringNGrams ¶
func StringNGrams(scope *Scope, data tf.Output, data_splits tf.Output, separator string, ngram_widths []int64, left_pad string, right_pad string, pad_width int64, preserve_short_sequences bool) (ngrams tf.Output, ngrams_splits tf.Output)
Creates ngrams from ragged string data.
This op accepts a ragged tensor with 1 ragged dimension containing only strings and outputs a ragged tensor with 1 ragged dimension containing ngrams of that string, joined along the innermost axis.
Arguments:
data: The values tensor of the ragged string tensor to make ngrams out of. Must be a
1D string tensor.
data_splits: The splits tensor of the ragged string tensor to make ngrams out of. separator: The string to append between elements of the token. Use "" for no separator. ngram_widths: The sizes of the ngrams to create. left_pad: The string to use to pad the left side of the ngram sequence. Only used if
pad_width != 0.
right_pad: The string to use to pad the right side of the ngram sequence. Only used if
pad_width != 0.
pad_width: The number of padding elements to add to each side of each
sequence. Note that padding will never be greater than 'ngram_widths'-1 regardless of this value. If `pad_width=-1`, then add `max(ngram_widths)-1` elements.
Returns:
ngrams: The values tensor of the output ngrams ragged tensor. ngrams_splits: The splits tensor of the output ngrams ragged tensor.
func StringSplit ¶
func StringSplit(scope *Scope, input tf.Output, delimiter tf.Output, optional ...StringSplitAttr) (indices tf.Output, values tf.Output, shape tf.Output)
Split elements of `input` based on `delimiter` into a `SparseTensor`.
Let N be the size of source (typically N will be the batch size). Split each element of `input` based on `delimiter` and return a `SparseTensor` containing the split tokens. Empty tokens are ignored.
`delimiter` can be empty, or a string of split characters. If `delimiter` is an
empty string, each element of `input` is split into individual single-byte character strings, including splitting of UTF-8 multibyte sequences. Otherwise every character of `delimiter` is a potential split point.
For example:
N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output will be

```
indices = [0, 0;
           0, 1;
           1, 0;
           1, 1;
           1, 2]
shape = [2, 3]
values = ['hello', 'world', 'a', 'b', 'c']
```
Arguments:
input: 1-D. Strings to split. delimiter: 0-D. Delimiter characters (bytes), or empty string.
Returns:
indices: A dense matrix of int64 representing the indices of the sparse tensor. values: A vector of strings corresponding to the split values. shape: a length-2 vector of int64 representing the shape of the sparse
tensor, where the first value is N and the second value is the maximum number of tokens in a single input entry.
func StringSplitV2 ¶
func StringSplitV2(scope *Scope, input tf.Output, sep tf.Output, optional ...StringSplitV2Attr) (indices tf.Output, values tf.Output, shape tf.Output)
Split elements of `source` based on `sep` into a `SparseTensor`.
Let N be the size of source (typically N will be the batch size). Split each element of `source` based on `sep` and return a `SparseTensor` containing the split tokens. Empty tokens are ignored.
For example, N = 2, source[0] is 'hello world' and source[1] is 'a b c', then the output will be

```
st.indices = [0, 0;
              0, 1;
              1, 0;
              1, 1;
              1, 2]
st.shape = [2, 3]
st.values = ['hello', 'world', 'a', 'b', 'c']
```
If `sep` is given, consecutive delimiters are not grouped together and are deemed to delimit empty strings. For example, source of `"1<>2<><>3"` and sep of `"<>"` returns `["1", "2", "", "3"]`. If `sep` is None or an empty string, consecutive whitespace is regarded as a single separator, and the result will contain no empty strings at the start or end if the string has leading or trailing whitespace.
Note that the above mentioned behavior matches python's str.split.
Arguments:
input: `1-D` string `Tensor`, the strings to split. sep: `0-D` string `Tensor`, the delimiter character.
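A minimal sketch (illustrative inputs) that splits on a single space and yields the components of the resulting `SparseTensor`:

```
s := NewScope()
input := Const(s, []string{"hello world", "a b c"})
sep := Const(s, " ")
indices, values, shape := StringSplitV2(s, input, sep)
if s.Err() != nil {
	panic(s.Err())
}
_, _, _ = indices, values, shape
```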
func StringStrip ¶
Strip leading and trailing whitespaces from the Tensor.
Examples:
>>> tf.strings.strip(["\nTensorFlow", " The python library "]).numpy()
array([b'TensorFlow', b'The python library'], dtype=object)
Arguments:
input: A string `Tensor` of any shape.
Returns A string `Tensor` of the same shape as the input.
func StringToHashBucket ¶
func StringToHashBucket(scope *Scope, string_tensor tf.Output, num_buckets int64) (output tf.Output)
Converts each string in the input Tensor to its hash mod by a number of buckets.
The hash function is deterministic on the content of the string within the process.
Note that the hash function may change from time to time. This functionality will be deprecated and it's recommended to use `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`.
Arguments:
num_buckets: The number of buckets.
Returns A Tensor of the same shape as the input `string_tensor`.
func StringToHashBucketFast ¶
Converts each string in the input Tensor to its hash mod by a number of buckets.
The hash function is deterministic on the content of the string within the process and will never change. However, it is not suitable for cryptography. This function may be used when CPU time is scarce and inputs are trusted or unimportant. There is a risk of adversaries constructing inputs that all hash to the same bucket. To prevent this problem, use a strong hash function with `tf.string_to_hash_bucket_strong`.
Examples:
>>> tf.strings.to_hash_bucket_fast(["Hello", "TensorFlow", "2.x"], 3).numpy()
array([0, 2, 2])
Arguments:
input: The strings to assign a hash bucket. num_buckets: The number of buckets.
Returns A Tensor of the same shape as the input `string_tensor`.
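A minimal sketch, assuming the same `(input, num_buckets)` argument shape as the neighboring hash-bucket ops; the strings are illustrative:

```
s := NewScope()
input := Const(s, []string{"Hello", "TensorFlow", "2.x"})
buckets := StringToHashBucketFast(s, input, 3) // hash each string into one of 3 buckets
if s.Err() != nil {
	panic(s.Err())
}
_ = buckets
```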
func StringToHashBucketStrong ¶
func StringToHashBucketStrong(scope *Scope, input tf.Output, num_buckets int64, key []int64) (output tf.Output)
Converts each string in the input Tensor to its hash mod by a number of buckets.
The hash function is deterministic on the content of the string within the process. The hash function is a keyed hash function, where attribute `key` defines the key of the hash function. `key` is an array of 2 elements.
A strong hash is important when inputs may be malicious, e.g. URLs with additional components. Adversaries could try to make their inputs hash to the same bucket for a denial-of-service attack or to skew the results. A strong hash can be used to make it difficult to find inputs with a skewed hash value distribution over buckets. This requires that the hash function is seeded by a high-entropy (random) "key" unknown to the adversary.
The additional robustness comes at a cost of roughly 4x higher compute time than `tf.string_to_hash_bucket_fast`.
Examples:
>>> tf.strings.to_hash_bucket_strong(["Hello", "TF"], 3, [1, 2]).numpy()
array([2, 0])
Arguments:
input: The strings to assign a hash bucket. num_buckets: The number of buckets. key: The key used to seed the hash function, passed as a list of two uint64
elements.
Returns A Tensor of the same shape as the input `string_tensor`.
func StringToNumber ¶
func StringToNumber(scope *Scope, string_tensor tf.Output, optional ...StringToNumberAttr) (output tf.Output)
Converts each string in the input Tensor to the specified numeric type.
(Note that int32 overflow results in an error while float overflow results in a rounded value.)
Example:
>>> strings = ["5.0", "3.0", "7.0"] >>> tf.strings.to_number(strings) <tf.Tensor: shape=(3,), dtype=float32, numpy=array([5., 3., 7.], dtype=float32)>
Returns A Tensor of the same shape as the input `string_tensor`.
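A minimal sketch mirroring the example above; without the optional out_type attribute the output defaults to float32:

```
s := NewScope()
strs := Const(s, []string{"5.0", "3.0", "7.0"})
nums := StringToNumber(s, strs)
if s.Err() != nil {
	panic(s.Err())
}
_ = nums // => [5.0, 3.0, 7.0]
```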
func StringUpper ¶
Converts all lowercase characters into their respective uppercase replacements.
Example:
>>> tf.strings.upper("CamelCase string and ALL CAPS")
<tf.Tensor: shape=(), dtype=string, numpy=b'CAMELCASE STRING AND ALL CAPS'>
Arguments:
input: The input to be upper-cased.
func Sub ¶
Returns x - y element-wise.
*NOTE*: `Subtract` supports broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func Substr ¶
func Substr(scope *Scope, input tf.Output, pos tf.Output, len tf.Output, optional ...SubstrAttr) (output tf.Output)
Return substrings from `Tensor` of strings.
For each string in the input `Tensor`, creates a substring starting at index `pos` with a total length of `len`.
If `len` defines a substring that would extend beyond the length of the input string, or if `len` is negative, then as many characters as possible are used.
A negative `pos` indicates distance within the string backwards from the end.
If `pos` specifies an index which is out of range for any of the input strings, then an `InvalidArgumentError` is thrown.
`pos` and `len` must have the same shape, otherwise a `ValueError` is thrown on Op creation.
*NOTE*: `Substr` supports broadcasting up to two dimensions. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
---
Examples ¶
Using scalar `pos` and `len`:
```python
input = [b'Hello', b'World']
position = 1
length = 3

output = [b'ell', b'orl']
```
Using `pos` and `len` with same shape as `input`:
```python
input = [[b'ten', b'eleven', b'twelve'],
         [b'thirteen', b'fourteen', b'fifteen'],
         [b'sixteen', b'seventeen', b'eighteen']]
position = [[1, 2, 3],
            [1, 2, 3],
            [1, 2, 3]]
length = [[2, 3, 4],
          [4, 3, 2],
          [5, 5, 5]]
output = [[b'en', b'eve', b'lve'],
          [b'hirt', b'urt', b'te'],
          [b'ixtee', b'vente', b'hteen']]
```
Broadcasting `pos` and `len` onto `input`:
```
input = [[b'ten', b'eleven', b'twelve'],
         [b'thirteen', b'fourteen', b'fifteen'],
         [b'sixteen', b'seventeen', b'eighteen'],
         [b'nineteen', b'twenty', b'twentyone']]
position = [1, 2, 3]
length = [1, 2, 3]

output = [[b'e', b'ev', b'lve'],
          [b'h', b'ur', b'tee'],
          [b'i', b've', b'hte'],
          [b'i', b'en', b'nty']]
```
Broadcasting `input` onto `pos` and `len`:
```
input = b'thirteen'
position = [1, 5, 7]
length = [3, 2, 1]

output = [b'hir', b'ee', b'n']
```
Raises:
- `ValueError`: If the first argument cannot be converted to a Tensor of `dtype string`.
- `InvalidArgumentError`: If indices are out of range.
- `ValueError`: If `pos` and `len` are not the same shape.
Arguments:
input: Tensor of strings pos: Scalar defining the position of first character in each substring len: Scalar defining the number of characters to include in each substring
Returns Tensor of substrings
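A minimal Go sketch of the scalar `pos`/`len` case shown above:

```
s := NewScope()
input := Const(s, []string{"Hello", "World"})
pos := Const(s, int32(1))
length := Const(s, int32(3))
out := Substr(s, input, pos, length) // => ["ell", "orl"]
if s.Err() != nil {
	panic(s.Err())
}
_ = out
```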
func Sum ¶
Computes the sum of elements across dimensions of a tensor.
Reduces `input` along the dimensions given in `axis`. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keep_dims` is true, the reduced dimensions are retained with length 1.
Arguments:
input: The tensor to reduce. axis: The dimensions to reduce. Must be in the range
`[-rank(input), rank(input))`.
Returns The reduced tensor.
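A minimal sketch (assuming the usual generated signature `Sum(scope, input, axis, ...)`), reducing a 2x3 matrix along its columns:

```
s := NewScope()
input := Const(s, [][]float32{{1, 2, 3}, {4, 5, 6}})
axis := Const(s, []int32{1}) // reduce across the second dimension
total := Sum(s, input, axis)
if s.Err() != nil {
	panic(s.Err())
}
_ = total // shape [2]: [6, 15]
```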
func Svd ¶
func Svd(scope *Scope, input tf.Output, optional ...SvdAttr) (s tf.Output, u tf.Output, v tf.Output)
Computes the singular value decompositions of one or more matrices.
Computes the SVD of each inner matrix in `input` such that `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`
```python
# a is a tensor containing a batch of matrices.
# s is a tensor of singular values for each matrix.
# u is the tensor containing the left singular vectors for each matrix.
# v is the tensor containing the right singular vectors for each matrix.
s, u, v = svd(a)
s, _, _ = svd(a, compute_uv=False)
```
Arguments:
input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.
Returns:
s: Singular values. Shape is `[..., P]`. u: Left singular vectors. If `full_matrices` is `False` then shape is
`[..., M, P]`; if `full_matrices` is `True` then shape is `[..., M, M]`. Undefined if `compute_uv` is `False`.
v: Right singular vectors. If `full_matrices` is `False` then shape is
`[..., N, P]`. If `full_matrices` is `True` then shape is `[..., N, N]`. Undefined if `compute_uv` is false.
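A minimal Go sketch mirroring the Python snippet above; the input matrix is arbitrary:

```
s := NewScope()
a := Const(s, [][]float32{{1, 0}, {0, 2}, {0, 0}}) // a 3x2 matrix
sv, u, v := Svd(s, a)                              // singular values and singular vectors
if s.Err() != nil {
	panic(s.Err())
}
_, _, _ = sv, u, v
```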
func Switch ¶
func Switch(scope *Scope, data tf.Output, pred tf.Output) (output_false tf.Output, output_true tf.Output)
Forwards `data` to the output port determined by `pred`.
If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise, the data goes to `output_false`.
See also `RefSwitch` and `Merge`.
Arguments:
data: The tensor to be forwarded to the appropriate output. pred: A scalar that specifies which output port will receive data.
Returns:
output_false: If `pred` is false, data will be forwarded to this output. output_true: If `pred` is true, data will be forwarded to this output.
func SyncDevice ¶ added in v0.4.0
Synchronizes the device this op is run on.
Only GPU ops are asynchronous in TensorFlow, so this only has an effect when run on GPUs. On GPUs, this op synchronizes the GPU's compute stream.
Returns the created operation.
func TFRecordDataset ¶
func TFRecordDataset(scope *Scope, filenames tf.Output, compression_type tf.Output, buffer_size tf.Output, optional ...TFRecordDatasetAttr) (handle tf.Output)
Creates a dataset that emits the records from one or more TFRecord files.
Arguments:
filenames: A scalar or vector containing the name(s) of the file(s) to be
read.
compression_type: A scalar containing either (i) the empty string (no
compression), (ii) "ZLIB", or (iii) "GZIP".
buffer_size: A scalar representing the number of bytes to buffer. A value of
0 means no buffering will be performed.
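A minimal sketch; the file path is hypothetical and the scalars follow the argument descriptions above (empty string for no compression, 0 for no buffering):

```
s := NewScope()
filenames := Const(s, []string{"/tmp/data.tfrecord"}) // hypothetical path
compression := Const(s, "")                           // no compression
bufferSize := Const(s, int64(0))                      // no buffering
handle := TFRecordDataset(s, filenames, compression, bufferSize)
if s.Err() != nil {
	panic(s.Err())
}
_ = handle
```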
func TFRecordDatasetV2 ¶ added in v0.6.0
func TFRecordDatasetV2(scope *Scope, filenames tf.Output, compression_type tf.Output, buffer_size tf.Output, byte_offsets tf.Output, optional ...TFRecordDatasetV2Attr) (handle tf.Output)
Creates a dataset that emits the records from one or more TFRecord files.
Arguments:
filenames: A scalar or vector containing the name(s) of the file(s) to be
read.
compression_type: A scalar containing either (i) the empty string (no
compression), (ii) "ZLIB", or (iii) "GZIP".
buffer_size: A scalar representing the number of bytes to buffer. A value of
0 means no buffering will be performed.
byte_offsets: A scalar or vector containing the number of bytes for each file
that will be skipped prior to reading.
func TFRecordReaderV2 ¶
func TFRecordReaderV2(scope *Scope, optional ...TFRecordReaderV2Attr) (reader_handle tf.Output)
A Reader that outputs the records from a TensorFlow Records file.
Returns The handle to reference the Reader.
func TPUCompilationResult ¶
Returns the result of a TPU compilation.
This operation returns the result of a TPU compilation as a serialized CompilationResultProto, which holds a status and an error message if an error occurred during compilation.
func TPUCompileSucceededAssert ¶
Asserts that compilation succeeded.
This op produces no output and closes the device during failure to ensure all pending device interactions fail.
'compilation_status' is a serialized CompilationResultProto.
Returns the created operation.
func TPUCopyWithDynamicShape ¶ added in v0.8.0
func TPUCopyWithDynamicShape(scope *Scope, tensors []tf.Output, unpadded_sizes []tf.Output) (tpu_tensors []tf.Output)
Op that copies host tensor to device with dynamic shape support. For internal use only.
func TPUEmbeddingActivations ¶
func TPUEmbeddingActivations(scope *Scope, embedding_variable tf.Output, sliced_activations tf.Output, table_id int64, lookup_id int64) (output tf.Output)
An op enabling differentiation of TPU Embeddings.
This op simply returns its first input, which is assumed to have been sliced from the Tensors returned by TPUEmbeddingDequeueActivations. The presence of this op, and its first argument being a trainable Variable, enables automatic differentiation of graphs containing embeddings via the TPU Embedding Python libraries.
Arguments:
embedding_variable: A trainable variable, enabling optimizers to find this op. sliced_activations: The embedding activations Tensor to return. table_id: The id of the table in the embedding layer configuration from which
these activations were computed.
lookup_id: Identifier of the set of embedding indices which produced these
activations.
func TPUExecute ¶
func TPUExecute(scope *Scope, args []tf.Output, key tf.Output, Tresults []tf.DataType) (results []tf.Output)
Op that loads and executes a TPU program on a TPU device.
For the internal use of the distributed TPU compiler.
func TPUExecuteAndUpdateVariables ¶
func TPUExecuteAndUpdateVariables(scope *Scope, args []tf.Output, key tf.Output, Tresults []tf.DataType, device_var_reads_indices []int64, device_var_updates_indices []int64) (results []tf.Output)
Op that executes a program with optional in-place variable updates.
It (optionally) reads device variables, loads and executes a TPU program on a TPU device, and then (optionally) in-place updates variables using the program outputs, as specified in attributes device_var_reads_indices (program input indices from directly reading variables) and device_var_updates_indices (program output indices used to update variables, -1 means no-update/read-only). Program outputs consumed by these variables will not appear in the op output. For the internal use of the distributed TPU compiler.
func TPUOrdinalSelector ¶
A TPU core selector Op.
This Op produces a set of TPU cores (for warm-up) or a single TPU core (for regular inference) to execute the TPU program on. The output is consumed by TPUPartitionedCall.
Returns A vector of 1 or more TPU cores.
func TPUPartitionedInput ¶
func TPUPartitionedInput(scope *Scope, inputs []tf.Output, optional ...TPUPartitionedInputAttr) (output tf.Output)
An op that groups a list of partitioned inputs together.
Arguments:
inputs: A list of partitioned inputs which must have the same shape.
Returns A handle which represents the full shape of partitioned tensors.
func TPUPartitionedInputV2 ¶ added in v0.4.0
func TPUPartitionedInputV2(scope *Scope, inputs []tf.Output, partition_dims []int64, optional ...TPUPartitionedInputV2Attr) (output tf.Output)
An op that groups a list of partitioned inputs together. Supports ND sharding.
Arguments:
inputs: A list of partitioned inputs which must have the same shape. partition_dims: A list of integers describing how each dimension is partitioned. Emptiness
indicates the inputs are replicated.
Returns A handle which represents the full shape of partitioned tensors.
func TPUPartitionedOutput ¶
func TPUPartitionedOutput(scope *Scope, inputs tf.Output, num_splits int64, optional ...TPUPartitionedOutputAttr) (output []tf.Output)
An op that demultiplexes a tensor to be sharded by XLA to a list of partitioned
outputs outside the XLA computation.
Arguments:
inputs: A tensor which represents the full shape of partitioned tensors.
Returns A list of partitioned outputs which have the same shape.
func TPUPartitionedOutputV2 ¶ added in v0.4.0
func TPUPartitionedOutputV2(scope *Scope, inputs tf.Output, num_splits int64, partition_dims []int64) (output []tf.Output)
An op that demultiplexes a tensor to be sharded by XLA to a list of partitioned
outputs outside the XLA computation. Supports ND sharding.
Arguments:
inputs: A tensor which represents the full shape of partitioned tensors. partition_dims: A list of integers describing how each dimension is partitioned. Emptiness
indicates the inputs are replicated.
Returns A list of partitioned outputs which have the same shape.
func TPUReplicateMetadata ¶
func TPUReplicateMetadata(scope *Scope, num_replicas int64, optional ...TPUReplicateMetadataAttr) (o *tf.Operation)
Metadata indicating how the TPU computation should be replicated.
This operation holds the metadata common to operations of a `tpu.replicate()` computation subgraph.
Arguments:
num_replicas: Number of replicas of the computation
Returns the created operation.
func TPUReplicatedInput ¶
func TPUReplicatedInput(scope *Scope, inputs []tf.Output, optional ...TPUReplicatedInputAttr) (output tf.Output)
Connects N inputs to an N-way replicated TPU computation.
This operation holds a replicated input to a `tpu.replicate()` computation subgraph. Each replicated input has the same shape and type alongside the output.
For example: ``` %a = "tf.opA"() %b = "tf.opB"() %replicated_input = "tf.TPUReplicatedInput"(%a, %b) %computation = "tf.Computation"(%replicated_input) ``` The above computation has a replicated input of two replicas.
func TPUReplicatedOutput ¶
Connects N outputs from an N-way replicated TPU computation.
This operation holds a replicated output from a `tpu.replicate()` computation subgraph. Each replicated output has the same shape and type alongside the input.
For example:

```
%computation = "tf.Computation"()
%replicated_output:2 = "tf.TPUReplicatedOutput"(%computation)
```

The above computation has a replicated output of two replicas.
func TPUReshardVariables ¶
func TPUReshardVariables(scope *Scope, vars []tf.Output, new_format_key tf.Output, format_state_var tf.Output) (o *tf.Operation)
Op that reshards on-device TPU variables to specified state.
Op that reshards on-device TPU variables to specified state. Internal use only.
The sharding state is represented as the key of the compilation that generated the sharding/unsharding programs along with the main program. new_format_key specifies the desired state, and format_state_var is the current state of the variables.
Returns the created operation.
func TPURoundRobin ¶ added in v0.2.0
Round-robin load balancing on TPU cores.
A load balancing op that round-robins among TPU cores.
This op round-robins between the integers in [0, NumTPUCoresVisiblePerHost]. It is useful for interfacing with TensorFlow ops that take as input a TPU core on which to execute computations, such as `TPUPartitionedCall`.
device_ordinal: An integer in [0, NumTPUCoresVisiblePerHost].
func TakeDataset ¶
func TakeDataset(scope *Scope, input_dataset tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...TakeDatasetAttr) (handle tf.Output)
Creates a dataset that contains `count` elements from the `input_dataset`.
Arguments:
count: A scalar representing the number of elements from the `input_dataset`
that should be taken. A value of `-1` indicates that all of `input_dataset` is taken.
func TakeManySparseFromTensorsMap ¶
func TakeManySparseFromTensorsMap(scope *Scope, sparse_handles tf.Output, dtype tf.DataType, optional ...TakeManySparseFromTensorsMapAttr) (sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output)
Read `SparseTensors` from a `SparseTensorsMap` and concatenate them.
The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where `N` is the minibatch size and the rows correspond to the output handles of `AddSparseToTensorsMap` or `AddManySparseToTensorsMap`. The ranks of the original `SparseTensor` objects that went into the given input ops must all match. When the final `SparseTensor` is created, it has rank one higher than the ranks of the incoming `SparseTensor` objects (they have been concatenated along a new row dimension on the left).
The output `SparseTensor` object's shape values for all dimensions but the first are the max across the input `SparseTensor` objects' shape values for the corresponding dimensions. Its first shape value is `N`, the minibatch size.
The input `SparseTensor` objects' indices are assumed ordered in standard lexicographic order. If this is not the case, after this step run `SparseReorder` to restore index ordering.
For example, if the handles represent an input, which is a `[2, 3]` matrix representing two original `SparseTensor` objects:
```
index = [ 0]
        [10]
        [20]
values = [1, 2, 3]
shape = [50]
```
and
```
index = [ 2]
        [10]
values = [4, 5]
shape = [30]
```
then the final `SparseTensor` will be:
```
index = [0  0]
        [0 10]
        [0 20]
        [1  2]
        [1 10]
values = [1, 2, 3, 4, 5]
shape = [2 50]
```
Arguments:
sparse_handles: 1-D, The `N` serialized `SparseTensor` objects.
Shape: `[N]`.
dtype: The `dtype` of the `SparseTensor` objects stored in the
`SparseTensorsMap`.
Returns:
sparse_indices: 2-D. The `indices` of the minibatch `SparseTensor`. sparse_values: 1-D. The `values` of the minibatch `SparseTensor`. sparse_shape: 1-D. The `shape` of the minibatch `SparseTensor`.
func Tan ¶
Computes tan of x element-wise.
Given an input tensor, this function computes tangent of every element in the tensor. Input range is `(-inf, inf)` and output range is `(-inf, inf)`. If input lies outside the boundary, `nan` is returned.

```python
x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")])
tf.math.tan(x) ==> [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 0.32097113 nan]
```
func Tanh ¶
Computes hyperbolic tangent of `x` element-wise.
Given an input tensor, this function computes hyperbolic tangent of every element in the tensor. Input range is `[-inf, inf]` and output range is `[-1,1]`.

>>> x = tf.constant([-float("inf"), -5, -0.5, 1, 1.2, 2, 3, float("inf")])
>>> tf.math.tanh(x)
<tf.Tensor: shape=(8,), dtype=float32, numpy=
array([-1.0, -0.99990916, -0.46211717, 0.7615942 , 0.8336547 , 0.9640276 ,
        0.9950547 , 1.0], dtype=float32)>
func TanhGrad ¶
Computes the gradient for the tanh of `x` wrt its input.
Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy` is the corresponding input gradient.
func TensorArrayCloseV2 ¶
Deprecated. Use TensorArrayCloseV3
DEPRECATED at GraphDef version 26: Use TensorArrayCloseV3
Returns the created operation.
func TensorArrayCloseV3 ¶
Delete the TensorArray from its resource container.
This enables the user to close and release the resource in the middle of a step/run.
Arguments:
handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
Returns the created operation.
func TensorArrayConcatV2 ¶
func TensorArrayConcatV2(scope *Scope, handle tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayConcatV2Attr) (value tf.Output, lengths tf.Output)
Deprecated. Use TensorArrayConcatV3
func TensorArrayConcatV3 ¶
func TensorArrayConcatV3(scope *Scope, handle tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayConcatV3Attr) (value tf.Output, lengths tf.Output)
Concat the elements from the TensorArray into value `value`.
Takes `T` elements of shapes
``` (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...) ```
and concatenates them into a Tensor of shape:
``` (n0 + n1 + ... + n(T-1) x d0 x d1 x ...) ```
All elements must have the same shape (excepting the first dimension).
Arguments:
handle: The handle to a TensorArray. flow_in: A float scalar that enforces proper chaining of operations. dtype: The type of the elem that is returned.
Returns:
value: All of the elements in the TensorArray, concatenated along the first
axis.
lengths: A vector of the row sizes of the original T elements in the
value output. In the example above, this would be the values: `(n1, n2, ..., n(T-1))`.
func TensorArrayGatherV2 ¶
func TensorArrayGatherV2(scope *Scope, handle tf.Output, indices tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayGatherV2Attr) (value tf.Output)
Deprecated. Use TensorArrayGatherV3
DEPRECATED at GraphDef version 26: Use TensorArrayGatherV3
func TensorArrayGatherV3 ¶
func TensorArrayGatherV3(scope *Scope, handle tf.Output, indices tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayGatherV3Attr) (value tf.Output)
Gather specific elements from the TensorArray into output `value`.
All elements selected by `indices` must have the same shape.
Arguments:
handle: The handle to a TensorArray. indices: The locations in the TensorArray from which to read tensor elements. flow_in: A float scalar that enforces proper chaining of operations. dtype: The type of the elem that is returned.
Returns All of the elements in the TensorArray, concatenated along a new axis (the new dimension 0).
func TensorArrayGradV2 ¶
func TensorArrayGradV2(scope *Scope, handle tf.Output, flow_in tf.Output, source string) (grad_handle tf.Output)
Deprecated. Use TensorArrayGradV3
DEPRECATED at GraphDef version 26: Use TensorArrayGradV3
func TensorArrayGradV3 ¶
func TensorArrayGradV3(scope *Scope, handle tf.Output, flow_in tf.Output, source string) (grad_handle tf.Output, flow_out tf.Output)
Creates a TensorArray for storing the gradients of values in the given handle.
If the given TensorArray gradient already exists, returns a reference to it.
Locks the size of the original TensorArray by disabling its dynamic size flag.
**A note about the input flow_in:**
The handle flow_in forces the execution of the gradient lookup to occur only after certain other operations have occurred. For example, when the forward TensorArray is dynamically sized, writes to this TensorArray may resize the object. The gradient TensorArray is statically sized based on the size of the forward TensorArray when this operation executes. Furthermore, the size of the forward TensorArray is frozen by this call. As a result, the flow is used to ensure that the call to generate the gradient TensorArray only happens after all writes are executed.
In the case of dynamically sized TensorArrays, gradient computation should only be performed on read operations that have themselves been chained via flow to occur only after all writes have executed. That way the final size of the forward TensorArray is known when this operation is called.
**A note about the source attribute:**
TensorArray gradient calls use an accumulator TensorArray object. If multiple gradients are calculated and run in the same session, the multiple gradient nodes may accidentally flow through the same accumulator TensorArray. This double counts and generally breaks the TensorArray gradient flow.
The solution is to identify which gradient call this particular TensorArray gradient is being called in. This is performed by identifying a unique string (e.g. "gradients", "gradients_1", ...) from the input gradient Tensor's name. This string is used as a suffix when creating the TensorArray gradient object here (the attribute `source`).
The attribute `source` is added as a suffix to the forward TensorArray's name when performing the creation / lookup, so that each separate gradient calculation gets its own TensorArray accumulator.
Arguments:
handle: The handle to the forward TensorArray. flow_in: A float scalar that enforces proper chaining of operations. source: The gradient source string, used to decide which gradient TensorArray
to return.
func TensorArrayGradWithShape ¶
func TensorArrayGradWithShape(scope *Scope, handle tf.Output, flow_in tf.Output, shape_to_prepend tf.Output, source string) (grad_handle tf.Output, flow_out tf.Output)
Creates a TensorArray for storing multiple gradients of values in the given handle.
Similar to TensorArrayGradV3. However it creates an accumulator with an expanded shape compared to the input TensorArray whose gradient is being computed. This enables multiple gradients for the same TensorArray to be calculated using the same accumulator.
Arguments:
handle: The handle to the forward TensorArray. flow_in: A float scalar that enforces proper chaining of operations. shape_to_prepend: An int32 vector representing a shape. Elements in the gradient accumulator will
have shape which is this shape_to_prepend value concatenated with shape of the elements in the TensorArray corresponding to the input handle.
source: The gradient source string, used to decide which gradient TensorArray
to return.
func TensorArrayReadV2 ¶
func TensorArrayReadV2(scope *Scope, handle tf.Output, index tf.Output, flow_in tf.Output, dtype tf.DataType) (value tf.Output)
Deprecated. Use TensorArrayReadV3
DEPRECATED at GraphDef version 26: Use TensorArrayReadV3
func TensorArrayReadV3 ¶
func TensorArrayReadV3(scope *Scope, handle tf.Output, index tf.Output, flow_in tf.Output, dtype tf.DataType) (value tf.Output)
Read an element from the TensorArray into output `value`.
Arguments:
handle: The handle to a TensorArray. flow_in: A float scalar that enforces proper chaining of operations. dtype: The type of the elem that is returned.
Returns The tensor that is read from the TensorArray.
func TensorArrayScatterV2 ¶
func TensorArrayScatterV2(scope *Scope, handle tf.Output, indices tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output)
Deprecated. Use TensorArrayScatterV3
DEPRECATED at GraphDef version 26: Use TensorArrayScatterV3
func TensorArrayScatterV3 ¶
func TensorArrayScatterV3(scope *Scope, handle tf.Output, indices tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output)
Scatter the data from the input value into specific TensorArray elements.
`indices` must be a vector, and its length must match the first dim of `value`.
Arguments:
handle: The handle to a TensorArray. indices: The locations at which to write the tensor elements. value: The concatenated tensor to write to the TensorArray. flow_in: A float scalar that enforces proper chaining of operations.
Returns A float scalar that enforces proper chaining of operations.
func TensorArraySizeV2 ¶
Deprecated. Use TensorArraySizeV3
DEPRECATED at GraphDef version 26: Use TensorArraySizeV3
func TensorArraySizeV3 ¶
Get the current size of the TensorArray.
Arguments:
handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad). flow_in: A float scalar that enforces proper chaining of operations.
Returns The current size of the TensorArray.
func TensorArraySplitV2 ¶
func TensorArraySplitV2(scope *Scope, handle tf.Output, value tf.Output, lengths tf.Output, flow_in tf.Output) (flow_out tf.Output)
Deprecated. Use TensorArraySplitV3
DEPRECATED at GraphDef version 26: Use TensorArraySplitV3
func TensorArraySplitV3 ¶
func TensorArraySplitV3(scope *Scope, handle tf.Output, value tf.Output, lengths tf.Output, flow_in tf.Output) (flow_out tf.Output)
Split the data from the input value into TensorArray elements.
Assuming that `lengths` takes on values
``` (n0, n1, ..., n(T-1)) ```
and that `value` has shape
``` (n0 + n1 + ... + n(T-1) x d0 x d1 x ...), ```
this splits values into a TensorArray with T tensors.
TensorArray index t will be the subtensor of values with starting position
``` (n0 + n1 + ... + n(t-1), 0, 0, ...) ```
and having size
``` nt x d0 x d1 x ... ```
Arguments:
handle: The handle to a TensorArray. value: The concatenated tensor to write to the TensorArray. lengths: The vector of lengths, how to split the rows of value into the
TensorArray.
flow_in: A float scalar that enforces proper chaining of operations.
Returns A float scalar that enforces proper chaining of operations.
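For instance, splitting a `(5 x 2)` value with `lengths = (2, 3)` yields elements of shapes `(2 x 2)` and `(3 x 2)`. Below is a minimal graph-construction sketch of a split followed by a concat, with illustrative values; it is not part of the generated op comment and assumes the usual in-package style with Const and the `tf` data types.
```
// Split a (5 x 2) tensor into a TensorArray of 2 elements with row counts
// (2, 3), then concatenate it back into a (5 x 2) value.
s := NewScope()
value := Const(s, [][]float32{{1, 2}, {3, 4}, {5, 6}, {7, 8}, {9, 10}})
lengths := Const(s, []int64{2, 3})
handle, flow := TensorArrayV3(s, Const(s, int32(2)), tf.Float)
flow = TensorArraySplitV3(s, handle, value, lengths, flow)
concatenated, rowSizes := TensorArrayConcatV3(s, handle, flow, tf.Float)
_, _ = concatenated, rowSizes // when run, rowSizes evaluates to [2 3]
```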
func TensorArrayV2 ¶
func TensorArrayV2(scope *Scope, size tf.Output, dtype tf.DataType, optional ...TensorArrayV2Attr) (handle tf.Output)
Deprecated. Use TensorArrayV3
DEPRECATED at GraphDef version 26: Use TensorArrayV3
func TensorArrayV3 ¶
func TensorArrayV3(scope *Scope, size tf.Output, dtype tf.DataType, optional ...TensorArrayV3Attr) (handle tf.Output, flow tf.Output)
An array of Tensors of given size.
Write data via Write and read via Read or Pack.
Arguments:
size: The size of the array. dtype: The type of the elements on the tensor_array.
Returns:
handle: The handle to the TensorArray. flow: A scalar used to control gradient flow.
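A minimal write-then-read sketch showing how the returned `flow` scalar is threaded through subsequent TensorArray ops so that reads are ordered after writes; the values are illustrative and this is not part of the generated op comment.
```
// Create a TensorArray of size 2, write a vector at index 0, read it back.
s := NewScope()
handle, flow := TensorArrayV3(s, Const(s, int32(2)), tf.Float)
flow = TensorArrayWriteV3(s, handle, Const(s, int32(0)), Const(s, []float32{1, 2, 3}), flow)
elem := TensorArrayReadV3(s, handle, Const(s, int32(0)), flow, tf.Float)
_ = elem // evaluates to [1 2 3] when the graph is run in a tf.Session
```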
func TensorArrayWriteV2 ¶
func TensorArrayWriteV2(scope *Scope, handle tf.Output, index tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output)
Deprecated. Use TensorArrayWriteV3
DEPRECATED at GraphDef version 26: Use TensorArrayWriteV3
func TensorArrayWriteV3 ¶
func TensorArrayWriteV3(scope *Scope, handle tf.Output, index tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output)
Push an element onto the tensor_array.
Arguments:
handle: The handle to a TensorArray. index: The position to write to inside the TensorArray. value: The tensor to write to the TensorArray. flow_in: A float scalar that enforces proper chaining of operations.
Returns A float scalar that enforces proper chaining of operations.
func TensorDataset ¶
func TensorDataset(scope *Scope, components []tf.Output, output_shapes []tf.Shape, optional ...TensorDatasetAttr) (handle tf.Output)
Creates a dataset that emits `components` as a tuple of tensors once.
func TensorListConcat ¶
func TensorListConcat(scope *Scope, input_handle tf.Output, element_dtype tf.DataType, optional ...TensorListConcatAttr) (tensor tf.Output, lengths tf.Output)
Concats all tensors in the list along the 0th dimension.
Requires that all tensors have the same shape except the first dimension.
input_handle: The input list. tensor: The concatenated result. lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient.
func TensorListConcatV2 ¶
func TensorListConcatV2(scope *Scope, input_handle tf.Output, element_shape tf.Output, leading_dims tf.Output, element_dtype tf.DataType) (tensor tf.Output, lengths tf.Output)
Concats all tensors in the list along the 0th dimension.
Requires that all tensors have the same shape except the first dimension.
input_handle: The input list. element_shape: The shape of the uninitialized elements in the list. If the first
dimension is not -1, it is assumed that all list elements have the same leading dim.
leading_dims: The list of leading dims of uninitialized list elements. Used if
the leading dim of input_handle.element_shape or the element_shape input arg is not already set.
tensor: The concatenated result. lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient.
func TensorListElementShape ¶
func TensorListElementShape(scope *Scope, input_handle tf.Output, shape_type tf.DataType) (element_shape tf.Output)
The shape of the elements of the given list, as a tensor.
input_handle: the list element_shape: the shape of elements of the list
func TensorListFromTensor ¶
func TensorListFromTensor(scope *Scope, tensor tf.Output, element_shape tf.Output) (output_handle tf.Output)
Creates a TensorList which, when stacked, has the value of `tensor`.
Each tensor in the result list corresponds to one row of the input tensor.
tensor: The input tensor. output_handle: The list.
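A small round-trip sketch with illustrative values (not from the generated comment): each row of a matrix becomes one list element, and TensorListStack (documented below) reassembles the original tensor.
```
// Turn a 2x3 matrix into a list of its rows, then stack it back.
s := NewScope()
t := Const(s, [][]int32{{1, 2, 3}, {4, 5, 6}})
elementShape := Const(s, []int32{3}) // shape of each list element
list := TensorListFromTensor(s, t, elementShape)
stacked := TensorListStack(s, list, elementShape, tf.Int32)
_ = stacked // evaluates to the original 2x3 matrix when run
```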
func TensorListGather ¶
func TensorListGather(scope *Scope, input_handle tf.Output, indices tf.Output, element_shape tf.Output, element_dtype tf.DataType) (values tf.Output)
Creates a Tensor by indexing into the TensorList.
Each row in the produced Tensor corresponds to the element in the TensorList specified by the given index (see `tf.gather`).
input_handle: The input tensor list. indices: The indices used to index into the list. values: The tensor.
func TensorListGetItem ¶
func TensorListGetItem(scope *Scope, input_handle tf.Output, index tf.Output, element_shape tf.Output, element_dtype tf.DataType) (item tf.Output)
Returns the item in the list with the given index.
input_handle: the list index: the position in the list from which an element will be retrieved item: the element at that position
func TensorListLength ¶
Returns the number of tensors in the input tensor list.
input_handle: the input list length: the number of tensors in the list
func TensorListPopBack ¶
func TensorListPopBack(scope *Scope, input_handle tf.Output, element_shape tf.Output, element_dtype tf.DataType) (output_handle tf.Output, tensor tf.Output)
Returns the last element of the input list as well as a list with all but that element.
Fails if the list is empty.
input_handle: the input list tensor: the withdrawn last element of the list element_dtype: the type of elements in the list element_shape: the shape of the output tensor
func TensorListPushBack ¶
func TensorListPushBack(scope *Scope, input_handle tf.Output, tensor tf.Output) (output_handle tf.Output)
Returns a list which has the passed-in `Tensor` as last element and the other elements of the given list in `input_handle`.
tensor: The tensor to put on the list. input_handle: The old list. output_handle: A list with the elements of the old list followed by tensor. element_dtype: the type of elements in the list. element_shape: a shape compatible with that of elements in the list.
func TensorListReserve ¶
func TensorListReserve(scope *Scope, element_shape tf.Output, num_elements tf.Output, element_dtype tf.DataType) (handle tf.Output)
List of the given size with empty elements.
element_shape: the shape of the future elements of the list num_elements: the number of elements to reserve handle: the output list element_dtype: the desired type of elements in the list.
func TensorListResize ¶
func TensorListResize(scope *Scope, input_handle tf.Output, size tf.Output) (output_handle tf.Output)
Resizes the list.
input_handle: the input list size: size of the output list
func TensorListScatter ¶
func TensorListScatter(scope *Scope, tensor tf.Output, indices tf.Output, element_shape tf.Output) (output_handle tf.Output)
Creates a TensorList by indexing into a Tensor.
Each member of the TensorList corresponds to one row of the input tensor, specified by the given index (see `tf.gather`).
tensor: The input tensor. indices: The indices used to index into the list. element_shape: The shape of the elements in the list (can be less specified than
the shape of the tensor).
output_handle: The TensorList.
func TensorListScatterIntoExistingList ¶
func TensorListScatterIntoExistingList(scope *Scope, input_handle tf.Output, tensor tf.Output, indices tf.Output) (output_handle tf.Output)
Scatters tensor at indices in an input list.
Each member of the TensorList corresponds to one row of the input tensor, specified by the given index (see `tf.gather`).
input_handle: The list to scatter into. tensor: The input tensor. indices: The indices used to index into the list. output_handle: The TensorList.
func TensorListScatterV2 ¶
func TensorListScatterV2(scope *Scope, tensor tf.Output, indices tf.Output, element_shape tf.Output, num_elements tf.Output) (output_handle tf.Output)
Creates a TensorList by indexing into a Tensor.
Each member of the TensorList corresponds to one row of the input tensor, specified by the given index (see `tf.gather`).
tensor: The input tensor. indices: The indices used to index into the list. element_shape: The shape of the elements in the list (can be less specified than
the shape of the tensor).
num_elements: The size of the output list. Must be large enough to accommodate
the largest index in indices. If -1, the list is just large enough to include the largest index in indices.
output_handle: The TensorList.
func TensorListSetItem ¶
func TensorListSetItem(scope *Scope, input_handle tf.Output, index tf.Output, item tf.Output, optional ...TensorListSetItemAttr) (output_handle tf.Output)
Sets the index-th position of the list to contain the given tensor.
input_handle: the list index: the position in the list to which the tensor will be assigned item: the element to be assigned to that position output_handle: the new list, with the element in the proper position
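A sketch combining TensorListReserve, TensorListSetItem and TensorListGetItem, with illustrative values; it is not part of the generated comment.
```
// Reserve a list of 2 scalar elements, assign position 0, read it back.
s := NewScope()
elementShape := Const(s, []int32{}) // empty shape vector: scalar elements
list := TensorListReserve(s, elementShape, Const(s, int32(2)), tf.Float)
list = TensorListSetItem(s, list, Const(s, int32(0)), Const(s, float32(42)))
item := TensorListGetItem(s, list, Const(s, int32(0)), elementShape, tf.Float)
_ = item // evaluates to 42 when the graph is run
```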
func TensorListSplit ¶
func TensorListSplit(scope *Scope, tensor tf.Output, element_shape tf.Output, lengths tf.Output) (output_handle tf.Output)
Splits a tensor into a list.
list[i] corresponds to lengths[i] tensors from the input tensor. The tensor must have rank at least 1 and contain exactly sum(lengths) elements.
tensor: The input tensor. element_shape: A shape compatible with that of elements in the tensor. lengths: Vector of sizes of the 0th dimension of tensors in the list. output_handle: The list.
func TensorListStack ¶
func TensorListStack(scope *Scope, input_handle tf.Output, element_shape tf.Output, element_dtype tf.DataType, optional ...TensorListStackAttr) (tensor tf.Output)
Stacks all tensors in the list.
Requires that all tensors have the same shape.
input_handle: the input list tensor: the gathered result num_elements: optional. If not -1, the number of elements in the list.
func TensorMapErase ¶
func TensorMapErase(scope *Scope, input_handle tf.Output, key tf.Output, value_dtype tf.DataType) (output_handle tf.Output)
Returns a tensor map with item from given key erased.
input_handle: the original map output_handle: the map with value from given key removed key: the key of the value to be erased
func TensorMapHasKey ¶
Returns whether the given key exists in the map.
input_handle: the input map key: the key to check has_key: whether the key is already in the map or not
func TensorMapInsert ¶
func TensorMapInsert(scope *Scope, input_handle tf.Output, key tf.Output, value tf.Output) (output_handle tf.Output)
Returns a map that is the 'input_handle' with the given key-value pair inserted.
input_handle: the original map output_handle: the map with key and value inserted key: the key to be inserted value: the value to be inserted
func TensorMapLookup ¶
func TensorMapLookup(scope *Scope, input_handle tf.Output, key tf.Output, value_dtype tf.DataType) (value tf.Output)
Returns the value from a given key in a tensor map.
input_handle: the input map key: the key to be looked up value: the value found from the given key
func TensorMapSize ¶
Returns the number of tensors in the input tensor map.
input_handle: the input map size: the number of tensors in the map
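A sketch of basic map usage (insert, look up, size), with illustrative values. It assumes the op set also exposes an `EmptyTensorMap` op to create the initial empty map and that `TensorMapSize` follows the usual `(scope, input_handle)` form; neither signature is reproduced in this section.
```
// Start from an (assumed) empty-map op, insert one key/value pair, look it up.
s := NewScope()
m := EmptyTensorMap(s) // assumed creation op for an empty tensor map
m = TensorMapInsert(s, m, Const(s, int32(1)), Const(s, float32(42)))
v := TensorMapLookup(s, m, Const(s, int32(1)), tf.Float)
n := TensorMapSize(s, m) // assumed signature: (scope, input_handle)
_, _ = v, n              // v evaluates to 42 and n to 1 when run
```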
func TensorMapStackKeys ¶
func TensorMapStackKeys(scope *Scope, input_handle tf.Output, key_dtype tf.DataType) (keys tf.Output)
Returns a Tensor stack of all keys in a tensor map.
input_handle: the input map keys: the returned Tensor of all keys in the map
func TensorScatterAdd ¶
func TensorScatterAdd(scope *Scope, tensor tf.Output, indices tf.Output, updates tf.Output, optional ...TensorScatterAddAttr) (output tf.Output)
Adds sparse `updates` to an existing tensor according to `indices`.
This operation creates a new tensor by adding sparse `updates` to the passed in `tensor`. This operation is very similar to `tf.compat.v1.scatter_nd_add`, except that the updates are added onto an existing tensor (as opposed to a variable). If the memory for the existing tensor cannot be re-used, a copy is made and updated.
`indices` is an integer tensor containing indices into a new tensor of shape `tensor.shape`. The last dimension of `indices` can be at most the rank of `tensor.shape`:
``` indices.shape[-1] <= tensor.shape.rank ```
The last dimension of `indices` corresponds to indices into elements (if `indices.shape[-1] = tensor.shape.rank`) or slices (if `indices.shape[-1] < tensor.shape.rank`) along dimension `indices.shape[-1]` of `tensor.shape`. `updates` is a tensor with shape
``` indices.shape[:-1] + tensor.shape[indices.shape[-1]:] ```
The simplest form of `tensor_scatter_nd_add` is to add individual elements to a tensor by index. For example, say we want to add 4 elements in a rank-1 tensor with 8 elements.
In Python, this scatter add operation would look like this:
>>> indices = tf.constant([[4], [3], [1], [7]])
>>> updates = tf.constant([9, 10, 11, 12])
>>> tensor = tf.ones([8], dtype=tf.int32)
>>> updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
>>> updated
<tf.Tensor: shape=(8,), dtype=int32, numpy=array([ 1, 12, 1, 11, 10, 1, 1, 13], dtype=int32)>
We can also add entire slices of a higher rank tensor all at once. For example, we can add two slices in the first dimension of a rank-3 tensor with two matrices of new values.
In Python, this scatter add operation would look like this:
>>> indices = tf.constant([[0], [2]])
>>> updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
...                         [7, 7, 7, 7], [8, 8, 8, 8]],
...                        [[5, 5, 5, 5], [6, 6, 6, 6],
...                         [7, 7, 7, 7], [8, 8, 8, 8]]])
>>> tensor = tf.ones([4, 4, 4], dtype=tf.int32)
>>> updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
>>> updated
<tf.Tensor: shape=(4, 4, 4), dtype=int32, numpy=
array([[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]],
       [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
       [[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]],
       [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]], dtype=int32)>
If `indices` contains any out-of-bound indices, depending on `bad_indices_policy`, the op will either return an error or ignore the out-of-bound indices. `bad_indices_policy` can be one of the following values:
- "" or "DEFAULT": raises on CPU and ignore on GPU. This is because historically on CPU and GPU we handle errors in different ways, and for backward compatibility we keep the default behavior.
- "ERROR": raises error; GPU does not support this value.
- "IGNORE": ignore the bad indices; supported on both CPU and GPU.
Arguments:
tensor: Tensor to copy/update. indices: Index tensor. updates: Updates to scatter into output.
Returns A new tensor copied from tensor and updates added according to the indices.
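The rank-1 Python example above translates directly to this package. A minimal graph-construction sketch with illustrative values:
```
// Add 4 scattered updates into a rank-1 tensor of ones.
// Expected result when run: [1 12 1 11 10 1 1 13].
s := NewScope()
tensor := Const(s, []int32{1, 1, 1, 1, 1, 1, 1, 1})
indices := Const(s, [][]int32{{4}, {3}, {1}, {7}})
updates := Const(s, []int32{9, 10, 11, 12})
output := TensorScatterAdd(s, tensor, indices, updates)
_ = output
```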
func TensorScatterMax ¶
func TensorScatterMax(scope *Scope, tensor tf.Output, indices tf.Output, updates tf.Output, optional ...TensorScatterMaxAttr) (output tf.Output)
Apply a sparse update to a tensor taking the element-wise maximum.
Returns a new tensor copied from `tensor` whose values are element-wise maximum between tensor and updates according to the indices.
>>> tensor = [0, 0, 0, 0, 0, 0, 0, 0]
>>> indices = [[1], [4], [5]]
>>> updates = [1, -1, 1]
>>> tf.tensor_scatter_nd_max(tensor, indices, updates).numpy()
array([0, 1, 0, 0, 0, 1, 0, 0], dtype=int32)
Refer to `tf.tensor_scatter_nd_update` for more details.
Arguments:
tensor: Tensor to update. indices: Index tensor. updates: Updates to scatter into output.
Returns A new tensor copied from tensor whose values are element-wise maximum between tensor and updates according to the indices.
func TensorScatterSub ¶
func TensorScatterSub(scope *Scope, tensor tf.Output, indices tf.Output, updates tf.Output, optional ...TensorScatterSubAttr) (output tf.Output)
Subtracts sparse `updates` from an existing tensor according to `indices`.
This operation creates a new tensor by subtracting sparse `updates` from the passed in `tensor`. This operation is very similar to `tf.scatter_nd_sub`, except that the updates are subtracted from an existing tensor (as opposed to a variable). If the memory for the existing tensor cannot be re-used, a copy is made and updated.
`indices` is an integer tensor containing indices into a new tensor of shape `shape`. The last dimension of `indices` can be at most the rank of `shape`:
indices.shape[-1] <= shape.rank
The last dimension of `indices` corresponds to indices into elements (if `indices.shape[-1] = shape.rank`) or slices (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of `shape`. `updates` is a tensor with shape
indices.shape[:-1] + shape[indices.shape[-1]:]
The simplest form of tensor_scatter_sub is to subtract individual elements from a tensor by index. For example, say we want to subtract 4 scattered elements from a rank-1 tensor with 8 elements.
In Python, this scatter subtract operation would look like this:
```python
indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])
tensor = tf.ones([8], dtype=tf.int32)
updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
print(updated)
```
The resulting tensor would look like this:
[1, -10, 1, -9, -8, 1, 1, -11]
We can also operate on entire slices of a higher rank tensor all at once. For example, we can subtract two slices in the first dimension of a rank-3 tensor using two matrices of new values.
In Python, this scatter subtract operation would look like this:
```python
indices = tf.constant([[0], [2]])
updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
                       [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]])
tensor = tf.ones([4, 4, 4], dtype=tf.int32)
updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
print(updated)
```
The resulting tensor would look like this:
[[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]], [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]], [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]
Note that on CPU, if an out of bound index is found, an error is returned. On GPU, if an out of bound index is found, the index is ignored.
Arguments:
tensor: Tensor to copy/update. indices: Index tensor. updates: Updates to scatter into output.
Returns A new tensor copied from tensor and updates subtracted according to the indices.
func TensorScatterUpdate ¶
func TensorScatterUpdate(scope *Scope, tensor tf.Output, indices tf.Output, updates tf.Output, optional ...TensorScatterUpdateAttr) (output tf.Output)
Scatter `updates` into an existing tensor according to `indices`.
This operation creates a new tensor by applying sparse `updates` to the passed in `tensor`. This operation is very similar to `tf.scatter_nd`, except that the updates are scattered onto an existing tensor (as opposed to a zero-tensor). If the memory for the existing tensor cannot be re-used, a copy is made and updated.
If `indices` contains duplicates, then we pick the last update for the index.
**WARNING**: There are some GPU specific semantics for this operation.
- If an out of bound index is found, the index is ignored.
- The order in which updates are applied is nondeterministic, so the output will be nondeterministic if `indices` contains duplicates.
`indices` is an integer tensor containing indices into a new tensor of shape `shape`.
- `indices` must have at least 2 axes: `(num_updates, index_depth)`.
- The last axis of `indices` is how deep to index into `tensor`, so this index depth must not exceed the rank of `tensor`: `indices.shape[-1] <= tensor.ndim`
If `indices.shape[-1] = tensor.rank`, this Op indexes and updates scalar elements. If `indices.shape[-1] < tensor.rank`, it indexes and updates slices of the input `tensor`.
Each `update` has a rank of `tensor.rank - indices.shape[-1]`. The overall shape of `updates` is:
``` indices.shape[:-1] + tensor.shape[indices.shape[-1]:] ```
If `indices` contains any out-of-bound indices, depending on `bad_indices_policy`, the op will either return an error or ignore the out-of-bound indices. `bad_indices_policy` can be one of the following values:
- "" or "DEFAULT": raises on CPU and ignore on GPU. This is because historically on CPU and GPU we handle errors in different ways, and for backward compatibility we keep the default behavior.
- "ERROR": raises error; GPU does not support this value.
- "IGNORE": ignore the bad indices; supported on both CPU and GPU.
For usage examples see the python [tf.tensor_scatter_nd_update]( https://www.tensorflow.org/api_docs/python/tf/tensor_scatter_nd_update) function
Arguments:
tensor: Tensor to copy/update. indices: Index tensor. updates: Updates to scatter into output.
Returns A new tensor with the given shape and updates applied according to the indices.
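A short sketch of an element-wise update (index depth equal to the rank of `tensor`), with illustrative values:
```
// Overwrite positions 1 and 3 of a rank-1 tensor.
// Expected result when run: [0 9 0 10].
s := NewScope()
tensor := Const(s, []float32{0, 0, 0, 0})
indices := Const(s, [][]int32{{1}, {3}})
updates := Const(s, []float32{9, 10})
output := TensorScatterUpdate(s, tensor, indices, updates)
_ = output
```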
func TensorSliceDataset ¶
func TensorSliceDataset(scope *Scope, components []tf.Output, output_shapes []tf.Shape, optional ...TensorSliceDatasetAttr) (handle tf.Output)
Creates a dataset that emits each dim-0 slice of `components` once.
func TensorStridedSliceUpdate ¶
func TensorStridedSliceUpdate(scope *Scope, input tf.Output, begin tf.Output, end tf.Output, strides tf.Output, value tf.Output, optional ...TensorStridedSliceUpdateAttr) (output tf.Output)
Assign `value` to the sliced l-value reference of `input`.
The values of `value` are assigned to the positions in the tensor `input` that are selected by the slice parameters. The slice parameters `begin` `end` `strides` etc. work exactly as in `StridedSlice`.
NOTE this op currently does not support broadcasting and so `value`'s shape must be exactly the shape produced by the slice of `input`.
func TensorSummary ¶
func TensorSummary(scope *Scope, tensor tf.Output, optional ...TensorSummaryAttr) (summary tf.Output)
Outputs a `Summary` protocol buffer with a tensor.
This op is being phased out in favor of TensorSummaryV2, which lets callers pass a tag as well as a serialized SummaryMetadata proto string that contains plugin-specific data. We will keep this op to maintain backwards compatibility.
Arguments:
tensor: A tensor to serialize.
func TensorSummaryV2 ¶
func TensorSummaryV2(scope *Scope, tag tf.Output, tensor tf.Output, serialized_summary_metadata tf.Output) (summary tf.Output)
Outputs a `Summary` protocol buffer with a tensor and per-plugin data.
Arguments:
tag: A string attached to this summary. Used for organization in TensorBoard. tensor: A tensor to serialize. serialized_summary_metadata: A serialized SummaryMetadata proto. Contains plugin
data.
func TextLineDataset ¶
func TextLineDataset(scope *Scope, filenames tf.Output, compression_type tf.Output, buffer_size tf.Output, optional ...TextLineDatasetAttr) (handle tf.Output)
Creates a dataset that emits the lines of one or more text files.
Arguments:
filenames: A scalar or a vector containing the name(s) of the file(s) to be
read.
compression_type: A scalar containing either (i) the empty string (no
compression), (ii) "ZLIB", or (iii) "GZIP".
buffer_size: A scalar containing the number of bytes to buffer.
func TextLineReaderV2 ¶
func TextLineReaderV2(scope *Scope, optional ...TextLineReaderV2Attr) (reader_handle tf.Output)
A Reader that outputs the lines of a file delimited by '\n'.
Returns The handle to reference the Reader.
func ThreadPoolDataset ¶
func ThreadPoolDataset(scope *Scope, input_dataset tf.Output, thread_pool tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output)
Creates a dataset that uses a custom thread pool to compute `input_dataset`.
Arguments:
thread_pool: A resource produced by the ThreadPoolHandle op.
func ThreadPoolHandle ¶
func ThreadPoolHandle(scope *Scope, num_threads int64, display_name string, optional ...ThreadPoolHandleAttr) (handle tf.Output)
Creates a custom thread pool resource that datasets (such as ThreadPoolDataset) can use to run their computations.
Arguments:
num_threads: The number of threads in the thread pool. display_name: A human-readable name for the threads that may be visible in some
visualizations.
Returns A resource that can be consumed by one or more ExperimentalThreadPoolDataset ops.
func ThreadUnsafeUnigramCandidateSampler ¶
func ThreadUnsafeUnigramCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...ThreadUnsafeUnigramCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output)
Generates labels for candidate sampling with a learned unigram distribution.
See explanations of candidate sampling and the data formats at go/candidate-sampling.
For each batch, this op picks a single set of sampled candidate labels.
The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the true labels.
Arguments:
true_classes: A batch_size * num_true matrix, in which each row contains the
IDs of the num_true target_classes in the corresponding original label.
num_true: Number of true labels per context. num_sampled: Number of candidates to randomly sample. unique: If unique is true, we sample with rejection, so that all sampled
candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities.
range_max: The sampler will sample integers from the interval [0, range_max).
Returns:
sampled_candidates: A vector of length num_sampled, in which each element is
the ID of a sampled candidate.
true_expected_count: A batch_size * num_true matrix, representing
the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
sampled_expected_count: A vector of length num_sampled, for each sampled
candidate representing the number of times the candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
func Tile ¶
Constructs a tensor by tiling a given tensor.
This operation creates a new tensor by replicating `input` `multiples` times. The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements, and the values of `input` are replicated `multiples[i]` times along the 'i'th dimension. For example, tiling `[a b c d]` by `[2]` produces `[a b c d a b c d]`.
>>> a = tf.constant([[1,2,3],[4,5,6]], tf.int32)
>>> b = tf.constant([1,2], tf.int32)
>>> tf.tile(a, b)
<tf.Tensor: shape=(2, 6), dtype=int32, numpy=
array([[1, 2, 3, 1, 2, 3],
       [4, 5, 6, 4, 5, 6]], dtype=int32)>
>>> c = tf.constant([2,1], tf.int32)
>>> tf.tile(a, c)
<tf.Tensor: shape=(4, 3), dtype=int32, numpy=
array([[1, 2, 3],
       [4, 5, 6],
       [1, 2, 3],
       [4, 5, 6]], dtype=int32)>
>>> d = tf.constant([2,2], tf.int32)
>>> tf.tile(a, d)
<tf.Tensor: shape=(4, 6), dtype=int32, numpy=
array([[1, 2, 3, 1, 2, 3],
       [4, 5, 6, 4, 5, 6],
       [1, 2, 3, 1, 2, 3],
       [4, 5, 6, 4, 5, 6]], dtype=int32)>
Arguments:
input: Can be of any rank. multiples: 1-D. Length must be the same as the number of dimensions in `input`
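A Go sketch mirroring the last Python example above, assuming the conventional generated form `Tile(scope, input, multiples)` (the signature is not reproduced in this section) and illustrative values:
```
// Tile a 2x3 constant by [2, 2]; the result has shape 4x6.
s := NewScope()
a := Const(s, [][]int32{{1, 2, 3}, {4, 5, 6}})
multiples := Const(s, []int32{2, 2})
tiled := Tile(s, a, multiples) // assumed signature: Tile(scope, input, multiples)
_ = tiled
```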
func TileGrad ¶
Returns the gradient of `Tile`.
DEPRECATED at GraphDef version 3: TileGrad has been replaced with reduce_sum
Since `Tile` takes an input and repeats the input `multiples` times along each dimension, `TileGrad` takes in `multiples` and aggregates each repeated tile of `input` into `output`.
func Timestamp ¶
Provides the time since epoch in seconds.
Returns the timestamp as a `float64` for seconds since the Unix epoch.
Common usages include: * Logging * Providing a random number seed * Debugging graph execution * Generating timing information, mainly through comparison of timestamps
Note: In graph mode, the timestamp is computed when the op is executed, not when it is added to the graph. In eager mode, the timestamp is computed when the op is eagerly executed.
func ToBool ¶
Converts a tensor to a scalar predicate.
Converts a tensor to a scalar predicate with the following rules:
For 0D tensors, truthiness is determined by comparing against a "zero" value. For numerical types it is the obvious zero. For strings it is the empty string.
For >0D tensors, truthiness is determined by looking at the number of elements. If the tensor has zero elements, then the result is false. Otherwise the result is true.
This matches the behavior of If and While for determining if a tensor counts as true/false for a branch condition.
func TopK ¶
func TopK(scope *Scope, input tf.Output, k int64, optional ...TopKAttr) (values tf.Output, indices tf.Output)
Finds values and indices of the `k` largest elements for the last dimension.
DEPRECATED at GraphDef version 7: Use TopKV2 instead
If the input is a vector (rank-1), finds the `k` largest entries in the vector and outputs their values and indices as vectors. Thus `values[j]` is the `j`-th largest entry in `input`, and its index is `indices[j]`.
For matrices (resp. higher rank input), computes the top `k` entries in each row (resp. vector along the last dimension). Thus,
values.shape = indices.shape = input.shape[:-1] + [k]
If two elements are equal, the lower-index element appears first.
If `k` varies dynamically, use `TopKV2` below.
Arguments:
input: 1-D or higher with last dimension at least `k`. k: Number of top elements to look for along the last dimension (along each
row for matrices).
Returns:
values: The `k` largest elements along each last dimensional slice. indices: The indices of `values` within the last dimension of `input`.
func TopKUnique ¶
Returns the TopK unique values in the array in sorted order.
The running time is proportional to the product of K and the input size. Sorting the whole array is more efficient for sufficiently large values of K. The median-of-medians algorithm is probably faster, but difficult to implement efficiently in XLA. If there are fewer than K unique numbers (not NaNs), the results are padded with negative infinity. NaNs are never returned. Subnormal numbers are flushed to zero. If an element appears at multiple indices, the highest index is returned. If a TopK element never appears in the input due to padding values, the indices are padded with negative one. If a padding value appears in the input and padding is needed, the highest index of the padding value will be returned. The semantics are not the same as kth_order_statistic.
func TopKV2 ¶
func TopKV2(scope *Scope, input tf.Output, k tf.Output, optional ...TopKV2Attr) (values tf.Output, indices tf.Output)
Finds values and indices of the `k` largest elements for the last dimension.
If the input is a vector (rank-1), finds the `k` largest entries in the vector and outputs their values and indices as vectors. Thus `values[j]` is the `j`-th largest entry in `input`, and its index is `indices[j]`.
For matrices (resp. higher rank input), computes the top `k` entries in each row (resp. vector along the last dimension). Thus,
values.shape = indices.shape = input.shape[:-1] + [k]
If two elements are equal, the lower-index element appears first.
Arguments:
input: 1-D or higher with last dimension at least `k`. k: 0-D. Number of top elements to look for along the last dimension (along each
row for matrices).
Returns:
values: The `k` largest elements along each last dimensional slice. indices: The indices of `values` within the last dimension of `input`.
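A minimal sketch with illustrative values; because `k` is a 0-D tensor here, it could equally be a Placeholder fed at run time:
```
// Take the top 2 values of a vector.
s := NewScope()
input := Const(s, []float32{1, 7, 3, 9, 5})
k := Const(s, int32(2))
values, indices := TopKV2(s, input, k)
_, _ = values, indices // values -> [9 7], indices -> [3 1] when run
```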
func TopKWithUnique ¶
func TopKWithUnique(scope *Scope, input tf.Output, k int64) (topk tf.Output, topk_indices tf.Output)
Returns the TopK values in the array in sorted order.
This is a combination of MakeUnique and TopKUnique. The returned top-K will have its lower bits replaced by iota, thus it will be close to the original value but not exactly the same. The running time is proportional to the product of K and the input size. NaNs are never returned. Subnormal numbers are flushed to zero.
func TpuHandleToProtoKey ¶ added in v0.2.0
Converts XRT's uid handles to TensorFlow-friendly input format.
Converts a uid handle for a compiled program into a vector of proto keys.
XRT compile ops return uids, and the TensorFlow execute op takes a proto key. This op enables a client to compile on TPU using XRT and execute using the standard TensorFlow execute op.
'uid' is the input handle. 'proto_keys' is a vector of proto keys, one for each core program.
func Transpose ¶
Shuffle dimensions of x according to a permutation.
The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
`y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
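A short sketch, assuming the conventional generated form `Transpose(scope, x, perm)` (the signature is not reproduced in this section) and illustrative values:
```
// Swap the two axes of a 2x3 matrix; the result has shape 3x2.
s := NewScope()
x := Const(s, [][]int32{{1, 2, 3}, {4, 5, 6}})
perm := Const(s, []int32{1, 0})
y := Transpose(s, x, perm) // assumed signature: Transpose(scope, x, perm)
_ = y
```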
func TridiagonalMatMul ¶
func TridiagonalMatMul(scope *Scope, superdiag tf.Output, maindiag tf.Output, subdiag tf.Output, rhs tf.Output) (output tf.Output)
Calculates the product with a tridiagonal matrix.
Calculates the product of two matrices, where the left matrix is a tridiagonal matrix.
Arguments:
superdiag: Tensor of shape `[..., 1, M]`, representing superdiagonals of
tri-diagonal matrices to the left of multiplication. Last element is ignored.
maindiag: Tensor of shape `[..., 1, M]`, representing main diagonals of tri-diagonal
matrices to the left of multiplication.
subdiag: Tensor of shape `[..., 1, M]`, representing subdiagonals of tri-diagonal
matrices to the left of multiplication. First element is ignored.
rhs: Tensor of shape `[..., M, N]`, representing MxN matrices to the right of
multiplication.
Returns Tensor of shape `[..., M, N]` containing the product.
func TridiagonalSolve ¶
func TridiagonalSolve(scope *Scope, diagonals tf.Output, rhs tf.Output, optional ...TridiagonalSolveAttr) (output tf.Output)
Solves tridiagonal systems of equations.
Solves tridiagonal systems of equations. Supports batch dimensions and multiple right-hand sides per each left-hand side. On CPU, solution is computed via Gaussian elimination with or without partial pivoting, depending on `partial_pivoting` attribute. On GPU, Nvidia's cuSPARSE library is used: https://docs.nvidia.com/cuda/cusparse/index.html#gtsv Partial pivoting is not yet supported by XLA backends.
Arguments:
diagonals: Tensor of shape `[..., 3, M]` whose innermost 2 dimensions represent the
tridiagonal matrices, with the three rows being the superdiagonal, diagonal, and subdiagonal, in order. The last element of the superdiagonal and the first element of the subdiagonal are ignored.
rhs: Tensor of shape `[..., M, K]`, representing K right-hand sides per each
left-hand side.
Returns Tensor of shape `[..., M, K]` containing the solutions
func TruncateDiv ¶
Returns x / y element-wise, rounded towards zero.
Truncation designates that negative numbers will round fractional quantities toward zero, i.e. -7 / 5 = -1. This matches C semantics but is different from Python semantics. See `FloorDiv` for a division function that matches Python semantics.
*NOTE*: `TruncateDiv` supports broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func TruncateMod ¶
Returns element-wise remainder of division.
This emulates C semantics in that the result here is consistent with a truncating divide. E.g. `truncate(x / y) * y + truncate_mod(x, y) = x`.
*NOTE*: `TruncateMod` supports broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
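A sketch of the truncating identity above with x = -7 and y = 5: TruncateDiv yields -1 and TruncateMod yields -2, so -1*5 + (-2) == -7. It assumes the conventional binary form `(scope, x, y)` for both ops (the signatures are not reproduced in this section):
```
// C-style division and remainder on int32 constants.
s := NewScope()
x := Const(s, []int32{-7})
y := Const(s, []int32{5})
quotient := TruncateDiv(s, x, y)  // assumed signature: (scope, x, y); -> [-1]
remainder := TruncateMod(s, x, y) // assumed signature: (scope, x, y); -> [-2]
_, _ = quotient, remainder
```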
func TruncatedNormal ¶
func TruncatedNormal(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...TruncatedNormalAttr) (output tf.Output)
Outputs random values from a truncated normal distribution.
The generated values follow a normal distribution with mean 0 and standard deviation 1, except that values whose magnitude is more than 2 standard deviations from the mean are dropped and re-picked.
Arguments:
shape: The shape of the output tensor. dtype: The type of the output.
Returns A tensor of the specified shape filled with random truncated normal values.
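A minimal sketch with an illustrative shape:
```
// Draw a 2x3 tensor of truncated normal samples (mean 0, stddev 1,
// values beyond two standard deviations are resampled).
s := NewScope()
shape := Const(s, []int32{2, 3})
samples := TruncatedNormal(s, shape, tf.Float)
_ = samples
```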
func Unbatch ¶
func Unbatch(scope *Scope, batched_tensor tf.Output, batch_index tf.Output, id tf.Output, timeout_micros int64, optional ...UnbatchAttr) (unbatched_tensor tf.Output)
Reverses the operation of Batch for a single output Tensor.
An instance of Unbatch either receives an empty batched_tensor, in which case it asynchronously waits until the values become available from a concurrently running instance of Unbatch with the same container and shared_name, or receives a non-empty batched_tensor in which case it finalizes all other concurrently running instances and outputs its own element from the batch.
batched_tensor: The possibly transformed output of Batch. The size of the first
dimension should remain unchanged by the transformations for the operation to work.
batch_index: The matching batch_index obtained from Batch. id: The id scalar emitted by Batch. unbatched_tensor: The Tensor corresponding to this execution. timeout_micros: Maximum amount of time (in microseconds) to wait to receive the
batched input tensor associated with a given invocation of the op.
container: Container to control resource sharing. shared_name: Instances of Unbatch with the same container and shared_name are
assumed to possibly belong to the same batch. If left empty, the op name will be used as the shared name.
func UnbatchDataset ¶
func UnbatchDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...UnbatchDatasetAttr) (handle tf.Output)
A dataset that splits the elements of its input into multiple elements.
func UnbatchGrad ¶
func UnbatchGrad(scope *Scope, original_input tf.Output, batch_index tf.Output, grad tf.Output, id tf.Output, optional ...UnbatchGradAttr) (batched_grad tf.Output)
Gradient of Unbatch.
Acts like Batch, but uses the given batch_index to index things as they become available. This ensures that the gradients are propagated back in the same session that ran the forward pass.
original_input: The input to the Unbatch operation this is the gradient of. batch_index: The batch_index given to the Unbatch operation this is the gradient of. grad: The downstream gradient. id: The id scalar emitted by Batch. batched_grad: The return value, either an empty tensor or the batched gradient. container: Container to control resource sharing. shared_name: Instances of UnbatchGrad with the same container and shared_name
are assumed to possibly belong to the same batch. If left empty, the op name will be used as the shared name.
func UncompressElement ¶
func UncompressElement(scope *Scope, compressed tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output)
Uncompresses a compressed dataset element.
func UnicodeDecode ¶
func UnicodeDecode(scope *Scope, input tf.Output, input_encoding string, optional ...UnicodeDecodeAttr) (row_splits tf.Output, char_values tf.Output)
Decodes each string in `input` into a sequence of Unicode code points.
The character codepoints for all strings are returned using a single vector `char_values`, with strings expanded to characters in row-major order.
The `row_splits` tensor indicates where the codepoints for each input string begin and end within the `char_values` tensor. In particular, the values for the `i`th string (in row-major order) are stored in the slice `[row_splits[i]:row_splits[i+1]]`. Thus:
- `char_values[row_splits[i]+j]` is the Unicode codepoint for the `j`th character in the `i`th string (in row-major order).
- `row_splits[i+1] - row_splits[i]` is the number of characters in the `i`th string (in row-major order).
Arguments:
input: The text to be decoded. Can have any shape. Note that the output is flattened
to a vector of char values.
input_encoding: Text encoding of the input strings. This is any of the encodings supported
by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`.
Returns:
row_splits: A 1D int32 tensor containing the row splits. char_values: A 1D int32 Tensor containing the decoded codepoints.
func UnicodeDecodeWithOffsets ¶
func UnicodeDecodeWithOffsets(scope *Scope, input tf.Output, input_encoding string, optional ...UnicodeDecodeWithOffsetsAttr) (row_splits tf.Output, char_values tf.Output, char_to_byte_starts tf.Output)
Decodes each string in `input` into a sequence of Unicode code points.
The character codepoints for all strings are returned using a single vector `char_values`, with strings expanded to characters in row-major order. Similarly, the character start byte offsets are returned using a single vector `char_to_byte_starts`, with strings expanded in row-major order.
The `row_splits` tensor indicates where the codepoints and start offsets for each input string begin and end within the `char_values` and `char_to_byte_starts` tensors. In particular, the values for the `i`th string (in row-major order) are stored in the slice `[row_splits[i]:row_splits[i+1]]`. Thus:
- `char_values[row_splits[i]+j]` is the Unicode codepoint for the `j`th character in the `i`th string (in row-major order).
- `char_to_bytes_starts[row_splits[i]+j]` is the start byte offset for the `j`th character in the `i`th string (in row-major order).
- `row_splits[i+1] - row_splits[i]` is the number of characters in the `i`th string (in row-major order).
Arguments:
input: The text to be decoded. Can have any shape. Note that the output is flattened
to a vector of char values.
input_encoding: Text encoding of the input strings. This is any of the encodings supported
by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`.
Returns:
row_splits: A 1D int32 tensor containing the row splits. char_values: A 1D int32 Tensor containing the decoded codepoints. char_to_byte_starts: A 1D int32 Tensor containing the byte index in the input string where each
character in `char_values` starts.
func UnicodeEncode ¶
func UnicodeEncode(scope *Scope, input_values tf.Output, input_splits tf.Output, output_encoding string, optional ...UnicodeEncodeAttr) (output tf.Output)
Encode a tensor of ints into unicode strings.
Returns a vector of strings, where `output[i]` is constructed by encoding the Unicode codepoints in `input_values[input_splits[i]:input_splits[i+1]]` using `output_encoding`.
---
Example:
``` input_values = [72, 101, 108, 108, 111, 87, 111, 114, 108, 100] input_splits = [0, 5, 10] output_encoding = 'UTF-8'
output = ['Hello', 'World'] ```
Arguments:
input_values: A 1D tensor containing the unicode codepoints that should be encoded. input_splits: A 1D tensor specifying how the unicode codepoints should be split into strings.
In particular, `output[i]` is constructed by encoding the codepoints in the slice `input_values[input_splits[i]:input_splits[i+1]]`.
output_encoding: Unicode encoding of the output strings. Valid encodings are: `"UTF-8",
"UTF-16-BE", and "UTF-32-BE"`.
Returns The 1-D Tensor of strings encoded from the provided unicode codepoints.
func UnicodeScript ¶
Determine the script codes of a given tensor of Unicode integer code points.
This operation converts Unicode code points to script codes corresponding to each code point. Script codes correspond to International Components for Unicode (ICU) UScriptCode values.
See [ICU project docs](http://icu-project.org/apiref/icu4c/uscript_8h.html) for more details on script codes.
For an example, see the unicode strings guide on [unicode scripts] (https://www.tensorflow.org/tutorials/load_data/unicode#representing_unicode).
Returns -1 (USCRIPT_INVALID_CODE) for invalid codepoints. Output shape will match input shape.
Examples:
>>> tf.strings.unicode_script([1, 31, 38]) <tf.Tensor: shape=(3,), dtype=int32, numpy=array([0, 0, 0], dtype=int32)>
Arguments:
input: A Tensor of int32 Unicode code points.
Returns A Tensor of int32 script codes corresponding to each input code point.
func UnicodeTranscode ¶
func UnicodeTranscode(scope *Scope, input tf.Output, input_encoding string, output_encoding string, optional ...UnicodeTranscodeAttr) (output tf.Output)
Transcode the input text from a source encoding to a destination encoding.
The input is a string tensor of any shape. The output is a string tensor of the same shape containing the transcoded strings. Output strings are always valid unicode. If the input contains invalid encoding positions, the `errors` attribute sets the policy for how to deal with them. If the default error-handling policy is used, invalid formatting will be substituted in the output by the `replacement_char`. If the errors policy is to `ignore`, any invalid encoding positions in the input are skipped and not included in the output. If it is set to `strict`, then any invalid formatting will result in an InvalidArgument error.
This operation can be used with `output_encoding = input_encoding` to enforce correct formatting for inputs even if they are already in the desired encoding.
If the input is prefixed by a Byte Order Mark needed to determine encoding (e.g. if the encoding is UTF-16 and the BOM indicates big-endian), then that BOM will be consumed and not emitted into the output. If the input encoding is marked with an explicit endianness (e.g. UTF-16-BE), then the BOM is interpreted as a non-breaking-space and is preserved in the output (including always for UTF-8).
The end result is that if the input is marked as an explicit endianness the transcoding is faithful to all codepoints in the source. If it is not marked with an explicit endianness, the BOM is not considered part of the string itself but as metadata, and so is not preserved in the output.
Examples:
>>> tf.strings.unicode_transcode(["Hello", "TensorFlow", "2.x"], "UTF-8", "UTF-16-BE")
<tf.Tensor: shape=(3,), dtype=string, numpy=
array([b'\x00H\x00e\x00l\x00l\x00o',
       b'\x00T\x00e\x00n\x00s\x00o\x00r\x00F\x00l\x00o\x00w',
       b'\x002\x00.\x00x'], dtype=object)>
>>> tf.strings.unicode_transcode(["A", "B", "C"], "US ASCII", "UTF-8").numpy() array([b'A', b'B', b'C'], dtype=object)
Arguments:
input: The text to be processed. Can have any shape. input_encoding: Text encoding of the input strings. This is any of the encodings supported
by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`.
output_encoding: The unicode encoding to use in the output. Must be one of
`"UTF-8", "UTF-16-BE", "UTF-32-BE"`. Multi-byte encodings will be big-endian.
Returns A string tensor containing unicode text encoded using `output_encoding`.
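A Go sketch mirroring the first Python example above, with illustrative input strings:
```
// Transcode a vector of UTF-8 strings to UTF-16-BE.
s := NewScope()
input := Const(s, []string{"Hello", "TensorFlow", "2.x"})
output := UnicodeTranscode(s, input, "UTF-8", "UTF-16-BE")
_ = output
```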
func UniformCandidateSampler ¶
func UniformCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...UniformCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output)
Generates labels for candidate sampling with a uniform distribution.
See explanations of candidate sampling and the data formats at go/candidate-sampling.
For each batch, this op picks a single set of sampled candidate labels.
The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the true labels.
Arguments:
true_classes: A batch_size * num_true matrix, in which each row contains the
IDs of the num_true target_classes in the corresponding original label.
num_true: Number of true labels per context. num_sampled: Number of candidates to randomly sample. unique: If unique is true, we sample with rejection, so that all sampled
candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities.
range_max: The sampler will sample integers from the interval [0, range_max).
Returns:
sampled_candidates: A vector of length num_sampled, in which each element is
the ID of a sampled candidate.
true_expected_count: A batch_size * num_true matrix, representing
the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
sampled_expected_count: A vector of length num_sampled, for each sampled
candidate representing the number of times the candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
func UniformDequantize ¶ added in v0.2.0
func UniformDequantize(scope *Scope, input tf.Output, scales tf.Output, zero_points tf.Output, Tout tf.DataType, quantization_min_val int64, quantization_max_val int64, optional ...UniformDequantizeAttr) (output tf.Output)
Perform dequantization on the quantized Tensor `input`.
Given quantized `input` which was quantized using `scales` and `zero_points`, performs dequantization using the formula: dequantized_data = (quantized_data - zero_point) * scale.
Arguments:
input: Must be a Tensor of Tin. scales: The float value(s) used as scale(s) when quantizing original data that input represents.
Must be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (input.dim_size(quantization_axis),) (per-axis quantization).
zero_points: The int32 value(s) used as zero_point(s) when quantizing original data that input represents.
Same shape condition as scales.
Tout: The type of output Tensor. A tf.DType from: tf.qint8, tf.qint32 quantization_min_val: The quantization min value that was used when input was quantized.
The purpose of this attribute is typically (but not limited to) to indicate narrow range, where this is set to: `(Tin lowest) + 1` if narrow range, and `(Tin lowest)` otherwise. For example, if Tin is qint8, this is set to -127 if narrow range quantized or -128 if not.
quantization_max_val: The quantization max value that was used when input was quantized.
The purpose of this attribute is typically (but not limited to) indicate narrow range, where this is set to: `(Tout max)` for both narrow range and not narrow range. For example, if Tin is qint8, this is set to 127.
Returns The output dequantized Tensor of Tout, whose shape is the same as that of `input`.
func UniformQuantize ¶ added in v0.3.0
func UniformQuantize(scope *Scope, input tf.Output, scales tf.Output, zero_points tf.Output, Tout tf.DataType, quantization_min_val int64, quantization_max_val int64, optional ...UniformQuantizeAttr) (output tf.Output)
Perform quantization on Tensor `input`.
Given `input`, `scales` and `zero_points`, performs quantization using the formula: quantized_data = floor(input_data * (1.0f / scale) + 0.5f) + zero_point
Arguments:
input: Must be a Tensor of Tin. scales: The float value(s) to use as scale(s) to quantize `input`.
Must be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (input.dim_size(quantization_axis),) (per-axis quantization).
zero_points: The int32 value(s) to use as zero_point(s) to quantize `input`.
Same shape condition as scales.
Tout: The type of output Tensor. A tf.DType from: tf.float32 quantization_min_val: The quantization min value to quantize `input`.
The purpose of this attribute is typically (but not limited to) to indicate narrow range, where this is set to: `(Tin lowest) + 1` if narrow range, and `(Tin lowest)` otherwise. For example, if Tin is qint8, this is set to -127 if narrow range quantized or -128 if not.
quantization_max_val: The quantization max value to quantize `input`.
The purpose of this attribute is typically (but not limited to) indicate narrow range, where this is set to: `(Tout max)` for both narrow range and not narrow range. For example, if Tin is qint8, this is set to 127.
Returns The output quantized Tensor of Tout, whose shape is the same as that of `input`.
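The quantize and dequantize formulas are easiest to follow on a single value. A plain-arithmetic sketch (it does not use the ops themselves), assuming the standard `math` and `fmt` imports and an illustrative scale of 0.5 with zero point 10:
```
// quantize:   floor(3.2 * (1/0.5) + 0.5) + 10 = floor(6.9) + 10 = 16
// dequantize: (16 - 10) * 0.5 = 3.0
scale, zeroPoint := 0.5, 10.0
quantized := math.Floor(3.2*(1.0/scale)+0.5) + zeroPoint
dequantized := (quantized - zeroPoint) * scale
fmt.Println(quantized, dequantized) // prints: 16 3
```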
func UniformQuantizedAdd ¶ added in v0.4.0
func UniformQuantizedAdd(scope *Scope, lhs tf.Output, rhs tf.Output, lhs_scales tf.Output, lhs_zero_points tf.Output, rhs_scales tf.Output, rhs_zero_points tf.Output, output_scales tf.Output, output_zero_points tf.Output, lhs_quantization_min_val int64, lhs_quantization_max_val int64, rhs_quantization_min_val int64, rhs_quantization_max_val int64, output_quantization_min_val int64, output_quantization_max_val int64, optional ...UniformQuantizedAddAttr) (output tf.Output)
Perform quantized add of quantized Tensor `lhs` and quantized Tensor `rhs` to make quantized `output`.
Given quantized `lhs` and quantized `rhs`, performs quantized add on `lhs` and `rhs` to make quantized `output`.
`UniformQuantizedAdd` follows Numpy broadcasting rules. The two input array shapes are compared element-wise. Starting with the trailing dimensions, the two dimensions either have to be equal or one of them needs to be 1.
`lhs` and `rhs` must be quantized Tensor, where data value is quantized using the formula: ``` quantized_data = clip(original_data / scale + zero_point, quantization_min_val, quantization_max_val) ``` `output` is also quantized, using the same formula.
If `lhs` and `output` are both per-axis quantized, the quantization axis must match. Also, if `rhs` and `output` are both per-axis quantized, the quantization axis must match. *Match* means the axis must match when adding, taking broadcasting into account; i.e. for both operands `lhs` and `rhs`, if `operand.quantization_axis` >= 0 and `output.quantization_axis` >= 0, `operand.dims` - `operand.quantization_axis` must be equal to `output.dims` - `output.quantization_axis`.
Arguments:
lhs: Must be a quantized tensor. rhs: Must be a quantized tensor. lhs_scales: The float value(s) used as scale factors when quantizing the original data that `lhs` represents. lhs_zero_points: The int32 value(s) used as zero points when quantizing original data that `lhs` represents.
Must have same shape with `lhs_scales`.
rhs_scales: The float value(s) used as scale factors when quantizing the original data that `rhs` represents. rhs_zero_points: The int32 value(s) used as zero points when quantizing original data that `rhs` represents.
Must have same shape with `rhs_scales`.
output_scales: The float value(s) to use as scale factors when quantizing original data that `output` represents. output_zero_points: The int32 value(s) used as zero points when quantizing original data that output represents.
Must have same shape with `output_scales`.
lhs_quantization_min_val: The min value of the quantized data stored in `lhs`.
For example, if `Tin` is `qint8`, this must be set to -127 if narrow range quantized or -128 if not.
lhs_quantization_max_val: The max value of the quantized data stored in `lhs`.
For example, if `Tin` is `qint8`, this must be set to 127.
rhs_quantization_min_val: The min value of the quantized data stored in `rhs`.
For example, if `Tin` is `qint8`, this must be set to -127 if narrow range quantized or -128 if not.
rhs_quantization_max_val: The max value of the quantized data stored in `rhs`.
For example, if `Tin` is `qint8`, this must be set to 127.
output_quantization_min_val: The min value of the quantized data stored in `output`.
For example, if `Tout` is `qint8`, this must be set to -127 if narrow range quantized or -128 if not.
output_quantization_max_val: The max value of the quantized data stored in `output`.
For example, if `Tout` is `qint8`, this must be set to 127.
Returns The output quantized tensor.
func UniformQuantizedClipByValue ¶ added in v0.3.0
func UniformQuantizedClipByValue(scope *Scope, operand tf.Output, min tf.Output, max tf.Output, scales tf.Output, zero_points tf.Output, quantization_min_val int64, quantization_max_val int64, optional ...UniformQuantizedClipByValueAttr) (output tf.Output)
Perform clip by value on the quantized Tensor `operand`.
Given quantized `operand` which was quantized using `scales` and `zero_points`, performs clip by value using `min` and `max` values. If quantization_axis is -1 (per-tensor quantized), the entire operand is clipped using scalar min, max. Otherwise (per-channel quantized), the clipping is also done per-channel.
Arguments:
operand: Must be a Tensor of T. min: The min value(s) to clip operand. Must be a Tensor of T.
Must be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (operand.dim_size(quantization_axis),) (per-axis quantization).
max: The max value(s) to clip operand. Must be a Tensor of T.
Must be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (operand.dim_size(quantization_axis),) (per-axis quantization).
scales: The float value(s) used as scale(s) when quantizing `operand`, `min` and `max`.
Must be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (operand.dim_size(quantization_axis),) (per-axis quantization).
zero_points: The int32 value(s) used as zero_point(s) when quantizing `operand`, `min` and `max`.
Same shape condition as scales.
quantization_min_val: The quantization min value that was used when operand was quantized. quantization_max_val: The quantization max value that was used when operand was quantized.
Returns The output clipped Tensor of T, whose shape is same as operand.
func UniformQuantizedConvolution ¶ added in v0.4.0
func UniformQuantizedConvolution(scope *Scope, lhs tf.Output, rhs tf.Output, lhs_scales tf.Output, lhs_zero_points tf.Output, rhs_scales tf.Output, rhs_zero_points tf.Output, output_scales tf.Output, output_zero_points tf.Output, Tout tf.DataType, padding string, lhs_quantization_min_val int64, lhs_quantization_max_val int64, rhs_quantization_min_val int64, rhs_quantization_max_val int64, output_quantization_min_val int64, output_quantization_max_val int64, optional ...UniformQuantizedConvolutionAttr) (output tf.Output)
Perform quantized convolution of quantized Tensor `lhs` and quantized Tensor `rhs` to make quantized `output`.
Given quantized `lhs` and quantized `rhs`, performs quantized convolution on `lhs` and `rhs` to make quantized `output`.
`lhs` and `rhs` must be Tensors of same rank, and meet following shape conditions. - `lhs_feature` % `feature_group_count` == 0 - `lhs_feature` % `rhs_input_feature` == 0 - `lhs_feature` / `feature_group_count` == `rhs_input_feature` - `rhs_output_feature` % `feature_group_count` == 0 - `lhs_batch` % `batch_group_count` == 0 - `rhs_output_feature` % `batch_group_count` == 0
`lhs` and `rhs` must be quantized Tensor, where data value is quantized using the formula: ``` quantized_data = clip(original_data / scale + zero_point, quantization_min_val, quantization_max_val) ``` `output` is also quantized, using the same formula. If `rhs` is per-tensor quantized, `output` must be also per-tensor quantized.
Arguments:
lhs: Must be a quantized tensor, rank >= 3. rhs: Must be a quantized tensor, same rank as `lhs`. lhs_scales: The float value(s) used as scale factors when quantizing the original data that `lhs` represents.
Must be a scalar `Tensor` (`lhs` supports only per-tensor quantization).
lhs_zero_points: The int32 value(s) used as zero points when quantizing original data that `lhs` represents.
Same shape condition as `lhs_scales`.
rhs_scales: The float value(s) used as scale factors when quantizing the original data that `rhs` represents.
Must be a scalar `Tensor` for per-tensor quantization, or 1D `Tensor` of size `rhs.dim_size(kernel_output_feature_dimension)`, for per-channel quantization.
rhs_zero_points: The int32 value(s) used as zero points when quantizing original data that `rhs` represents.
Same shape condition as `rhs_scales`.
output_scales: The float value(s) to use as scale factors when quantizing original data that `output` represents.
Must be a scalar `Tensor` for per-tensor quantization, or 1D `Tensor` of size `rhs.dim_size(kernel_output_feature_dimension)` - which is equal to `output.dim_size(output_feature_dimension)`, for per-channel quantization. If `rhs` is per-tensor quantized, output must be also per-tensor quantized. This means that if `rhs_scales` and `rhs_zero_points` are scalar `Tensor`s, `output_scales` and `output_zero_points` must be scalar `Tensor`s as well.
output_zero_points: The int32 value(s) used as zero points when quantizing original data that output represents.
Same shape condition as `output_scales`.
Tout: The type of `output` `Tensor`. padding: string from: `"SAME"`, `"VALID"`, or `"EXPLICIT"`, indicating the type of padding algorithm to use. lhs_quantization_min_val: The min value of the quantized data stored in `lhs`.
For example, if `Tin` is `qint8`, this must be set to -127 if narrow range quantized or -128 if not.
lhs_quantization_max_val: The max value of the quantized data stored in `lhs`.
For example, if `Tin` is `qint8`, this must be set to 127.
rhs_quantization_min_val: The min value of the quantized data stored in `rhs`.
For example, if `Tin` is `qint8`, this must be set to -127 if narrow range quantized or -128 if not.
rhs_quantization_max_val: The max value of the quantized data stored in `rhs`.
For example, if `Tin` is `qint8`, this must be set to 127.
output_quantization_min_val: The min value of the quantized data stored in `output`.
For example, if `Tout` is `qint8`, this must be set to -127 if narrow range quantized or -128 if not.
output_quantization_max_val: The max value of the quantized data stored in `output`.
For example, if `Tout` is `qint8`, this must be set to 127.
Returns The output quantized tensor of `Tout`, same rank as `lhs` and `rhs`.
func UniformQuantizedConvolutionHybrid ¶ added in v0.4.0
func UniformQuantizedConvolutionHybrid(scope *Scope, lhs tf.Output, rhs tf.Output, rhs_scales tf.Output, rhs_zero_points tf.Output, Tout tf.DataType, padding string, rhs_quantization_min_val int64, rhs_quantization_max_val int64, optional ...UniformQuantizedConvolutionHybridAttr) (output tf.Output)
Perform hybrid quantized convolution of float Tensor `lhs` and quantized Tensor `rhs`.
Given float `lhs` and quantized `rhs`, internally performs quantization on `lhs`, and then performs quantized convolution on quantized `lhs` and `rhs`.
The internal quantization on `lhs` is a quantization to `Trhs`, dynamic range, per-batch (per-axis along axis `dimension_numbers.input_batch_dimension`), asymmetric, and not narrow range (the range is [Trhs_MIN, Trhs_MAX]).
`lhs` and `rhs` must be Tensors of same rank, and meet following shape conditions. - lhs_feature % feature_group_count == 0 - lhs_feature % rhs_input_feature == 0 - lhs_feature / feature_group_count == rhs_input_feature - rhs_output_feature % feature_group_count == 0 - lhs_batch % batch_group_count == 0 - rhs_output_feature % batch_group_count == 0
`rhs` must be quantized Tensor, where its data value is quantized using the formula: quantized_data = clip(original_data / scale + zero_point, quantization_min_val, quantization_max_val).
Arguments:
lhs: Must be a non-quantized Tensor of `Tlhs`, rank >= 3. rhs: Must be a quantized Tensor of `Trhs`, same rank as `lhs`. rhs_scales: The float value(s) used as scale factors when quantizing the original data that `rhs` represents.
Must be a scalar Tensor for per-tensor quantization, or 1D Tensor of size `rhs.dim_size(kernel_output_feature_dimension)`, for per-channel quantization.
rhs_zero_points: The int32 value(s) used as zero_point when quantizing original data that `rhs` represents.
Same shape condition as `rhs_scales`.
Tout: The type of output Tensor. padding: string from: `"SAME"`, `"VALID"`, or `"EXPLICIT"`, indicating the type of padding algorithm to use. rhs_quantization_min_val: The min value of the quantized data stored in `rhs`.
For example, if `Trhs` is qint8, this must be set to -127 if narrow range quantized or -128 if not.
rhs_quantization_max_val: The max value of the quantized data stored in `rhs`.
For example, if `Trhs` is qint8, this must be set to 127.
Returns The output Tensor of `Tout`, same rank as `lhs` and `rhs`. The output data is the non-quantized output data.
func UniformQuantizedDot ¶ added in v0.3.0
func UniformQuantizedDot(scope *Scope, lhs tf.Output, rhs tf.Output, lhs_scales tf.Output, lhs_zero_points tf.Output, rhs_scales tf.Output, rhs_zero_points tf.Output, output_scales tf.Output, output_zero_points tf.Output, Tout tf.DataType, lhs_quantization_min_val int64, lhs_quantization_max_val int64, rhs_quantization_min_val int64, rhs_quantization_max_val int64, output_quantization_min_val int64, output_quantization_max_val int64, optional ...UniformQuantizedDotAttr) (output tf.Output)
Perform quantized dot of quantized Tensor `lhs` and quantized Tensor `rhs` to make quantized `output`.
Given quantized `lhs` and quantized `rhs`, performs quantized dot on `lhs` and `rhs` to make quantized `output`. `lhs` and `rhs` must be 2D Tensors and the lhs.dim_size(1) must match rhs.dim_size(0). `lhs` and `rhs` must be quantized Tensor, where data value is quantized using the formula: quantized_data = clip(original_data / scale + zero_point, quantization_min_val, quantization_max_val). `output` is also quantized, using the same formula. If `rhs` is per-tensor quantized, `output` must be also per-tensor quantized.
Arguments:
lhs: Must be a 2D Tensor of Tin. rhs: Must be a 2D Tensor of Tin. lhs_scales: The float value(s) used as scale when quantizing original data that lhs represents.
Must be a scalar Tensor (lhs supports only per-tensor quantization).
lhs_zero_points: The int32 value(s) used as zero_point when quantizing original data that lhs represents.
Same shape condition as lhs_scales.
rhs_scales: The float value(s) used as scale when quantizing original data that rhs represents.
Must be a scalar Tensor (per-tensor quantization) or 1D Tensor of size (rhs.dim_size(1),) (per-channel quantization).
rhs_zero_points: The int32 value(s) used as zero_point when quantizing original data that rhs represents.
Same shape condition as rhs_scales.
output_scales: The float value(s) to use as scales when quantizing original data that output represents.
Must be a scalar Tensor (per-tensor quantization) or 1D Tensor of size (output.dim_size(1),) (per-channel quantization). If rhs is per-tensor quantized, output must be also per-tensor quantized. This means that if rhs_scales and rhs_zero_points are scalar Tensors, output_scales and output_zero_points must be scalar Tensors as well.
output_zero_points: The int32 value(s) used as zero_point when quantizing original data that output represents.
Same shape condition as output_scales.
Tout: The type of output Tensor. lhs_quantization_min_val: The min value of the quantized data stored in lhs.
For example, if Tin is qint8, this must be set to -127 if narrow range quantized or -128 if not.
lhs_quantization_max_val: The max value of the quantized data stored in lhs.
For example, if Tin is qint8, this must be set to 127.
rhs_quantization_min_val: The min value of the quantized data stored in rhs.
For example, if Trhs is qint8, this must be set to -127 if narrow range quantized or -128 if not.
rhs_quantization_max_val: The max value of the quantized data stored in rhs.
For example, if Trhs is qint8, this must be set to 127.
output_quantization_min_val: The min value of the quantized data stored in output.
For example, if Tout is qint8, this must be set to -127 if narrow range quantized or -128 if not.
output_quantization_max_val: The max value of the quantized data stored in output.
For example, if Tout is qint8, this must be set to 127.
Returns The output 2D Tensor of Tout, whose shape is (lhs.dim_size(0), rhs.dim_size(1)).
func UniformQuantizedDotHybrid ¶ added in v0.2.0
func UniformQuantizedDotHybrid(scope *Scope, lhs tf.Output, rhs tf.Output, rhs_scales tf.Output, rhs_zero_points tf.Output, Tout tf.DataType, rhs_quantization_min_val int64, rhs_quantization_max_val int64, optional ...UniformQuantizedDotHybridAttr) (output tf.Output)
Perform hybrid quantized dot of float Tensor `lhs` and quantized Tensor `rhs`.
Given float `lhs` and quantized `rhs`, internally performs quantization on `lhs`, and then performs quantized dot on quantized lhs and `rhs`. The internal quantization on `lhs` is a quantization to qint8, dynamic range, per-batch (per-axis along axis 0), asymmetric, and not narrow range (the range is [-128, 127]). `lhs` and `rhs` must be 2D Tensors and the lhs.dim_size(1) must match rhs.dim_size(0). `rhs` must be quantized Tensor, where its data value is quantized using the formula: quantized_data = clip(original_data / scale + zero_point, quantization_min_val, quantization_max_val).
Arguments:
lhs: Must be a 2D Tensor of Tlhs. rhs: Must be a 2D Tensor of Trhs. rhs_scales: The float value(s) used as scale when quantizing original data that rhs represents.
Must be a scalar Tensor (per-tensor quantization) or 1D Tensor of size (rhs.dim_size(1),) (per-channel quantization).
rhs_zero_points: The int32 value(s) used as zero_point when quantizing original data that rhs represents.
Same shape condition as rhs_scales.
Tout: The type of output Tensor. rhs_quantization_min_val: The min value of the quantized data stored in rhs.
For example, if Trhs is qint8, this must be set to -127 if narrow range quantized or -128 if not.
rhs_quantization_max_val: The max value of the quantized data stored in rhs.
For example, if Trhs is qint8, this must be set to 127.
Returns The output 2D Tensor of Tout, whose shape is (lhs.dim_size(0), rhs.dim_size(1)). The output data is the original output data itself (Not quantized).
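A minimal, hedged sketch of a hybrid dot: the float `lhs` stays as-is, while the qint8 `rhs` is produced here with UniformQuantize purely for illustration (per-tensor scale and zero point, values illustrative):

```
s := NewScope()
lhs := Const(s, [][]float32{{1, 2}, {3, 4}})            // 2x2 float activations
rhsFloat := Const(s, [][]float32{{0.5, -0.5}, {1, -1}}) // 2x2 float weights
scales := Const(s, float32(1.0/127.0))
zeroPoints := Const(s, int32(0))
rhs := UniformQuantize(s, rhsFloat, scales, zeroPoints, tf.Qint8, -128, 127)
out := UniformQuantizedDotHybrid(s, lhs, rhs, scales, zeroPoints, tf.Float, -128, 127)
if s.Err() != nil {
	panic(s.Err())
}
fmt.Println(out.Shape()) // [2, 2]
```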
func UniformRequantize ¶ added in v0.3.0
func UniformRequantize(scope *Scope, input tf.Output, input_scales tf.Output, input_zero_points tf.Output, output_scales tf.Output, output_zero_points tf.Output, Tout tf.DataType, input_quantization_min_val int64, input_quantization_max_val int64, output_quantization_min_val int64, output_quantization_max_val int64, optional ...UniformRequantizeAttr) (output tf.Output)
Given quantized tensor `input`, requantize it with new quantization parameters.
Given quantized tensor `input`, which was quantized using {input_scales, input_zero_points, input_quantization_axis, input_quantization_min_val, input_quantization_max_val}, requantize it to a tensor, which is quantized using {output_scales, output_zero_points, output_quantization_axis, output_quantization_min_val, output_quantization_max_val}. The requantization is done by using the formula: output_quantized_data = clip((input_quantized_data - input_zero_point) * (input_scale / output_scale) + output_zero_point, output_quantization_min_val, output_quantization_max_val).
Per-tensor and per-axis quantization are supported in the following cases: per-tensor -> per-tensor, per-tensor -> per-axis, and per-axis -> per-axis where input_quantization_axis equals output_quantization_axis. In other words, at least one of input_quantization_axis and output_quantization_axis must be -1, or the two must be equal.
Arguments:
input: Must be a Tensor of Tin. input_scales: The float value(s) used as scale(s) when quantizing original data that `input` represents.
Must be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (input.dim_size(quantization_axis),) (per-axis quantization).
input_zero_points: The int32 value(s) used as zero_point(s) when quantizing original data that `input` represents.
Same shape condition as scales.
output_scales: The float value(s) to use as new scale(s) to quantize original data that `input` represents.
Must be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (input.dim_size(quantization_axis),) (per-axis quantization).
output_zero_points: The int32 value(s) to use as new zero_point(s) to quantize original data that `input` represents.
Same shape condition as scales.
Tout: The type of output Tensor. A tf.DType from: tf.qint8, tf.qint32 input_quantization_min_val: The quantization min value that was used when quantizing original data that `input` represents.
The purpose of this attribute is typically (but not limited to) to indicate narrow range, where this is set to: `(Tin lowest) + 1` if narrow range, and `(Tin lowest)` otherwise. For example, if Tin is qint8, this is set to -127 if narrow range quantized or -128 if not.
input_quantization_max_val: The quantization max value that was used when quantizing original data that `input` represents.
The purpose of this attribute is typically (but not limited to) to indicate narrow range, where this is set to: `(Tout max)` for both narrow range and not narrow range. For example, if Tin is qint8, this is set to 127.
output_quantization_min_val: The new quantization min value to quantize original data that `input` represents. output_quantization_max_val: The new quantization max value to quantize original data that `input` represents.
Returns The output quantized Tensor of Tout, whose shape is same as input.
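A minimal, hedged sketch of requantizing a per-tensor qint32 tensor (such as an accumulator) down to qint8 with a new scale and zero point; all scale and zero-point values here are illustrative:

```
s := NewScope()
acc := UniformQuantize(s, Const(s, []float32{0.1, 0.2, 0.3}),
	Const(s, float32(0.001)), Const(s, int32(0)), tf.Qint32, -2147483648, 2147483647)
out := UniformRequantize(s, acc,
	Const(s, float32(0.001)), Const(s, int32(0)),     // input scale / zero point
	Const(s, float32(1.0/127.0)), Const(s, int32(0)), // new output scale / zero point
	tf.Qint8, -2147483648, 2147483647, -128, 127)
if s.Err() != nil {
	panic(s.Err())
}
_ = out
```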
func Unique ¶
Finds unique elements in a 1-D tensor.
This operation returns a tensor `y` containing all of the unique elements of `x` sorted in the same order that they occur in `x`; `x` does not need to be sorted. This operation also returns a tensor `idx` the same size as `x` that contains the index of each value of `x` in the unique output `y`. In other words:
`y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
Examples:
```
# tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
y, idx = unique(x)
y ==> [1, 2, 4, 7, 8]
idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
```
```
# tensor 'x' is [4, 5, 1, 2, 3, 3, 4, 5]
y, idx = unique(x)
y ==> [4, 5, 1, 2, 3]
idx ==> [0, 1, 2, 3, 4, 4, 0, 1]
```
Arguments:
x: 1-D.
Returns:
y: 1-D. idx: 1-D.
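A minimal, hedged sketch using the Go bindings, mirroring the first example above (the signature is assumed to be `Unique(scope, x, optional...)` returning `y` and `idx`):

```
s := NewScope()
x := Const(s, []int32{1, 1, 2, 4, 4, 4, 7, 8, 8})
y, idx := Unique(s, x)
if s.Err() != nil {
	panic(s.Err())
}
_, _ = y, idx // after Session.Run: y = [1 2 4 7 8], idx = [0 0 1 2 2 2 3 4 4]
```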
func UniqueDataset ¶
func UniqueDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...UniqueDatasetAttr) (handle tf.Output)
Creates a dataset that contains the unique elements of `input_dataset`.
func UniqueV2 ¶
func UniqueV2(scope *Scope, x tf.Output, axis tf.Output, optional ...UniqueV2Attr) (y tf.Output, idx tf.Output)
Finds unique elements along an axis of a tensor.
This operation returns a tensor `y` containing the unique elements along the `axis` of a tensor. The returned unique elements are sorted in the same order as they occur along `axis` in `x`. This operation also returns a tensor `idx` that is the same size as the number of elements in `x` along the `axis` dimension. It contains the index in the unique output `y`. In other words, for a `1-D` tensor `x` with `axis = None`:
`y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
For example:
```
# tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
y, idx = unique(x)
y ==> [1, 2, 4, 7, 8]
idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
```
For an `2-D` tensor `x` with `axis = 0`:
```
# tensor 'x' is [[1, 0, 0],
#                [1, 0, 0],
#                [2, 0, 0]]
y, idx = unique(x, axis=0)
y ==> [[1, 0, 0],
       [2, 0, 0]]
idx ==> [0, 0, 1]
```
For an `2-D` tensor `x` with `axis = 1`:
```
# tensor 'x' is [[1, 0, 0],
#                [1, 0, 0],
#                [2, 0, 0]]
y, idx = unique(x, axis=1)
y ==> [[1, 0],
       [1, 0],
       [2, 0]]
idx ==> [0, 1, 1]
```
Arguments:
x: A `Tensor`. axis: A `Tensor` of type `int32` (default: None). The axis of the Tensor to
find the unique elements.
Returns:
y: A `Tensor`. Unique elements along the `axis` of `Tensor` x. idx: A 1-D Tensor. Has the same type as x that contains the index of each
value of x in the output y.
func UniqueWithCounts ¶
func UniqueWithCounts(scope *Scope, x tf.Output, optional ...UniqueWithCountsAttr) (y tf.Output, idx tf.Output, count tf.Output)
Finds unique elements in a 1-D tensor.
This operation returns a tensor `y` containing all of the unique elements of `x` sorted in the same order that they occur in `x`. This operation also returns a tensor `idx` the same size as `x` that contains the index of each value of `x` in the unique output `y`. Finally, it returns a third tensor `count` that contains the count of each element of `y` in `x`. In other words:
`y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
For example:
```
# tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
y, idx, count = unique_with_counts(x)
y ==> [1, 2, 4, 7, 8]
idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
count ==> [2, 1, 3, 1, 2]
```
Arguments:
x: 1-D.
Returns:
y: 1-D. idx: 1-D. count: 1-D.
func UniqueWithCountsV2 ¶
func UniqueWithCountsV2(scope *Scope, x tf.Output, axis tf.Output, optional ...UniqueWithCountsV2Attr) (y tf.Output, idx tf.Output, count tf.Output)
Finds unique elements along an axis of a tensor.
This operation returns a tensor `y` containing the unique elements along the `axis` of a tensor. The returned unique elements are sorted in the same order as they occur along `axis` in `x`. This operation also returns a tensor `idx` and a tensor `count` that are the same size as the number of elements in `x` along the `axis` dimension. The `idx` contains the index in the unique output `y` and the `count` contains the count in the unique output `y`. In other words, for a `1-D` tensor `x` with `axis = None`:
`y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
For example:
```
x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8])
y, idx, count = tf.raw_ops.UniqueWithCountsV2(x=x, axis=[0])
y ==> [1, 2, 4, 7, 8]
idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
count ==> [2, 1, 3, 1, 2]
```
For a `2-D` tensor `x` with `axis = 0`:
```
x = tf.constant([[1, 0, 0],
                 [1, 0, 0],
                 [2, 0, 0]])
y, idx, count = tf.raw_ops.UniqueWithCountsV2(x=x, axis=[0])
y ==> [[1, 0, 0],
       [2, 0, 0]]
idx ==> [0, 0, 1]
count ==> [2, 1]
```
For a `2-D` tensor `x` with `axis = 1`:
```
x = tf.constant([[1, 0, 0],
                 [1, 0, 0],
                 [2, 0, 0]])
y, idx, count = tf.raw_ops.UniqueWithCountsV2(x=x, axis=[1])
y ==> [[1, 0],
       [1, 0],
       [2, 0]]
idx ==> [0, 1, 1]
count ==> [1, 2]
```
Arguments:
x: A `Tensor`. axis: A `Tensor` of type `int32` (default: None). The axis of the Tensor to
find the unique elements.
Returns:
y: A `Tensor`. Unique elements along the `axis` of `Tensor` x. idx: A 1-D Tensor. Has the same type as x that contains the index of each
value of x in the output y.
count: A 1-D Tensor. The count of each value of x in the output y.
func Unpack ¶
Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors.
Unpacks `num` tensors from `value` by chipping it along the `axis` dimension. For example, given a tensor of shape `(A, B, C, D)`;
If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]`
and each tensor in `output` will have shape `(B, C, D)`. (Note that the dimension unpacked along is gone, unlike `split`).
If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]`
and each tensor in `output` will have shape `(A, C, D)`.
Etc.
This is the opposite of `pack`.
Arguments:
value: 1-D or higher, with `axis` dimension size equal to `num`.
Returns The list of tensors unpacked from `value`.
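A minimal, hedged sketch (assuming the usual `Unpack(scope, value, num, optional...)` signature) that unpacks a 2x3 matrix along the default axis 0 into two rank-1 tensors:

```
s := NewScope()
value := Const(s, [][]float32{{1, 2, 3}, {4, 5, 6}})
rows := Unpack(s, value, 2) // axis defaults to 0; each output has shape (3)
if s.Err() != nil {
	panic(s.Err())
}
fmt.Println(len(rows)) // 2
```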
func UnravelIndex ¶
Converts an array of flat indices into a tuple of coordinate arrays.
Example:
```
y = tf.unravel_index(indices=[2, 5, 7], dims=[3, 3])
# 'dims' represent a hypothetical (3, 3) tensor of indices:
# [[0, 1, *2*],
#  [3, 4, *5*],
#  [6, *7*, 8]]
# For each entry from 'indices', this operation returns
# its coordinates (marked with '*'), such as
# 2 ==> (0, 2)
# 5 ==> (1, 2)
# 7 ==> (2, 1)
y ==> [[0, 1, 2], [2, 2, 1]]
```
@compatibility(numpy) Equivalent to np.unravel_index @end_compatibility
Arguments:
indices: An 0-D or 1-D `int` Tensor whose elements are indices into the
flattened version of an array of dimensions dims.
dims: An 1-D `int` Tensor. The shape of the array to use for unraveling
indices.
Returns A 2-D (or 1-D if indices is 0-D) tensor where each row has the same shape as the indices array.
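A minimal, hedged sketch of the same example with the Go bindings, assuming the op takes (indices, dims):

```
s := NewScope()
indices := Const(s, []int32{2, 5, 7})
dims := Const(s, []int32{3, 3})
coords := UnravelIndex(s, indices, dims) // rows of coords: [0 1 2] and [2 2 1] after running
if s.Err() != nil {
	panic(s.Err())
}
fmt.Println(coords.Shape()) // expected: [2, 3]
```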
func UnsortedSegmentMax ¶
func UnsortedSegmentMax(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output)
Computes the maximum along segments of a tensor.
Read [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) for an explanation of segments.
This operator is similar to `tf.math.unsorted_segment_sum`. Instead of computing the sum over segments, it computes the maximum such that:
\\(output_i = \max_{j...} data[j...]\\) where max is over tuples `j...` such that `segment_ids[j...] == i`.
If the maximum is empty for a given segment ID `i`, it outputs the smallest possible value for the specific numeric type, `output[i] = numeric_limits<T>::lowest()`.
If the given segment ID `i` is negative, then the corresponding value is dropped, and will not be included in the result.
Caution: On CPU, values in `segment_ids` are always validated to be less than `num_segments`, and an error is thrown for out-of-bound indices. On GPU, no error is thrown for out-of-bound indices; instead, out-of-bound indices result in safe but unspecified behavior, which may include ignoring out-of-bound indices or outputting a tensor with a 0 stored in the first dimension of its shape if `num_segments` is 0.
(Illustration: https://www.tensorflow.org/images/UnsortedSegmentMax.png)
For example:
>>> c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
>>> tf.math.unsorted_segment_max(c, tf.constant([0, 1, 0]), num_segments=2).numpy()
array([[4, 3, 3, 4],
       [5, 6, 7, 8]], dtype=int32)
Arguments:
segment_ids: A tensor whose shape is a prefix of `data.shape`.
The values must be less than `num_segments`.
Caution: The values are always validated to be in range on CPU, never validated on GPU.
Returns Has same shape as data, except for the first `segment_ids.rank` dimensions, which are replaced with a single dimension which has size `num_segments`.
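The same example expressed with the Go bindings, as a minimal, hedged sketch:

```
s := NewScope()
data := Const(s, [][]int32{{1, 2, 3, 4}, {5, 6, 7, 8}, {4, 3, 2, 1}})
segmentIDs := Const(s, []int32{0, 1, 0})
numSegments := Const(s, int32(2))
out := UnsortedSegmentMax(s, data, segmentIDs, numSegments)
if s.Err() != nil {
	panic(s.Err())
}
fmt.Println(out.Shape()) // [2, 4]: rows 0 and 2 reduce into segment 0, row 1 into segment 1
```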
func UnsortedSegmentMin ¶
func UnsortedSegmentMin(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output)
Computes the minimum along segments of a tensor.
Read [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) for an explanation of segments.
This operator is similar to `tf.math.unsorted_segment_sum`. Instead of computing the sum over segments, it computes the minimum such that:
\\(output_i = \min_{j...} data[j...]\\) where min is over tuples `j...` such that `segment_ids[j...] == i`.
If the minimum is empty for a given segment ID `i`, it outputs the largest possible value for the specific numeric type, `output[i] = numeric_limits<T>::max()`.
For example:
>>> c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
>>> tf.math.unsorted_segment_min(c, tf.constant([0, 1, 0]), num_segments=2).numpy()
array([[1, 2, 2, 1],
       [5, 6, 7, 8]], dtype=int32)
If the given segment ID `i` is negative, then the corresponding value is dropped, and will not be included in the result.
Caution: On CPU, values in `segment_ids` are always validated to be less than `num_segments`, and an error is thrown for out-of-bound indices. On GPU, no error is thrown for out-of-bound indices; instead, out-of-bound indices result in safe but unspecified behavior, which may include ignoring out-of-bound indices or outputting a tensor with a 0 stored in the first dimension of its shape if `num_segments` is 0.
Arguments:
segment_ids: A tensor whose shape is a prefix of `data.shape`.
The values must be less than `num_segments`.
Caution: The values are always validated to be in range on CPU, never validated on GPU.
Returns Has same shape as data, except for the first `segment_ids.rank` dimensions, which are replaced with a single dimension which has size `num_segments`.
func UnsortedSegmentProd ¶
func UnsortedSegmentProd(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output)
Computes the product along segments of a tensor.
Read [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) for an explanation of segments.
This operator is similar to `tf.math.unsorted_segment_sum`. Instead of computing the sum over segments, it computes the product of all entries belonging to a segment such that:
\\(output_i = \prod_{j...} data[j...]\\) where the product is over tuples `j...` such that `segment_ids[j...] == i`.
For example:
>>> c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
>>> tf.math.unsorted_segment_prod(c, tf.constant([0, 1, 0]), num_segments=2).numpy()
array([[4, 6, 6, 4],
       [5, 6, 7, 8]], dtype=int32)
If there is no entry for a given segment ID `i`, it outputs 1.
If the given segment ID `i` is negative, then the corresponding value is dropped, and will not be included in the result. Caution: On CPU, values in `segment_ids` are always validated to be less than `num_segments`, and an error is thrown for out-of-bound indices. On GPU, no error is thrown for out-of-bound indices; instead, out-of-bound indices result in safe but unspecified behavior, which may include ignoring out-of-bound indices or outputting a tensor with a 0 stored in the first dimension of its shape if `num_segments` is 0.
Arguments:
segment_ids: A tensor whose shape is a prefix of `data.shape`.
The values must be less than `num_segments`.
Caution: The values are always validated to be in range on CPU, never validated on GPU.
Returns Has same shape as data, except for the first `segment_ids.rank` dimensions, which are replaced with a single dimension which has size `num_segments`.
func UnsortedSegmentSum ¶
func UnsortedSegmentSum(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output)
Computes the sum along segments of a tensor.
Read [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) for an explanation of segments.
Computes a tensor such that \\(output[i] = \sum_{j...} data[j...]\\) where the sum is over tuples `j...` such that `segment_ids[j...] == i`. Unlike `SegmentSum`, `segment_ids` need not be sorted and need not cover all values in the full range of valid values.
If the sum is empty for a given segment ID `i`, `output[i] = 0`. If the given segment ID `i` is negative, the value is dropped and will not be added to the sum of the segment.
`num_segments` should equal the number of distinct segment IDs.
Caution: On CPU, values in `segment_ids` are always validated to be less than `num_segments`, and an error is thrown for out-of-bound indices. On GPU, no error is thrown for out-of-bound indices; instead, out-of-bound indices result in safe but unspecified behavior, which may include ignoring out-of-bound indices or outputting a tensor with a 0 stored in the first dimension of its shape if `num_segments` is 0.
(Illustration: https://www.tensorflow.org/images/UnsortedSegmentSum.png)
>>> c = [[1,2,3,4], [5,6,7,8], [4,3,2,1]]
>>> tf.math.unsorted_segment_sum(c, [0, 1, 0], num_segments=2).numpy()
array([[5, 5, 5, 5],
       [5, 6, 7, 8]], dtype=int32)
Arguments:
segment_ids: A tensor whose shape is a prefix of `data.shape`.
The values must be less than `num_segments`.
Caution: The values are always validated to be in range on CPU, never validated on GPU.
Returns Has same shape as data, except for the first `segment_ids.rank` dimensions, which are replaced with a single dimension which has size `num_segments`.
func Unstage ¶
Op is similar to a lightweight Dequeue.
The basic functionality is similar to dequeue with many fewer capabilities and options. This Op is optimized for performance.
func UpdateTaskIdAndGlobalCoreArray ¶ added in v0.8.2
func UpdateTaskIdAndGlobalCoreArray(scope *Scope, tpu_task_id_to_shard_id []tf.Output) (o *tf.Operation)
An op to update the task ID and global core array.
This op is to update the task ID and global core array.
Arguments:
tpu_task_id_to_shard_id: An array of int32 that maps TPU task ID to shard ID.
Returns the created operation.
func UpperBound ¶
func UpperBound(scope *Scope, sorted_inputs tf.Output, values tf.Output, optional ...UpperBoundAttr) (output tf.Output)
Applies upper_bound(sorted_search_values, values) along each row.
Each set of rows with the same index in (sorted_inputs, values) is treated independently. The resulting row is the equivalent of calling `np.searchsorted(sorted_inputs, values, side='right')`.
The result is not a global index to the entire `Tensor`, but rather just the index in the last dimension.
A 2-D example:
sorted_sequence = [[0, 3, 9, 9, 10], [1, 2, 3, 4, 5]]
values = [[2, 4, 9], [0, 2, 6]]
result = UpperBound(sorted_sequence, values)
result == [[1, 2, 4], [0, 2, 5]]
Arguments:
sorted_inputs: 2-D Tensor where each row is ordered. values: 2-D Tensor with the same numbers of rows as `sorted_search_values`. Contains
the values that will be searched for in `sorted_search_values`.
Returns A `Tensor` with the same shape as `values`. It contains the last scalar index into the last dimension where values can be inserted without changing the ordered property.
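A minimal, hedged sketch of the 2-D example above using the Go bindings:

```
s := NewScope()
sortedInputs := Const(s, [][]int32{{0, 3, 9, 9, 10}, {1, 2, 3, 4, 5}})
values := Const(s, [][]int32{{2, 4, 9}, {0, 2, 6}})
result := UpperBound(s, sortedInputs, values)
if s.Err() != nil {
	panic(s.Err())
}
_ = result // after Session.Run: [[1 2 4] [0 2 5]]
```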
func VarHandleOp ¶
func VarHandleOp(scope *Scope, dtype tf.DataType, shape tf.Shape, optional ...VarHandleOpAttr) (resource tf.Output)
Creates a handle to a Variable resource.
Arguments:
dtype: the type of this variable. Must agree with the dtypes
of all ops using this variable.
shape: The (possibly partially specified) shape of this variable.
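A minimal, hedged sketch of creating a handle for a 2x2 float variable; tf.MakeShape is assumed from the tensorflow Go package, and AssignVariableOp / ReadVariableOp (defined elsewhere in this package) would initialize and read it in a Session:

```
s := NewScope()
handle := VarHandleOp(s, tf.Float, tf.MakeShape(2, 2))
if s.Err() != nil {
	panic(s.Err())
}
_ = handle // pass to AssignVariableOp / ReadVariableOp to use the variable
```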
func VarIsInitializedOp ¶
Checks whether a resource handle-based variable has been initialized.
Arguments:
resource: the input resource handle.
Returns a scalar boolean which is true if the variable has been initialized.
func VariableShape ¶
Returns the shape of the variable pointed to by `resource`.
This operation returns a 1-D integer tensor representing the shape of `input`.
For example:
``` # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] shape(t) ==> [2, 2, 3] ```
func Where ¶
Returns locations of nonzero / true values in a tensor.
This operation returns the coordinates of true elements in `condition`. The coordinates are returned in a 2-D tensor where the first dimension (rows) represents the number of true elements, and the second dimension (columns) represents the coordinates of the true elements. Keep in mind, the shape of the output tensor can vary depending on how many true values there are in `condition`. Indices are output in row-major order.
For example:
```
# 'input' tensor is [[True, False]
#                    [True, False]]
# 'input' has two true values, so output has two coordinates.
# 'input' has rank of 2, so coordinates have two indices.
where(input) ==> [[0, 0],
                  [1, 0]]

# `condition` tensor is [[[True, False]
#                         [True, False]]
#                        [[False, True]
#                         [False, True]]
#                        [[False, False]
#                         [False, True]]]
# 'input' has 5 true values, so output has 5 coordinates.
# 'input' has rank of 3, so coordinates have three indices.
where(input) ==> [[0, 0, 0],
                  [0, 1, 0],
                  [1, 0, 1],
                  [1, 1, 1],
                  [2, 1, 1]]

# `condition` tensor is [[[1.5,  0.0]
#                         [-0.5, 0.0]]
#                        [[0.0,  0.25]
#                         [0.0,  0.75]]
#                        [[0.0,  0.0]
#                         [0.0,  0.01]]]
# 'input' has 5 nonzero values, so output has 5 coordinates.
# 'input' has rank of 3, so coordinates have three indices.
where(input) ==> [[0, 0, 0],
                  [0, 1, 0],
                  [1, 0, 1],
                  [1, 1, 1],
                  [2, 1, 1]]

# `condition` tensor is [[[1.5 + 0.0j, 0.0  + 0.0j]
#                         [0.0 + 0.5j, 0.0  + 0.0j]]
#                        [[0.0 + 0.0j, 0.25 + 1.5j]
#                         [0.0 + 0.0j, 0.75 + 0.0j]]
#                        [[0.0 + 0.0j, 0.0  + 0.0j]
#                         [0.0 + 0.0j, 0.01 + 0.0j]]]
# 'input' has 5 nonzero magnitude values, so output has 5 coordinates.
# 'input' has rank of 3, so coordinates have three indices.
where(input) ==> [[0, 0, 0],
                  [0, 1, 0],
                  [1, 0, 1],
                  [1, 1, 1],
                  [2, 1, 1]]
```
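A minimal, hedged sketch of the first example above with the Go bindings (assuming Where takes the condition tensor and returns the coordinate Output); the number of true values is only known at run time, so the first output dimension is dynamic:

```
s := NewScope()
condition := Const(s, [][]bool{{true, false}, {true, false}})
coords := Where(s, condition)
if s.Err() != nil {
	panic(s.Err())
}
fmt.Println(coords.Shape()) // [?, 2]; after Session.Run the values are [[0 0] [1 0]]
```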
func WholeFileReaderV2 ¶
func WholeFileReaderV2(scope *Scope, optional ...WholeFileReaderV2Attr) (reader_handle tf.Output)
A Reader that outputs the entire contents of a file as a value.
To use, enqueue filenames in a Queue. The output of ReaderRead will be a filename (key) and the contents of that file (value).
Returns The handle to reference the Reader.
func WindowDataset ¶
func WindowDataset(scope *Scope, input_dataset tf.Output, size tf.Output, shift tf.Output, stride tf.Output, drop_remainder tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...WindowDatasetAttr) (handle tf.Output)
Combines (nests of) input elements into a dataset of (nests of) windows.
A "window" is a finite dataset of flat elements of size `size` (or possibly fewer if there are not enough input elements to fill the window and `drop_remainder` evaluates to false).
The `shift` argument determines the number of input elements by which the window moves on each iteration. The first element in the `k`th window will be element `1 + (k-1) * shift` of the input dataset. In particular, the first element of the first window will always be the first element of the input dataset.
If the `stride` parameter is greater than 1, then each window will skip `(stride - 1)` input elements between each element that appears in the window. Output windows will still contain `size` elements regardless of the value of `stride`. The `stride` argument determines the stride of the input elements, and the `shift` argument determines the shift of the window.
For example, letting `{...}` represent a Dataset:
- `tf.data.Dataset.range(7).window(2)` produces `{{0, 1}, {2, 3}, {4, 5}, {6}}`
- `tf.data.Dataset.range(7).window(3, 2, 1, True)` produces `{{0, 1, 2}, {2, 3, 4}, {4, 5, 6}}`
- `tf.data.Dataset.range(7).window(3, 1, 2, True)` produces `{{0, 2, 4}, {1, 3, 5}, {2, 4, 6}}`
Note that when the `window` transformation is applied to a dataset of nested elements, it produces a dataset of nested windows. For example:
- `tf.data.Dataset.from_tensor_slices((range(4), range(4))).window(2)` produces `{({0, 1}, {0, 1}), ({2, 3}, {2, 3})}`
- `tf.data.Dataset.from_tensor_slices({"a": range(4)}).window(2)` produces `{{"a": {0, 1}}, {"a": {2, 3}}}`
Arguments:
size: An integer scalar, representing the number of elements
of the input dataset to combine into a window. Must be positive.
shift: An integer scalar, representing the number of input elements
by which the window moves in each iteration. Defaults to `size`. Must be positive.
stride: An integer scalar, representing the stride of the input elements
in the sliding window. Must be positive. The default value of 1 means "retain every input element".
drop_remainder: A Boolean scalar, representing whether the last window should be
dropped if its size is smaller than `window_size`.
func WorkerHeartbeat ¶
Worker heartbeat op.
Heartbeats may be sent periodically to indicate the coordinator is still active, to retrieve the current worker status and to expedite shutdown when necessary.
Arguments:
request: A string tensor containing a serialized WorkerHeartbeatRequest
Returns A string tensor containing a serialized WorkerHeartbeatResponse
func WriteAudioSummary ¶
func WriteAudioSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, tensor tf.Output, sample_rate tf.Output, optional ...WriteAudioSummaryAttr) (o *tf.Operation)
Writes an audio summary.
Writes encoded audio summary `tensor` at `step` with `tag` using summary `writer`. `sample_rate` is the audio sample rate in Hz.
Returns the created operation.
func WriteFile ¶
Writes `contents` to the file at input `filename`.
Creates the file and recursively creates directory if it does not exist.
Arguments:
filename: scalar. The name of the file to which we write the contents. contents: scalar. The content to be written to the output file.
Returns the created operation.
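A minimal, hedged sketch: the file path below is purely illustrative, and the returned operation only writes the file when it is run as a target in a Session:

```
s := NewScope()
writeOp := WriteFile(s,
	Const(s, "/tmp/example.txt"),       // hypothetical filename
	Const(s, "contents written by TF")) // scalar string contents
if s.Err() != nil {
	panic(s.Err())
}
_ = writeOp // pass as a target to Session.Run to perform the write
```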
func WriteGraphSummary ¶
func WriteGraphSummary(scope *Scope, writer tf.Output, step tf.Output, tensor tf.Output) (o *tf.Operation)
Writes a graph summary.
Writes TensorFlow graph `tensor` at `step` using summary `writer`.
Returns the created operation.
func WriteHistogramSummary ¶
func WriteHistogramSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, values tf.Output) (o *tf.Operation)
Writes a histogram summary.
Writes histogram `values` at `step` with `tag` using summary `writer`.
Returns the created operation.
func WriteImageSummary ¶
func WriteImageSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, tensor tf.Output, bad_color tf.Output, optional ...WriteImageSummaryAttr) (o *tf.Operation)
Writes an image summary.
Writes image `tensor` at `step` with `tag` using summary `writer`. `tensor` is image with shape [height, width, channels].
Returns the created operation.
func WriteRawProtoSummary ¶
func WriteRawProtoSummary(scope *Scope, writer tf.Output, step tf.Output, tensor tf.Output) (o *tf.Operation)
Writes a serialized proto summary.
Writes `tensor`, a serialized proto at `step` using summary `writer`.
Returns the created operation.
func WriteScalarSummary ¶
func WriteScalarSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, value tf.Output) (o *tf.Operation)
Writes a scalar summary.
Writes scalar `value` at `step` with `tag` using summary `writer`.
Returns the created operation.
func WriteSummary ¶
func WriteSummary(scope *Scope, writer tf.Output, step tf.Output, tensor tf.Output, tag tf.Output, summary_metadata tf.Output) (o *tf.Operation)
Writes a tensor summary.
Writes `tensor` at `step` with `tag` using summary `writer`.
Returns the created operation.
func XlaAllReduce ¶
func XlaAllReduce(scope *Scope, input tf.Output, group_assignment tf.Output, reduce_op string, mode string) (output tf.Output)
Wraps the XLA AllReduce operator
documented at https://www.tensorflow.org/xla/operation_semantics#allreduce.
Arguments:
input: Array or a non-empty tuple of arrays to reduce across replicas. group_assignment: Groups between which the reductions are performed. reduce_op: Reduction computation. mode: group mode.
CrossReplica: group_assignment contains replica_id. Each group contains the
replicas for the current partition.
CrossReplicaAndPartition: group_assignment contains replica_id. Each group
contains the replicas for all partitions.
func XlaBroadcastHelper ¶
func XlaBroadcastHelper(scope *Scope, lhs tf.Output, rhs tf.Output, broadcast_dims tf.Output) (lhs_output tf.Output, rhs_output tf.Output)
Helper operator for performing XLA-style broadcasts
Broadcasts `lhs` and `rhs` to the same rank, by adding size 1 dimensions to whichever of `lhs` and `rhs` has the lower rank, using XLA's broadcasting rules for binary operators.
Arguments:
lhs: the LHS input tensor rhs: the RHS input tensor broadcast_dims: an XLA-style broadcast dimension specification
Returns:
lhs_output: the broadcasted LHS tensor rhs_output: the broadcasted RHS tensor
func XlaConcatND ¶
func XlaConcatND(scope *Scope, inputs []tf.Output, num_concats []int64, optional ...XlaConcatNDAttr) (output tf.Output)
Concats input tensor across all dimensions.
An op which merges slices of the input tensor based on the given num_splits attribute, optionally strips paddings, and returns the merged tensor without paddings.
This op may be generated via the TPU bridge.
For example, with `input` tensor:
```
[[0, 1],
 [4, 5]]
[[2, 3],
 [6, 7]]
[[8, 9],
 [12, 13]]
[[10, 11],
 [14, 15]]
```
`num_splits`:
```
[2, 2]
```
and `paddings`:
```
[1, 1]
```
the expected `outputs` is:
```
[[0, 1, 2],
 [4, 5, 6],
 [8, 9, 10]]
```
Arguments:
inputs: Input tensor slices in row-major order to merge across all dimensions. All
inputs must have the same shape.
num_concats: Number of ways to merge per dimension.
Returns Output tensor formed from merging input slices based on num_concats defined.
func XlaConv ¶
func XlaConv(scope *Scope, lhs tf.Output, rhs tf.Output, window_strides tf.Output, padding tf.Output, lhs_dilation tf.Output, rhs_dilation tf.Output, feature_group_count tf.Output, dimension_numbers string, precision_config string) (output tf.Output)
Wraps the XLA ConvGeneralDilated operator, documented at
https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution
.
Arguments:
lhs: the input tensor rhs: the kernel tensor window_strides: the inter-window strides padding: the padding to apply at the start and end of each input dimensions lhs_dilation: dilation to apply between input elements rhs_dilation: dilation to apply between kernel elements feature_group_count: number of feature groups for grouped convolution. dimension_numbers: a serialized xla::ConvolutionDimensionNumbers proto. precision_config: a serialized xla::PrecisionConfig proto.
func XlaConvV2 ¶
func XlaConvV2(scope *Scope, lhs tf.Output, rhs tf.Output, window_strides tf.Output, padding tf.Output, lhs_dilation tf.Output, rhs_dilation tf.Output, feature_group_count tf.Output, dimension_numbers string, precision_config string, preferred_element_type tf.DataType, optional ...XlaConvV2Attr) (output tf.Output)
Wraps the XLA ConvGeneralDilated operator, documented at
https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution
.
Arguments:
lhs: input tensor rhs: kernel tensor window_strides: inter-window strides padding: padding to apply at the start and end of each input dimensions lhs_dilation: dilation to apply between input elements rhs_dilation: dilation to apply between kernel elements feature_group_count: number of feature groups for grouped convolution. dimension_numbers: serialized xla::ConvolutionDimensionNumbers proto. precision_config: serialized xla::PrecisionConfig proto. preferred_element_type: type of the tensor.
func XlaCustomCall ¶
func XlaCustomCall(scope *Scope, args []tf.Output, target_name string, backend_config string, dtype tf.DataType, shape tf.Shape) (output tf.Output)
Wraps the XLA CustomCall operator
documented at https://www.tensorflow.org/xla/operation_semantics#customcall.
Arguments:
args: A list of `Tensor` with possibly different types. target_name: Name of the function. A call instruction will be emitted which
targets this symbol name.
backend_config: String, used to encode serialized metadata to the backend. dtype: Output tensor data type. shape: Output tensor shape.
func XlaCustomCallV2 ¶ added in v0.3.0
func XlaCustomCallV2(scope *Scope, operands []tf.Output, call_target_name string, backend_config string, has_side_effect bool, result_dtypes []tf.DataType, result_shapes []tf.Shape) (results []tf.Output)
Emits an HLO `CustomCall` operation with multiple outputs.
As opposed to `XlaCustomCall`, this operation supports multiple outputs.
See `CustomCall` specification at
https://tensorflow.org/xla/operation_semantics#customcall,
and `mhlo.custom_call` specification at
https://tensorflow.org/mlir/hlo_ops#mhlocustom_call_mlirmhlocustomcallop.
Arguments:
operands: A sequence of tensors with possibly different types. call_target_name: Name of the user function. The function signature must conform
to version 3 of the API, see `API_VERSION_STATUS_RETURNING_UNIFIED`. All operands and results assumed to be in the default layout.
backend_config: A string that encodes a metadata for the backend. has_side_effect: Indicates whether the custom call has side effects. result_dtypes: Types of all results. result_shapes: Shapes of all results.
func XlaDequantize ¶
func XlaDequantize(scope *Scope, input tf.Output, min_range float32, max_range float32, mode string, transpose_output bool) (output tf.Output)
Takes the packed uint32 input and unpacks the input to uint8 to do
Dequantization on device.
Arguments:
input: Input tensor whose type is uint32 and shape is [d0, ..., dn]. min_range: The minimum scalar value possibly produced for the input. max_range: The maximum scalar value possibly produced for the input. mode: String to determine the dequantize mode in {"MIN_COMBINED", "MIN_FIRST", "SCALED"}. transpose_output: Boolean to determine if output is transposed. transpose_output
is faster when input is large and rank of input is higher than 1.
Returns Output tensor whose type is bfloat16. If transpose_output is true, output shape is [dn * 4, dn-1, ..., d1, d0]. If transpose_output is false, output shape is [d0,..., dn * 4].
func XlaDot ¶
func XlaDot(scope *Scope, lhs tf.Output, rhs tf.Output, dimension_numbers string, precision_config string) (output tf.Output)
Wraps the XLA DotGeneral operator, documented at
https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral
.
Arguments:
lhs: the LHS tensor rhs: the RHS tensor dimension_numbers: a serialized xla::DotDimensionNumbers proto. precision_config: a serialized xla::PrecisionConfig proto.
func XlaDotV2 ¶
func XlaDotV2(scope *Scope, lhs tf.Output, rhs tf.Output, dimension_numbers string, precision_config string, preferred_element_type tf.DataType) (output tf.Output)
Wraps the XLA DotGeneral operator, documented at
https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral
.
Arguments:
lhs: the LHS tensor rhs: the RHS tensor dimension_numbers: a serialized xla::DotDimensionNumbers proto. precision_config: a serialized xla::PrecisionConfig proto. preferred_element_type: The type of the tensor.
func XlaDynamicSlice ¶
func XlaDynamicSlice(scope *Scope, input tf.Output, start_indices tf.Output, size_indices tf.Output) (output tf.Output)
Wraps the XLA DynamicSlice operator, documented at
https://www.tensorflow.org/performance/xla/operation_semantics#dynamicslice
.
DynamicSlice extracts a sub-array from the input array at dynamic start_indices. The size of the slice in each dimension is passed in size_indices, which specify the end point of exclusive slice intervals in each dimension -- [start, start + size). The shape of start_indices must have rank 1, with dimension size equal to the rank of operand.
Arguments:
input: A `Tensor` of type T. start_indices: List of N integers containing the slice size for each
dimension. Each value must be strictly greater than zero, and start + size must be less than or equal to the size of the dimension to avoid implementation defined behavior.
func XlaDynamicUpdateSlice ¶
func XlaDynamicUpdateSlice(scope *Scope, input tf.Output, update tf.Output, indices tf.Output) (output tf.Output)
Wraps the XLA DynamicUpdateSlice operator, documented at
https://www.tensorflow.org/performance/xla/operation_semantics#dynamicupdateslice
.
XlaDynamicUpdateSlice generates a result which is the value of the `input` operand, with a slice update overwritten at `indices`. The shape of `update` determines the shape of the sub-array of the result which is updated. The shape of indices must be rank == 1, with dimension size equal to the rank of `input`.
Handling of out-of-bounds slice indices is implementation-defined.
Arguments:
input: A `Tensor` of type T. update: A `Tensor` of type T. Same rank as `input`. indices: A vector of indices into `input`. Must have length equal to the rank of
`input`.
Returns A `Tensor` of type T.
func XlaEinsum ¶
An op which supports basic einsum op with 2 inputs and 1 output.
This op has better TPU performance since it doesn't have explicit reshape and transpose operations as tf.einsum does.
func XlaGather ¶
func XlaGather(scope *Scope, operand tf.Output, start_indices tf.Output, slice_sizes tf.Output, dimension_numbers string, indices_are_sorted bool) (output tf.Output)
Wraps the XLA Gather operator documented at
https://www.tensorflow.org/xla/operation_semantics#gather
Arguments:
operand: The array we're gathering from. start_indices: Array containing the starting indices of the slices we gather. slice_sizes: slice_sizes[i] is the bounds for the slice on dimension i. dimension_numbers: A serialized xla::GatherDimensionNumbers proto. indices_are_sorted: Boolean indicating if the indices are sorted.
func XlaKeyValueSort ¶
func XlaKeyValueSort(scope *Scope, keys tf.Output, values tf.Output) (sorted_keys tf.Output, sorted_values tf.Output)
Wraps the XLA Sort operator, documented at
https://www.tensorflow.org/performance/xla/operation_semantics#sort
.
Sorts a tensor. Currently only sorts in ascending order are supported.
Arguments:
keys: A `Tensor` of type K. values: A `Tensor` of type V.
Returns:
sorted_keys: A `Tensor` of type K. sorted_values: A `Tensor` of type V.
func XlaOptimizationBarrier ¶
Wraps the XLA OptimizationBarrier operator.
Documented at https://www.tensorflow.org/xla/operation_semantics#optimizationbarrier.
Arguments:
input: A Tuple of Arrays of any type.
func XlaPad ¶
func XlaPad(scope *Scope, input tf.Output, padding_value tf.Output, padding_low tf.Output, padding_high tf.Output, padding_interior tf.Output) (output tf.Output)
Wraps the XLA Pad operator, documented at
https://www.tensorflow.org/performance/xla/operation_semantics#pad
.
Arguments:
input: A `Tensor` of type T. padding_value: A scalar `Tensor` of type T. padding_low: the padding to apply at the start of each input dimensions. Must
be a compile-time constant 1D tensor of length equal to rank of input.
padding_high: the padding to apply at the end of each input dimension. Must
be a compile-time constant 1D tensor of length equal to rank of input.
padding_interior: the padding to apply between each input element. Must
be a compile-time constant 1D tensor of length equal to rank of input, containing only non-negative values.
Returns A `Tensor` of type T.
func XlaRecv ¶
func XlaRecv(scope *Scope, dtype tf.DataType, tensor_name string, shape tf.Shape) (tensor tf.Output)
Receives the named tensor from another XLA computation. Wraps the XLA Recv
operator documented at
https://www.tensorflow.org/performance/xla/operation_semantics#recv .
Arguments:
dtype: The type of the tensor. tensor_name: A string key that identifies the channel. shape: The shape of the tensor.
Returns The tensor to receive.
func XlaRecvFromHost ¶
func XlaRecvFromHost(scope *Scope, Toutput tf.DataType, shape tf.Shape, key string) (output tf.Output)
An op to receive a tensor from the host.
output: the tensor that will be received from the host.
Toutput: element type for output.
shape: shape for output.
key: A unique identifier for this region used to match up host transfers.
func XlaRecvTPUEmbeddingActivations ¶ added in v0.2.0
func XlaRecvTPUEmbeddingActivations(scope *Scope, deduplication_data tf.Output, num_tables int64, config string) (outputs []tf.Output)
An op that receives embedding activations on the TPU.
The TPU system performs the embedding lookups and aggregations. The results of these aggregations are visible to the Tensorflow Graph as the outputs of a XlaRecvTPUEmbeddingActivations Op. This op returns a list containing one Tensor of activations per table specified in the model.
Arguments:
deduplication_data: A Tensor with type=DT_VARIANT containing the deduplication
data. The tensor is an XLA nested tuple containing N elements (where N is the ratio of the number of embedding cores to tensor cores per TPU chip). Each element of the nested tuple is a tuple of rank 1 tensors. Each tensor either contains indices (DT_UINT32) for embedding lookup on the TensorCore or weights (DT_FLOAT) to apply to the output of the embedding lookup operation.
num_tables: The number of output activation tensors. If a feature descriptor is present in the TPU embedding config, it is equal to the number of features; otherwise it is equal to the number of embedding tables in the model.
config: Serialized TPUEmbeddingConfiguration proto.
Returns A TensorList of embedding activations containing one Tensor per embedding table in the model.
func XlaRecvTPUEmbeddingActivationsV2 ¶ added in v0.8.2
func XlaRecvTPUEmbeddingActivationsV2(scope *Scope, deduplication_data tf.Output, num_tables int64, config string, embedding_partitions string, hbm_buffers_config string, tpu_topology string) (outputs []tf.Output)
An op that receives embedding activations on the TPU.
The TPU system performs the embedding lookups and aggregations. The results of these aggregations are visible to the Tensorflow Graph as the outputs of a XlaRecvTPUEmbeddingActivations Op. This op returns a list containing one Tensor of activations per table specified in the model.
Arguments:
deduplication_data: A Tensor with type=DT_VARIANT containing the deduplication
data. The tensor is an XLA nested tuple containing N elements (where N is the ratio of the number of embedding cores to tensor cores per TPU chip). Each element of the nested tuple is a tuple of rank 1 tensors. Each tensor either contains indices (DT_UINT32) for embedding lookup on the TensorCore or weights (DT_FLOAT) to apply to the output of the embedding lookup operation.
num_tables: The number of output activation tensors. If a feature descriptor is present in the TPU embedding config, it is equal to the number of features; otherwise it is equal to the number of embedding tables in the model.
config: Serialized TPUEmbeddingConfiguration proto.
embedding_partitions: Serialized EmbeddingPartitionsProto proto.
hbm_buffers_config: Serialized HbmBuffersConfig proto.
tpu_topology: Serialized TpuTopologyArgsProto proto.
Returns A TensorList of embedding activations containing one Tensor per embedding table in the model.
func XlaRecvTPUEmbeddingDeduplicationData ¶ added in v0.2.0
Receives deduplication data (indices and weights) from the embedding core.
The deduplication data is a Tensor with type=DT_VARIANT. The tensor itself is an XLA nested tuple containing N elements (where N is the ratio of the number of embedding cores to tensor cores per TPU chip). Each element of the nested tuple is a tuple of rank 1 tensors. Each tensor either contains indices (DT_UINT32) for embedding lookup on the TensorCore or weights (DT_FLOAT) to apply to the output of the embedding lookup operation.
Arguments:
config: Serialized TPUEmbeddingConfiguration proto.
func XlaRecvTPUEmbeddingDeduplicationDataV2 ¶ added in v0.8.2
func XlaRecvTPUEmbeddingDeduplicationDataV2(scope *Scope, config string, embedding_partitions string, hbm_buffers_config string, tpu_topology string) (output tf.Output)
Receives deduplication data (indices and weights) from the embedding core.
The deduplication data is a Tensor with type=DT_VARIANT. The tensor itself is an XLA nested tuple containing N elements (where N is the ratio of the number of embedding cores to tensor cores per TPU chip). Each element of the nested tuple is a tuple of rank 1 tensors. Each tensor either contains indices (DT_UINT32) for embedding lookup on the TensorCore or weights (DT_FLOAT) to apply to the output of the embedding lookup operation.
Arguments:
config: Serialized TPUEmbeddingConfiguration proto.
embedding_partitions: Serialized EmbeddingPartitionsProto proto.
hbm_buffers_config: Serialized HbmBuffersConfig proto.
tpu_topology: Serialized TpuTopologyArgsProto proto.
func XlaReducePrecision ¶ added in v0.3.0
func XlaReducePrecision(scope *Scope, operand tf.Output, exponent_bits int64, mantissa_bits int64) (output tf.Output)
Wraps the XLA ReducePrecision operator
documented at https://www.tensorflow.org/xla/operation_semantics#reduceprecision.
Arguments:
operand: array of floating-point type.
exponent_bits: number of exponent bits in lower-precision format.
mantissa_bits: number of mantissa bits in lower-precision format.
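For instance, a sketch that rounds float32 values to a bfloat16-like precision (8 exponent bits, 7 mantissa bits); the bit counts are illustrative:
```
s := NewScope()
x := Const(s, []float32{1.000123, 2.5, 3.999})
reduced := XlaReducePrecision(s, x, 8, 7) // 8 exponent bits, 7 mantissa bits
if s.Err() != nil {
	panic(s.Err())
}
_ = reduced
```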
func XlaReduceScatter ¶
func XlaReduceScatter(scope *Scope, input tf.Output, group_assignment tf.Output, scatter_dimension tf.Output, reduce_op string) (output tf.Output)
Wraps the XLA ReduceScatter operator
documented at https://www.tensorflow.org/xla/operation_semantics#reducescatter.
Arguments:
input: Array or a non-empty tuple of arrays to reduce across replicas.
group_assignment: Groups between which the reductions are performed.
scatter_dimension: Dimension to scatter.
reduce_op: Reduction computation.
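A rough sketch of wiring up the op for two replicas in one group; the group assignment layout, scatter dimension, and the "Add" reduction string are assumptions for illustration and depend on the replica setup actually in use:
```
s := NewScope()
input := Placeholder(s, tf.Float)              // per-replica input, e.g. shape [4, 8]
groupAssignment := Const(s, [][]int32{{0, 1}}) // one group containing replicas 0 and 1 (assumed layout)
scatterDim := Const(s, int32(0))               // scatter along dimension 0
out := XlaReduceScatter(s, input, groupAssignment, scatterDim, "Add") // "Add" assumed to be a valid reduce_op
if s.Err() != nil {
	panic(s.Err())
}
_ = out
```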
func XlaRemoveDynamicDimensionSize ¶
func XlaRemoveDynamicDimensionSize(scope *Scope, input tf.Output, dim_index tf.Output) (output tf.Output)
Inverse of XlaSetDynamicDimensionSize.
Make an XLA bounded dynamic dimension into a static dimension. The bound of the size of dimension `dim_index` becomes the static dimension size.
func XlaRngBitGenerator ¶
func XlaRngBitGenerator(scope *Scope, algorithm tf.Output, initial_state tf.Output, shape tf.Output, optional ...XlaRngBitGeneratorAttr) (output_key tf.Output, output tf.Output)
Stateless PRNG bit generator.
Wraps the XLA RngBitGenerator operator, documented at
https://www.tensorflow.org/performance/xla/operation_semantics#rngbitgenerator.
Arguments:
algorithm: The PRNG algorithm to use, one of
tf.random.Algorithm.{PHILOX, THREEFRY, AUTO_SELECT}.
initial_state: Initial state for the PRNG algorithm. For THREEFRY, it should be
a u64[2] and for PHILOX a u64[3].
shape: The output shape of the generated data.
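A sketch of requesting a block of random bits with the THREEFRY algorithm; the numeric enum value used for THREEFRY and the uint64 state layout are assumptions based on the argument descriptions above:
```
s := NewScope()
algorithm := Const(s, int32(2))          // assumed enum value for tf.random.Algorithm.THREEFRY
initialState := Const(s, []uint64{0, 0}) // u64[2] state for THREEFRY
shape := Const(s, []int32{2, 128})       // shape of the generated data
newKey, bits := XlaRngBitGenerator(s, algorithm, initialState, shape)
if s.Err() != nil {
	panic(s.Err())
}
_, _ = newKey, bits
```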
func XlaSelfAdjointEig ¶
func XlaSelfAdjointEig(scope *Scope, a tf.Output, lower bool, max_iter int64, epsilon float32) (w tf.Output, v tf.Output)
Computes the eigen decomposition of a batch of self-adjoint matrices
(Note: Only real inputs are supported).
Computes the eigenvalues and eigenvectors of the innermost N-by-N matrices in tensor such that tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i], for i=0...N-1.
Arguments:
a: the input tensor.
lower: a boolean specifying whether the calculation is done with the lower triangular part or the upper triangular part.
max_iter: maximum number of sweep updates, i.e., passes over the whole lower or upper triangular part depending on the parameter lower. Heuristically, it has been argued that approximately log(N) sweeps are needed in practice (Ref: Golub & van Loan, "Matrix Computations").
epsilon: the tolerance ratio.
Returns:
w: The eigenvalues in ascending order, each repeated according to its
multiplicity.
v: The column v[..., :, i] is the normalized eigenvector corresponding to the
eigenvalue w[..., i].
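A minimal sketch for a single 2x2 symmetric matrix; the max_iter and epsilon values are illustrative:
```
s := NewScope()
a := Const(s, [][]float32{{2, 1}, {1, 2}}) // self-adjoint (symmetric) input
w, v := XlaSelfAdjointEig(s, a, true /* lower */, 100 /* max_iter */, 1e-6 /* epsilon */)
if s.Err() != nil {
	panic(s.Err())
}
_, _ = w, v
```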
func XlaSend ¶
Sends the named tensor to another XLA computation. Wraps the XLA Send operator
documented at
https://www.tensorflow.org/performance/xla/operation_semantics#send .
Arguments:
tensor: The tensor to send.
tensor_name: A string key that identifies the channel.
Returns the created operation.
func XlaSendTPUEmbeddingGradients ¶ added in v0.2.0
func XlaSendTPUEmbeddingGradients(scope *Scope, gradients []tf.Output, learning_rates []tf.Output, deduplication_data tf.Output, config string) (o *tf.Operation)
An op that performs gradient updates of embedding tables.
The gradients argument is a TensorList having the same length and shapes as the return value of XlaRecvTPUEmbeddingActivations, but contains gradients of the model's loss with respect to the embedding activations. The embedding tables are updated from these gradients via the optimizer specified in the TPUEmbeddingConfiguration proto given to tpu.initialize_system.
Arguments:
gradients: A TensorList of gradients with which to update embedding tables.
learning_rates: A TensorList of learning rates used for updating the embedding tables via the optimizer. The length of the TensorList must be equal to the number of dynamic learning rate tags specified in the TPUEmbeddingConfiguration proto.
deduplication_data: A Tensor with type=DT_VARIANT containing the deduplication
data. The tensor is an XLA nested tuple containing N elements (where N is the ratio of the number of embedding cores to tensor cores per TPU chip). Each element of the nested tuple is a tuple of rank 1 tensors. Each tensor either contains indices (DT_UINT32) for embedding lookup on the TensorCore or weights (DT_FLOAT) to apply to the output of the embedding lookup operation.
config: Serialized TPUEmbeddingConfiguration proto.
Returns the created operation.
func XlaSendTPUEmbeddingGradientsV2 ¶ added in v0.8.2
func XlaSendTPUEmbeddingGradientsV2(scope *Scope, gradients []tf.Output, learning_rates []tf.Output, deduplication_data tf.Output, config string, embedding_partitions string, hbm_buffers_config string, tpu_topology string) (o *tf.Operation)
An op that performs gradient updates of embedding tables.
The gradients argument is a TensorList having the same length and shapes as the return value of XlaRecvTPUEmbeddingActivations, but contains gradients of the model's loss with respect to the embedding activations. The embedding tables are updated from these gradients via the optimizer specified in the TPUEmbeddingConfiguration proto given to tpu.initialize_system.
Arguments:
gradients: A TensorList of gradients with which to update embedding tables.
learning_rates: A TensorList of learning rates used for updating the embedding tables via the optimizer. The length of the TensorList must be equal to the number of dynamic learning rate tags specified in the TPUEmbeddingConfiguration proto.
deduplication_data: A Tensor with type=DT_VARIANT containing the deduplication
data. The tensor is an XLA nested tuple containing N elements (where N is the ratio of the number of embedding cores to tensor cores per TPU chip). Each element of the nested tuple is a tuple of rank 1 tensors. Each tensor either contains indices (DT_UINT32) for embedding lookup on the TensorCore or weights (DT_FLOAT) to apply to the output of the embedding lookup operation.
config: Serialized TPUEmbeddingConfiguration proto.
embedding_partitions: Serialized EmbeddingPartitionsProto proto.
hbm_buffers_config: Serialized HbmBuffersConfig proto.
tpu_topology: Serialized TpuTopologyArgsProto proto.
Returns the created operation.
func XlaSendToHost ¶
An op to send a tensor to the host.
input: the tensor that will be sent to the host.
Tinput: element type for input.
key: A unique identifier for this region used to match up host transfers.
Returns the created operation.
func XlaSetBound ¶
Set a bound for the given input value as a hint to the XLA compiler and return the same value.
func XlaSetDynamicDimensionSize ¶
func XlaSetDynamicDimensionSize(scope *Scope, input tf.Output, dim_index tf.Output, size tf.Output) (output tf.Output)
Make a static dimension into an XLA bounded dynamic dimension.
The current static dimension size will become the bound and the second operand becomes the dynamic size of the dimension.
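A sketch that marks dimension 0 of a static 4x3 constant as dynamic with runtime size 2, then turns it back into a static dimension with XlaRemoveDynamicDimensionSize (documented above); the shapes and sizes are illustrative:
```
s := NewScope()
input := Const(s, [][]float32{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}, {10, 11, 12}})
dimIndex := Const(s, int32(0))
size := Const(s, int32(2)) // dynamic size within the static bound of 4
dynamic := XlaSetDynamicDimensionSize(s, input, dimIndex, size)
static := XlaRemoveDynamicDimensionSize(s, dynamic, dimIndex)
if s.Err() != nil {
	panic(s.Err())
}
_ = static
```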
func XlaSharding ¶
An op which shards the input based on the given sharding attribute. It can
selectively annotate a subset of tensor dimensions by skipping unspecified_dims, and the sharding annotation should be replicated in those dims.
func XlaSort ¶
Wraps the XLA Sort operator, documented at
https://www.tensorflow.org/performance/xla/operation_semantics#sort
.
Sorts a tensor. Currently only ascending sort order is supported.
Arguments:
input: A `Tensor` of type T.
Returns A `Tensor` of type T.
func XlaSplitND ¶
func XlaSplitND(scope *Scope, input tf.Output, N int64, num_splits []int64, optional ...XlaSplitNDAttr) (outputs []tf.Output)
Splits input tensor across all dimensions.
An op which slices the input tensor based on the given num_splits attribute, optionally pads the slices, and returns the slices. Slices are returned in row-major order.
This op may be generated via the TPU bridge.
For example, with `input` tensor:
```
[[0, 1, 2],
 [3, 4, 5],
 [6, 7, 8]]
```
`num_splits`:
```
[2, 2]
```
and `paddings`:
```
[1, 1]
```
the expected `outputs` is:
```
[[0, 1],
 [3, 4]]

[[2, 0],
 [5, 0]]

[[6, 7],
 [0, 0]]

[[8, 0],
 [0, 0]]
```
Arguments:
input: Input tensor to split across all dimensions.
num_splits: Number of ways to split per dimension. Shape dimensions must be evenly divisible.
Returns Output slices based on input and num_splits defined, in row-major order.
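A sketch that splits an evenly divisible 4x4 constant into four 2x2 slices (avoiding the optional paddings attribute); `N` must match the product of `num_splits`:
```
s := NewScope()
input := Const(s, [][]int32{
	{0, 1, 2, 3},
	{4, 5, 6, 7},
	{8, 9, 10, 11},
	{12, 13, 14, 15},
})
outputs := XlaSplitND(s, input, 4 /* N = 2*2 slices */, []int64{2, 2})
if s.Err() != nil {
	panic(s.Err())
}
_ = outputs
```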
func XlaSpmdFullToShardShape ¶
func XlaSpmdFullToShardShape(scope *Scope, input tf.Output, manual_sharding string, optional ...XlaSpmdFullToShardShapeAttr) (output tf.Output)
An op used by XLA SPMD partitioner to switch from automatic partitioning to
manual partitioning. It annotates the input (full-shape, to be automatically partitioned) with the same sharding used by manual partitioning, and outputs a shard-shaped tensor to be consumed by later manually-partitioned ops. If the shape is not evenly partitionable, the padding region will be masked with 0s. The conversion can happen partially in subgroups, by specifying the dim attribute, where only that dim will be converted.
func XlaSpmdShardToFullShape ¶
func XlaSpmdShardToFullShape(scope *Scope, input tf.Output, manual_sharding string, full_shape tf.Shape, optional ...XlaSpmdShardToFullShapeAttr) (output tf.Output)
An op used by XLA SPMD partitioner to switch from manual partitioning to
automatic partitioning. It converts the shard-shaped, manually partitioned input into full-shaped tensor to be partitioned automatically with the same sharding used by manual partitioning. The conversion can happen partially in subgroups, by specifying the dim attribute, where only that dim will be converted.
func XlaSvd ¶
func XlaSvd(scope *Scope, a tf.Output, max_iter int64, epsilon float32, precision_config string) (s tf.Output, u tf.Output, v tf.Output)
Computes the singular value decomposition of a batch of matrices
(Note: Only real inputs are supported).
Computes the eigenvalues and eigenvectors of the innermost M-by-N matrices in tensor such that tensor[...,:,:] = u[..., :, :] * Diag(s[..., :]) * Transpose(v[...,:,:]).
Arguments:
a: the input tensor.
max_iter: maximum number of sweep updates, i.e., passes over the whole lower or upper triangular part. Heuristically, it has been argued that approximately log(min(M, N)) sweeps are needed in practice (Ref: Golub & van Loan, "Matrix Computations").
epsilon: the tolerance ratio.
precision_config: a serialized xla::PrecisionConfig proto.
Returns:
s: Singular values. The values are sorted in reverse order of magnitude, so
s[..., 0] is the largest value, s[..., 1] is the second largest, etc.
u: Left singular vectors.
v: Right singular vectors.
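A minimal sketch for a single 2x2 matrix; passing an empty precision_config string is an assumption here (the field expects a serialized xla::PrecisionConfig proto):
```
s := NewScope()
a := Const(s, [][]float32{{3, 0}, {4, 5}})
sv, u, v := XlaSvd(s, a, 100 /* max_iter */, 1e-6 /* epsilon */, "" /* precision_config (assumed empty proto) */)
if s.Err() != nil {
	panic(s.Err())
}
_, _, _ = sv, u, v
```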
func ZerosLike ¶
Returns a tensor of zeros with the same shape and type as x.
Arguments:
x: a tensor of type T.
Returns a tensor of the same shape and type as x but filled with zeros.
func Zeta ¶
Compute the Hurwitz zeta function \(\zeta(x, q)\).
The Hurwitz zeta function is defined as:
\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\)
func ZipDataset ¶
func ZipDataset(scope *Scope, input_datasets []tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ZipDatasetAttr) (handle tf.Output)
Creates a dataset that zips together `input_datasets`.
The elements of the resulting dataset are created by zipping corresponding elements from each of the input datasets.
The size of the resulting dataset will match the size of the smallest input dataset, and no error will be raised if input datasets have different sizes.
Arguments:
input_datasets: List of `N` variant Tensors representing datasets to be zipped together.
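A sketch of zipping two dataset variant handles; the placeholders for `ds1` and `ds2` stand in for handles produced by other dataset ops earlier in the graph, and the element types and shapes here are assumptions for illustration:
```
s := NewScope()
// In practice these variant handles come from other dataset ops; placeholders keep the sketch self-contained.
ds1 := Placeholder(s, tf.Variant)
ds2 := Placeholder(s, tf.Variant)
zipped := ZipDataset(s,
	[]tf.Output{ds1, ds2},
	[]tf.DataType{tf.Int64, tf.Int64},              // output_types: one per zipped component (assumed)
	[]tf.Shape{tf.ScalarShape(), tf.ScalarShape()}, // output_shapes: one per zipped component (assumed)
)
if s.Err() != nil {
	panic(s.Err())
}
_ = zipped
```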
Types ¶
type AbortAttr ¶
type AbortAttr func(optionalAttr)
AbortAttr is an optional argument to Abort.
func AbortErrorMsg ¶
AbortErrorMsg sets the optional error_msg attribute to value.
value: A string which is the message associated with the exception. If not specified, defaults to ""
func AbortExitWithoutError ¶
AbortExitWithoutError sets the optional exit_without_error attribute to value. If not specified, defaults to false
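These attribute setters follow the package-wide functional-option pattern: each returns a closure over optionalAttr that the op wrapper applies when building the node. A hedged sketch, assuming AbortErrorMsg takes a string and AbortExitWithoutError takes a bool, as the descriptions above indicate:
```
s := NewScope()
abortOp := Abort(s,
	AbortErrorMsg("shutting down"), // sets the error_msg attribute (assumed string setter)
	AbortExitWithoutError(true),    // sets the exit_without_error attribute (assumed bool setter)
)
if s.Err() != nil {
	panic(s.Err())
}
_ = abortOp
```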
type AddManySparseToTensorsMapAttr ¶
type AddManySparseToTensorsMapAttr func(optionalAttr)
AddManySparseToTensorsMapAttr is an optional argument to AddManySparseToTensorsMap.
func AddManySparseToTensorsMapContainer ¶
func AddManySparseToTensorsMapContainer(value string) AddManySparseToTensorsMapAttr
AddManySparseToTensorsMapContainer sets the optional container attribute to value.
value: The container name for the `SparseTensorsMap` created by this op. If not specified, defaults to ""
func AddManySparseToTensorsMapSharedName ¶
func AddManySparseToTensorsMapSharedName(value string) AddManySparseToTensorsMapAttr
AddManySparseToTensorsMapSharedName sets the optional shared_name attribute to value.
value: The shared name for the `SparseTensorsMap` created by this op. If blank, the new Operation's unique name is used. If not specified, defaults to ""
type AddSparseToTensorsMapAttr ¶
type AddSparseToTensorsMapAttr func(optionalAttr)
AddSparseToTensorsMapAttr is an optional argument to AddSparseToTensorsMap.
func AddSparseToTensorsMapContainer ¶
func AddSparseToTensorsMapContainer(value string) AddSparseToTensorsMapAttr
AddSparseToTensorsMapContainer sets the optional container attribute to value.
value: The container name for the `SparseTensorsMap` created by this op. If not specified, defaults to ""
func AddSparseToTensorsMapSharedName ¶
func AddSparseToTensorsMapSharedName(value string) AddSparseToTensorsMapAttr
AddSparseToTensorsMapSharedName sets the optional shared_name attribute to value.
value: The shared name for the `SparseTensorsMap` created by this op. If blank, the new Operation's unique name is used. If not specified, defaults to ""
type AllAttr ¶
type AllAttr func(optionalAttr)
AllAttr is an optional argument to All.
func AllKeepDims ¶
AllKeepDims sets the optional keep_dims attribute to value.
value: If true, retain reduced dimensions with length 1. If not specified, defaults to false
type AllCandidateSamplerAttr ¶
type AllCandidateSamplerAttr func(optionalAttr)
AllCandidateSamplerAttr is an optional argument to AllCandidateSampler.
func AllCandidateSamplerSeed ¶
func AllCandidateSamplerSeed(value int64) AllCandidateSamplerAttr
AllCandidateSamplerSeed sets the optional seed attribute to value.
value: If either seed or seed2 is set to a non-zero value, the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed. If not specified, defaults to 0
func AllCandidateSamplerSeed2 ¶
func AllCandidateSamplerSeed2(value int64) AllCandidateSamplerAttr
AllCandidateSamplerSeed2 sets the optional seed2 attribute to value.
value: A second seed to avoid seed collision. If not specified, defaults to 0
type AnonymousMutableDenseHashTableAttr ¶
type AnonymousMutableDenseHashTableAttr func(optionalAttr)
AnonymousMutableDenseHashTableAttr is an optional argument to AnonymousMutableDenseHashTable.
func AnonymousMutableDenseHashTableInitialNumBuckets ¶
func AnonymousMutableDenseHashTableInitialNumBuckets(value int64) AnonymousMutableDenseHashTableAttr
AnonymousMutableDenseHashTableInitialNumBuckets sets the optional initial_num_buckets attribute to value.
value: The initial number of hash table buckets. Must be a power of 2. If not specified, defaults to 131072
func AnonymousMutableDenseHashTableMaxLoadFactor ¶
func AnonymousMutableDenseHashTableMaxLoadFactor(value float32) AnonymousMutableDenseHashTableAttr
AnonymousMutableDenseHashTableMaxLoadFactor sets the optional max_load_factor attribute to value.
value: The maximum ratio between number of entries and number of buckets before growing the table. Must be between 0 and 1. If not specified, defaults to 0.8
func AnonymousMutableDenseHashTableValueShape ¶
func AnonymousMutableDenseHashTableValueShape(value tf.Shape) AnonymousMutableDenseHashTableAttr
AnonymousMutableDenseHashTableValueShape sets the optional value_shape attribute to value.
value: The shape of each value. If not specified, defaults to {}
type AnonymousMutableHashTableOfTensorsAttr ¶
type AnonymousMutableHashTableOfTensorsAttr func(optionalAttr)
AnonymousMutableHashTableOfTensorsAttr is an optional argument to AnonymousMutableHashTableOfTensors.
func AnonymousMutableHashTableOfTensorsValueShape ¶
func AnonymousMutableHashTableOfTensorsValueShape(value tf.Shape) AnonymousMutableHashTableOfTensorsAttr
AnonymousMutableHashTableOfTensorsValueShape sets the optional value_shape attribute to value. If not specified, defaults to {}
type AnyAttr ¶
type AnyAttr func(optionalAttr)
AnyAttr is an optional argument to Any.
func AnyKeepDims ¶
AnyKeepDims sets the optional keep_dims attribute to value.
value: If true, retain reduced dimensions with length 1. If not specified, defaults to false
type ApproxTopKAttr ¶ added in v0.2.0
type ApproxTopKAttr func(optionalAttr)
ApproxTopKAttr is an optional argument to ApproxTopK.
func ApproxTopKAggregateToTopk ¶ added in v0.2.0
func ApproxTopKAggregateToTopk(value bool) ApproxTopKAttr
ApproxTopKAggregateToTopk sets the optional aggregate_to_topk attribute to value.
value: When true, aggregates approximate results to top-k. When false, returns the approximate results. The number of approximate results is implementation-defined and is greater than or equal to the specified `k`. If not specified, defaults to true
func ApproxTopKIsMaxK ¶ added in v0.2.0
func ApproxTopKIsMaxK(value bool) ApproxTopKAttr
ApproxTopKIsMaxK sets the optional is_max_k attribute to value.
value: When true, computes max-k; otherwise computes min-k. If not specified, defaults to true
func ApproxTopKRecallTarget ¶ added in v0.2.0
func ApproxTopKRecallTarget(value float32) ApproxTopKAttr
ApproxTopKRecallTarget sets the optional recall_target attribute to value.
value: Recall target for the approximation. Range in (0,1] If not specified, defaults to 0.95
func ApproxTopKReductionDimension ¶ added in v0.2.0
func ApproxTopKReductionDimension(value int64) ApproxTopKAttr
ApproxTopKReductionDimension sets the optional reduction_dimension attribute to value.
value: Integer dimension along which to search. Default: -1. If not specified, defaults to -1
func ApproxTopKReductionInputSizeOverride ¶ added in v0.2.0
func ApproxTopKReductionInputSizeOverride(value int64) ApproxTopKAttr
ApproxTopKReductionInputSizeOverride sets the optional reduction_input_size_override attribute to value.
value: When set to a positive value, it overrides the size determined by `input[reduction_dim]` for evaluating the recall. This option is useful when the given `input` is only a subset of the overall computation in SPMD or distributed pipelines, where the true input size cannot be inferred from the `input` shape. If not specified, defaults to -1
type ApproximateEqualAttr ¶
type ApproximateEqualAttr func(optionalAttr)
ApproximateEqualAttr is an optional argument to ApproximateEqual.
func ApproximateEqualTolerance ¶
func ApproximateEqualTolerance(value float32) ApproximateEqualAttr
ApproximateEqualTolerance sets the optional tolerance attribute to value. If not specified, defaults to 1e-05
type ArgMaxAttr ¶
type ArgMaxAttr func(optionalAttr)
ArgMaxAttr is an optional argument to ArgMax.
func ArgMaxOutputType ¶
func ArgMaxOutputType(value tf.DataType) ArgMaxAttr
ArgMaxOutputType sets the optional output_type attribute to value. If not specified, defaults to DT_INT64
type ArgMinAttr ¶
type ArgMinAttr func(optionalAttr)
ArgMinAttr is an optional argument to ArgMin.
func ArgMinOutputType ¶
func ArgMinOutputType(value tf.DataType) ArgMinAttr
ArgMinOutputType sets the optional output_type attribute to value. If not specified, defaults to DT_INT64
type AsStringAttr ¶
type AsStringAttr func(optionalAttr)
AsStringAttr is an optional argument to AsString.
func AsStringFill ¶
func AsStringFill(value string) AsStringAttr
AsStringFill sets the optional fill attribute to value.
value: The value to pad if width > -1. If empty, pads with spaces. Another typical value is '0'. String cannot be longer than 1 character. If not specified, defaults to ""
func AsStringPrecision ¶
func AsStringPrecision(value int64) AsStringAttr
AsStringPrecision sets the optional precision attribute to value.
value: The post-decimal precision to use for floating point numbers. Only used if precision > -1. If not specified, defaults to -1
func AsStringScientific ¶
func AsStringScientific(value bool) AsStringAttr
AsStringScientific sets the optional scientific attribute to value.
value: Use scientific notation for floating point numbers. If not specified, defaults to false
func AsStringShortest ¶
func AsStringShortest(value bool) AsStringAttr
AsStringShortest sets the optional shortest attribute to value.
value: Use shortest representation (either scientific or standard) for floating point numbers. If not specified, defaults to false
func AsStringWidth ¶
func AsStringWidth(value int64) AsStringAttr
AsStringWidth sets the optional width attribute to value.
value: Pad pre-decimal numbers to this width. Applies to both floating point and integer numbers. Only used if width > -1. If not specified, defaults to -1
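A sketch combining several AsString attributes; the AsString wrapper itself is assumed to have the usual (scope, input, optional ...AsStringAttr) shape:
```
s := NewScope()
x := Const(s, []float32{3.14159, 2.71828})
str := AsString(s, x,
	AsStringPrecision(3), // 3 post-decimal digits
	AsStringWidth(8),     // pad pre-decimal numbers to width 8
	AsStringFill("0"),    // pad with '0' instead of spaces
)
if s.Err() != nil {
	panic(s.Err())
}
_ = str
```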
type AssertAttr ¶
type AssertAttr func(optionalAttr)
AssertAttr is an optional argument to Assert.
func AssertSummarize ¶
func AssertSummarize(value int64) AssertAttr
AssertSummarize sets the optional summarize attribute to value.
value: Print this many entries of each tensor. If not specified, defaults to 3
type AssignVariableOpAttr ¶
type AssignVariableOpAttr func(optionalAttr)
AssignVariableOpAttr is an optional argument to AssignVariableOp.
func AssignVariableOpValidateShape ¶
func AssignVariableOpValidateShape(value bool) AssignVariableOpAttr
AssignVariableOpValidateShape sets the optional validate_shape attribute to value. If not specified, defaults to false
type AssignVariableXlaConcatNDAttr ¶
type AssignVariableXlaConcatNDAttr func(optionalAttr)
AssignVariableXlaConcatNDAttr is an optional argument to AssignVariableXlaConcatND.
func AssignVariableXlaConcatNDPaddings ¶
func AssignVariableXlaConcatNDPaddings(value []int64) AssignVariableXlaConcatNDAttr
AssignVariableXlaConcatNDPaddings sets the optional paddings attribute to value.
value: Optional list of right paddings per dimension to strip from the final merged tensor. These paddings must not exceed the dimension size of the merged result prior to stripping paddings. If not specified, defaults to {}
type AudioSpectrogramAttr ¶
type AudioSpectrogramAttr func(optionalAttr)
AudioSpectrogramAttr is an optional argument to AudioSpectrogram.
func AudioSpectrogramMagnitudeSquared ¶
func AudioSpectrogramMagnitudeSquared(value bool) AudioSpectrogramAttr
AudioSpectrogramMagnitudeSquared sets the optional magnitude_squared attribute to value.
value: Whether to return the squared magnitude or just the magnitude. Using squared magnitude can avoid extra calculations. If not specified, defaults to false
type AudioSummaryAttr ¶
type AudioSummaryAttr func(optionalAttr)
AudioSummaryAttr is an optional argument to AudioSummary.
func AudioSummaryMaxOutputs ¶
func AudioSummaryMaxOutputs(value int64) AudioSummaryAttr
AudioSummaryMaxOutputs sets the optional max_outputs attribute to value.
value: Max number of batch elements to generate audio for. If not specified, defaults to 3
REQUIRES: value >= 1
type AudioSummaryV2Attr ¶
type AudioSummaryV2Attr func(optionalAttr)
AudioSummaryV2Attr is an optional argument to AudioSummaryV2.
func AudioSummaryV2MaxOutputs ¶
func AudioSummaryV2MaxOutputs(value int64) AudioSummaryV2Attr
AudioSummaryV2MaxOutputs sets the optional max_outputs attribute to value.
value: Max number of batch elements to generate audio for. If not specified, defaults to 3
REQUIRES: value >= 1
type AutoShardDatasetAttr ¶
type AutoShardDatasetAttr func(optionalAttr)
AutoShardDatasetAttr is an optional argument to AutoShardDataset.
func AutoShardDatasetAutoShardPolicy ¶
func AutoShardDatasetAutoShardPolicy(value int64) AutoShardDatasetAttr
AutoShardDatasetAutoShardPolicy sets the optional auto_shard_policy attribute to value. If not specified, defaults to 0
func AutoShardDatasetNumReplicas ¶
func AutoShardDatasetNumReplicas(value int64) AutoShardDatasetAttr
AutoShardDatasetNumReplicas sets the optional num_replicas attribute to value. If not specified, defaults to 0
type AvgPool3DAttr ¶
type AvgPool3DAttr func(optionalAttr)
AvgPool3DAttr is an optional argument to AvgPool3D.
func AvgPool3DDataFormat ¶
func AvgPool3DDataFormat(value string) AvgPool3DAttr
AvgPool3DDataFormat sets the optional data_format attribute to value.
value: The data format of the input and output data. With the default format "NDHWC", the data is stored in the order of:
[batch, in_depth, in_height, in_width, in_channels].
Alternatively, the format could be "NCDHW", the data storage order is:
[batch, in_channels, in_depth, in_height, in_width].
If not specified, defaults to "NDHWC"
type AvgPool3DGradAttr ¶
type AvgPool3DGradAttr func(optionalAttr)
AvgPool3DGradAttr is an optional argument to AvgPool3DGrad.
func AvgPool3DGradDataFormat ¶
func AvgPool3DGradDataFormat(value string) AvgPool3DGradAttr
AvgPool3DGradDataFormat sets the optional data_format attribute to value.
value: The data format of the input and output data. With the default format "NDHWC", the data is stored in the order of:
[batch, in_depth, in_height, in_width, in_channels].
Alternatively, the format could be "NCDHW", the data storage order is:
[batch, in_channels, in_depth, in_height, in_width].
If not specified, defaults to "NDHWC"
type AvgPoolAttr ¶
type AvgPoolAttr func(optionalAttr)
AvgPoolAttr is an optional argument to AvgPool.
func AvgPoolDataFormat ¶
func AvgPoolDataFormat(value string) AvgPoolAttr
AvgPoolDataFormat sets the optional data_format attribute to value.
value: Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
If not specified, defaults to "NHWC"
type AvgPoolGradAttr ¶
type AvgPoolGradAttr func(optionalAttr)
AvgPoolGradAttr is an optional argument to AvgPoolGrad.
func AvgPoolGradDataFormat ¶
func AvgPoolGradDataFormat(value string) AvgPoolGradAttr
AvgPoolGradDataFormat sets the optional data_format attribute to value.
value: Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
If not specified, defaults to "NHWC"
type BatchAttr ¶
type BatchAttr func(optionalAttr)
BatchAttr is an optional argument to Batch.
func BatchAllowedBatchSizes ¶
BatchAllowedBatchSizes sets the optional allowed_batch_sizes attribute to value. If not specified, defaults to {}
func BatchBatchingQueue ¶
BatchBatchingQueue sets the optional batching_queue attribute to value. If not specified, defaults to ""
func BatchContainer ¶
BatchContainer sets the optional container attribute to value. If not specified, defaults to ""
func BatchMaxEnqueuedBatches ¶
BatchMaxEnqueuedBatches sets the optional max_enqueued_batches attribute to value. If not specified, defaults to 10
func BatchSharedName ¶
BatchSharedName sets the optional shared_name attribute to value. If not specified, defaults to ""
type BatchDatasetAttr ¶
type BatchDatasetAttr func(optionalAttr)
BatchDatasetAttr is an optional argument to BatchDataset.
func BatchDatasetMetadata ¶
func BatchDatasetMetadata(value string) BatchDatasetAttr
BatchDatasetMetadata sets the optional metadata attribute to value. If not specified, defaults to ""
type BatchDatasetV2Attr ¶
type BatchDatasetV2Attr func(optionalAttr)
BatchDatasetV2Attr is an optional argument to BatchDatasetV2.
func BatchDatasetV2Metadata ¶
func BatchDatasetV2Metadata(value string) BatchDatasetV2Attr
BatchDatasetV2Metadata sets the optional metadata attribute to value. If not specified, defaults to ""
func BatchDatasetV2ParallelCopy ¶
func BatchDatasetV2ParallelCopy(value bool) BatchDatasetV2Attr
BatchDatasetV2ParallelCopy sets the optional parallel_copy attribute to value. If not specified, defaults to false
type BatchMatMulAttr ¶
type BatchMatMulAttr func(optionalAttr)
BatchMatMulAttr is an optional argument to BatchMatMul.
func BatchMatMulAdjX ¶
func BatchMatMulAdjX(value bool) BatchMatMulAttr
BatchMatMulAdjX sets the optional adj_x attribute to value.
value: If `True`, adjoint the slices of `x`. Defaults to `False`. If not specified, defaults to false
func BatchMatMulAdjY ¶
func BatchMatMulAdjY(value bool) BatchMatMulAttr
BatchMatMulAdjY sets the optional adj_y attribute to value.
value: If `True`, adjoint the slices of `y`. Defaults to `False`. If not specified, defaults to false
func BatchMatMulGradX ¶ added in v0.8.0
func BatchMatMulGradX(value bool) BatchMatMulAttr
BatchMatMulGradX sets the optional grad_x attribute to value. If not specified, defaults to false
func BatchMatMulGradY ¶ added in v0.8.0
func BatchMatMulGradY(value bool) BatchMatMulAttr
BatchMatMulGradY sets the optional grad_y attribute to value. If not specified, defaults to false
type BatchMatMulV2Attr ¶
type BatchMatMulV2Attr func(optionalAttr)
BatchMatMulV2Attr is an optional argument to BatchMatMulV2.
func BatchMatMulV2AdjX ¶
func BatchMatMulV2AdjX(value bool) BatchMatMulV2Attr
BatchMatMulV2AdjX sets the optional adj_x attribute to value.
value: If `True`, adjoint the slices of `x`. Defaults to `False`. If not specified, defaults to false
func BatchMatMulV2AdjY ¶
func BatchMatMulV2AdjY(value bool) BatchMatMulV2Attr
BatchMatMulV2AdjY sets the optional adj_y attribute to value.
value: If `True`, adjoint the slices of `y`. Defaults to `False`. If not specified, defaults to false
func BatchMatMulV2GradX ¶ added in v0.8.0
func BatchMatMulV2GradX(value bool) BatchMatMulV2Attr
BatchMatMulV2GradX sets the optional grad_x attribute to value. If not specified, defaults to false
func BatchMatMulV2GradY ¶ added in v0.8.0
func BatchMatMulV2GradY(value bool) BatchMatMulV2Attr
BatchMatMulV2GradY sets the optional grad_y attribute to value. If not specified, defaults to false
type BatchMatMulV3Attr ¶
type BatchMatMulV3Attr func(optionalAttr)
BatchMatMulV3Attr is an optional argument to BatchMatMulV3.
func BatchMatMulV3AdjX ¶
func BatchMatMulV3AdjX(value bool) BatchMatMulV3Attr
BatchMatMulV3AdjX sets the optional adj_x attribute to value.
value: If `True`, adjoint the slices of `x`. Defaults to `False`. If not specified, defaults to false
func BatchMatMulV3AdjY ¶
func BatchMatMulV3AdjY(value bool) BatchMatMulV3Attr
BatchMatMulV3AdjY sets the optional adj_y attribute to value.
value: If `True`, adjoint the slices of `y`. Defaults to `False`. If not specified, defaults to false
func BatchMatMulV3GradX ¶ added in v0.8.0
func BatchMatMulV3GradX(value bool) BatchMatMulV3Attr
BatchMatMulV3GradX sets the optional grad_x attribute to value. If not specified, defaults to false
func BatchMatMulV3GradY ¶ added in v0.8.0
func BatchMatMulV3GradY(value bool) BatchMatMulV3Attr
BatchMatMulV3GradY sets the optional grad_y attribute to value. If not specified, defaults to false
type BiasAddAttr ¶
type BiasAddAttr func(optionalAttr)
BiasAddAttr is an optional argument to BiasAdd.
func BiasAddDataFormat ¶
func BiasAddDataFormat(value string) BiasAddAttr
BiasAddDataFormat sets the optional data_format attribute to value.
value: Specify the data format of the input and output data. With the default format "NHWC", the bias tensor will be added to the last dimension of the value tensor. Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
The tensor will be added to "in_channels", the third-to-the-last
dimension.
If not specified, defaults to "NHWC"
type BiasAddGradAttr ¶
type BiasAddGradAttr func(optionalAttr)
BiasAddGradAttr is an optional argument to BiasAddGrad.
func BiasAddGradDataFormat ¶
func BiasAddGradDataFormat(value string) BiasAddGradAttr
BiasAddGradDataFormat sets the optional data_format attribute to value.
value: Specify the data format of the input and output data. With the default format "NHWC", the bias tensor will be added to the last dimension of the value tensor. Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
The tensor will be added to "in_channels", the third-to-the-last
dimension.
If not specified, defaults to "NHWC"
type BlockLSTMAttr ¶
type BlockLSTMAttr func(optionalAttr)
BlockLSTMAttr is an optional argument to BlockLSTM.
func BlockLSTMCellClip ¶
func BlockLSTMCellClip(value float32) BlockLSTMAttr
BlockLSTMCellClip sets the optional cell_clip attribute to value.
value: Value to clip the 'cs' value to. If not specified, defaults to 3
func BlockLSTMForgetBias ¶
func BlockLSTMForgetBias(value float32) BlockLSTMAttr
BlockLSTMForgetBias sets the optional forget_bias attribute to value.
value: The forget gate bias. If not specified, defaults to 1
func BlockLSTMUsePeephole ¶
func BlockLSTMUsePeephole(value bool) BlockLSTMAttr
BlockLSTMUsePeephole sets the optional use_peephole attribute to value.
value: Whether to use peephole weights. If not specified, defaults to false
type BlockLSTMV2Attr ¶
type BlockLSTMV2Attr func(optionalAttr)
BlockLSTMV2Attr is an optional argument to BlockLSTMV2.
func BlockLSTMV2CellClip ¶
func BlockLSTMV2CellClip(value float32) BlockLSTMV2Attr
BlockLSTMV2CellClip sets the optional cell_clip attribute to value.
value: Value to clip the 'cs' value to. If not specified, defaults to 0
func BlockLSTMV2UsePeephole ¶
func BlockLSTMV2UsePeephole(value bool) BlockLSTMV2Attr
BlockLSTMV2UsePeephole sets the optional use_peephole attribute to value.
value: Whether to use peephole weights. If not specified, defaults to false
type BoostedTreesCalculateBestFeatureSplitAttr ¶
type BoostedTreesCalculateBestFeatureSplitAttr func(optionalAttr)
BoostedTreesCalculateBestFeatureSplitAttr is an optional argument to BoostedTreesCalculateBestFeatureSplit.
func BoostedTreesCalculateBestFeatureSplitSplitType ¶
func BoostedTreesCalculateBestFeatureSplitSplitType(value string) BoostedTreesCalculateBestFeatureSplitAttr
BoostedTreesCalculateBestFeatureSplitSplitType sets the optional split_type attribute to value.
value: A string indicating if this Op should perform inequality split or equality split. If not specified, defaults to "inequality"
type BoostedTreesCreateQuantileStreamResourceAttr ¶
type BoostedTreesCreateQuantileStreamResourceAttr func(optionalAttr)
BoostedTreesCreateQuantileStreamResourceAttr is an optional argument to BoostedTreesCreateQuantileStreamResource.
func BoostedTreesCreateQuantileStreamResourceMaxElements ¶
func BoostedTreesCreateQuantileStreamResourceMaxElements(value int64) BoostedTreesCreateQuantileStreamResourceAttr
BoostedTreesCreateQuantileStreamResourceMaxElements sets the optional max_elements attribute to value.
value: int; The maximum number of data points that can be fed to the stream. If not specified, defaults to 1099511627776
type BoostedTreesEnsembleResourceHandleOpAttr ¶
type BoostedTreesEnsembleResourceHandleOpAttr func(optionalAttr)
BoostedTreesEnsembleResourceHandleOpAttr is an optional argument to BoostedTreesEnsembleResourceHandleOp.
func BoostedTreesEnsembleResourceHandleOpContainer ¶
func BoostedTreesEnsembleResourceHandleOpContainer(value string) BoostedTreesEnsembleResourceHandleOpAttr
BoostedTreesEnsembleResourceHandleOpContainer sets the optional container attribute to value. If not specified, defaults to ""
func BoostedTreesEnsembleResourceHandleOpSharedName ¶
func BoostedTreesEnsembleResourceHandleOpSharedName(value string) BoostedTreesEnsembleResourceHandleOpAttr
BoostedTreesEnsembleResourceHandleOpSharedName sets the optional shared_name attribute to value. If not specified, defaults to ""
type BoostedTreesQuantileStreamResourceFlushAttr ¶
type BoostedTreesQuantileStreamResourceFlushAttr func(optionalAttr)
BoostedTreesQuantileStreamResourceFlushAttr is an optional argument to BoostedTreesQuantileStreamResourceFlush.
func BoostedTreesQuantileStreamResourceFlushGenerateQuantiles ¶
func BoostedTreesQuantileStreamResourceFlushGenerateQuantiles(value bool) BoostedTreesQuantileStreamResourceFlushAttr
BoostedTreesQuantileStreamResourceFlushGenerateQuantiles sets the optional generate_quantiles attribute to value.
value: bool; If True, the output will be the num_quantiles for each stream where the ith entry is the ith quantile of the input with an approximation error of epsilon. Duplicate values may be present. If False, the output will be the points in the histogram that we got which roughly translates to 1/epsilon boundaries and without any duplicates. Default to False. If not specified, defaults to false
type BoostedTreesQuantileStreamResourceHandleOpAttr ¶
type BoostedTreesQuantileStreamResourceHandleOpAttr func(optionalAttr)
BoostedTreesQuantileStreamResourceHandleOpAttr is an optional argument to BoostedTreesQuantileStreamResourceHandleOp.
func BoostedTreesQuantileStreamResourceHandleOpContainer ¶
func BoostedTreesQuantileStreamResourceHandleOpContainer(value string) BoostedTreesQuantileStreamResourceHandleOpAttr
BoostedTreesQuantileStreamResourceHandleOpContainer sets the optional container attribute to value. If not specified, defaults to ""
func BoostedTreesQuantileStreamResourceHandleOpSharedName ¶
func BoostedTreesQuantileStreamResourceHandleOpSharedName(value string) BoostedTreesQuantileStreamResourceHandleOpAttr
BoostedTreesQuantileStreamResourceHandleOpSharedName sets the optional shared_name attribute to value. If not specified, defaults to ""
type BoostedTreesSparseCalculateBestFeatureSplitAttr ¶
type BoostedTreesSparseCalculateBestFeatureSplitAttr func(optionalAttr)
BoostedTreesSparseCalculateBestFeatureSplitAttr is an optional argument to BoostedTreesSparseCalculateBestFeatureSplit.
func BoostedTreesSparseCalculateBestFeatureSplitSplitType ¶
func BoostedTreesSparseCalculateBestFeatureSplitSplitType(value string) BoostedTreesSparseCalculateBestFeatureSplitAttr
BoostedTreesSparseCalculateBestFeatureSplitSplitType sets the optional split_type attribute to value.
value: A string indicating if this Op should perform inequality split or equality split. If not specified, defaults to "inequality"
type BoostedTreesUpdateEnsembleV2Attr ¶
type BoostedTreesUpdateEnsembleV2Attr func(optionalAttr)
BoostedTreesUpdateEnsembleV2Attr is an optional argument to BoostedTreesUpdateEnsembleV2.
func BoostedTreesUpdateEnsembleV2LogitsDimension ¶
func BoostedTreesUpdateEnsembleV2LogitsDimension(value int64) BoostedTreesUpdateEnsembleV2Attr
BoostedTreesUpdateEnsembleV2LogitsDimension sets the optional logits_dimension attribute to value.
value: scalar, dimension of the logits. If not specified, defaults to 1
type CTCBeamSearchDecoderAttr ¶
type CTCBeamSearchDecoderAttr func(optionalAttr)
CTCBeamSearchDecoderAttr is an optional argument to CTCBeamSearchDecoder.
func CTCBeamSearchDecoderMergeRepeated ¶
func CTCBeamSearchDecoderMergeRepeated(value bool) CTCBeamSearchDecoderAttr
CTCBeamSearchDecoderMergeRepeated sets the optional merge_repeated attribute to value.
value: If true, merge repeated classes in output. If not specified, defaults to true
type CTCGreedyDecoderAttr ¶
type CTCGreedyDecoderAttr func(optionalAttr)
CTCGreedyDecoderAttr is an optional argument to CTCGreedyDecoder.
func CTCGreedyDecoderBlankIndex ¶
func CTCGreedyDecoderBlankIndex(value int64) CTCGreedyDecoderAttr
CTCGreedyDecoderBlankIndex sets the optional blank_index attribute to value. If not specified, defaults to -1
func CTCGreedyDecoderMergeRepeated ¶
func CTCGreedyDecoderMergeRepeated(value bool) CTCGreedyDecoderAttr
CTCGreedyDecoderMergeRepeated sets the optional merge_repeated attribute to value.
value: If True, merge repeated classes in output. If not specified, defaults to false
type CTCLossAttr ¶
type CTCLossAttr func(optionalAttr)
CTCLossAttr is an optional argument to CTCLoss.
func CTCLossCtcMergeRepeated ¶
func CTCLossCtcMergeRepeated(value bool) CTCLossAttr
CTCLossCtcMergeRepeated sets the optional ctc_merge_repeated attribute to value.
value: Scalar. If set to false, *during* CTC calculation repeated non-blank labels will not be merged and are interpreted as individual labels. This is a simplified version of CTC. If not specified, defaults to true
func CTCLossIgnoreLongerOutputsThanInputs ¶
func CTCLossIgnoreLongerOutputsThanInputs(value bool) CTCLossAttr
CTCLossIgnoreLongerOutputsThanInputs sets the optional ignore_longer_outputs_than_inputs attribute to value.
value: Scalar. If set to true, during CTC calculation, items that have longer output sequences than input sequences are skipped: they don't contribute to the loss term and have zero-gradient. If not specified, defaults to false
func CTCLossPreprocessCollapseRepeated ¶
func CTCLossPreprocessCollapseRepeated(value bool) CTCLossAttr
CTCLossPreprocessCollapseRepeated sets the optional preprocess_collapse_repeated attribute to value.
value: Scalar, if true then repeated labels are collapsed prior to the CTC calculation. If not specified, defaults to false
type CTCLossV2Attr ¶
type CTCLossV2Attr func(optionalAttr)
CTCLossV2Attr is an optional argument to CTCLossV2.
func CTCLossV2CtcMergeRepeated ¶
func CTCLossV2CtcMergeRepeated(value bool) CTCLossV2Attr
CTCLossV2CtcMergeRepeated sets the optional ctc_merge_repeated attribute to value.
value: Scalar. If set to false, *during* CTC calculation repeated non-blank labels will not be merged and are interpreted as individual labels. This is a simplified version of CTC. If not specified, defaults to true
func CTCLossV2IgnoreLongerOutputsThanInputs ¶
func CTCLossV2IgnoreLongerOutputsThanInputs(value bool) CTCLossV2Attr
CTCLossV2IgnoreLongerOutputsThanInputs sets the optional ignore_longer_outputs_than_inputs attribute to value.
value: Scalar. If set to true, during CTC calculation, items that have longer output sequences than input sequences are skipped: they don't contribute to the loss term and have zero-gradient. If not specified, defaults to false
func CTCLossV2PreprocessCollapseRepeated ¶
func CTCLossV2PreprocessCollapseRepeated(value bool) CTCLossV2Attr
CTCLossV2PreprocessCollapseRepeated sets the optional preprocess_collapse_repeated attribute to value.
value: Scalar, if true then repeated labels are collapsed prior to the CTC calculation. If not specified, defaults to false
type CacheDatasetAttr ¶
type CacheDatasetAttr func(optionalAttr)
CacheDatasetAttr is an optional argument to CacheDataset.
func CacheDatasetMetadata ¶
func CacheDatasetMetadata(value string) CacheDatasetAttr
CacheDatasetMetadata sets the optional metadata attribute to value. If not specified, defaults to ""
type CastAttr ¶
type CastAttr func(optionalAttr)
CastAttr is an optional argument to Cast.
func CastTruncate ¶
CastTruncate sets the optional Truncate attribute to value. If not specified, defaults to false
type CollectiveAllToAllV2Attr ¶ added in v0.5.0
type CollectiveAllToAllV2Attr func(optionalAttr)
CollectiveAllToAllV2Attr is an optional argument to CollectiveAllToAllV2.
func CollectiveAllToAllV2CommunicationHint ¶ added in v0.5.0
func CollectiveAllToAllV2CommunicationHint(value string) CollectiveAllToAllV2Attr
CollectiveAllToAllV2CommunicationHint sets the optional communication_hint attribute to value. If not specified, defaults to "auto"
func CollectiveAllToAllV2IsStateless ¶ added in v0.7.0
func CollectiveAllToAllV2IsStateless(value bool) CollectiveAllToAllV2Attr
CollectiveAllToAllV2IsStateless sets the optional is_stateless attribute to value. If not specified, defaults to false
func CollectiveAllToAllV2TimeoutSeconds ¶ added in v0.5.0
func CollectiveAllToAllV2TimeoutSeconds(value float32) CollectiveAllToAllV2Attr
CollectiveAllToAllV2TimeoutSeconds sets the optional timeout_seconds attribute to value. If not specified, defaults to 0
type CollectiveAllToAllV3Attr ¶
type CollectiveAllToAllV3Attr func(optionalAttr)
CollectiveAllToAllV3Attr is an optional argument to CollectiveAllToAllV3.
func CollectiveAllToAllV3TimeoutSeconds ¶
func CollectiveAllToAllV3TimeoutSeconds(value float32) CollectiveAllToAllV3Attr
CollectiveAllToAllV3TimeoutSeconds sets the optional timeout_seconds attribute to value. If not specified, defaults to 0
type CollectiveBcastRecvAttr ¶
type CollectiveBcastRecvAttr func(optionalAttr)
CollectiveBcastRecvAttr is an optional argument to CollectiveBcastRecv.
func CollectiveBcastRecvCommunicationHint ¶
func CollectiveBcastRecvCommunicationHint(value string) CollectiveBcastRecvAttr
CollectiveBcastRecvCommunicationHint sets the optional communication_hint attribute to value. If not specified, defaults to "auto"
func CollectiveBcastRecvTimeoutSeconds ¶
func CollectiveBcastRecvTimeoutSeconds(value float32) CollectiveBcastRecvAttr
CollectiveBcastRecvTimeoutSeconds sets the optional timeout_seconds attribute to value. If not specified, defaults to 0
type CollectiveBcastRecvV2Attr ¶
type CollectiveBcastRecvV2Attr func(optionalAttr)
CollectiveBcastRecvV2Attr is an optional argument to CollectiveBcastRecvV2.
func CollectiveBcastRecvV2CommunicationHint ¶
func CollectiveBcastRecvV2CommunicationHint(value string) CollectiveBcastRecvV2Attr
CollectiveBcastRecvV2CommunicationHint sets the optional communication_hint attribute to value. If not specified, defaults to "auto"
func CollectiveBcastRecvV2TimeoutSeconds ¶
func CollectiveBcastRecvV2TimeoutSeconds(value float32) CollectiveBcastRecvV2Attr
CollectiveBcastRecvV2TimeoutSeconds sets the optional timeout_seconds attribute to value. If not specified, defaults to 0
type CollectiveBcastSendAttr ¶
type CollectiveBcastSendAttr func(optionalAttr)
CollectiveBcastSendAttr is an optional argument to CollectiveBcastSend.
func CollectiveBcastSendCommunicationHint ¶
func CollectiveBcastSendCommunicationHint(value string) CollectiveBcastSendAttr
CollectiveBcastSendCommunicationHint sets the optional communication_hint attribute to value. If not specified, defaults to "auto"
func CollectiveBcastSendTimeoutSeconds ¶
func CollectiveBcastSendTimeoutSeconds(value float32) CollectiveBcastSendAttr
CollectiveBcastSendTimeoutSeconds sets the optional timeout_seconds attribute to value. If not specified, defaults to 0
type CollectiveBcastSendV2Attr ¶
type CollectiveBcastSendV2Attr func(optionalAttr)
CollectiveBcastSendV2Attr is an optional argument to CollectiveBcastSendV2.
func CollectiveBcastSendV2CommunicationHint ¶
func CollectiveBcastSendV2CommunicationHint(value string) CollectiveBcastSendV2Attr
CollectiveBcastSendV2CommunicationHint sets the optional communication_hint attribute to value. If not specified, defaults to "auto"
func CollectiveBcastSendV2TimeoutSeconds ¶
func CollectiveBcastSendV2TimeoutSeconds(value float32) CollectiveBcastSendV2Attr
CollectiveBcastSendV2TimeoutSeconds sets the optional timeout_seconds attribute to value. If not specified, defaults to 0
type CollectiveGatherAttr ¶
type CollectiveGatherAttr func(optionalAttr)
CollectiveGatherAttr is an optional argument to CollectiveGather.
func CollectiveGatherCommunicationHint ¶
func CollectiveGatherCommunicationHint(value string) CollectiveGatherAttr
CollectiveGatherCommunicationHint sets the optional communication_hint attribute to value. If not specified, defaults to "auto"
func CollectiveGatherTimeoutSeconds ¶
func CollectiveGatherTimeoutSeconds(value float32) CollectiveGatherAttr
CollectiveGatherTimeoutSeconds sets the optional timeout_seconds attribute to value. If not specified, defaults to 0
type CollectiveGatherV2Attr ¶
type CollectiveGatherV2Attr func(optionalAttr)
CollectiveGatherV2Attr is an optional argument to CollectiveGatherV2.
func CollectiveGatherV2CommunicationHint ¶
func CollectiveGatherV2CommunicationHint(value string) CollectiveGatherV2Attr
CollectiveGatherV2CommunicationHint sets the optional communication_hint attribute to value. If not specified, defaults to "auto"
func CollectiveGatherV2IsStateless ¶ added in v0.7.0
func CollectiveGatherV2IsStateless(value bool) CollectiveGatherV2Attr
CollectiveGatherV2IsStateless sets the optional is_stateless attribute to value. If not specified, defaults to false
func CollectiveGatherV2TimeoutSeconds ¶
func CollectiveGatherV2TimeoutSeconds(value float32) CollectiveGatherV2Attr
CollectiveGatherV2TimeoutSeconds sets the optional timeout_seconds attribute to value. If not specified, defaults to 0
type CollectiveInitializeCommunicatorAttr ¶
type CollectiveInitializeCommunicatorAttr func(optionalAttr)
CollectiveInitializeCommunicatorAttr is an optional argument to CollectiveInitializeCommunicator.
func CollectiveInitializeCommunicatorCommunicationHint ¶
func CollectiveInitializeCommunicatorCommunicationHint(value string) CollectiveInitializeCommunicatorAttr
CollectiveInitializeCommunicatorCommunicationHint sets the optional communication_hint attribute to value. If not specified, defaults to "auto"
func CollectiveInitializeCommunicatorTimeoutSeconds ¶
func CollectiveInitializeCommunicatorTimeoutSeconds(value float32) CollectiveInitializeCommunicatorAttr
CollectiveInitializeCommunicatorTimeoutSeconds sets the optional timeout_seconds attribute to value. If not specified, defaults to 0
type CollectiveReduceAttr ¶
type CollectiveReduceAttr func(optionalAttr)
CollectiveReduceAttr is an optional argument to CollectiveReduce.
func CollectiveReduceCommunicationHint ¶
func CollectiveReduceCommunicationHint(value string) CollectiveReduceAttr
CollectiveReduceCommunicationHint sets the optional communication_hint attribute to value. If not specified, defaults to "auto"
func CollectiveReduceTimeoutSeconds ¶
func CollectiveReduceTimeoutSeconds(value float32) CollectiveReduceAttr
CollectiveReduceTimeoutSeconds sets the optional timeout_seconds attribute to value. If not specified, defaults to 0
func CollectiveReduceWaitFor ¶
func CollectiveReduceWaitFor(value []int64) CollectiveReduceAttr
CollectiveReduceWaitFor sets the optional wait_for attribute to value. If not specified, defaults to {}
type CollectiveReduceScatterV2Attr ¶ added in v0.4.0
type CollectiveReduceScatterV2Attr func(optionalAttr)
CollectiveReduceScatterV2Attr is an optional argument to CollectiveReduceScatterV2.
func CollectiveReduceScatterV2CommunicationHint ¶ added in v0.4.0
func CollectiveReduceScatterV2CommunicationHint(value string) CollectiveReduceScatterV2Attr
CollectiveReduceScatterV2CommunicationHint sets the optional communication_hint attribute to value. If not specified, defaults to "auto"
func CollectiveReduceScatterV2IsStateless ¶ added in v0.7.0
func CollectiveReduceScatterV2IsStateless(value bool) CollectiveReduceScatterV2Attr
CollectiveReduceScatterV2IsStateless sets the optional is_stateless attribute to value. If not specified, defaults to false
func CollectiveReduceScatterV2MaxSubdivsPerDevice ¶ added in v0.4.0
func CollectiveReduceScatterV2MaxSubdivsPerDevice(value int64) CollectiveReduceScatterV2Attr
CollectiveReduceScatterV2MaxSubdivsPerDevice sets the optional max_subdivs_per_device attribute to value. If not specified, defaults to -1
func CollectiveReduceScatterV2TimeoutSeconds ¶ added in v0.4.0
func CollectiveReduceScatterV2TimeoutSeconds(value float32) CollectiveReduceScatterV2Attr
CollectiveReduceScatterV2TimeoutSeconds sets the optional timeout_seconds attribute to value. If not specified, defaults to 0
type CollectiveReduceV2Attr ¶
type CollectiveReduceV2Attr func(optionalAttr)
CollectiveReduceV2Attr is an optional argument to CollectiveReduceV2.
func CollectiveReduceV2CommunicationHint ¶
func CollectiveReduceV2CommunicationHint(value string) CollectiveReduceV2Attr
CollectiveReduceV2CommunicationHint sets the optional communication_hint attribute to value. If not specified, defaults to "auto"
func CollectiveReduceV2IsStateless ¶ added in v0.7.0
func CollectiveReduceV2IsStateless(value bool) CollectiveReduceV2Attr
CollectiveReduceV2IsStateless sets the optional is_stateless attribute to value. If not specified, defaults to false
func CollectiveReduceV2MaxSubdivsPerDevice ¶
func CollectiveReduceV2MaxSubdivsPerDevice(value int64) CollectiveReduceV2Attr
CollectiveReduceV2MaxSubdivsPerDevice sets the optional max_subdivs_per_device attribute to value. If not specified, defaults to -1
func CollectiveReduceV2TimeoutSeconds ¶
func CollectiveReduceV2TimeoutSeconds(value float32) CollectiveReduceV2Attr
CollectiveReduceV2TimeoutSeconds sets the optional timeout_seconds attribute to value. If not specified, defaults to 0
type CollectiveReduceV3Attr ¶
type CollectiveReduceV3Attr func(optionalAttr)
CollectiveReduceV3Attr is an optional argument to CollectiveReduceV3.
func CollectiveReduceV3TimeoutSeconds ¶
func CollectiveReduceV3TimeoutSeconds(value float32) CollectiveReduceV3Attr
CollectiveReduceV3TimeoutSeconds sets the optional timeout_seconds attribute to value. If not specified, defaults to 0
type CombinedNonMaxSuppressionAttr ¶
type CombinedNonMaxSuppressionAttr func(optionalAttr)
CombinedNonMaxSuppressionAttr is an optional argument to CombinedNonMaxSuppression.
func CombinedNonMaxSuppressionClipBoxes ¶
func CombinedNonMaxSuppressionClipBoxes(value bool) CombinedNonMaxSuppressionAttr
CombinedNonMaxSuppressionClipBoxes sets the optional clip_boxes attribute to value.
value: If true, assume the box coordinates are between [0, 1] and clip the output boxes if they fall beyond [0, 1]. If false, do not clip and output the box coordinates as they are. If not specified, defaults to true
func CombinedNonMaxSuppressionPadPerClass ¶
func CombinedNonMaxSuppressionPadPerClass(value bool) CombinedNonMaxSuppressionAttr
CombinedNonMaxSuppressionPadPerClass sets the optional pad_per_class attribute to value.
value: If false, the output nmsed boxes, scores and classes are padded/clipped to `max_total_size`. If true, the output nmsed boxes, scores and classes are padded to be of length `max_size_per_class`*`num_classes`, unless it exceeds `max_total_size`, in which case it is clipped to `max_total_size`. If not specified, defaults to false
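A hedged sketch of how these options might be passed to CombinedNonMaxSuppression, assuming its usual (scope, boxes, scores, max_output_size_per_class, max_total_size, iou_threshold, score_threshold, optional attrs) argument order; the placeholder shapes noted in the comments are illustrative:
    s := NewScope()
    boxes := Placeholder(s, tf.Float)  // [batch, num_boxes, q, 4]
    scores := Placeholder(s, tf.Float) // [batch, num_boxes, num_classes]
    maxPerClass := Const(s, int32(100))
    maxTotal := Const(s, int32(300))
    iouThreshold := Const(s, float32(0.5))
    scoreThreshold := Const(s, float32(0.3))
    nmsedBoxes, nmsedScores, nmsedClasses, validDetections := CombinedNonMaxSuppression(
        s, boxes, scores, maxPerClass, maxTotal, iouThreshold, scoreThreshold,
        CombinedNonMaxSuppressionPadPerClass(true),
        CombinedNonMaxSuppressionClipBoxes(false),
    )
    _, _, _, _ = nmsedBoxes, nmsedScores, nmsedClasses, validDetections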
type ComplexAbsAttr ¶
type ComplexAbsAttr func(optionalAttr)
ComplexAbsAttr is an optional argument to ComplexAbs.
func ComplexAbsTout ¶
func ComplexAbsTout(value tf.DataType) ComplexAbsAttr
ComplexAbsTout sets the optional Tout attribute to value. If not specified, defaults to DT_FLOAT
type ComplexAttr ¶
type ComplexAttr func(optionalAttr)
ComplexAttr is an optional argument to Complex.
func ComplexTout ¶
func ComplexTout(value tf.DataType) ComplexAttr
ComplexTout sets the optional Tout attribute to value. If not specified, defaults to DT_COMPLEX64
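Tout selects the output dtype in both cases. A minimal sketch pairing Complex with ComplexAbs, assuming the tf.Complex128 and tf.Double DataType constants from the tensorflow Go package:
    s := NewScope()
    re := Const(s, []float64{1, 2})
    im := Const(s, []float64{3, 4})
    // float64 inputs require a complex128 output dtype.
    c := Complex(s, re, im, ComplexTout(tf.Complex128))
    // Magnitude of a complex128 tensor, returned as float64.
    mag := ComplexAbs(s, c, ComplexAbsTout(tf.Double))
    _ = mag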
type ComputeAccidentalHitsAttr ¶
type ComputeAccidentalHitsAttr func(optionalAttr)
ComputeAccidentalHitsAttr is an optional argument to ComputeAccidentalHits.
func ComputeAccidentalHitsSeed ¶
func ComputeAccidentalHitsSeed(value int64) ComputeAccidentalHitsAttr
ComputeAccidentalHitsSeed sets the optional seed attribute to value.
value: If either seed or seed2 is set to be non-zero, the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed. If not specified, defaults to 0
func ComputeAccidentalHitsSeed2 ¶
func ComputeAccidentalHitsSeed2(value int64) ComputeAccidentalHitsAttr
ComputeAccidentalHitsSeed2 sets the optional seed2 attribute to value.
value: A second seed to avoid seed collision. If not specified, defaults to 0
type ConcatenateDatasetAttr ¶
type ConcatenateDatasetAttr func(optionalAttr)
ConcatenateDatasetAttr is an optional argument to ConcatenateDataset.
func ConcatenateDatasetMetadata ¶
func ConcatenateDatasetMetadata(value string) ConcatenateDatasetAttr
ConcatenateDatasetMetadata sets the optional metadata attribute to value. If not specified, defaults to ""
type ConfigureAndInitializeGlobalTPUAttr ¶ added in v0.3.0
type ConfigureAndInitializeGlobalTPUAttr func(optionalAttr)
ConfigureAndInitializeGlobalTPUAttr is an optional argument to ConfigureAndInitializeGlobalTPU.
func ConfigureAndInitializeGlobalTPUUseTfrtHostRuntime ¶ added in v0.3.0
func ConfigureAndInitializeGlobalTPUUseTfrtHostRuntime(value bool) ConfigureAndInitializeGlobalTPUAttr
ConfigureAndInitializeGlobalTPUUseTfrtHostRuntime sets the optional use_tfrt_host_runtime attribute to value. If not specified, defaults to true
type ConfigureDistributedTPUAttr ¶
type ConfigureDistributedTPUAttr func(optionalAttr)
ConfigureDistributedTPUAttr is an optional argument to ConfigureDistributedTPU.
func ConfigureDistributedTPUCompilationFailureClosesChips ¶
func ConfigureDistributedTPUCompilationFailureClosesChips(value bool) ConfigureDistributedTPUAttr
ConfigureDistributedTPUCompilationFailureClosesChips sets the optional compilation_failure_closes_chips attribute to value. If not specified, defaults to true
func ConfigureDistributedTPUEmbeddingConfig ¶
func ConfigureDistributedTPUEmbeddingConfig(value string) ConfigureDistributedTPUAttr
ConfigureDistributedTPUEmbeddingConfig sets the optional embedding_config attribute to value.
value: Reserved. Do not use. If not specified, defaults to ""
func ConfigureDistributedTPUEnableWholeMeshCompilations ¶
func ConfigureDistributedTPUEnableWholeMeshCompilations(value bool) ConfigureDistributedTPUAttr
ConfigureDistributedTPUEnableWholeMeshCompilations sets the optional enable_whole_mesh_compilations attribute to value. If not specified, defaults to false
func ConfigureDistributedTPUIsGlobalInit ¶
func ConfigureDistributedTPUIsGlobalInit(value bool) ConfigureDistributedTPUAttr
ConfigureDistributedTPUIsGlobalInit sets the optional is_global_init attribute to value.
value: Reserved. Do not use. If not specified, defaults to false
func ConfigureDistributedTPUTpuCancellationClosesChips ¶
func ConfigureDistributedTPUTpuCancellationClosesChips(value int64) ConfigureDistributedTPUAttr
ConfigureDistributedTPUTpuCancellationClosesChips sets the optional tpu_cancellation_closes_chips attribute to value. If not specified, defaults to 0
func ConfigureDistributedTPUTpuEmbeddingConfig ¶
func ConfigureDistributedTPUTpuEmbeddingConfig(value string) ConfigureDistributedTPUAttr
ConfigureDistributedTPUTpuEmbeddingConfig sets the optional tpu_embedding_config attribute to value.
value: Serialized tensorflow.tpu.TPUEmbeddingConfiguration that describes the embedding lookups of the program. If not specified, defaults to ""
type Conv2DAttr ¶
type Conv2DAttr func(optionalAttr)
Conv2DAttr is an optional argument to Conv2D.
func Conv2DDataFormat ¶
func Conv2DDataFormat(value string) Conv2DAttr
Conv2DDataFormat sets the optional data_format attribute to value.
value: Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of:
[batch, height, width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
If not specified, defaults to "NHWC"
func Conv2DDilations ¶
func Conv2DDilations(value []int64) Conv2DAttr
Conv2DDilations sets the optional dilations attribute to value.
value: 1-D tensor of length 4. The dilation factor for each dimension of `input`. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions must be 1. If not specified, defaults to {i:1 i:1 i:1 i:1}
func Conv2DExplicitPaddings ¶
func Conv2DExplicitPaddings(value []int64) Conv2DAttr
Conv2DExplicitPaddings sets the optional explicit_paddings attribute to value.
value: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith dimension, the amount of padding inserted before and after the dimension is `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. If not specified, defaults to {}
func Conv2DUseCudnnOnGpu ¶
func Conv2DUseCudnnOnGpu(value bool) Conv2DAttr
Conv2DUseCudnnOnGpu sets the optional use_cudnn_on_gpu attribute to value. If not specified, defaults to true
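A sketch of Conv2D using several of these options, assuming its (scope, input, filter, strides, padding, optional attrs) argument order; the placeholder layouts in the comments are illustrative:
    s := NewScope()
    input := Placeholder(s, tf.Float)  // NCHW: [batch, channels, height, width]
    filter := Placeholder(s, tf.Float) // [filter_height, filter_width, in_channels, out_channels]
    output := Conv2D(s, input, filter,
        []int64{1, 1, 1, 1}, // strides
        "SAME",              // padding
        Conv2DDataFormat("NCHW"),
        Conv2DDilations([]int64{1, 1, 2, 2}), // dilate H and W; batch/depth stay 1
        Conv2DUseCudnnOnGpu(false),
    )
    if s.Err() != nil {
        panic(s.Err())
    }
    _ = output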
type Conv2DBackpropFilterAttr ¶
type Conv2DBackpropFilterAttr func(optionalAttr)
Conv2DBackpropFilterAttr is an optional argument to Conv2DBackpropFilter.
func Conv2DBackpropFilterDataFormat ¶
func Conv2DBackpropFilterDataFormat(value string) Conv2DBackpropFilterAttr
Conv2DBackpropFilterDataFormat sets the optional data_format attribute to value.
value: Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
If not specified, defaults to "NHWC"
func Conv2DBackpropFilterDilations ¶
func Conv2DBackpropFilterDilations(value []int64) Conv2DBackpropFilterAttr
Conv2DBackpropFilterDilations sets the optional dilations attribute to value.
value: 1-D tensor of length 4. The dilation factor for each dimension of `input`. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions must be 1. If not specified, defaults to {i:1 i:1 i:1 i:1}
func Conv2DBackpropFilterExplicitPaddings ¶
func Conv2DBackpropFilterExplicitPaddings(value []int64) Conv2DBackpropFilterAttr
Conv2DBackpropFilterExplicitPaddings sets the optional explicit_paddings attribute to value.
value: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith dimension, the amount of padding inserted before and after the dimension is `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. If not specified, defaults to {}
func Conv2DBackpropFilterUseCudnnOnGpu ¶
func Conv2DBackpropFilterUseCudnnOnGpu(value bool) Conv2DBackpropFilterAttr
Conv2DBackpropFilterUseCudnnOnGpu sets the optional use_cudnn_on_gpu attribute to value. If not specified, defaults to true
type Conv2DBackpropFilterV2Attr ¶ added in v0.4.0
type Conv2DBackpropFilterV2Attr func(optionalAttr)
Conv2DBackpropFilterV2Attr is an optional argument to Conv2DBackpropFilterV2.
func Conv2DBackpropFilterV2DataFormat ¶ added in v0.4.0
func Conv2DBackpropFilterV2DataFormat(value string) Conv2DBackpropFilterV2Attr
Conv2DBackpropFilterV2DataFormat sets the optional data_format attribute to value.
value: Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
If not specified, defaults to "NHWC"
func Conv2DBackpropFilterV2Dilations ¶ added in v0.4.0
func Conv2DBackpropFilterV2Dilations(value []int64) Conv2DBackpropFilterV2Attr
Conv2DBackpropFilterV2Dilations sets the optional dilations attribute to value.
value: 1-D tensor of length 4. The dilation factor for each dimension of `input`. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions must be 1. If not specified, defaults to {i:1 i:1 i:1 i:1}
func Conv2DBackpropFilterV2ExplicitPaddings ¶ added in v0.4.0
func Conv2DBackpropFilterV2ExplicitPaddings(value []int64) Conv2DBackpropFilterV2Attr
Conv2DBackpropFilterV2ExplicitPaddings sets the optional explicit_paddings attribute to value.
value: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith dimension, the amount of padding inserted before and after the dimension is `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. If not specified, defaults to {}
func Conv2DBackpropFilterV2UseCudnnOnGpu ¶ added in v0.4.0
func Conv2DBackpropFilterV2UseCudnnOnGpu(value bool) Conv2DBackpropFilterV2Attr
Conv2DBackpropFilterV2UseCudnnOnGpu sets the optional use_cudnn_on_gpu attribute to value. If not specified, defaults to true
type Conv2DBackpropInputAttr ¶
type Conv2DBackpropInputAttr func(optionalAttr)
Conv2DBackpropInputAttr is an optional argument to Conv2DBackpropInput.
func Conv2DBackpropInputDataFormat ¶
func Conv2DBackpropInputDataFormat(value string) Conv2DBackpropInputAttr
Conv2DBackpropInputDataFormat sets the optional data_format attribute to value.
value: Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
If not specified, defaults to "NHWC"
func Conv2DBackpropInputDilations ¶
func Conv2DBackpropInputDilations(value []int64) Conv2DBackpropInputAttr
Conv2DBackpropInputDilations sets the optional dilations attribute to value.
value: 1-D tensor of length 4. The dilation factor for each dimension of `input`. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions must be 1. If not specified, defaults to {i:1 i:1 i:1 i:1}
func Conv2DBackpropInputExplicitPaddings ¶
func Conv2DBackpropInputExplicitPaddings(value []int64) Conv2DBackpropInputAttr
Conv2DBackpropInputExplicitPaddings sets the optional explicit_paddings attribute to value.
value: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith dimension, the amount of padding inserted before and after the dimension is `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. If not specified, defaults to {}
func Conv2DBackpropInputUseCudnnOnGpu ¶
func Conv2DBackpropInputUseCudnnOnGpu(value bool) Conv2DBackpropInputAttr
Conv2DBackpropInputUseCudnnOnGpu sets the optional use_cudnn_on_gpu attribute to value. If not specified, defaults to true
type Conv2DBackpropInputV2Attr ¶ added in v0.4.0
type Conv2DBackpropInputV2Attr func(optionalAttr)
Conv2DBackpropInputV2Attr is an optional argument to Conv2DBackpropInputV2.
func Conv2DBackpropInputV2DataFormat ¶ added in v0.4.0
func Conv2DBackpropInputV2DataFormat(value string) Conv2DBackpropInputV2Attr
Conv2DBackpropInputV2DataFormat sets the optional data_format attribute to value.
value: Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
If not specified, defaults to "NHWC"
func Conv2DBackpropInputV2Dilations ¶ added in v0.4.0
func Conv2DBackpropInputV2Dilations(value []int64) Conv2DBackpropInputV2Attr
Conv2DBackpropInputV2Dilations sets the optional dilations attribute to value.
value: 1-D tensor of length 4. The dilation factor for each dimension of `input`. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions must be 1. If not specified, defaults to {i:1 i:1 i:1 i:1}
func Conv2DBackpropInputV2ExplicitPaddings ¶ added in v0.4.0
func Conv2DBackpropInputV2ExplicitPaddings(value []int64) Conv2DBackpropInputV2Attr
Conv2DBackpropInputV2ExplicitPaddings sets the optional explicit_paddings attribute to value.
value: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith dimension, the amount of padding inserted before and after the dimension is `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. If not specified, defaults to {}
func Conv2DBackpropInputV2UseCudnnOnGpu ¶ added in v0.4.0
func Conv2DBackpropInputV2UseCudnnOnGpu(value bool) Conv2DBackpropInputV2Attr
Conv2DBackpropInputV2UseCudnnOnGpu sets the optional use_cudnn_on_gpu attribute to value. If not specified, defaults to true
type Conv3DAttr ¶
type Conv3DAttr func(optionalAttr)
Conv3DAttr is an optional argument to Conv3D.
func Conv3DDataFormat ¶
func Conv3DDataFormat(value string) Conv3DAttr
Conv3DDataFormat sets the optional data_format attribute to value.
value: The data format of the input and output data. With the default format "NDHWC", the data is stored in the order of:
[batch, in_depth, in_height, in_width, in_channels].
Alternatively, the format could be "NCDHW", the data storage order is:
[batch, in_channels, in_depth, in_height, in_width].
If not specified, defaults to "NDHWC"
func Conv3DDilations ¶
func Conv3DDilations(value []int64) Conv3DAttr
Conv3DDilations sets the optional dilations attribute to value.
value: 1-D tensor of length 5. The dilation factor for each dimension of `input`. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions must be 1. If not specified, defaults to {i:1 i:1 i:1 i:1 i:1}
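The 3-D variant follows the same pattern with five-element strides and dilations. A sketch assuming Conv3D's (scope, input, filter, strides, padding, optional attrs) argument order:
    s := NewScope()
    volume := Placeholder(s, tf.Float) // NDHWC: [batch, depth, height, width, channels]
    filter := Placeholder(s, tf.Float) // [fd, fh, fw, in_channels, out_channels]
    output := Conv3D(s, volume, filter,
        []int64{1, 1, 1, 1, 1}, // strides
        "VALID",                // padding
        Conv3DDataFormat("NDHWC"),
        Conv3DDilations([]int64{1, 2, 2, 2, 1}), // dilate D, H and W
    )
    _ = output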
type Conv3DBackpropFilterAttr ¶
type Conv3DBackpropFilterAttr func(optionalAttr)
Conv3DBackpropFilterAttr is an optional argument to Conv3DBackpropFilter.
func Conv3DBackpropFilterDilations ¶
func Conv3DBackpropFilterDilations(value []int64) Conv3DBackpropFilterAttr
Conv3DBackpropFilterDilations sets the optional dilations attribute to value. If not specified, defaults to {i:1 i:1 i:1 i:1 i:1}
type Conv3DBackpropFilterV2Attr ¶
type Conv3DBackpropFilterV2Attr func(optionalAttr)
Conv3DBackpropFilterV2Attr is an optional argument to Conv3DBackpropFilterV2.
func Conv3DBackpropFilterV2DataFormat ¶
func Conv3DBackpropFilterV2DataFormat(value string) Conv3DBackpropFilterV2Attr
Conv3DBackpropFilterV2DataFormat sets the optional data_format attribute to value.
value: The data format of the input and output data. With the default format "NDHWC", the data is stored in the order of:
[batch, in_depth, in_height, in_width, in_channels].
Alternatively, the format could be "NCDHW", the data storage order is:
[batch, in_channels, in_depth, in_height, in_width].
If not specified, defaults to "NDHWC"
func Conv3DBackpropFilterV2Dilations ¶
func Conv3DBackpropFilterV2Dilations(value []int64) Conv3DBackpropFilterV2Attr
Conv3DBackpropFilterV2Dilations sets the optional dilations attribute to value.
value: 1-D tensor of length 5. The dilation factor for each dimension of `input`. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions must be 1. If not specified, defaults to {i:1 i:1 i:1 i:1 i:1}
type Conv3DBackpropInputAttr ¶
type Conv3DBackpropInputAttr func(optionalAttr)
Conv3DBackpropInputAttr is an optional argument to Conv3DBackpropInput.
func Conv3DBackpropInputDilations ¶
func Conv3DBackpropInputDilations(value []int64) Conv3DBackpropInputAttr
Conv3DBackpropInputDilations sets the optional dilations attribute to value. If not specified, defaults to {i:1 i:1 i:1 i:1 i:1}
type Conv3DBackpropInputV2Attr ¶
type Conv3DBackpropInputV2Attr func(optionalAttr)
Conv3DBackpropInputV2Attr is an optional argument to Conv3DBackpropInputV2.
func Conv3DBackpropInputV2DataFormat ¶
func Conv3DBackpropInputV2DataFormat(value string) Conv3DBackpropInputV2Attr
Conv3DBackpropInputV2DataFormat sets the optional data_format attribute to value.
value: The data format of the input and output data. With the default format "NDHWC", the data is stored in the order of:
[batch, in_depth, in_height, in_width, in_channels].
Alternatively, the format could be "NCDHW", the data storage order is:
[batch, in_channels, in_depth, in_height, in_width].
If not specified, defaults to "NDHWC"
func Conv3DBackpropInputV2Dilations ¶
func Conv3DBackpropInputV2Dilations(value []int64) Conv3DBackpropInputV2Attr
Conv3DBackpropInputV2Dilations sets the optional dilations attribute to value.
value: 1-D tensor of length 5. The dilation factor for each dimension of `input`. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions must be 1. If not specified, defaults to {i:1 i:1 i:1 i:1 i:1}
type ConvAttr ¶ added in v0.6.0
type ConvAttr func(optionalAttr)
ConvAttr is an optional argument to Conv.
func ConvBatchDims ¶ added in v0.6.0
func ConvBatchDims(value int64) ConvAttr
ConvBatchDims sets the optional batch_dims attribute to value.
value: A positive integer specifying the number of batch dimensions for the input tensor. Should be less than the rank of the input tensor. If not specified, defaults to 1
func ConvDataFormat ¶ added in v0.6.0
func ConvDataFormat(value string) ConvAttr
ConvDataFormat sets the optional data_format attribute to value.
value: Used to set the data format. With the default `CHANNELS_LAST`, the data is stored as `NHWC` (2D) / `NDHWC` (3D); with `CHANNELS_FIRST`, it is stored as `NCHW` (2D) / `NCDHW` (3D). If not specified, defaults to "CHANNELS_LAST"
func ConvDilations ¶ added in v0.6.0
func ConvDilations(value []int64) ConvAttr
ConvDilations sets the optional dilations attribute to value.
value: 1-D tensor of length `N+2`. The dilation factor for each dimension of `input`. If set to `k > 1`, there will be `k-1` skipped cells between each filter element on that dimension. The dimension order is determined by the value of `channels_last_format`, see above for details. Dilations in the batch and depth dimensions must be 1. If not specified, defaults to {}
func ConvExplicitPaddings ¶ added in v0.6.0
func ConvExplicitPaddings(value []int64) ConvAttr
ConvExplicitPaddings sets the optional explicit_paddings attribute to value.
value: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith dimension, the amount of padding inserted before and after the dimension is `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. If not specified, defaults to {}
func ConvGroups ¶ added in v0.6.0
func ConvGroups(value int64) ConvAttr
ConvGroups sets the optional groups attribute to value.
value: A positive integer specifying the number of groups in which the input is split along the channel axis. Each group is convolved separately with `filters / groups` filters. The output is the concatenation of all the groups results along the channel axis. Input channels and filters must both be divisible by groups. If not specified, defaults to 1
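These options compose the same way as the Conv2D/Conv3D ones. A sketch that only assembles the option list; the required Conv arguments are elided and the values are illustrative:
    // Grouped convolution in channels-first layout; pass opts... as the
    // trailing optional arguments of Conv.
    opts := []ConvAttr{
        ConvDataFormat("CHANNELS_FIRST"),
        ConvGroups(4),
        ConvDilations([]int64{1, 1, 2, 2}),
    }
    _ = opts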
type CopyAttr ¶
type CopyAttr func(optionalAttr)
CopyAttr is an optional argument to Copy.
func CopyDebugOpsSpec ¶
func CopyDebugOpsSpec(value []string) CopyAttr
CopyDebugOpsSpec sets the optional debug_ops_spec attribute to value.
value: A list of debug op specs (op, url, gated_grpc) for attached debug ops. Each element of the list has the format <debug_op>;<grpc_url>;<gated_grpc>, where gated_grpc is a boolean represented as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1", "DebugIdentity;file:///tmp/tfdbg_1;0". If not specified, defaults to {}
func CopyTensorName ¶
func CopyTensorName(value string) CopyAttr
CopyTensorName sets the optional tensor_name attribute to value.
value: The name of the input tensor. If not specified, defaults to ""
type CopyHostAttr ¶
type CopyHostAttr func(optionalAttr)
CopyHostAttr is an optional argument to CopyHost.
func CopyHostDebugOpsSpec ¶
func CopyHostDebugOpsSpec(value []string) CopyHostAttr
CopyHostDebugOpsSpec sets the optional debug_ops_spec attribute to value.
value: A list of debug op specs (op, url, gated_grpc) for attached debug ops. Each element of the list has the format <debug_op>;<grpc_url>;<gated_grpc>, where gated_grpc is a boolean represented as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1", "DebugIdentity;file:///tmp/tfdbg_1;0". If not specified, defaults to {}
func CopyHostTensorName ¶
func CopyHostTensorName(value string) CopyHostAttr
CopyHostTensorName sets the optional tensor_name attribute to value.
value: The name of the input tensor. If not specified, defaults to ""
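The debug_ops_spec entries are plain strings in the <debug_op>;<grpc_url>;<gated_grpc> format described above. A sketch using CopyHost, assuming it takes the tensor to copy followed by these options; the tensor name and URLs are illustrative:
    s := NewScope()
    x := Placeholder(s, tf.Float)
    copied := CopyHost(s, x,
        CopyHostTensorName("x:0"),
        CopyHostDebugOpsSpec([]string{
            "DebugIdentity;grpc://foo:3333;1",     // gated
            "DebugIdentity;file:///tmp/tfdbg_1;0", // ungated
        }),
    )
    _ = copied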
type CropAndResizeAttr ¶
type CropAndResizeAttr func(optionalAttr)
CropAndResizeAttr is an optional argument to CropAndResize.
func CropAndResizeExtrapolationValue ¶
func CropAndResizeExtrapolationValue(value float32) CropAndResizeAttr
CropAndResizeExtrapolationValue sets the optional extrapolation_value attribute to value.
value: Value used for extrapolation, when applicable. If not specified, defaults to 0
func CropAndResizeMethod ¶
func CropAndResizeMethod(value string) CropAndResizeAttr
CropAndResizeMethod sets the optional method attribute to value.
value: A string specifying the sampling method for resizing. It can be either `"bilinear"` or `"nearest"` and defaults to `"bilinear"`. Currently two sampling methods are supported: Bilinear and Nearest Neighbor. If not specified, defaults to "bilinear"
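A sketch of CropAndResize with both options set, assuming its (scope, image, boxes, box_ind, crop_size, optional attrs) argument order; the box and crop values are illustrative:
    s := NewScope()
    image := Placeholder(s, tf.Float)            // [batch, image_height, image_width, depth]
    boxes := Const(s, [][]float32{{0, 0, 1, 1}}) // normalized [y1, x1, y2, x2]
    boxInd := Const(s, []int32{0})               // which batch image each box refers to
    cropSize := Const(s, []int32{64, 64})
    crops := CropAndResize(s, image, boxes, boxInd, cropSize,
        CropAndResizeMethod("nearest"),
        CropAndResizeExtrapolationValue(0.5),
    )
    _ = crops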
type CropAndResizeGradBoxesAttr ¶
type CropAndResizeGradBoxesAttr func(optionalAttr)
CropAndResizeGradBoxesAttr is an optional argument to CropAndResizeGradBoxes.
func CropAndResizeGradBoxesMethod ¶
func CropAndResizeGradBoxesMethod(value string) CropAndResizeGradBoxesAttr
CropAndResizeGradBoxesMethod sets the optional method attribute to value.
value: A string specifying the interpolation method. Only 'bilinear' is supported for now. If not specified, defaults to "bilinear"
type CropAndResizeGradImageAttr ¶
type CropAndResizeGradImageAttr func(optionalAttr)
CropAndResizeGradImageAttr is an optional argument to CropAndResizeGradImage.
func CropAndResizeGradImageMethod ¶
func CropAndResizeGradImageMethod(value string) CropAndResizeGradImageAttr
CropAndResizeGradImageMethod sets the optional method attribute to value.
value: A string specifying the interpolation method. Only 'bilinear' is supported for now. If not specified, defaults to "bilinear"
type CudnnRNNAttr ¶
type CudnnRNNAttr func(optionalAttr)
CudnnRNNAttr is an optional argument to CudnnRNN.
func CudnnRNNDirection ¶
func CudnnRNNDirection(value string) CudnnRNNAttr
CudnnRNNDirection sets the optional direction attribute to value. If not specified, defaults to "unidirectional"
func CudnnRNNDropout ¶
func CudnnRNNDropout(value float32) CudnnRNNAttr
CudnnRNNDropout sets the optional dropout attribute to value. If not specified, defaults to 0
func CudnnRNNInputMode ¶
func CudnnRNNInputMode(value string) CudnnRNNAttr
CudnnRNNInputMode sets the optional input_mode attribute to value. If not specified, defaults to "linear_input"
func CudnnRNNIsTraining ¶
func CudnnRNNIsTraining(value bool) CudnnRNNAttr
CudnnRNNIsTraining sets the optional is_training attribute to value. If not specified, defaults to true
func CudnnRNNRnnMode ¶
func CudnnRNNRnnMode(value string) CudnnRNNAttr
CudnnRNNRnnMode sets the optional rnn_mode attribute to value. If not specified, defaults to "lstm"
func CudnnRNNSeed ¶
func CudnnRNNSeed(value int64) CudnnRNNAttr
CudnnRNNSeed sets the optional seed attribute to value. If not specified, defaults to 0
func CudnnRNNSeed2 ¶
func CudnnRNNSeed2(value int64) CudnnRNNAttr
CudnnRNNSeed2 sets the optional seed2 attribute to value. If not specified, defaults to 0
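A sketch that only assembles the optional attributes; CudnnRNN's required inputs (input, input_h, input_c, params) and its multiple outputs are elided, and the values are illustrative:
    // Bidirectional GRU in inference mode with a fixed seed; pass opts...
    // as the trailing optional arguments of CudnnRNN.
    opts := []CudnnRNNAttr{
        CudnnRNNRnnMode("gru"),
        CudnnRNNDirection("bidirectional"),
        CudnnRNNDropout(0.1),
        CudnnRNNSeed(42),
        CudnnRNNIsTraining(false),
    }
    _ = opts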
type CudnnRNNBackpropAttr ¶
type CudnnRNNBackpropAttr func(optionalAttr)
CudnnRNNBackpropAttr is an optional argument to CudnnRNNBackprop.
func CudnnRNNBackpropDirection ¶
func CudnnRNNBackpropDirection(value string) CudnnRNNBackpropAttr
CudnnRNNBackpropDirection sets the optional direction attribute to value. If not specified, defaults to "unidirectional"
func CudnnRNNBackpropDropout ¶
func CudnnRNNBackpropDropout(value float32) CudnnRNNBackpropAttr
CudnnRNNBackpropDropout sets the optional dropout attribute to value. If not specified, defaults to 0
func CudnnRNNBackpropInputMode ¶
func CudnnRNNBackpropInputMode(value string) CudnnRNNBackpropAttr
CudnnRNNBackpropInputMode sets the optional input_mode attribute to value. If not specified, defaults to "linear_input"
func CudnnRNNBackpropRnnMode ¶
func CudnnRNNBackpropRnnMode(value string) CudnnRNNBackpropAttr
CudnnRNNBackpropRnnMode sets the optional rnn_mode attribute to value. If not specified, defaults to "lstm"
func CudnnRNNBackpropSeed ¶
func CudnnRNNBackpropSeed(value int64) CudnnRNNBackpropAttr
CudnnRNNBackpropSeed sets the optional seed attribute to value. If not specified, defaults to 0
func CudnnRNNBackpropSeed2 ¶
func CudnnRNNBackpropSeed2(value int64) CudnnRNNBackpropAttr
CudnnRNNBackpropSeed2 sets the optional seed2 attribute to value. If not specified, defaults to 0
type CudnnRNNBackpropV2Attr ¶
type CudnnRNNBackpropV2Attr func(optionalAttr)
CudnnRNNBackpropV2Attr is an optional argument to CudnnRNNBackpropV2.
func CudnnRNNBackpropV2Direction ¶
func CudnnRNNBackpropV2Direction(value string) CudnnRNNBackpropV2Attr
CudnnRNNBackpropV2Direction sets the optional direction attribute to value. If not specified, defaults to "unidirectional"
func CudnnRNNBackpropV2Dropout ¶
func CudnnRNNBackpropV2Dropout(value float32) CudnnRNNBackpropV2Attr
CudnnRNNBackpropV2Dropout sets the optional dropout attribute to value. If not specified, defaults to 0
func CudnnRNNBackpropV2InputMode ¶
func CudnnRNNBackpropV2InputMode(value string) CudnnRNNBackpropV2Attr
CudnnRNNBackpropV2InputMode sets the optional input_mode attribute to value. If not specified, defaults to "linear_input"
func CudnnRNNBackpropV2RnnMode ¶
func CudnnRNNBackpropV2RnnMode(value string) CudnnRNNBackpropV2Attr
CudnnRNNBackpropV2RnnMode sets the optional rnn_mode attribute to value. If not specified, defaults to "lstm"
func CudnnRNNBackpropV2Seed ¶
func CudnnRNNBackpropV2Seed(value int64) CudnnRNNBackpropV2Attr
CudnnRNNBackpropV2Seed sets the optional seed attribute to value. If not specified, defaults to 0
func CudnnRNNBackpropV2Seed2 ¶
func CudnnRNNBackpropV2Seed2(value int64) CudnnRNNBackpropV2Attr
CudnnRNNBackpropV2Seed2 sets the optional seed2 attribute to value. If not specified, defaults to 0
type CudnnRNNBackpropV3Attr ¶
type CudnnRNNBackpropV3Attr func(optionalAttr)
CudnnRNNBackpropV3Attr is an optional argument to CudnnRNNBackpropV3.
func CudnnRNNBackpropV3Direction ¶
func CudnnRNNBackpropV3Direction(value string) CudnnRNNBackpropV3Attr
CudnnRNNBackpropV3Direction sets the optional direction attribute to value. If not specified, defaults to "unidirectional"
func CudnnRNNBackpropV3Dropout ¶
func CudnnRNNBackpropV3Dropout(value float32) CudnnRNNBackpropV3Attr
CudnnRNNBackpropV3Dropout sets the optional dropout attribute to value. If not specified, defaults to 0
func CudnnRNNBackpropV3InputMode ¶
func CudnnRNNBackpropV3InputMode(value string) CudnnRNNBackpropV3Attr
CudnnRNNBackpropV3InputMode sets the optional input_mode attribute to value. If not specified, defaults to "linear_input"
func CudnnRNNBackpropV3NumProj ¶
func CudnnRNNBackpropV3NumProj(value int64) CudnnRNNBackpropV3Attr
CudnnRNNBackpropV3NumProj sets the optional num_proj attribute to value. If not specified, defaults to 0
func CudnnRNNBackpropV3RnnMode ¶
func CudnnRNNBackpropV3RnnMode(value string) CudnnRNNBackpropV3Attr
CudnnRNNBackpropV3RnnMode sets the optional rnn_mode attribute to value. If not specified, defaults to "lstm"
func CudnnRNNBackpropV3Seed ¶
func CudnnRNNBackpropV3Seed(value int64) CudnnRNNBackpropV3Attr
CudnnRNNBackpropV3Seed sets the optional seed attribute to value. If not specified, defaults to 0
func CudnnRNNBackpropV3Seed2 ¶
func CudnnRNNBackpropV3Seed2(value int64) CudnnRNNBackpropV3Attr
CudnnRNNBackpropV3Seed2 sets the optional seed2 attribute to value. If not specified, defaults to 0
func CudnnRNNBackpropV3TimeMajor ¶
func CudnnRNNBackpropV3TimeMajor(value bool) CudnnRNNBackpropV3Attr
CudnnRNNBackpropV3TimeMajor sets the optional time_major attribute to value. If not specified, defaults to true
type CudnnRNNCanonicalToParamsAttr ¶
type CudnnRNNCanonicalToParamsAttr func(optionalAttr)
CudnnRNNCanonicalToParamsAttr is an optional argument to CudnnRNNCanonicalToParams.
func CudnnRNNCanonicalToParamsDirection ¶
func CudnnRNNCanonicalToParamsDirection(value string) CudnnRNNCanonicalToParamsAttr
CudnnRNNCanonicalToParamsDirection sets the optional direction attribute to value. If not specified, defaults to "unidirectional"
func CudnnRNNCanonicalToParamsDropout ¶
func CudnnRNNCanonicalToParamsDropout(value float32) CudnnRNNCanonicalToParamsAttr
CudnnRNNCanonicalToParamsDropout sets the optional dropout attribute to value. If not specified, defaults to 0
func CudnnRNNCanonicalToParamsInputMode ¶
func CudnnRNNCanonicalToParamsInputMode(value string) CudnnRNNCanonicalToParamsAttr
CudnnRNNCanonicalToParamsInputMode sets the optional input_mode attribute to value. If not specified, defaults to "linear_input"
func CudnnRNNCanonicalToParamsRnnMode ¶
func CudnnRNNCanonicalToParamsRnnMode(value string) CudnnRNNCanonicalToParamsAttr
CudnnRNNCanonicalToParamsRnnMode sets the optional rnn_mode attribute to value. If not specified, defaults to "lstm"
func CudnnRNNCanonicalToParamsSeed ¶
func CudnnRNNCanonicalToParamsSeed(value int64) CudnnRNNCanonicalToParamsAttr
CudnnRNNCanonicalToParamsSeed sets the optional seed attribute to value. If not specified, defaults to 0
func CudnnRNNCanonicalToParamsSeed2 ¶
func CudnnRNNCanonicalToParamsSeed2(value int64) CudnnRNNCanonicalToParamsAttr
CudnnRNNCanonicalToParamsSeed2 sets the optional seed2 attribute to value. If not specified, defaults to 0
type CudnnRNNCanonicalToParamsV2Attr ¶
type CudnnRNNCanonicalToParamsV2Attr func(optionalAttr)
CudnnRNNCanonicalToParamsV2Attr is an optional argument to CudnnRNNCanonicalToParamsV2.
func CudnnRNNCanonicalToParamsV2Direction ¶
func CudnnRNNCanonicalToParamsV2Direction(value string) CudnnRNNCanonicalToParamsV2Attr
CudnnRNNCanonicalToParamsV2Direction sets the optional direction attribute to value. If not specified, defaults to "unidirectional"
func CudnnRNNCanonicalToParamsV2Dropout ¶
func CudnnRNNCanonicalToParamsV2Dropout(value float32) CudnnRNNCanonicalToParamsV2Attr
CudnnRNNCanonicalToParamsV2Dropout sets the optional dropout attribute to value. If not specified, defaults to 0
func CudnnRNNCanonicalToParamsV2InputMode ¶
func CudnnRNNCanonicalToParamsV2InputMode(value string) CudnnRNNCanonicalToParamsV2Attr
CudnnRNNCanonicalToParamsV2InputMode sets the optional input_mode attribute to value. If not specified, defaults to "linear_input"
func CudnnRNNCanonicalToParamsV2NumProj ¶
func CudnnRNNCanonicalToParamsV2NumProj(value int64) CudnnRNNCanonicalToParamsV2Attr
CudnnRNNCanonicalToParamsV2NumProj sets the optional num_proj attribute to value. If not specified, defaults to 0
func CudnnRNNCanonicalToParamsV2RnnMode ¶
func CudnnRNNCanonicalToParamsV2RnnMode(value string) CudnnRNNCanonicalToParamsV2Attr
CudnnRNNCanonicalToParamsV2RnnMode sets the optional rnn_mode attribute to value. If not specified, defaults to "lstm"
func CudnnRNNCanonicalToParamsV2Seed ¶
func CudnnRNNCanonicalToParamsV2Seed(value int64) CudnnRNNCanonicalToParamsV2Attr
CudnnRNNCanonicalToParamsV2Seed sets the optional seed attribute to value. If not specified, defaults to 0
func CudnnRNNCanonicalToParamsV2Seed2 ¶
func CudnnRNNCanonicalToParamsV2Seed2(value int64) CudnnRNNCanonicalToParamsV2Attr
CudnnRNNCanonicalToParamsV2Seed2 sets the optional seed2 attribute to value. If not specified, defaults to 0
type CudnnRNNParamsSizeAttr ¶
type CudnnRNNParamsSizeAttr func(optionalAttr)
CudnnRNNParamsSizeAttr is an optional argument to CudnnRNNParamsSize.
func CudnnRNNParamsSizeDirection ¶
func CudnnRNNParamsSizeDirection(value string) CudnnRNNParamsSizeAttr
CudnnRNNParamsSizeDirection sets the optional direction attribute to value. If not specified, defaults to "unidirectional"
func CudnnRNNParamsSizeDropout ¶
func CudnnRNNParamsSizeDropout(value float32) CudnnRNNParamsSizeAttr
CudnnRNNParamsSizeDropout sets the optional dropout attribute to value. If not specified, defaults to 0
func CudnnRNNParamsSizeInputMode ¶
func CudnnRNNParamsSizeInputMode(value string) CudnnRNNParamsSizeAttr
CudnnRNNParamsSizeInputMode sets the optional input_mode attribute to value. If not specified, defaults to "linear_input"
func CudnnRNNParamsSizeNumProj ¶
func CudnnRNNParamsSizeNumProj(value int64) CudnnRNNParamsSizeAttr
CudnnRNNParamsSizeNumProj sets the optional num_proj attribute to value. If not specified, defaults to 0
func CudnnRNNParamsSizeRnnMode ¶
func CudnnRNNParamsSizeRnnMode(value string) CudnnRNNParamsSizeAttr
CudnnRNNParamsSizeRnnMode sets the optional rnn_mode attribute to value. If not specified, defaults to "lstm"
func CudnnRNNParamsSizeSeed ¶
func CudnnRNNParamsSizeSeed(value int64) CudnnRNNParamsSizeAttr
CudnnRNNParamsSizeSeed sets the optional seed attribute to value. If not specified, defaults to 0
func CudnnRNNParamsSizeSeed2 ¶
func CudnnRNNParamsSizeSeed2(value int64) CudnnRNNParamsSizeAttr
CudnnRNNParamsSizeSeed2 sets the optional seed2 attribute to value. If not specified, defaults to 0
type CudnnRNNParamsToCanonicalAttr ¶
type CudnnRNNParamsToCanonicalAttr func(optionalAttr)
CudnnRNNParamsToCanonicalAttr is an optional argument to CudnnRNNParamsToCanonical.
func CudnnRNNParamsToCanonicalDirection ¶
func CudnnRNNParamsToCanonicalDirection(value string) CudnnRNNParamsToCanonicalAttr
CudnnRNNParamsToCanonicalDirection sets the optional direction attribute to value. If not specified, defaults to "unidirectional"
func CudnnRNNParamsToCanonicalDropout ¶
func CudnnRNNParamsToCanonicalDropout(value float32) CudnnRNNParamsToCanonicalAttr
CudnnRNNParamsToCanonicalDropout sets the optional dropout attribute to value. If not specified, defaults to 0
func CudnnRNNParamsToCanonicalInputMode ¶
func CudnnRNNParamsToCanonicalInputMode(value string) CudnnRNNParamsToCanonicalAttr
CudnnRNNParamsToCanonicalInputMode sets the optional input_mode attribute to value. If not specified, defaults to "linear_input"
func CudnnRNNParamsToCanonicalRnnMode ¶
func CudnnRNNParamsToCanonicalRnnMode(value string) CudnnRNNParamsToCanonicalAttr
CudnnRNNParamsToCanonicalRnnMode sets the optional rnn_mode attribute to value. If not specified, defaults to "lstm"
func CudnnRNNParamsToCanonicalSeed ¶
func CudnnRNNParamsToCanonicalSeed(value int64) CudnnRNNParamsToCanonicalAttr
CudnnRNNParamsToCanonicalSeed sets the optional seed attribute to value. If not specified, defaults to 0
func CudnnRNNParamsToCanonicalSeed2 ¶
func CudnnRNNParamsToCanonicalSeed2(value int64) CudnnRNNParamsToCanonicalAttr
CudnnRNNParamsToCanonicalSeed2 sets the optional seed2 attribute to value. If not specified, defaults to 0
type CudnnRNNParamsToCanonicalV2Attr ¶
type CudnnRNNParamsToCanonicalV2Attr func(optionalAttr)
CudnnRNNParamsToCanonicalV2Attr is an optional argument to CudnnRNNParamsToCanonicalV2.
func CudnnRNNParamsToCanonicalV2Direction ¶
func CudnnRNNParamsToCanonicalV2Direction(value string) CudnnRNNParamsToCanonicalV2Attr
CudnnRNNParamsToCanonicalV2Direction sets the optional direction attribute to value. If not specified, defaults to "unidirectional"
func CudnnRNNParamsToCanonicalV2Dropout ¶
func CudnnRNNParamsToCanonicalV2Dropout(value float32) CudnnRNNParamsToCanonicalV2Attr
CudnnRNNParamsToCanonicalV2Dropout sets the optional dropout attribute to value. If not specified, defaults to 0
func CudnnRNNParamsToCanonicalV2InputMode ¶
func CudnnRNNParamsToCanonicalV2InputMode(value string) CudnnRNNParamsToCanonicalV2Attr
CudnnRNNParamsToCanonicalV2InputMode sets the optional input_mode attribute to value. If not specified, defaults to "linear_input"
func CudnnRNNParamsToCanonicalV2NumProj ¶
func CudnnRNNParamsToCanonicalV2NumProj(value int64) CudnnRNNParamsToCanonicalV2Attr
CudnnRNNParamsToCanonicalV2NumProj sets the optional num_proj attribute to value. If not specified, defaults to 0
func CudnnRNNParamsToCanonicalV2RnnMode ¶
func CudnnRNNParamsToCanonicalV2RnnMode(value string) CudnnRNNParamsToCanonicalV2Attr
CudnnRNNParamsToCanonicalV2RnnMode sets the optional rnn_mode attribute to value. If not specified, defaults to "lstm"
func CudnnRNNParamsToCanonicalV2Seed ¶
func CudnnRNNParamsToCanonicalV2Seed(value int64) CudnnRNNParamsToCanonicalV2Attr
CudnnRNNParamsToCanonicalV2Seed sets the optional seed attribute to value. If not specified, defaults to 0
func CudnnRNNParamsToCanonicalV2Seed2 ¶
func CudnnRNNParamsToCanonicalV2Seed2(value int64) CudnnRNNParamsToCanonicalV2Attr
CudnnRNNParamsToCanonicalV2Seed2 sets the optional seed2 attribute to value. If not specified, defaults to 0
type CudnnRNNV2Attr ¶
type CudnnRNNV2Attr func(optionalAttr)
CudnnRNNV2Attr is an optional argument to CudnnRNNV2.
func CudnnRNNV2Direction ¶
func CudnnRNNV2Direction(value string) CudnnRNNV2Attr
CudnnRNNV2Direction sets the optional direction attribute to value. If not specified, defaults to "unidirectional"
func CudnnRNNV2Dropout ¶
func CudnnRNNV2Dropout(value float32) CudnnRNNV2Attr
CudnnRNNV2Dropout sets the optional dropout attribute to value. If not specified, defaults to 0
func CudnnRNNV2InputMode ¶
func CudnnRNNV2InputMode(value string) CudnnRNNV2Attr
CudnnRNNV2InputMode sets the optional input_mode attribute to value. If not specified, defaults to "linear_input"
func CudnnRNNV2IsTraining ¶
func CudnnRNNV2IsTraining(value bool) CudnnRNNV2Attr
CudnnRNNV2IsTraining sets the optional is_training attribute to value. If not specified, defaults to true
func CudnnRNNV2RnnMode ¶
func CudnnRNNV2RnnMode(value string) CudnnRNNV2Attr
CudnnRNNV2RnnMode sets the optional rnn_mode attribute to value. If not specified, defaults to "lstm"
func CudnnRNNV2Seed ¶
func CudnnRNNV2Seed(value int64) CudnnRNNV2Attr
CudnnRNNV2Seed sets the optional seed attribute to value. If not specified, defaults to 0
func CudnnRNNV2Seed2 ¶
func CudnnRNNV2Seed2(value int64) CudnnRNNV2Attr
CudnnRNNV2Seed2 sets the optional seed2 attribute to value. If not specified, defaults to 0
type CudnnRNNV3Attr ¶
type CudnnRNNV3Attr func(optionalAttr)
CudnnRNNV3Attr is an optional argument to CudnnRNNV3.
func CudnnRNNV3Direction ¶
func CudnnRNNV3Direction(value string) CudnnRNNV3Attr
CudnnRNNV3Direction sets the optional direction attribute to value. If not specified, defaults to "unidirectional"
func CudnnRNNV3Dropout ¶
func CudnnRNNV3Dropout(value float32) CudnnRNNV3Attr
CudnnRNNV3Dropout sets the optional dropout attribute to value. If not specified, defaults to 0
func CudnnRNNV3InputMode ¶
func CudnnRNNV3InputMode(value string) CudnnRNNV3Attr
CudnnRNNV3InputMode sets the optional input_mode attribute to value. If not specified, defaults to "linear_input"
func CudnnRNNV3IsTraining ¶
func CudnnRNNV3IsTraining(value bool) CudnnRNNV3Attr
CudnnRNNV3IsTraining sets the optional is_training attribute to value. If not specified, defaults to true
func CudnnRNNV3NumProj ¶
func CudnnRNNV3NumProj(value int64) CudnnRNNV3Attr
CudnnRNNV3NumProj sets the optional num_proj attribute to value. If not specified, defaults to 0
func CudnnRNNV3RnnMode ¶
func CudnnRNNV3RnnMode(value string) CudnnRNNV3Attr
CudnnRNNV3RnnMode sets the optional rnn_mode attribute to value. If not specified, defaults to "lstm"
func CudnnRNNV3Seed ¶
func CudnnRNNV3Seed(value int64) CudnnRNNV3Attr
CudnnRNNV3Seed sets the optional seed attribute to value. If not specified, defaults to 0
func CudnnRNNV3Seed2 ¶
func CudnnRNNV3Seed2(value int64) CudnnRNNV3Attr
CudnnRNNV3Seed2 sets the optional seed2 attribute to value. If not specified, defaults to 0
func CudnnRNNV3TimeMajor ¶
func CudnnRNNV3TimeMajor(value bool) CudnnRNNV3Attr
CudnnRNNV3TimeMajor sets the optional time_major attribute to value. If not specified, defaults to true
type CumprodAttr ¶
type CumprodAttr func(optionalAttr)
CumprodAttr is an optional argument to Cumprod.
func CumprodExclusive ¶
func CumprodExclusive(value bool) CumprodAttr
CumprodExclusive sets the optional exclusive attribute to value.
value: If `True`, perform exclusive cumprod. If not specified, defaults to false
func CumprodReverse ¶
func CumprodReverse(value bool) CumprodAttr
CumprodReverse sets the optional reverse attribute to value.
value: A `bool` (default: False). If not specified, defaults to false
type CumsumAttr ¶
type CumsumAttr func(optionalAttr)
CumsumAttr is an optional argument to Cumsum.
func CumsumExclusive ¶
func CumsumExclusive(value bool) CumsumAttr
CumsumExclusive sets the optional exclusive attribute to value.
value: If `True`, perform exclusive cumsum. If not specified, defaults to false
func CumsumReverse ¶
func CumsumReverse(value bool) CumsumAttr
CumsumReverse sets the optional reverse attribute to value.
value: A `bool` (default: False). If not specified, defaults to false
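exclusive and reverse combine freely. A small sketch, assuming Cumsum's (scope, x, axis, optional attrs) argument order:
    s := NewScope()
    x := Const(s, []int32{1, 2, 3, 4})
    axis := Const(s, int32(0))
    // Exclusive, reversed cumulative sum: [9, 7, 4, 0].
    out := Cumsum(s, x, axis,
        CumsumExclusive(true),
        CumsumReverse(true),
    )
    _ = out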
type CumulativeLogsumexpAttr ¶
type CumulativeLogsumexpAttr func(optionalAttr)
CumulativeLogsumexpAttr is an optional argument to CumulativeLogsumexp.
func CumulativeLogsumexpExclusive ¶
func CumulativeLogsumexpExclusive(value bool) CumulativeLogsumexpAttr
CumulativeLogsumexpExclusive sets the optional exclusive attribute to value.
value: If `True`, perform exclusive cumulative log-sum-exp. If not specified, defaults to false
func CumulativeLogsumexpReverse ¶
func CumulativeLogsumexpReverse(value bool) CumulativeLogsumexpAttr
CumulativeLogsumexpReverse sets the optional reverse attribute to value.
value: A `bool` (default: False). If not specified, defaults to false
type DataFormatDimMapAttr ¶
type DataFormatDimMapAttr func(optionalAttr)
DataFormatDimMapAttr is an optional argument to DataFormatDimMap.
func DataFormatDimMapDstFormat ¶
func DataFormatDimMapDstFormat(value string) DataFormatDimMapAttr
DataFormatDimMapDstFormat sets the optional dst_format attribute to value.
value: destination data format. If not specified, defaults to "NCHW"
func DataFormatDimMapSrcFormat ¶
func DataFormatDimMapSrcFormat(value string) DataFormatDimMapAttr
DataFormatDimMapSrcFormat sets the optional src_format attribute to value.
value: source data format. If not specified, defaults to "NHWC"
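A sketch mapping dimension indices from NHWC to NCHW, assuming DataFormatDimMap's (scope, x, optional attrs) argument order:
    s := NewScope()
    dims := Const(s, []int32{0, 1, 2, 3}) // dimension indices in the source format
    // In NCHW the same dimensions land at [0, 2, 3, 1].
    mapped := DataFormatDimMap(s, dims,
        DataFormatDimMapSrcFormat("NHWC"),
        DataFormatDimMapDstFormat("NCHW"),
    )
    _ = mapped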
type DataFormatVecPermuteAttr ¶
type DataFormatVecPermuteAttr func(optionalAttr)
DataFormatVecPermuteAttr is an optional argument to DataFormatVecPermute.
func DataFormatVecPermuteDstFormat ¶
func DataFormatVecPermuteDstFormat(value string) DataFormatVecPermuteAttr
DataFormatVecPermuteDstFormat sets the optional dst_format attribute to value.
value: destination data format. If not specified, defaults to "NCHW"
func DataFormatVecPermuteSrcFormat ¶
func DataFormatVecPermuteSrcFormat(value string) DataFormatVecPermuteAttr
DataFormatVecPermuteSrcFormat sets the optional src_format attribute to value.
value: source data format. If not specified, defaults to "NHWC"
type DataServiceDatasetAttr ¶
type DataServiceDatasetAttr func(optionalAttr)
DataServiceDatasetAttr is an optional argument to DataServiceDataset.
func DataServiceDatasetCrossTrainerCacheOptions ¶ added in v0.2.0
func DataServiceDatasetCrossTrainerCacheOptions(value string) DataServiceDatasetAttr
DataServiceDatasetCrossTrainerCacheOptions sets the optional cross_trainer_cache_options attribute to value. If not specified, defaults to ""
func DataServiceDatasetDataTransferProtocol ¶
func DataServiceDatasetDataTransferProtocol(value string) DataServiceDatasetAttr
DataServiceDatasetDataTransferProtocol sets the optional data_transfer_protocol attribute to value. If not specified, defaults to ""
func DataServiceDatasetTargetWorkers ¶
func DataServiceDatasetTargetWorkers(value string) DataServiceDatasetAttr
DataServiceDatasetTargetWorkers sets the optional target_workers attribute to value. If not specified, defaults to "AUTO"
func DataServiceDatasetTaskRefreshIntervalHintMs ¶
func DataServiceDatasetTaskRefreshIntervalHintMs(value int64) DataServiceDatasetAttr
DataServiceDatasetTaskRefreshIntervalHintMs sets the optional task_refresh_interval_hint_ms attribute to value. If not specified, defaults to -1
type DataServiceDatasetV2Attr ¶
type DataServiceDatasetV2Attr func(optionalAttr)
DataServiceDatasetV2Attr is an optional argument to DataServiceDatasetV2.
func DataServiceDatasetV2CrossTrainerCacheOptions ¶ added in v0.2.0
func DataServiceDatasetV2CrossTrainerCacheOptions(value string) DataServiceDatasetV2Attr
DataServiceDatasetV2CrossTrainerCacheOptions sets the optional cross_trainer_cache_options attribute to value. If not specified, defaults to ""
func DataServiceDatasetV2DataTransferProtocol ¶
func DataServiceDatasetV2DataTransferProtocol(value string) DataServiceDatasetV2Attr
DataServiceDatasetV2DataTransferProtocol sets the optional data_transfer_protocol attribute to value. If not specified, defaults to ""
func DataServiceDatasetV2TargetWorkers ¶
func DataServiceDatasetV2TargetWorkers(value string) DataServiceDatasetV2Attr
DataServiceDatasetV2TargetWorkers sets the optional target_workers attribute to value. If not specified, defaults to "AUTO"
func DataServiceDatasetV2TaskRefreshIntervalHintMs ¶
func DataServiceDatasetV2TaskRefreshIntervalHintMs(value int64) DataServiceDatasetV2Attr
DataServiceDatasetV2TaskRefreshIntervalHintMs sets the optional task_refresh_interval_hint_ms attribute to value. If not specified, defaults to -1
type DatasetCardinalityAttr ¶ added in v0.5.0
type DatasetCardinalityAttr func(optionalAttr)
DatasetCardinalityAttr is an optional argument to DatasetCardinality.
func DatasetCardinalityCardinalityOptions ¶ added in v0.5.0
func DatasetCardinalityCardinalityOptions(value string) DatasetCardinalityAttr
DatasetCardinalityCardinalityOptions sets the optional cardinality_options attribute to value. If not specified, defaults to ""
type DatasetToGraphAttr ¶
type DatasetToGraphAttr func(optionalAttr)
DatasetToGraphAttr is an optional argument to DatasetToGraph.
func DatasetToGraphAllowStateful ¶
func DatasetToGraphAllowStateful(value bool) DatasetToGraphAttr
DatasetToGraphAllowStateful sets the optional allow_stateful attribute to value. If not specified, defaults to false
func DatasetToGraphStatefulWhitelist ¶
func DatasetToGraphStatefulWhitelist(value []string) DatasetToGraphAttr
DatasetToGraphStatefulWhitelist sets the optional stateful_whitelist attribute to value. If not specified, defaults to {}
REQUIRES: len(value) >= 0
func DatasetToGraphStripDeviceAssignment ¶
func DatasetToGraphStripDeviceAssignment(value bool) DatasetToGraphAttr
DatasetToGraphStripDeviceAssignment sets the optional strip_device_assignment attribute to value. If not specified, defaults to false
type DatasetToGraphV2Attr ¶
type DatasetToGraphV2Attr func(optionalAttr)
DatasetToGraphV2Attr is an optional argument to DatasetToGraphV2.
func DatasetToGraphV2ExternalStatePolicy ¶
func DatasetToGraphV2ExternalStatePolicy(value int64) DatasetToGraphV2Attr
DatasetToGraphV2ExternalStatePolicy sets the optional external_state_policy attribute to value. If not specified, defaults to 0
func DatasetToGraphV2StripDeviceAssignment ¶
func DatasetToGraphV2StripDeviceAssignment(value bool) DatasetToGraphV2Attr
DatasetToGraphV2StripDeviceAssignment sets the optional strip_device_assignment attribute to value. If not specified, defaults to false
type DatasetToSingleElementAttr ¶
type DatasetToSingleElementAttr func(optionalAttr)
DatasetToSingleElementAttr is an optional argument to DatasetToSingleElement.
func DatasetToSingleElementMetadata ¶
func DatasetToSingleElementMetadata(value string) DatasetToSingleElementAttr
DatasetToSingleElementMetadata sets the optional metadata attribute to value. If not specified, defaults to ""
type DebugIdentityAttr ¶
type DebugIdentityAttr func(optionalAttr)
DebugIdentityAttr is an optional argument to DebugIdentity.
func DebugIdentityDebugUrls ¶
func DebugIdentityDebugUrls(value []string) DebugIdentityAttr
DebugIdentityDebugUrls sets the optional debug_urls attribute to value.
value: List of URLs to debug targets, e.g.,
file:///foo/tfdbg_dump, grpc://localhost:11011
If not specified, defaults to {}
func DebugIdentityDeviceName ¶
func DebugIdentityDeviceName(value string) DebugIdentityAttr
DebugIdentityDeviceName sets the optional device_name attribute to value.
value: Name of the device on which the tensor resides. If not specified, defaults to ""
func DebugIdentityGatedGrpc ¶
func DebugIdentityGatedGrpc(value bool) DebugIdentityAttr
DebugIdentityGatedGrpc sets the optional gated_grpc attribute to value.
value: Whether this op will be gated. If any of this debug node's debug_urls use the grpc:// scheme and this attribute is set to True, data is not actually sent over the grpc stream unless the debug op has been enabled at that debug_url. If all of this debug node's debug_urls use the grpc:// scheme and the debug op is enabled at none of them, the output will be an empty Tensor.
If not specified, defaults to false
func DebugIdentityTensorName ¶
func DebugIdentityTensorName(value string) DebugIdentityAttr
DebugIdentityTensorName sets the optional tensor_name attribute to value.
value: Name of the input tensor. If not specified, defaults to ""
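A sketch wiring DebugIdentity onto a tensor with one file target and one gated grpc target, assuming its (scope, input, optional attrs) argument order; the tensor name and URLs are illustrative:
    s := NewScope()
    x := Placeholder(s, tf.Float)
    debugged := DebugIdentity(s, x,
        DebugIdentityTensorName("x:0"),
        DebugIdentityDebugUrls([]string{
            "file:///tmp/tfdbg_dump",
            "grpc://localhost:11011",
        }),
        DebugIdentityGatedGrpc(true), // only send over grpc if enabled at the URL
    )
    _ = debugged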
type DebugIdentityV2Attr ¶
type DebugIdentityV2Attr func(optionalAttr)
DebugIdentityV2Attr is an optional argument to DebugIdentityV2.
func DebugIdentityV2CircularBufferSize ¶
func DebugIdentityV2CircularBufferSize(value int64) DebugIdentityV2Attr
DebugIdentityV2CircularBufferSize sets the optional circular_buffer_size attribute to value. If not specified, defaults to 1000
func DebugIdentityV2DebugUrls ¶
func DebugIdentityV2DebugUrls(value []string) DebugIdentityV2Attr
DebugIdentityV2DebugUrls sets the optional debug_urls attribute to value.
value: List of URLs to debug targets, e.g., file:///foo/tfdbg_dump. If not specified, defaults to {}
func DebugIdentityV2OpName ¶
func DebugIdentityV2OpName(value string) DebugIdentityV2Attr
DebugIdentityV2OpName sets the optional op_name attribute to value.
value: Optional. Name of the op that the debug op is concerned with.
Used only for single-tensor trace.
If not specified, defaults to ""
func DebugIdentityV2OutputSlot ¶
func DebugIdentityV2OutputSlot(value int64) DebugIdentityV2Attr
DebugIdentityV2OutputSlot sets the optional output_slot attribute to value.
value: Optional. Output slot index of the tensor that the debug op
is concerned with. Used only for single-tensor trace.
If not specified, defaults to -1
func DebugIdentityV2TensorDebugMode ¶
func DebugIdentityV2TensorDebugMode(value int64) DebugIdentityV2Attr
DebugIdentityV2TensorDebugMode sets the optional tensor_debug_mode attribute to value.
value: TensorDebugMode enum value. See debug_event.proto for details. If not specified, defaults to -1
func DebugIdentityV2TfdbgContextId ¶
func DebugIdentityV2TfdbgContextId(value string) DebugIdentityV2Attr
DebugIdentityV2TfdbgContextId sets the optional tfdbg_context_id attribute to value.
value: A tfdbg-generated ID for the context that the op belongs to,
e.g., a concrete compiled tf.function.
If not specified, defaults to ""
func DebugIdentityV2TfdbgRunId ¶
func DebugIdentityV2TfdbgRunId(value string) DebugIdentityV2Attr
DebugIdentityV2TfdbgRunId sets the optional tfdbg_run_id attribute to value. If not specified, defaults to ""
type DebugIdentityV3Attr ¶ added in v0.5.0
type DebugIdentityV3Attr func(optionalAttr)
DebugIdentityV3Attr is an optional argument to DebugIdentityV3.
func DebugIdentityV3DebugUrls ¶ added in v0.5.0
func DebugIdentityV3DebugUrls(value []string) DebugIdentityV3Attr
DebugIdentityV3DebugUrls sets the optional debug_urls attribute to value.
value: List of URLs to debug targets, e.g.,
file:///foo/tfdbg_dump, grpc://localhost:11011
If not specified, defaults to {}
func DebugIdentityV3DeviceName ¶ added in v0.5.0
func DebugIdentityV3DeviceName(value string) DebugIdentityV3Attr
DebugIdentityV3DeviceName sets the optional device_name attribute to value.
value: Name of the device on which the tensor resides. If not specified, defaults to ""
func DebugIdentityV3GatedGrpc ¶ added in v0.5.0
func DebugIdentityV3GatedGrpc(value bool) DebugIdentityV3Attr
DebugIdentityV3GatedGrpc sets the optional gated_grpc attribute to value.
value: Whether this op will be gated. If any of this debug node's debug_urls use the grpc:// scheme and this attribute is set to True, data is not actually sent over the grpc stream unless the debug op has been enabled at that debug_url. If all of this debug node's debug_urls use the grpc:// scheme and the debug op is enabled at none of them, the output will be an empty Tensor.
If not specified, defaults to false
func DebugIdentityV3IoIndex ¶ added in v0.5.0
func DebugIdentityV3IoIndex(value int64) DebugIdentityV3Attr
DebugIdentityV3IoIndex sets the optional io_index attribute to value.
value: The input or output index of the tensor on the node. If not specified, defaults to -1
func DebugIdentityV3IoOfNode ¶ added in v0.5.0
func DebugIdentityV3IoOfNode(value string) DebugIdentityV3Attr
DebugIdentityV3IoOfNode sets the optional io_of_node attribute to value.
value: Name of the node of which the tensor is an input or output. If not specified, defaults to ""
func DebugIdentityV3IsInput ¶ added in v0.5.0
func DebugIdentityV3IsInput(value bool) DebugIdentityV3Attr
DebugIdentityV3IsInput sets the optional is_input attribute to value.
value: If true, the tensor is an input of the node; otherwise the output. If not specified, defaults to false
func DebugIdentityV3TensorName ¶ added in v0.5.0
func DebugIdentityV3TensorName(value string) DebugIdentityV3Attr
DebugIdentityV3TensorName sets the optional tensor_name attribute to value.
value: Name of the input tensor. If not specified, defaults to ""
type DebugNanCountAttr ¶
type DebugNanCountAttr func(optionalAttr)
DebugNanCountAttr is an optional argument to DebugNanCount.
func DebugNanCountDebugUrls ¶
func DebugNanCountDebugUrls(value []string) DebugNanCountAttr
DebugNanCountDebugUrls sets the optional debug_urls attribute to value.
value: List of URLs to debug targets, e.g.,
file:///foo/tfdbg_dump, grpc://localhost:11011.
If not specified, defaults to {}
func DebugNanCountDeviceName ¶
func DebugNanCountDeviceName(value string) DebugNanCountAttr
DebugNanCountDeviceName sets the optional device_name attribute to value. If not specified, defaults to ""
func DebugNanCountGatedGrpc ¶
func DebugNanCountGatedGrpc(value bool) DebugNanCountAttr
DebugNanCountGatedGrpc sets the optional gated_grpc attribute to value.
value: Whether this op will be gated. If any of this debug node's debug_urls uses the grpc:// scheme and this attribute is set to True, the data is not actually sent over the gRPC stream unless the debug op has been enabled at that debug_url. If all of this debug node's debug_urls use the grpc:// scheme and the debug op is enabled at none of them, the output is an empty Tensor.
If not specified, defaults to false
func DebugNanCountTensorName ¶
func DebugNanCountTensorName(value string) DebugNanCountAttr
DebugNanCountTensorName sets the optional tensor_name attribute to value.
value: Name of the input tensor. If not specified, defaults to ""
type DebugNumericSummaryAttr ¶
type DebugNumericSummaryAttr func(optionalAttr)
DebugNumericSummaryAttr is an optional argument to DebugNumericSummary.
func DebugNumericSummaryDebugUrls ¶
func DebugNumericSummaryDebugUrls(value []string) DebugNumericSummaryAttr
DebugNumericSummaryDebugUrls sets the optional debug_urls attribute to value.
value: List of URLs to debug targets, e.g.,
file:///foo/tfdbg_dump, grpc://localhost:11011.
If not specified, defaults to {}
func DebugNumericSummaryDeviceName ¶
func DebugNumericSummaryDeviceName(value string) DebugNumericSummaryAttr
DebugNumericSummaryDeviceName sets the optional device_name attribute to value. If not specified, defaults to ""
func DebugNumericSummaryGatedGrpc ¶
func DebugNumericSummaryGatedGrpc(value bool) DebugNumericSummaryAttr
DebugNumericSummaryGatedGrpc sets the optional gated_grpc attribute to value.
value: Whether this op will be gated. If any of this debug node's debug_urls uses the grpc:// scheme and this attribute is set to True, the data is not actually sent over the gRPC stream unless the debug op has been enabled at that debug_url. If all of this debug node's debug_urls use the grpc:// scheme and the debug op is enabled at none of them, the output is an empty Tensor.
If not specified, defaults to false
func DebugNumericSummaryLowerBound ¶
func DebugNumericSummaryLowerBound(value float32) DebugNumericSummaryAttr
DebugNumericSummaryLowerBound sets the optional lower_bound attribute to value.
value: (float) Values less than or equal to this lower bound are included in the
generalized -inf count. Default: -inf.
If not specified, defaults to -inf
func DebugNumericSummaryMuteIfHealthy ¶
func DebugNumericSummaryMuteIfHealthy(value bool) DebugNumericSummaryAttr
DebugNumericSummaryMuteIfHealthy sets the optional mute_if_healthy attribute to value.
value: (bool) Do not send data to the debug URLs unless at least one
of elements [2], [3] and [7] (i.e., the nan count and the generalized -inf and inf counts) is non-zero.
If not specified, defaults to false
func DebugNumericSummaryTensorName ¶
func DebugNumericSummaryTensorName(value string) DebugNumericSummaryAttr
DebugNumericSummaryTensorName sets the optional tensor_name attribute to value.
value: Name of the input tensor. If not specified, defaults to ""
func DebugNumericSummaryUpperBound ¶
func DebugNumericSummaryUpperBound(value float32) DebugNumericSummaryAttr
DebugNumericSummaryUpperBound sets the optional upper_bound attribute to value.
value: (float) Values greater than or equal to this upper bound are included in the
generalized +inf count. Default: +inf.
If not specified, defaults to inf
type DebugNumericSummaryV2Attr ¶
type DebugNumericSummaryV2Attr func(optionalAttr)
DebugNumericSummaryV2Attr is an optional argument to DebugNumericSummaryV2.
func DebugNumericSummaryV2OutputDtype ¶
func DebugNumericSummaryV2OutputDtype(value tf.DataType) DebugNumericSummaryV2Attr
DebugNumericSummaryV2OutputDtype sets the optional output_dtype attribute to value.
value: Optional. The type of the output. Can be float32 or float64 (default: float32). If not specified, defaults to DT_FLOAT
func DebugNumericSummaryV2TensorDebugMode ¶
func DebugNumericSummaryV2TensorDebugMode(value int64) DebugNumericSummaryV2Attr
DebugNumericSummaryV2TensorDebugMode sets the optional tensor_debug_mode attribute to value.
value: Tensor debug mode: the mode in which the input tensor is summarized
by the op. See the TensorDebugMode enum in tensorflow/core/protobuf/debug_event.proto for details.
Supported values:

2 (CURT_HEALTH): Output a float32/64 tensor of shape [2]. The 1st element is the tensor_id, if provided, and -1 otherwise. The 2nd element is a bit which is set to 1 if the input tensor has an infinity or nan value, or zero otherwise.

3 (CONCISE_HEALTH): Output a float32/64 tensor of shape [5]. The 1st element is the tensor_id, if provided, and -1 otherwise. The remaining four slots are the total number of elements, -infs, +infs, and nans in the input tensor respectively.

4 (FULL_HEALTH): Output a float32/64 tensor of shape [11]. The 1st element is the tensor_id, if provided, and -1 otherwise. The 2nd element is the device_id, if provided, and -1 otherwise. The 3rd element holds the datatype value of the input tensor according to the enumerated type in tensorflow/core/framework/types.proto. The remaining elements hold the total number of elements, -infs, +infs, nans, negative finite numbers, zeros, and positive finite numbers in the input tensor respectively.

5 (SHAPE): Output a float32/64 tensor of shape [10]. The 1st element is the tensor_id, if provided, and -1 otherwise. The 2nd element holds the datatype value of the input tensor according to the enumerated type in tensorflow/core/framework/types.proto. The 3rd element holds the rank of the tensor. The 4th element holds the number of elements within the tensor. Finally the remaining 6 elements hold the shape of the tensor. If the rank of the tensor is lower than 6, the shape is right padded with zeros. If the rank is greater than 6, the head of the shape is truncated.

6 (FULL_NUMERICS): Output a float32/64 tensor of shape [22]. The 1st element is the tensor_id, if provided, and -1 otherwise. The 2nd element is the device_id, if provided, and -1 otherwise. The 3rd element holds the datatype value of the input tensor according to the enumerated type in tensorflow/core/framework/types.proto. The 4th element holds the rank of the tensor. The 5th to 11th elements hold the shape of the tensor. If the rank of the tensor is lower than 6, the shape is right padded with zeros. If the rank is greater than 6, the head of the shape is truncated. The 12th to 18th elements hold the number of elements, -infs, +infs, nans, denormal floats, negative finite numbers, zeros, and positive finite numbers in the input tensor respectively. The final four elements hold the min value, max value, mean, and variance of the input tensor.

8 (REDUCE_INF_NAN_THREE_SLOTS): Output a float32/64 tensor of shape [3]. The 1st element is -inf if any element of the input tensor is -inf, or zero otherwise. The 2nd element is +inf if any element of the input tensor is +inf, or zero otherwise. The 3rd element is nan if any element of the input tensor is nan, or zero otherwise.
If not specified, defaults to -1
func DebugNumericSummaryV2TensorId ¶
func DebugNumericSummaryV2TensorId(value int64) DebugNumericSummaryV2Attr
DebugNumericSummaryV2TensorId sets the optional tensor_id attribute to value.
value: Optional. An integer identifier for the tensor being summarized by this op. If not specified, defaults to -1
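These attribute setters are passed as trailing optional arguments to the corresponding op wrapper. Below is a minimal sketch, assuming DebugNumericSummaryV2 in this package follows the usual generated form DebugNumericSummaryV2(scope, input, optional ...DebugNumericSummaryV2Attr); the tensor_id of 42 is an arbitrary illustrative value, and mode 3 is the CONCISE_HEALTH mode listed above.
s := NewScope()
x := Placeholder(s, tf.Float) // tensor to be summarized at run time
summary := DebugNumericSummaryV2(s, x,
	DebugNumericSummaryV2TensorDebugMode(3), // CONCISE_HEALTH: [tensor_id, #elements, #-infs, #+infs, #nans]
	DebugNumericSummaryV2TensorId(42),       // arbitrary identifier for this tensor
	DebugNumericSummaryV2OutputDtype(tf.Double),
)
if s.Err() != nil {
	panic(s.Err())
}
_ = summary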
type DecodeAndCropJpegAttr ¶
type DecodeAndCropJpegAttr func(optionalAttr)
DecodeAndCropJpegAttr is an optional argument to DecodeAndCropJpeg.
func DecodeAndCropJpegAcceptableFraction ¶
func DecodeAndCropJpegAcceptableFraction(value float32) DecodeAndCropJpegAttr
DecodeAndCropJpegAcceptableFraction sets the optional acceptable_fraction attribute to value.
value: The minimum required fraction of lines before a truncated input is accepted. If not specified, defaults to 1
func DecodeAndCropJpegChannels ¶
func DecodeAndCropJpegChannels(value int64) DecodeAndCropJpegAttr
DecodeAndCropJpegChannels sets the optional channels attribute to value.
value: Number of color channels for the decoded image. If not specified, defaults to 0
func DecodeAndCropJpegDctMethod ¶
func DecodeAndCropJpegDctMethod(value string) DecodeAndCropJpegAttr
DecodeAndCropJpegDctMethod sets the optional dct_method attribute to value.
value: string specifying a hint about the algorithm used for decompression. Defaults to "" which maps to a system-specific default. Currently valid values are ["INTEGER_FAST", "INTEGER_ACCURATE"]. The hint may be ignored (e.g., if the internal jpeg library changes to a version that does not have that specific option). If not specified, defaults to ""
func DecodeAndCropJpegFancyUpscaling ¶
func DecodeAndCropJpegFancyUpscaling(value bool) DecodeAndCropJpegAttr
DecodeAndCropJpegFancyUpscaling sets the optional fancy_upscaling attribute to value.
value: If true use a slower but nicer upscaling of the chroma planes (yuv420/422 only). If not specified, defaults to true
func DecodeAndCropJpegRatio ¶
func DecodeAndCropJpegRatio(value int64) DecodeAndCropJpegAttr
DecodeAndCropJpegRatio sets the optional ratio attribute to value.
value: Downscaling ratio. If not specified, defaults to 1
func DecodeAndCropJpegTryRecoverTruncated ¶
func DecodeAndCropJpegTryRecoverTruncated(value bool) DecodeAndCropJpegAttr
DecodeAndCropJpegTryRecoverTruncated sets the optional try_recover_truncated attribute to value.
value: If true try to recover an image from truncated input. If not specified, defaults to false
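A short sketch of how these options compose, assuming DecodeAndCropJpeg follows the package's usual wrapper form DecodeAndCropJpeg(scope, contents, crop_window, optional ...DecodeAndCropJpegAttr), with crop_window a 1-D [crop_y, crop_x, crop_height, crop_width] tensor; the crop size used here is illustrative.
s := NewScope()
contents := Placeholder(s, tf.String) // JPEG-encoded bytes fed at Session.Run time
cropWindow := Const(s, []int32{0, 0, 224, 224})
img := DecodeAndCropJpeg(s, contents, cropWindow,
	DecodeAndCropJpegChannels(3), // force RGB output
	DecodeAndCropJpegRatio(2),    // downscale by 2 while decoding
)
if s.Err() != nil {
	panic(s.Err())
}
_ = img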
type DecodeBmpAttr ¶
type DecodeBmpAttr func(optionalAttr)
DecodeBmpAttr is an optional argument to DecodeBmp.
func DecodeBmpChannels ¶
func DecodeBmpChannels(value int64) DecodeBmpAttr
DecodeBmpChannels sets the optional channels attribute to value. If not specified, defaults to 0
type DecodeCSVAttr ¶
type DecodeCSVAttr func(optionalAttr)
DecodeCSVAttr is an optional argument to DecodeCSV.
func DecodeCSVFieldDelim ¶
func DecodeCSVFieldDelim(value string) DecodeCSVAttr
DecodeCSVFieldDelim sets the optional field_delim attribute to value.
value: char delimiter to separate fields in a record. If not specified, defaults to ","
func DecodeCSVNaValue ¶
func DecodeCSVNaValue(value string) DecodeCSVAttr
DecodeCSVNaValue sets the optional na_value attribute to value.
value: Additional string to recognize as NA/NaN. If not specified, defaults to ""
func DecodeCSVSelectCols ¶
func DecodeCSVSelectCols(value []int64) DecodeCSVAttr
DecodeCSVSelectCols sets the optional select_cols attribute to value. If not specified, defaults to {}
func DecodeCSVUseQuoteDelim ¶
func DecodeCSVUseQuoteDelim(value bool) DecodeCSVAttr
DecodeCSVUseQuoteDelim sets the optional use_quote_delim attribute to value.
value: If false, treat double quotation marks as regular characters inside the string fields (ignoring RFC 4180, Section 2, Bullet 5). If not specified, defaults to true
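A minimal sketch, assuming DecodeCSV follows the package's usual wrapper form DecodeCSV(scope, records, record_defaults, optional ...DecodeCSVAttr), where record_defaults fixes the column types; the delimiter and NA token below are illustrative choices.
s := NewScope()
records := Placeholder(s, tf.String) // lines of CSV text
defaults := []tf.Output{
	Const(s, ""),       // column 0: string, default ""
	Const(s, int32(0)), // column 1: int32, default 0
}
cols := DecodeCSV(s, records, defaults,
	DecodeCSVFieldDelim(";"), // semicolon-separated records
	DecodeCSVNaValue("NA"),   // also treat "NA" as missing
)
if s.Err() != nil {
	panic(s.Err())
}
_ = cols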
type DecodeCompressedAttr ¶
type DecodeCompressedAttr func(optionalAttr)
DecodeCompressedAttr is an optional argument to DecodeCompressed.
func DecodeCompressedCompressionType ¶
func DecodeCompressedCompressionType(value string) DecodeCompressedAttr
DecodeCompressedCompressionType sets the optional compression_type attribute to value.
value: A scalar containing either (i) the empty string (no compression), (ii) "ZLIB", or (iii) "GZIP". If not specified, defaults to ""
type DecodeImageAttr ¶
type DecodeImageAttr func(optionalAttr)
DecodeImageAttr is an optional argument to DecodeImage.
func DecodeImageChannels ¶
func DecodeImageChannels(value int64) DecodeImageAttr
DecodeImageChannels sets the optional channels attribute to value.
value: Number of color channels for the decoded image. If not specified, defaults to 0
func DecodeImageDtype ¶
func DecodeImageDtype(value tf.DataType) DecodeImageAttr
DecodeImageDtype sets the optional dtype attribute to value.
value: The desired DType of the returned Tensor. If not specified, defaults to DT_UINT8
func DecodeImageExpandAnimations ¶
func DecodeImageExpandAnimations(value bool) DecodeImageAttr
DecodeImageExpandAnimations sets the optional expand_animations attribute to value.
value: Controls the output shape of the returned op. If True, the returned op will produce a 3-D tensor for PNG, JPEG, and BMP files; and a 4-D tensor for all GIFs, whether animated or not. If False, the returned op will produce a 3-D tensor for all file types and will truncate animated GIFs to the first frame. If not specified, defaults to true
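A minimal sketch, assuming DecodeImage follows the package's usual wrapper form DecodeImage(scope, contents, optional ...DecodeImageAttr); the particular attribute values are illustrative.
s := NewScope()
contents := Placeholder(s, tf.String) // encoded PNG/JPEG/GIF/BMP bytes
img := DecodeImage(s, contents,
	DecodeImageChannels(3),             // decode to RGB
	DecodeImageDtype(tf.Float),         // return float values instead of uint8
	DecodeImageExpandAnimations(false), // truncate animated GIFs to the first frame
)
if s.Err() != nil {
	panic(s.Err())
}
_ = img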
type DecodeJpegAttr ¶
type DecodeJpegAttr func(optionalAttr)
DecodeJpegAttr is an optional argument to DecodeJpeg.
func DecodeJpegAcceptableFraction ¶
func DecodeJpegAcceptableFraction(value float32) DecodeJpegAttr
DecodeJpegAcceptableFraction sets the optional acceptable_fraction attribute to value.
value: The minimum required fraction of lines before a truncated input is accepted. If not specified, defaults to 1
func DecodeJpegChannels ¶
func DecodeJpegChannels(value int64) DecodeJpegAttr
DecodeJpegChannels sets the optional channels attribute to value.
value: Number of color channels for the decoded image. If not specified, defaults to 0
func DecodeJpegDctMethod ¶
func DecodeJpegDctMethod(value string) DecodeJpegAttr
DecodeJpegDctMethod sets the optional dct_method attribute to value.
value: string specifying a hint about the algorithm used for decompression. Defaults to "" which maps to a system-specific default. Currently valid values are ["INTEGER_FAST", "INTEGER_ACCURATE"]. The hint may be ignored (e.g., if the internal jpeg library changes to a version that does not have that specific option). If not specified, defaults to ""
func DecodeJpegFancyUpscaling ¶
func DecodeJpegFancyUpscaling(value bool) DecodeJpegAttr
DecodeJpegFancyUpscaling sets the optional fancy_upscaling attribute to value.
value: If true use a slower but nicer upscaling of the chroma planes (yuv420/422 only). If not specified, defaults to true
func DecodeJpegRatio ¶
func DecodeJpegRatio(value int64) DecodeJpegAttr
DecodeJpegRatio sets the optional ratio attribute to value.
value: Downscaling ratio. If not specified, defaults to 1
func DecodeJpegTryRecoverTruncated ¶
func DecodeJpegTryRecoverTruncated(value bool) DecodeJpegAttr
DecodeJpegTryRecoverTruncated sets the optional try_recover_truncated attribute to value.
value: If true try to recover an image from truncated input. If not specified, defaults to false
type DecodePaddedRawAttr ¶
type DecodePaddedRawAttr func(optionalAttr)
DecodePaddedRawAttr is an optional argument to DecodePaddedRaw.
func DecodePaddedRawLittleEndian ¶
func DecodePaddedRawLittleEndian(value bool) DecodePaddedRawAttr
DecodePaddedRawLittleEndian sets the optional little_endian attribute to value.
value: Whether the input `input_bytes` is in little-endian order. Ignored for `out_type` values that are stored in a single byte, like `uint8`. If not specified, defaults to true
type DecodePngAttr ¶
type DecodePngAttr func(optionalAttr)
DecodePngAttr is an optional argument to DecodePng.
func DecodePngChannels ¶
func DecodePngChannels(value int64) DecodePngAttr
DecodePngChannels sets the optional channels attribute to value.
value: Number of color channels for the decoded image. If not specified, defaults to 0
func DecodePngDtype ¶
func DecodePngDtype(value tf.DataType) DecodePngAttr
DecodePngDtype sets the optional dtype attribute to value. If not specified, defaults to DT_UINT8
type DecodeProtoV2Attr ¶
type DecodeProtoV2Attr func(optionalAttr)
DecodeProtoV2Attr is an optional argument to DecodeProtoV2.
func DecodeProtoV2DescriptorSource ¶
func DecodeProtoV2DescriptorSource(value string) DecodeProtoV2Attr
DecodeProtoV2DescriptorSource sets the optional descriptor_source attribute to value.
value: Either the special value `local://` or a path to a file containing a serialized `FileDescriptorSet`. If not specified, defaults to "local://"
func DecodeProtoV2MessageFormat ¶
func DecodeProtoV2MessageFormat(value string) DecodeProtoV2Attr
DecodeProtoV2MessageFormat sets the optional message_format attribute to value.
value: Either `binary` or `text`. If not specified, defaults to "binary"
func DecodeProtoV2Sanitize ¶
func DecodeProtoV2Sanitize(value bool) DecodeProtoV2Attr
DecodeProtoV2Sanitize sets the optional sanitize attribute to value.
value: Whether to sanitize the result or not. If not specified, defaults to false
type DecodeRawAttr ¶
type DecodeRawAttr func(optionalAttr)
DecodeRawAttr is an optional argument to DecodeRaw.
func DecodeRawLittleEndian ¶
func DecodeRawLittleEndian(value bool) DecodeRawAttr
DecodeRawLittleEndian sets the optional little_endian attribute to value.
value: Whether the input `bytes` are in little-endian order. Ignored for `out_type` values that are stored in a single byte like `uint8`. If not specified, defaults to true
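A minimal sketch, assuming DecodeRaw follows the package's usual wrapper form DecodeRaw(scope, bytes, out_type, optional ...DecodeRawAttr); the big-endian setting is an illustrative choice.
s := NewScope()
raw := Placeholder(s, tf.String) // raw bytes to reinterpret
vals := DecodeRaw(s, raw, tf.Int32,
	DecodeRawLittleEndian(false), // input bytes are big-endian
)
if s.Err() != nil {
	panic(s.Err())
}
_ = vals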
type DecodeWavAttr ¶
type DecodeWavAttr func(optionalAttr)
DecodeWavAttr is an optional argument to DecodeWav.
func DecodeWavDesiredChannels ¶
func DecodeWavDesiredChannels(value int64) DecodeWavAttr
DecodeWavDesiredChannels sets the optional desired_channels attribute to value.
value: Number of sample channels wanted. If not specified, defaults to -1
func DecodeWavDesiredSamples ¶
func DecodeWavDesiredSamples(value int64) DecodeWavAttr
DecodeWavDesiredSamples sets the optional desired_samples attribute to value.
value: Length of audio requested. If not specified, defaults to -1
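A minimal sketch, assuming DecodeWav follows the package's usual wrapper form DecodeWav(scope, contents, optional ...DecodeWavAttr) and returns (audio, sample_rate); the channel and sample counts are illustrative.
s := NewScope()
wav := Placeholder(s, tf.String) // WAV-encoded bytes
audio, sampleRate := DecodeWav(s, wav,
	DecodeWavDesiredChannels(1),    // downmix to mono
	DecodeWavDesiredSamples(16000), // pad or clip to 16000 samples
)
if s.Err() != nil {
	panic(s.Err())
}
_, _ = audio, sampleRate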
type DenseBincountAttr ¶
type DenseBincountAttr func(optionalAttr)
DenseBincountAttr is an optional argument to DenseBincount.
func DenseBincountBinaryOutput ¶
func DenseBincountBinaryOutput(value bool) DenseBincountAttr
DenseBincountBinaryOutput sets the optional binary_output attribute to value.
value: bool; if true, the kernel counts only whether each value appears (binary output) rather than its number of occurrences. If not specified, defaults to false
type DenseCountSparseOutputAttr ¶
type DenseCountSparseOutputAttr func(optionalAttr)
DenseCountSparseOutputAttr is an optional argument to DenseCountSparseOutput.
func DenseCountSparseOutputMaxlength ¶
func DenseCountSparseOutputMaxlength(value int64) DenseCountSparseOutputAttr
DenseCountSparseOutputMaxlength sets the optional maxlength attribute to value.
value: Maximum value to count. Can be set to -1 for no maximum. If not specified, defaults to -1
REQUIRES: value >= -1
func DenseCountSparseOutputMinlength ¶
func DenseCountSparseOutputMinlength(value int64) DenseCountSparseOutputAttr
DenseCountSparseOutputMinlength sets the optional minlength attribute to value.
value: Minimum value to count. Can be set to -1 for no minimum. If not specified, defaults to -1
REQUIRES: value >= -1
type DenseToDenseSetOperationAttr ¶
type DenseToDenseSetOperationAttr func(optionalAttr)
DenseToDenseSetOperationAttr is an optional argument to DenseToDenseSetOperation.
func DenseToDenseSetOperationValidateIndices ¶
func DenseToDenseSetOperationValidateIndices(value bool) DenseToDenseSetOperationAttr
DenseToDenseSetOperationValidateIndices sets the optional validate_indices attribute to value. If not specified, defaults to true
type DenseToSparseSetOperationAttr ¶
type DenseToSparseSetOperationAttr func(optionalAttr)
DenseToSparseSetOperationAttr is an optional argument to DenseToSparseSetOperation.
func DenseToSparseSetOperationValidateIndices ¶
func DenseToSparseSetOperationValidateIndices(value bool) DenseToSparseSetOperationAttr
DenseToSparseSetOperationValidateIndices sets the optional validate_indices attribute to value. If not specified, defaults to true
type DepthToSpaceAttr ¶
type DepthToSpaceAttr func(optionalAttr)
DepthToSpaceAttr is an optional argument to DepthToSpace.
func DepthToSpaceDataFormat ¶
func DepthToSpaceDataFormat(value string) DepthToSpaceAttr
DepthToSpaceDataFormat sets the optional data_format attribute to value. If not specified, defaults to "NHWC"
type DepthwiseConv2dNativeAttr ¶
type DepthwiseConv2dNativeAttr func(optionalAttr)
DepthwiseConv2dNativeAttr is an optional argument to DepthwiseConv2dNative.
func DepthwiseConv2dNativeDataFormat ¶
func DepthwiseConv2dNativeDataFormat(value string) DepthwiseConv2dNativeAttr
DepthwiseConv2dNativeDataFormat sets the optional data_format attribute to value.
value: Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of:
[batch, height, width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
If not specified, defaults to "NHWC"
func DepthwiseConv2dNativeDilations ¶
func DepthwiseConv2dNativeDilations(value []int64) DepthwiseConv2dNativeAttr
DepthwiseConv2dNativeDilations sets the optional dilations attribute to value.
value: 1-D tensor of length 4. The dilation factor for each dimension of `input`. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions must be 1. If not specified, defaults to {i:1 i:1 i:1 i:1}
func DepthwiseConv2dNativeExplicitPaddings ¶
func DepthwiseConv2dNativeExplicitPaddings(value []int64) DepthwiseConv2dNativeAttr
DepthwiseConv2dNativeExplicitPaddings sets the optional explicit_paddings attribute to value. If not specified, defaults to {}
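A minimal sketch of how the data_format and dilation options combine, assuming DepthwiseConv2dNative follows the package's usual wrapper form DepthwiseConv2dNative(scope, input, filter, strides, padding, optional ...DepthwiseConv2dNativeAttr); the stride, padding, and dilation values are illustrative.
s := NewScope()
input := Placeholder(s, tf.Float)  // [batch, height, width, channels] with NHWC
filter := Placeholder(s, tf.Float) // [filter_height, filter_width, in_channels, channel_multiplier]
out := DepthwiseConv2dNative(s, input, filter,
	[]int64{1, 1, 1, 1}, "SAME",
	DepthwiseConv2dNativeDataFormat("NHWC"),
	DepthwiseConv2dNativeDilations([]int64{1, 2, 2, 1}), // dilate the spatial dimensions by 2
)
if s.Err() != nil {
	panic(s.Err())
}
_ = out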
type DepthwiseConv2dNativeBackpropFilterAttr ¶
type DepthwiseConv2dNativeBackpropFilterAttr func(optionalAttr)
DepthwiseConv2dNativeBackpropFilterAttr is an optional argument to DepthwiseConv2dNativeBackpropFilter.
func DepthwiseConv2dNativeBackpropFilterDataFormat ¶
func DepthwiseConv2dNativeBackpropFilterDataFormat(value string) DepthwiseConv2dNativeBackpropFilterAttr
DepthwiseConv2dNativeBackpropFilterDataFormat sets the optional data_format attribute to value.
value: Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of:
[batch, height, width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
If not specified, defaults to "NHWC"
func DepthwiseConv2dNativeBackpropFilterDilations ¶
func DepthwiseConv2dNativeBackpropFilterDilations(value []int64) DepthwiseConv2dNativeBackpropFilterAttr
DepthwiseConv2dNativeBackpropFilterDilations sets the optional dilations attribute to value.
value: 1-D tensor of length 4. The dilation factor for each dimension of `input`. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions must be 1. If not specified, defaults to {i:1 i:1 i:1 i:1}
func DepthwiseConv2dNativeBackpropFilterExplicitPaddings ¶
func DepthwiseConv2dNativeBackpropFilterExplicitPaddings(value []int64) DepthwiseConv2dNativeBackpropFilterAttr
DepthwiseConv2dNativeBackpropFilterExplicitPaddings sets the optional explicit_paddings attribute to value. If not specified, defaults to {}
type DepthwiseConv2dNativeBackpropInputAttr ¶
type DepthwiseConv2dNativeBackpropInputAttr func(optionalAttr)
DepthwiseConv2dNativeBackpropInputAttr is an optional argument to DepthwiseConv2dNativeBackpropInput.
func DepthwiseConv2dNativeBackpropInputDataFormat ¶
func DepthwiseConv2dNativeBackpropInputDataFormat(value string) DepthwiseConv2dNativeBackpropInputAttr
DepthwiseConv2dNativeBackpropInputDataFormat sets the optional data_format attribute to value.
value: Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of:
[batch, height, width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
If not specified, defaults to "NHWC"
func DepthwiseConv2dNativeBackpropInputDilations ¶
func DepthwiseConv2dNativeBackpropInputDilations(value []int64) DepthwiseConv2dNativeBackpropInputAttr
DepthwiseConv2dNativeBackpropInputDilations sets the optional dilations attribute to value.
value: 1-D tensor of length 4. The dilation factor for each dimension of `input`. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions must be 1. If not specified, defaults to {i:1 i:1 i:1 i:1}
func DepthwiseConv2dNativeBackpropInputExplicitPaddings ¶
func DepthwiseConv2dNativeBackpropInputExplicitPaddings(value []int64) DepthwiseConv2dNativeBackpropInputAttr
DepthwiseConv2dNativeBackpropInputExplicitPaddings sets the optional explicit_paddings attribute to value. If not specified, defaults to {}
type DequantizeAttr ¶
type DequantizeAttr func(optionalAttr)
DequantizeAttr is an optional argument to Dequantize.
func DequantizeAxis ¶
func DequantizeAxis(value int64) DequantizeAttr
DequantizeAxis sets the optional axis attribute to value. If not specified, defaults to -1
func DequantizeDtype ¶
func DequantizeDtype(value tf.DataType) DequantizeAttr
DequantizeDtype sets the optional dtype attribute to value.
value: Type of the output tensor. Currently Dequantize supports float and bfloat16. If 'dtype' is 'bfloat16', it only supports 'MIN_COMBINED' mode. If not specified, defaults to DT_FLOAT
func DequantizeMode ¶
func DequantizeMode(value string) DequantizeAttr
DequantizeMode sets the optional mode attribute to value. If not specified, defaults to "MIN_COMBINED"
func DequantizeNarrowRange ¶
func DequantizeNarrowRange(value bool) DequantizeAttr
DequantizeNarrowRange sets the optional narrow_range attribute to value. If not specified, defaults to false
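A minimal sketch, assuming Dequantize follows the package's usual wrapper form Dequantize(scope, input, min_range, max_range, optional ...DequantizeAttr); the quantized input type and range values are illustrative.
s := NewScope()
q := Placeholder(s, tf.Quint8) // quantized input values
minRange := Const(s, float32(-1.0))
maxRange := Const(s, float32(1.0))
x := Dequantize(s, q, minRange, maxRange,
	DequantizeMode("SCALED"),   // use SCALED instead of the default MIN_COMBINED
	DequantizeDtype(tf.Float),  // produce float32 output
)
if s.Err() != nil {
	panic(s.Err())
}
_ = x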
type DestroyResourceOpAttr ¶
type DestroyResourceOpAttr func(optionalAttr)
DestroyResourceOpAttr is an optional argument to DestroyResourceOp.
func DestroyResourceOpIgnoreLookupError ¶
func DestroyResourceOpIgnoreLookupError(value bool) DestroyResourceOpAttr
DestroyResourceOpIgnoreLookupError sets the optional ignore_lookup_error attribute to value.
value: whether to ignore the error when the resource doesn't exist. If not specified, defaults to true
type DirectedInterleaveDatasetAttr ¶
type DirectedInterleaveDatasetAttr func(optionalAttr)
DirectedInterleaveDatasetAttr is an optional argument to DirectedInterleaveDataset.
func DirectedInterleaveDatasetStopOnEmptyDataset ¶
func DirectedInterleaveDatasetStopOnEmptyDataset(value bool) DirectedInterleaveDatasetAttr
DirectedInterleaveDatasetStopOnEmptyDataset sets the optional stop_on_empty_dataset attribute to value. If not specified, defaults to false
type DynamicEnqueueTPUEmbeddingArbitraryTensorBatchAttr ¶
type DynamicEnqueueTPUEmbeddingArbitraryTensorBatchAttr func(optionalAttr)
DynamicEnqueueTPUEmbeddingArbitraryTensorBatchAttr is an optional argument to DynamicEnqueueTPUEmbeddingArbitraryTensorBatch.
func DynamicEnqueueTPUEmbeddingArbitraryTensorBatchCombiners ¶
func DynamicEnqueueTPUEmbeddingArbitraryTensorBatchCombiners(value []string) DynamicEnqueueTPUEmbeddingArbitraryTensorBatchAttr
DynamicEnqueueTPUEmbeddingArbitraryTensorBatchCombiners sets the optional combiners attribute to value.
value: A list of string scalars, one for each embedding table, specifying how to normalize the embedding activations after weighted summation. Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have the sum of the weights be 0 for 'mean' or the sum of the squared weights be 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for all tables. If not specified, defaults to {}
type EagerPyFuncAttr ¶
type EagerPyFuncAttr func(optionalAttr)
EagerPyFuncAttr is an optional argument to EagerPyFunc.
func EagerPyFuncIsAsync ¶
func EagerPyFuncIsAsync(value bool) EagerPyFuncAttr
EagerPyFuncIsAsync sets the optional is_async attribute to value. If not specified, defaults to false
type EditDistanceAttr ¶
type EditDistanceAttr func(optionalAttr)
EditDistanceAttr is an optional argument to EditDistance.
func EditDistanceNormalize ¶
func EditDistanceNormalize(value bool) EditDistanceAttr
EditDistanceNormalize sets the optional normalize attribute to value.
value: boolean (if true, edit distances are normalized by the length of truth).
If not specified, defaults to true
type EigAttr ¶
type EigAttr func(optionalAttr)
EigAttr is an optional argument to Eig.
func EigComputeV ¶
func EigComputeV(value bool) EigAttr
EigComputeV sets the optional compute_v attribute to value.
value: If `True` then eigenvectors will be computed and returned in `v`. Otherwise, only the eigenvalues will be computed. If not specified, defaults to true
type EncodeBase64Attr ¶
type EncodeBase64Attr func(optionalAttr)
EncodeBase64Attr is an optional argument to EncodeBase64.
func EncodeBase64Pad ¶
func EncodeBase64Pad(value bool) EncodeBase64Attr
EncodeBase64Pad sets the optional pad attribute to value.
value: Bool whether padding is applied at the ends. If not specified, defaults to false
type EncodeJpegAttr ¶
type EncodeJpegAttr func(optionalAttr)
EncodeJpegAttr is an optional argument to EncodeJpeg.
func EncodeJpegChromaDownsampling ¶
func EncodeJpegChromaDownsampling(value bool) EncodeJpegAttr
EncodeJpegChromaDownsampling sets the optional chroma_downsampling attribute to value.
value: See http://en.wikipedia.org/wiki/Chroma_subsampling. If not specified, defaults to true
func EncodeJpegDensityUnit ¶
func EncodeJpegDensityUnit(value string) EncodeJpegAttr
EncodeJpegDensityUnit sets the optional density_unit attribute to value.
value: Unit used to specify `x_density` and `y_density`: pixels per inch (`'in'`) or centimeter (`'cm'`). If not specified, defaults to "in"
func EncodeJpegFormat ¶
func EncodeJpegFormat(value string) EncodeJpegAttr
EncodeJpegFormat sets the optional format attribute to value.
value: Per pixel image format. If not specified, defaults to ""
func EncodeJpegOptimizeSize ¶
func EncodeJpegOptimizeSize(value bool) EncodeJpegAttr
EncodeJpegOptimizeSize sets the optional optimize_size attribute to value.
value: If True, spend CPU/RAM to reduce size with no quality change. If not specified, defaults to false
func EncodeJpegProgressive ¶
func EncodeJpegProgressive(value bool) EncodeJpegAttr
EncodeJpegProgressive sets the optional progressive attribute to value.
value: If True, create a JPEG that loads progressively (coarse to fine). If not specified, defaults to false
func EncodeJpegQuality ¶
func EncodeJpegQuality(value int64) EncodeJpegAttr
EncodeJpegQuality sets the optional quality attribute to value.
value: Quality of the compression from 0 to 100 (higher is better and slower). If not specified, defaults to 95
func EncodeJpegXDensity ¶
func EncodeJpegXDensity(value int64) EncodeJpegAttr
EncodeJpegXDensity sets the optional x_density attribute to value.
value: Horizontal pixels per density unit. If not specified, defaults to 300
func EncodeJpegXmpMetadata ¶
func EncodeJpegXmpMetadata(value string) EncodeJpegAttr
EncodeJpegXmpMetadata sets the optional xmp_metadata attribute to value.
value: If not empty, embed this XMP metadata in the image header. If not specified, defaults to ""
func EncodeJpegYDensity ¶
func EncodeJpegYDensity(value int64) EncodeJpegAttr
EncodeJpegYDensity sets the optional y_density attribute to value.
value: Vertical pixels per density unit. If not specified, defaults to 300
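A minimal sketch, assuming EncodeJpeg follows the package's usual wrapper form EncodeJpeg(scope, image, optional ...EncodeJpegAttr), where image is a uint8 [height, width, channels] tensor; the quality setting is an illustrative trade-off.
s := NewScope()
img := Placeholder(s, tf.Uint8) // [height, width, channels] image
jpeg := EncodeJpeg(s, img,
	EncodeJpegQuality(75),       // trade quality for smaller output
	EncodeJpegProgressive(true), // coarse-to-fine progressive encoding
	EncodeJpegChromaDownsampling(false),
)
if s.Err() != nil {
	panic(s.Err())
}
_ = jpeg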
type EncodePngAttr ¶
type EncodePngAttr func(optionalAttr)
EncodePngAttr is an optional argument to EncodePng.
func EncodePngCompression ¶
func EncodePngCompression(value int64) EncodePngAttr
EncodePngCompression sets the optional compression attribute to value.
value: Compression level. If not specified, defaults to -1
type EncodeProtoAttr ¶
type EncodeProtoAttr func(optionalAttr)
EncodeProtoAttr is an optional argument to EncodeProto.
func EncodeProtoDescriptorSource ¶
func EncodeProtoDescriptorSource(value string) EncodeProtoAttr
EncodeProtoDescriptorSource sets the optional descriptor_source attribute to value. If not specified, defaults to "local://"
type EnqueueTPUEmbeddingArbitraryTensorBatchAttr ¶
type EnqueueTPUEmbeddingArbitraryTensorBatchAttr func(optionalAttr)
EnqueueTPUEmbeddingArbitraryTensorBatchAttr is an optional argument to EnqueueTPUEmbeddingArbitraryTensorBatch.
func EnqueueTPUEmbeddingArbitraryTensorBatchCombiners ¶
func EnqueueTPUEmbeddingArbitraryTensorBatchCombiners(value []string) EnqueueTPUEmbeddingArbitraryTensorBatchAttr
EnqueueTPUEmbeddingArbitraryTensorBatchCombiners sets the optional combiners attribute to value.
value: A list of string scalars, one for each embedding table, specifying how to normalize the embedding activations after weighted summation. Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have the sum of the weights be 0 for 'mean' or the sum of the squared weights be 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for all tables. If not specified, defaults to {}
func EnqueueTPUEmbeddingArbitraryTensorBatchDeviceOrdinal ¶
func EnqueueTPUEmbeddingArbitraryTensorBatchDeviceOrdinal(value int64) EnqueueTPUEmbeddingArbitraryTensorBatchAttr
EnqueueTPUEmbeddingArbitraryTensorBatchDeviceOrdinal sets the optional device_ordinal attribute to value.
value: The TPU device to use. Should be >= 0 and less than the number of TPU cores in the task on which the node is placed. If not specified, defaults to -1
type EnqueueTPUEmbeddingBatchAttr ¶
type EnqueueTPUEmbeddingBatchAttr func(optionalAttr)
EnqueueTPUEmbeddingBatchAttr is an optional argument to EnqueueTPUEmbeddingBatch.
func EnqueueTPUEmbeddingBatchCombiners ¶
func EnqueueTPUEmbeddingBatchCombiners(value []string) EnqueueTPUEmbeddingBatchAttr
EnqueueTPUEmbeddingBatchCombiners sets the optional combiners attribute to value.
value: A list of string scalars, one for each embedding table, specifying how to normalize the embedding activations after weighted summation. Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have the sum of the weights be 0 for 'mean' or the sum of the squared weights be 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for all tables. If not specified, defaults to {}
func EnqueueTPUEmbeddingBatchDeviceOrdinal ¶
func EnqueueTPUEmbeddingBatchDeviceOrdinal(value int64) EnqueueTPUEmbeddingBatchAttr
EnqueueTPUEmbeddingBatchDeviceOrdinal sets the optional device_ordinal attribute to value.
value: The TPU device to use. This should be -1 when the Op is running on a TPU device, and >= 0 when the Op is running on the CPU device. If not specified, defaults to -1
type EnqueueTPUEmbeddingIntegerBatchAttr ¶
type EnqueueTPUEmbeddingIntegerBatchAttr func(optionalAttr)
EnqueueTPUEmbeddingIntegerBatchAttr is an optional argument to EnqueueTPUEmbeddingIntegerBatch.
func EnqueueTPUEmbeddingIntegerBatchDeviceOrdinal ¶
func EnqueueTPUEmbeddingIntegerBatchDeviceOrdinal(value int64) EnqueueTPUEmbeddingIntegerBatchAttr
EnqueueTPUEmbeddingIntegerBatchDeviceOrdinal sets the optional device_ordinal attribute to value.
value: The TPU device to use. Should be >= 0 and less than the number of TPU cores in the task on which the node is placed. If not specified, defaults to -1
type EnqueueTPUEmbeddingRaggedTensorBatchAttr ¶
type EnqueueTPUEmbeddingRaggedTensorBatchAttr func(optionalAttr)
EnqueueTPUEmbeddingRaggedTensorBatchAttr is an optional argument to EnqueueTPUEmbeddingRaggedTensorBatch.
func EnqueueTPUEmbeddingRaggedTensorBatchCombiners ¶
func EnqueueTPUEmbeddingRaggedTensorBatchCombiners(value []string) EnqueueTPUEmbeddingRaggedTensorBatchAttr
EnqueueTPUEmbeddingRaggedTensorBatchCombiners sets the optional combiners attribute to value.
value: A list of string scalars, one for each embedding table, specifying how to normalize the embedding activations after weighted summation. Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have the sum of the weights be 0 for 'mean' or the sum of the squared weights be 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for all tables. If not specified, defaults to {}
func EnqueueTPUEmbeddingRaggedTensorBatchDeviceOrdinal ¶
func EnqueueTPUEmbeddingRaggedTensorBatchDeviceOrdinal(value int64) EnqueueTPUEmbeddingRaggedTensorBatchAttr
EnqueueTPUEmbeddingRaggedTensorBatchDeviceOrdinal sets the optional device_ordinal attribute to value.
value: The TPU device to use. Should be >= 0 and less than the number of TPU cores in the task on which the node is placed. If not specified, defaults to -1
func EnqueueTPUEmbeddingRaggedTensorBatchMaxSequenceLengths ¶
func EnqueueTPUEmbeddingRaggedTensorBatchMaxSequenceLengths(value []int64) EnqueueTPUEmbeddingRaggedTensorBatchAttr
EnqueueTPUEmbeddingRaggedTensorBatchMaxSequenceLengths sets the optional max_sequence_lengths attribute to value. If not specified, defaults to {}
func EnqueueTPUEmbeddingRaggedTensorBatchNumFeatures ¶
func EnqueueTPUEmbeddingRaggedTensorBatchNumFeatures(value []int64) EnqueueTPUEmbeddingRaggedTensorBatchAttr
EnqueueTPUEmbeddingRaggedTensorBatchNumFeatures sets the optional num_features attribute to value. If not specified, defaults to {}
type EnqueueTPUEmbeddingSparseBatchAttr ¶
type EnqueueTPUEmbeddingSparseBatchAttr func(optionalAttr)
EnqueueTPUEmbeddingSparseBatchAttr is an optional argument to EnqueueTPUEmbeddingSparseBatch.
func EnqueueTPUEmbeddingSparseBatchCombiners ¶
func EnqueueTPUEmbeddingSparseBatchCombiners(value []string) EnqueueTPUEmbeddingSparseBatchAttr
EnqueueTPUEmbeddingSparseBatchCombiners sets the optional combiners attribute to value.
value: A list of string scalars, one for each embedding table, specifying how to normalize the embedding activations after weighted summation. Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have the sum of the weights be 0 for 'mean' or the sum of the squared weights be 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for all tables. If not specified, defaults to {}
func EnqueueTPUEmbeddingSparseBatchDeviceOrdinal ¶
func EnqueueTPUEmbeddingSparseBatchDeviceOrdinal(value int64) EnqueueTPUEmbeddingSparseBatchAttr
EnqueueTPUEmbeddingSparseBatchDeviceOrdinal sets the optional device_ordinal attribute to value.
value: The TPU device to use. Should be >= 0 and less than the number of TPU cores in the task on which the node is placed. If not specified, defaults to -1
type EnqueueTPUEmbeddingSparseTensorBatchAttr ¶
type EnqueueTPUEmbeddingSparseTensorBatchAttr func(optionalAttr)
EnqueueTPUEmbeddingSparseTensorBatchAttr is an optional argument to EnqueueTPUEmbeddingSparseTensorBatch.
func EnqueueTPUEmbeddingSparseTensorBatchCombiners ¶
func EnqueueTPUEmbeddingSparseTensorBatchCombiners(value []string) EnqueueTPUEmbeddingSparseTensorBatchAttr
EnqueueTPUEmbeddingSparseTensorBatchCombiners sets the optional combiners attribute to value.
value: A list of string scalars, one for each embedding table, specifying how to normalize the embedding activations after weighted summation. Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have the sum of the weights be 0 for 'mean' or the sum of the squared weights be 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for all tables. If not specified, defaults to {}
func EnqueueTPUEmbeddingSparseTensorBatchDeviceOrdinal ¶
func EnqueueTPUEmbeddingSparseTensorBatchDeviceOrdinal(value int64) EnqueueTPUEmbeddingSparseTensorBatchAttr
EnqueueTPUEmbeddingSparseTensorBatchDeviceOrdinal sets the optional device_ordinal attribute to value.
value: The TPU device to use. Should be >= 0 and less than the number of TPU cores in the task on which the node is placed. If not specified, defaults to -1
func EnqueueTPUEmbeddingSparseTensorBatchMaxSequenceLengths ¶
func EnqueueTPUEmbeddingSparseTensorBatchMaxSequenceLengths(value []int64) EnqueueTPUEmbeddingSparseTensorBatchAttr
EnqueueTPUEmbeddingSparseTensorBatchMaxSequenceLengths sets the optional max_sequence_lengths attribute to value. If not specified, defaults to {}
func EnqueueTPUEmbeddingSparseTensorBatchNumFeatures ¶
func EnqueueTPUEmbeddingSparseTensorBatchNumFeatures(value []int64) EnqueueTPUEmbeddingSparseTensorBatchAttr
EnqueueTPUEmbeddingSparseTensorBatchNumFeatures sets the optional num_features attribute to value. If not specified, defaults to {}
type EnterAttr ¶
type EnterAttr func(optionalAttr)
EnterAttr is an optional argument to Enter.
func EnterIsConstant ¶
func EnterIsConstant(value bool) EnterAttr
EnterIsConstant sets the optional is_constant attribute to value.
value: If true, the output is constant within the child frame. If not specified, defaults to false
func EnterParallelIterations ¶
func EnterParallelIterations(value int64) EnterAttr
EnterParallelIterations sets the optional parallel_iterations attribute to value.
value: The number of iterations allowed to run in parallel. If not specified, defaults to 10
type EqualAttr ¶
type EqualAttr func(optionalAttr)
EqualAttr is an optional argument to Equal.
func EqualIncompatibleShapeError ¶
func EqualIncompatibleShapeError(value bool) EqualAttr
EqualIncompatibleShapeError sets the optional incompatible_shape_error attribute to value. If not specified, defaults to true
type EuclideanNormAttr ¶
type EuclideanNormAttr func(optionalAttr)
EuclideanNormAttr is an optional argument to EuclideanNorm.
func EuclideanNormKeepDims ¶
func EuclideanNormKeepDims(value bool) EuclideanNormAttr
EuclideanNormKeepDims sets the optional keep_dims attribute to value.
value: If true, retain reduced dimensions with length 1. If not specified, defaults to false
type ExperimentalAutoShardDatasetAttr ¶
type ExperimentalAutoShardDatasetAttr func(optionalAttr)
ExperimentalAutoShardDatasetAttr is an optional argument to ExperimentalAutoShardDataset.
func ExperimentalAutoShardDatasetAutoShardPolicy ¶
func ExperimentalAutoShardDatasetAutoShardPolicy(value int64) ExperimentalAutoShardDatasetAttr
ExperimentalAutoShardDatasetAutoShardPolicy sets the optional auto_shard_policy attribute to value. If not specified, defaults to 0
type ExperimentalIgnoreErrorsDatasetAttr ¶
type ExperimentalIgnoreErrorsDatasetAttr func(optionalAttr)
ExperimentalIgnoreErrorsDatasetAttr is an optional argument to ExperimentalIgnoreErrorsDataset.
func ExperimentalIgnoreErrorsDatasetLogWarning ¶
func ExperimentalIgnoreErrorsDatasetLogWarning(value bool) ExperimentalIgnoreErrorsDatasetAttr
ExperimentalIgnoreErrorsDatasetLogWarning sets the optional log_warning attribute to value. If not specified, defaults to false
type ExperimentalParseExampleDatasetAttr ¶
type ExperimentalParseExampleDatasetAttr func(optionalAttr)
ExperimentalParseExampleDatasetAttr is an optional argument to ExperimentalParseExampleDataset.
func ExperimentalParseExampleDatasetSloppy ¶
func ExperimentalParseExampleDatasetSloppy(value bool) ExperimentalParseExampleDatasetAttr
ExperimentalParseExampleDatasetSloppy sets the optional sloppy attribute to value. If not specified, defaults to false
type ExperimentalRebatchDatasetAttr ¶
type ExperimentalRebatchDatasetAttr func(optionalAttr)
ExperimentalRebatchDatasetAttr is an optional argument to ExperimentalRebatchDataset.
func ExperimentalRebatchDatasetUseFallback ¶
func ExperimentalRebatchDatasetUseFallback(value bool) ExperimentalRebatchDatasetAttr
ExperimentalRebatchDatasetUseFallback sets the optional use_fallback attribute to value. If not specified, defaults to true
type ExperimentalStatsAggregatorHandleAttr ¶
type ExperimentalStatsAggregatorHandleAttr func(optionalAttr)
ExperimentalStatsAggregatorHandleAttr is an optional argument to ExperimentalStatsAggregatorHandle.
func ExperimentalStatsAggregatorHandleContainer ¶
func ExperimentalStatsAggregatorHandleContainer(value string) ExperimentalStatsAggregatorHandleAttr
ExperimentalStatsAggregatorHandleContainer sets the optional container attribute to value. If not specified, defaults to ""
func ExperimentalStatsAggregatorHandleSharedName ¶
func ExperimentalStatsAggregatorHandleSharedName(value string) ExperimentalStatsAggregatorHandleAttr
ExperimentalStatsAggregatorHandleSharedName sets the optional shared_name attribute to value. If not specified, defaults to ""
type ExperimentalThreadPoolHandleAttr ¶
type ExperimentalThreadPoolHandleAttr func(optionalAttr)
ExperimentalThreadPoolHandleAttr is an optional argument to ExperimentalThreadPoolHandle.
func ExperimentalThreadPoolHandleContainer ¶
func ExperimentalThreadPoolHandleContainer(value string) ExperimentalThreadPoolHandleAttr
ExperimentalThreadPoolHandleContainer sets the optional container attribute to value. If not specified, defaults to ""
func ExperimentalThreadPoolHandleMaxIntraOpParallelism ¶
func ExperimentalThreadPoolHandleMaxIntraOpParallelism(value int64) ExperimentalThreadPoolHandleAttr
ExperimentalThreadPoolHandleMaxIntraOpParallelism sets the optional max_intra_op_parallelism attribute to value.
value: The maximum degree of parallelism to use within operations that execute on this threadpool. If not specified, defaults to 1
func ExperimentalThreadPoolHandleSharedName ¶
func ExperimentalThreadPoolHandleSharedName(value string) ExperimentalThreadPoolHandleAttr
ExperimentalThreadPoolHandleSharedName sets the optional shared_name attribute to value. If not specified, defaults to ""
type ExtractGlimpseAttr ¶
type ExtractGlimpseAttr func(optionalAttr)
ExtractGlimpseAttr is an optional argument to ExtractGlimpse.
func ExtractGlimpseCentered ¶
func ExtractGlimpseCentered(value bool) ExtractGlimpseAttr
ExtractGlimpseCentered sets the optional centered attribute to value.
value: indicates if the offset coordinates are centered relative to the image, in which case the (0, 0) offset is relative to the center of the input images. If false, the (0,0) offset corresponds to the upper left corner of the input images. If not specified, defaults to true
func ExtractGlimpseNoise ¶
func ExtractGlimpseNoise(value string) ExtractGlimpseAttr
ExtractGlimpseNoise sets the optional noise attribute to value.
value: indicates if the noise should be `uniform`, `gaussian`, or `zero`. The default is `uniform`, which means the noise type will be decided by `uniform_noise`. If not specified, defaults to "uniform"
func ExtractGlimpseNormalized ¶
func ExtractGlimpseNormalized(value bool) ExtractGlimpseAttr
ExtractGlimpseNormalized sets the optional normalized attribute to value.
value: indicates if the offset coordinates are normalized. If not specified, defaults to true
func ExtractGlimpseUniformNoise ¶
func ExtractGlimpseUniformNoise(value bool) ExtractGlimpseAttr
ExtractGlimpseUniformNoise sets the optional uniform_noise attribute to value.
value: indicates if the noise should be generated using a uniform distribution or a Gaussian distribution. If not specified, defaults to true
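A minimal sketch, assuming ExtractGlimpse follows the package's usual wrapper form ExtractGlimpse(scope, input, size, offsets, optional ...ExtractGlimpseAttr); the glimpse size and noise choice are illustrative.
s := NewScope()
images := Placeholder(s, tf.Float)  // [batch, height, width, channels]
size := Const(s, []int32{64, 64})   // glimpse height and width
offsets := Placeholder(s, tf.Float) // [batch, 2] glimpse centers
glimpses := ExtractGlimpse(s, images, size, offsets,
	ExtractGlimpseCentered(true),
	ExtractGlimpseNormalized(true),
	ExtractGlimpseNoise("zero"), // pad out-of-range pixels with zeros
)
if s.Err() != nil {
	panic(s.Err())
}
_ = glimpses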
type ExtractGlimpseV2Attr ¶
type ExtractGlimpseV2Attr func(optionalAttr)
ExtractGlimpseV2Attr is an optional argument to ExtractGlimpseV2.
func ExtractGlimpseV2Centered ¶
func ExtractGlimpseV2Centered(value bool) ExtractGlimpseV2Attr
ExtractGlimpseV2Centered sets the optional centered attribute to value.
value: indicates if the offset coordinates are centered relative to the image, in which case the (0, 0) offset is relative to the center of the input images. If false, the (0,0) offset corresponds to the upper left corner of the input images. If not specified, defaults to true
func ExtractGlimpseV2Noise ¶
func ExtractGlimpseV2Noise(value string) ExtractGlimpseV2Attr
ExtractGlimpseV2Noise sets the optional noise attribute to value.
value: indicates if the noise should be `uniform`, `gaussian`, or `zero`. The default is `uniform`, which means the noise type will be decided by `uniform_noise`. If not specified, defaults to "uniform"
func ExtractGlimpseV2Normalized ¶
func ExtractGlimpseV2Normalized(value bool) ExtractGlimpseV2Attr
ExtractGlimpseV2Normalized sets the optional normalized attribute to value.
value: indicates if the offset coordinates are normalized. If not specified, defaults to true
func ExtractGlimpseV2UniformNoise ¶
func ExtractGlimpseV2UniformNoise(value bool) ExtractGlimpseV2Attr
ExtractGlimpseV2UniformNoise sets the optional uniform_noise attribute to value.
value: indicates if the noise should be generated using a uniform distribution or a Gaussian distribution. If not specified, defaults to true
type ExtractJpegShapeAttr ¶
type ExtractJpegShapeAttr func(optionalAttr)
ExtractJpegShapeAttr is an optional argument to ExtractJpegShape.
func ExtractJpegShapeOutputType ¶
func ExtractJpegShapeOutputType(value tf.DataType) ExtractJpegShapeAttr
ExtractJpegShapeOutputType sets the optional output_type attribute to value.
value: (Optional) The output type of the operation (int32 or int64). Defaults to int32. If not specified, defaults to DT_INT32
type FIFOQueueV2Attr ¶
type FIFOQueueV2Attr func(optionalAttr)
FIFOQueueV2Attr is an optional argument to FIFOQueueV2.
func FIFOQueueV2Capacity ¶
func FIFOQueueV2Capacity(value int64) FIFOQueueV2Attr
FIFOQueueV2Capacity sets the optional capacity attribute to value.
value: The upper bound on the number of elements in this queue. Negative numbers mean no limit. If not specified, defaults to -1
func FIFOQueueV2Container ¶
func FIFOQueueV2Container(value string) FIFOQueueV2Attr
FIFOQueueV2Container sets the optional container attribute to value.
value: If non-empty, this queue is placed in the given container. Otherwise, a default container is used. If not specified, defaults to ""
func FIFOQueueV2Shapes ¶
func FIFOQueueV2Shapes(value []tf.Shape) FIFOQueueV2Attr
FIFOQueueV2Shapes sets the optional shapes attribute to value.
value: The shape of each component in a value. The length of this attr must be either 0 or the same as the length of component_types. If the length of this attr is 0, the shapes of queue elements are not constrained, and only one element may be dequeued at a time. If not specified, defaults to {}
REQUIRES: len(value) >= 0
func FIFOQueueV2SharedName ¶
func FIFOQueueV2SharedName(value string) FIFOQueueV2Attr
FIFOQueueV2SharedName sets the optional shared_name attribute to value.
value: If non-empty, this queue will be shared under the given name across multiple sessions. If not specified, defaults to ""
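A minimal sketch, assuming FIFOQueueV2 follows the package's usual wrapper form FIFOQueueV2(scope, component_types, optional ...FIFOQueueV2Attr) and returns the queue handle; the capacity and shared name are illustrative.
s := NewScope()
queue := FIFOQueueV2(s, []tf.DataType{tf.Float, tf.Int32},
	FIFOQueueV2Capacity(128), // bound the queue at 128 elements
	FIFOQueueV2Shapes([]tf.Shape{tf.ScalarShape(), tf.ScalarShape()}),
	FIFOQueueV2SharedName("shared_input_queue"), // illustrative shared name
)
if s.Err() != nil {
	panic(s.Err())
}
_ = queue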
type FakeQuantWithMinMaxArgsAttr ¶
type FakeQuantWithMinMaxArgsAttr func(optionalAttr)
FakeQuantWithMinMaxArgsAttr is an optional argument to FakeQuantWithMinMaxArgs.
func FakeQuantWithMinMaxArgsMax ¶
func FakeQuantWithMinMaxArgsMax(value float32) FakeQuantWithMinMaxArgsAttr
FakeQuantWithMinMaxArgsMax sets the optional max attribute to value. If not specified, defaults to 6
func FakeQuantWithMinMaxArgsMin ¶
func FakeQuantWithMinMaxArgsMin(value float32) FakeQuantWithMinMaxArgsAttr
FakeQuantWithMinMaxArgsMin sets the optional min attribute to value. If not specified, defaults to -6
func FakeQuantWithMinMaxArgsNarrowRange ¶
func FakeQuantWithMinMaxArgsNarrowRange(value bool) FakeQuantWithMinMaxArgsAttr
FakeQuantWithMinMaxArgsNarrowRange sets the optional narrow_range attribute to value. If not specified, defaults to false
func FakeQuantWithMinMaxArgsNumBits ¶
func FakeQuantWithMinMaxArgsNumBits(value int64) FakeQuantWithMinMaxArgsAttr
FakeQuantWithMinMaxArgsNumBits sets the optional num_bits attribute to value. If not specified, defaults to 8
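A minimal sketch, assuming FakeQuantWithMinMaxArgs follows the package's usual wrapper form FakeQuantWithMinMaxArgs(scope, inputs, optional ...FakeQuantWithMinMaxArgsAttr); the [-1, 1] range overrides the default [-6, 6] clamp purely for illustration.
s := NewScope()
x := Placeholder(s, tf.Float)
q := FakeQuantWithMinMaxArgs(s, x,
	FakeQuantWithMinMaxArgsMin(-1.0),
	FakeQuantWithMinMaxArgsMax(1.0),
	FakeQuantWithMinMaxArgsNumBits(8),
	FakeQuantWithMinMaxArgsNarrowRange(true),
)
if s.Err() != nil {
	panic(s.Err())
}
_ = q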
type FakeQuantWithMinMaxArgsGradientAttr ¶
type FakeQuantWithMinMaxArgsGradientAttr func(optionalAttr)
FakeQuantWithMinMaxArgsGradientAttr is an optional argument to FakeQuantWithMinMaxArgsGradient.
func FakeQuantWithMinMaxArgsGradientMax ¶
func FakeQuantWithMinMaxArgsGradientMax(value float32) FakeQuantWithMinMaxArgsGradientAttr
FakeQuantWithMinMaxArgsGradientMax sets the optional max attribute to value. If not specified, defaults to 6
func FakeQuantWithMinMaxArgsGradientMin ¶
func FakeQuantWithMinMaxArgsGradientMin(value float32) FakeQuantWithMinMaxArgsGradientAttr
FakeQuantWithMinMaxArgsGradientMin sets the optional min attribute to value. If not specified, defaults to -6
func FakeQuantWithMinMaxArgsGradientNarrowRange ¶
func FakeQuantWithMinMaxArgsGradientNarrowRange(value bool) FakeQuantWithMinMaxArgsGradientAttr
FakeQuantWithMinMaxArgsGradientNarrowRange sets the optional narrow_range attribute to value. If not specified, defaults to false
func FakeQuantWithMinMaxArgsGradientNumBits ¶
func FakeQuantWithMinMaxArgsGradientNumBits(value int64) FakeQuantWithMinMaxArgsGradientAttr
FakeQuantWithMinMaxArgsGradientNumBits sets the optional num_bits attribute to value. If not specified, defaults to 8
type FakeQuantWithMinMaxVarsAttr ¶
type FakeQuantWithMinMaxVarsAttr func(optionalAttr)
FakeQuantWithMinMaxVarsAttr is an optional argument to FakeQuantWithMinMaxVars.
func FakeQuantWithMinMaxVarsNarrowRange ¶
func FakeQuantWithMinMaxVarsNarrowRange(value bool) FakeQuantWithMinMaxVarsAttr
FakeQuantWithMinMaxVarsNarrowRange sets the optional narrow_range attribute to value. If not specified, defaults to false
func FakeQuantWithMinMaxVarsNumBits ¶
func FakeQuantWithMinMaxVarsNumBits(value int64) FakeQuantWithMinMaxVarsAttr
FakeQuantWithMinMaxVarsNumBits sets the optional num_bits attribute to value. If not specified, defaults to 8
type FakeQuantWithMinMaxVarsGradientAttr ¶
type FakeQuantWithMinMaxVarsGradientAttr func(optionalAttr)
FakeQuantWithMinMaxVarsGradientAttr is an optional argument to FakeQuantWithMinMaxVarsGradient.
func FakeQuantWithMinMaxVarsGradientNarrowRange ¶
func FakeQuantWithMinMaxVarsGradientNarrowRange(value bool) FakeQuantWithMinMaxVarsGradientAttr
FakeQuantWithMinMaxVarsGradientNarrowRange sets the optional narrow_range attribute to value.
value: Whether to quantize into 2^num_bits - 1 distinct values. If not specified, defaults to false
func FakeQuantWithMinMaxVarsGradientNumBits ¶
func FakeQuantWithMinMaxVarsGradientNumBits(value int64) FakeQuantWithMinMaxVarsGradientAttr
FakeQuantWithMinMaxVarsGradientNumBits sets the optional num_bits attribute to value.
value: The bitwidth of the quantization; between 2 and 8, inclusive. If not specified, defaults to 8
type FakeQuantWithMinMaxVarsPerChannelAttr ¶
type FakeQuantWithMinMaxVarsPerChannelAttr func(optionalAttr)
FakeQuantWithMinMaxVarsPerChannelAttr is an optional argument to FakeQuantWithMinMaxVarsPerChannel.
func FakeQuantWithMinMaxVarsPerChannelNarrowRange ¶
func FakeQuantWithMinMaxVarsPerChannelNarrowRange(value bool) FakeQuantWithMinMaxVarsPerChannelAttr
FakeQuantWithMinMaxVarsPerChannelNarrowRange sets the optional narrow_range attribute to value. If not specified, defaults to false
func FakeQuantWithMinMaxVarsPerChannelNumBits ¶
func FakeQuantWithMinMaxVarsPerChannelNumBits(value int64) FakeQuantWithMinMaxVarsPerChannelAttr
FakeQuantWithMinMaxVarsPerChannelNumBits sets the optional num_bits attribute to value. If not specified, defaults to 8
type FakeQuantWithMinMaxVarsPerChannelGradientAttr ¶
type FakeQuantWithMinMaxVarsPerChannelGradientAttr func(optionalAttr)
FakeQuantWithMinMaxVarsPerChannelGradientAttr is an optional argument to FakeQuantWithMinMaxVarsPerChannelGradient.
func FakeQuantWithMinMaxVarsPerChannelGradientNarrowRange ¶
func FakeQuantWithMinMaxVarsPerChannelGradientNarrowRange(value bool) FakeQuantWithMinMaxVarsPerChannelGradientAttr
FakeQuantWithMinMaxVarsPerChannelGradientNarrowRange sets the optional narrow_range attribute to value.
value: Whether to quantize into 2^num_bits - 1 distinct values. If not specified, defaults to false
func FakeQuantWithMinMaxVarsPerChannelGradientNumBits ¶
func FakeQuantWithMinMaxVarsPerChannelGradientNumBits(value int64) FakeQuantWithMinMaxVarsPerChannelGradientAttr
FakeQuantWithMinMaxVarsPerChannelGradientNumBits sets the optional num_bits attribute to value.
value: The bitwidth of the quantization; between 2 and 16, inclusive. If not specified, defaults to 8
type FinalizeDatasetAttr ¶
type FinalizeDatasetAttr func(optionalAttr)
FinalizeDatasetAttr is an optional argument to FinalizeDataset.
func FinalizeDatasetHasCapturedRef ¶
func FinalizeDatasetHasCapturedRef(value bool) FinalizeDatasetAttr
FinalizeDatasetHasCapturedRef sets the optional has_captured_ref attribute to value. If not specified, defaults to false
type FixedLengthRecordDatasetAttr ¶
type FixedLengthRecordDatasetAttr func(optionalAttr)
FixedLengthRecordDatasetAttr is an optional argument to FixedLengthRecordDataset.
func FixedLengthRecordDatasetMetadata ¶
func FixedLengthRecordDatasetMetadata(value string) FixedLengthRecordDatasetAttr
FixedLengthRecordDatasetMetadata sets the optional metadata attribute to value. If not specified, defaults to ""
type FixedLengthRecordReaderV2Attr ¶
type FixedLengthRecordReaderV2Attr func(optionalAttr)
FixedLengthRecordReaderV2Attr is an optional argument to FixedLengthRecordReaderV2.
func FixedLengthRecordReaderV2Container ¶
func FixedLengthRecordReaderV2Container(value string) FixedLengthRecordReaderV2Attr
FixedLengthRecordReaderV2Container sets the optional container attribute to value.
value: If non-empty, this reader is placed in the given container. Otherwise, a default container is used. If not specified, defaults to ""
func FixedLengthRecordReaderV2Encoding ¶
func FixedLengthRecordReaderV2Encoding(value string) FixedLengthRecordReaderV2Attr
FixedLengthRecordReaderV2Encoding sets the optional encoding attribute to value.
value: The type of encoding for the file. Currently ZLIB and GZIP are supported. If not specified, defaults to "" (no encoding)
func FixedLengthRecordReaderV2FooterBytes ¶
func FixedLengthRecordReaderV2FooterBytes(value int64) FixedLengthRecordReaderV2Attr
FixedLengthRecordReaderV2FooterBytes sets the optional footer_bytes attribute to value.
value: Number of bytes in the footer. If not specified, defaults to 0
func FixedLengthRecordReaderV2HeaderBytes ¶
func FixedLengthRecordReaderV2HeaderBytes(value int64) FixedLengthRecordReaderV2Attr
FixedLengthRecordReaderV2HeaderBytes sets the optional header_bytes attribute to value.
value: Number of bytes in the header. If not specified, defaults to 0
func FixedLengthRecordReaderV2HopBytes ¶
func FixedLengthRecordReaderV2HopBytes(value int64) FixedLengthRecordReaderV2Attr
FixedLengthRecordReaderV2HopBytes sets the optional hop_bytes attribute to value.
value: Number of bytes to hop before each read. Default of 0 means using record_bytes. If not specified, defaults to 0
func FixedLengthRecordReaderV2SharedName ¶
func FixedLengthRecordReaderV2SharedName(value string) FixedLengthRecordReaderV2Attr
FixedLengthRecordReaderV2SharedName sets the optional shared_name attribute to value.
value: If non-empty, this reader is named in the given bucket with this shared_name. Otherwise, the node name is used instead. If not specified, defaults to ""
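A rough usage sketch combining the attributes above (record_bytes is assumed to be the only required argument, as with the other reader ops in this package; the sizes and names are illustrative):

	s := NewScope()
	reader := FixedLengthRecordReaderV2(s, 16, // record_bytes (assumed required argument)
		FixedLengthRecordReaderV2HeaderBytes(4),
		FixedLengthRecordReaderV2FooterBytes(4),
		FixedLengthRecordReaderV2Encoding("GZIP"),
		FixedLengthRecordReaderV2SharedName("records"))
	_ = reader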
type FixedUnigramCandidateSamplerAttr ¶
type FixedUnigramCandidateSamplerAttr func(optionalAttr)
FixedUnigramCandidateSamplerAttr is an optional argument to FixedUnigramCandidateSampler.
func FixedUnigramCandidateSamplerDistortion ¶
func FixedUnigramCandidateSamplerDistortion(value float32) FixedUnigramCandidateSamplerAttr
FixedUnigramCandidateSamplerDistortion sets the optional distortion attribute to value.
value: The distortion is used to skew the unigram probability distribution. Each weight is first raised to the distortion's power before adding to the internal unigram distribution. As a result, distortion = 1.0 gives regular unigram sampling (as defined by the vocab file), and distortion = 0.0 gives a uniform distribution. If not specified, defaults to 1
func FixedUnigramCandidateSamplerNumReservedIds ¶
func FixedUnigramCandidateSamplerNumReservedIds(value int64) FixedUnigramCandidateSamplerAttr
FixedUnigramCandidateSamplerNumReservedIds sets the optional num_reserved_ids attribute to value.
value: Optionally some reserved IDs can be added in the range [0, ..., num_reserved_ids) by the users. One use case is that a special unknown word token is used as ID 0. These IDs will have a sampling probability of 0. If not specified, defaults to 0
func FixedUnigramCandidateSamplerNumShards ¶
func FixedUnigramCandidateSamplerNumShards(value int64) FixedUnigramCandidateSamplerAttr
FixedUnigramCandidateSamplerNumShards sets the optional num_shards attribute to value.
value: A sampler can be used to sample from a subset of the original range in order to speed up the whole computation through parallelism. This parameter (together with 'shard') indicates the number of partitions that are being used in the overall computation. If not specified, defaults to 1
REQUIRES: value >= 1
func FixedUnigramCandidateSamplerSeed ¶
func FixedUnigramCandidateSamplerSeed(value int64) FixedUnigramCandidateSamplerAttr
FixedUnigramCandidateSamplerSeed sets the optional seed attribute to value.
value: If either seed or seed2 are set to be non-zero, the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed. If not specified, defaults to 0
func FixedUnigramCandidateSamplerSeed2 ¶
func FixedUnigramCandidateSamplerSeed2(value int64) FixedUnigramCandidateSamplerAttr
FixedUnigramCandidateSamplerSeed2 sets the optional seed2 attribute to value.
value: A second seed to avoid seed collision. If not specified, defaults to 0
func FixedUnigramCandidateSamplerShard ¶
func FixedUnigramCandidateSamplerShard(value int64) FixedUnigramCandidateSamplerAttr
FixedUnigramCandidateSamplerShard sets the optional shard attribute to value.
value: A sampler can be used to sample from a subset of the original range in order to speed up the whole computation through parallelism. This parameter (together with 'num_shards') indicates the particular partition number of a sampler op, when partitioning is being used. If not specified, defaults to 0
REQUIRES: value >= 0
func FixedUnigramCandidateSamplerUnigrams ¶
func FixedUnigramCandidateSamplerUnigrams(value []float32) FixedUnigramCandidateSamplerAttr
FixedUnigramCandidateSamplerUnigrams sets the optional unigrams attribute to value.
value: A list of unigram counts or probabilities, one per ID in sequential order. Exactly one of vocab_file and unigrams should be passed to this op. If not specified, defaults to {}
func FixedUnigramCandidateSamplerVocabFile ¶
func FixedUnigramCandidateSamplerVocabFile(value string) FixedUnigramCandidateSamplerAttr
FixedUnigramCandidateSamplerVocabFile sets the optional vocab_file attribute to value.
value: Each valid line in this file (which should have a CSV-like format) corresponds to a valid word ID. IDs are in sequential order, starting from num_reserved_ids. The last entry in each line is expected to be a value corresponding to the count or relative probability. Exactly one of vocab_file and unigrams needs to be passed to this op. If not specified, defaults to ""
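A hedged sketch of how these attributes might be combined; the required-argument order is assumed to follow the candidate-sampler pattern used elsewhere in this package, and the vocab file path is hypothetical:

	s := NewScope()
	trueClasses := Placeholder(s, tf.Int64) // [batch, num_true]
	sampled, trueExpected, sampledExpected := FixedUnigramCandidateSampler(s,
		trueClasses,
		1,     // num_true
		64,    // num_sampled
		true,  // unique
		10000, // range_max
		FixedUnigramCandidateSamplerVocabFile("vocab.csv"), // hypothetical file
		FixedUnigramCandidateSamplerDistortion(0.75),
		FixedUnigramCandidateSamplerNumReservedIds(1))
	_, _, _ = sampled, trueExpected, sampledExpected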
type FractionalAvgPoolAttr ¶
type FractionalAvgPoolAttr func(optionalAttr)
FractionalAvgPoolAttr is an optional argument to FractionalAvgPool.
func FractionalAvgPoolDeterministic ¶
func FractionalAvgPoolDeterministic(value bool) FractionalAvgPoolAttr
FractionalAvgPoolDeterministic sets the optional deterministic attribute to value.
value: When set to True, a fixed pooling region will be used when iterating over a FractionalAvgPool node in the computation graph. Mainly used in unit test to make FractionalAvgPool deterministic. If not specified, defaults to false
func FractionalAvgPoolOverlapping ¶
func FractionalAvgPoolOverlapping(value bool) FractionalAvgPoolAttr
FractionalAvgPoolOverlapping sets the optional overlapping attribute to value.
value: When set to True, it means when pooling, the values at the boundary of adjacent pooling cells are used by both cells. For example:
`index 0 1 2 3 4`
`value 20 5 16 3 7`
If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. The result would be [41/3, 26/3] for fractional avg pooling. If not specified, defaults to false
func FractionalAvgPoolPseudoRandom ¶
func FractionalAvgPoolPseudoRandom(value bool) FractionalAvgPoolAttr
FractionalAvgPoolPseudoRandom sets the optional pseudo_random attribute to value.
value: When set to True, generates the pooling sequence in a pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for difference between pseudorandom and random. If not specified, defaults to false
func FractionalAvgPoolSeed ¶
func FractionalAvgPoolSeed(value int64) FractionalAvgPoolAttr
FractionalAvgPoolSeed sets the optional seed attribute to value.
value: If either seed or seed2 are set to be non-zero, the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed. If not specified, defaults to 0
func FractionalAvgPoolSeed2 ¶
func FractionalAvgPoolSeed2(value int64) FractionalAvgPoolAttr
FractionalAvgPoolSeed2 sets the optional seed2 attribute to value.
value: A second seed to avoid seed collision. If not specified, defaults to 0
type FractionalAvgPoolGradAttr ¶
type FractionalAvgPoolGradAttr func(optionalAttr)
FractionalAvgPoolGradAttr is an optional argument to FractionalAvgPoolGrad.
func FractionalAvgPoolGradOverlapping ¶
func FractionalAvgPoolGradOverlapping(value bool) FractionalAvgPoolGradAttr
FractionalAvgPoolGradOverlapping sets the optional overlapping attribute to value.
value: When set to True, it means when pooling, the values at the boundary of adjacent pooling cells are used by both cells. For example:
`index 0 1 2 3 4`
`value 20 5 16 3 7`
If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. The result would be [41/3, 26/3] for fractional avg pooling. If not specified, defaults to false
type FractionalMaxPoolAttr ¶
type FractionalMaxPoolAttr func(optionalAttr)
FractionalMaxPoolAttr is an optional argument to FractionalMaxPool.
func FractionalMaxPoolDeterministic ¶
func FractionalMaxPoolDeterministic(value bool) FractionalMaxPoolAttr
FractionalMaxPoolDeterministic sets the optional deterministic attribute to value.
value: When set to True, a fixed pooling region will be used when iterating over a FractionalMaxPool node in the computation graph. Mainly used in unit test to make FractionalMaxPool deterministic. If not specified, defaults to false
func FractionalMaxPoolOverlapping ¶
func FractionalMaxPoolOverlapping(value bool) FractionalMaxPoolAttr
FractionalMaxPoolOverlapping sets the optional overlapping attribute to value.
value: When set to True, it means when pooling, the values at the boundary of adjacent pooling cells are used by both cells. For example:
`index 0 1 2 3 4`
`value 20 5 16 3 7`
If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. The result would be [20, 16] for fractional max pooling. If not specified, defaults to false
func FractionalMaxPoolPseudoRandom ¶
func FractionalMaxPoolPseudoRandom(value bool) FractionalMaxPoolAttr
FractionalMaxPoolPseudoRandom sets the optional pseudo_random attribute to value.
value: When set to True, generates the pooling sequence in a pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for difference between pseudorandom and random. If not specified, defaults to false
func FractionalMaxPoolSeed ¶
func FractionalMaxPoolSeed(value int64) FractionalMaxPoolAttr
FractionalMaxPoolSeed sets the optional seed attribute to value.
value: If either seed or seed2 are set to be non-zero, the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed. If not specified, defaults to 0
func FractionalMaxPoolSeed2 ¶
func FractionalMaxPoolSeed2(value int64) FractionalMaxPoolAttr
FractionalMaxPoolSeed2 sets the optional seed2 attribute to value.
value: A second seed to avoid seed collision. If not specified, defaults to 0
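As a sketch, requesting overlapping, pseudo-random fractional max pooling (the 4-element pooling_ratio argument is assumed from the op's standard signature):

	s := NewScope()
	value := Placeholder(s, tf.Float) // [batch, height, width, channels]
	out, rowSeq, colSeq := FractionalMaxPool(s, value,
		[]float32{1.0, 1.44, 1.44, 1.0}, // pooling_ratio
		FractionalMaxPoolOverlapping(true),
		FractionalMaxPoolPseudoRandom(true),
		FractionalMaxPoolSeed(7))
	_, _, _ = out, rowSeq, colSeq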
type FractionalMaxPoolGradAttr ¶
type FractionalMaxPoolGradAttr func(optionalAttr)
FractionalMaxPoolGradAttr is an optional argument to FractionalMaxPoolGrad.
func FractionalMaxPoolGradOverlapping ¶
func FractionalMaxPoolGradOverlapping(value bool) FractionalMaxPoolGradAttr
FractionalMaxPoolGradOverlapping sets the optional overlapping attribute to value.
value: When set to True, it means when pooling, the values at the boundary of adjacent pooling cells are used by both cells. For example:
`index 0 1 2 3 4`
`value 20 5 16 3 7`
If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. The result would be [20, 16] for fractional max pooling. If not specified, defaults to false
type FusedBatchNormAttr ¶
type FusedBatchNormAttr func(optionalAttr)
FusedBatchNormAttr is an optional argument to FusedBatchNorm.
func FusedBatchNormDataFormat ¶
func FusedBatchNormDataFormat(value string) FusedBatchNormAttr
FusedBatchNormDataFormat sets the optional data_format attribute to value.
value: The data format for x and y. Either "NHWC" (default) or "NCHW". If not specified, defaults to "NHWC"
func FusedBatchNormEpsilon ¶
func FusedBatchNormEpsilon(value float32) FusedBatchNormAttr
FusedBatchNormEpsilon sets the optional epsilon attribute to value.
value: A small float number added to the variance of x. If not specified, defaults to 0.0001
func FusedBatchNormExponentialAvgFactor ¶
func FusedBatchNormExponentialAvgFactor(value float32) FusedBatchNormAttr
FusedBatchNormExponentialAvgFactor sets the optional exponential_avg_factor attribute to value. If not specified, defaults to 1
func FusedBatchNormIsTraining ¶
func FusedBatchNormIsTraining(value bool) FusedBatchNormAttr
FusedBatchNormIsTraining sets the optional is_training attribute to value.
value: A bool value to indicate the operation is for training (default) or inference. If not specified, defaults to true
type FusedBatchNormGradAttr ¶
type FusedBatchNormGradAttr func(optionalAttr)
FusedBatchNormGradAttr is an optional argument to FusedBatchNormGrad.
func FusedBatchNormGradDataFormat ¶
func FusedBatchNormGradDataFormat(value string) FusedBatchNormGradAttr
FusedBatchNormGradDataFormat sets the optional data_format attribute to value.
value: The data format for y_backprop, x, x_backprop. Either "NHWC" (default) or "NCHW". If not specified, defaults to "NHWC"
func FusedBatchNormGradEpsilon ¶
func FusedBatchNormGradEpsilon(value float32) FusedBatchNormGradAttr
FusedBatchNormGradEpsilon sets the optional epsilon attribute to value.
value: A small float number added to the variance of x. If not specified, defaults to 0.0001
func FusedBatchNormGradIsTraining ¶
func FusedBatchNormGradIsTraining(value bool) FusedBatchNormGradAttr
FusedBatchNormGradIsTraining sets the optional is_training attribute to value.
value: A bool value to indicate the operation is for training (default) or inference. If not specified, defaults to true
type FusedBatchNormGradV2Attr ¶
type FusedBatchNormGradV2Attr func(optionalAttr)
FusedBatchNormGradV2Attr is an optional argument to FusedBatchNormGradV2.
func FusedBatchNormGradV2DataFormat ¶
func FusedBatchNormGradV2DataFormat(value string) FusedBatchNormGradV2Attr
FusedBatchNormGradV2DataFormat sets the optional data_format attribute to value.
value: The data format for y_backprop, x, x_backprop. Either "NHWC" (default) or "NCHW". If not specified, defaults to "NHWC"
func FusedBatchNormGradV2Epsilon ¶
func FusedBatchNormGradV2Epsilon(value float32) FusedBatchNormGradV2Attr
FusedBatchNormGradV2Epsilon sets the optional epsilon attribute to value.
value: A small float number added to the variance of x. If not specified, defaults to 0.0001
func FusedBatchNormGradV2IsTraining ¶
func FusedBatchNormGradV2IsTraining(value bool) FusedBatchNormGradV2Attr
FusedBatchNormGradV2IsTraining sets the optional is_training attribute to value.
value: A bool value to indicate the operation is for training (default) or inference. If not specified, defaults to true
type FusedBatchNormGradV3Attr ¶
type FusedBatchNormGradV3Attr func(optionalAttr)
FusedBatchNormGradV3Attr is an optional argument to FusedBatchNormGradV3.
func FusedBatchNormGradV3DataFormat ¶
func FusedBatchNormGradV3DataFormat(value string) FusedBatchNormGradV3Attr
FusedBatchNormGradV3DataFormat sets the optional data_format attribute to value.
value: The data format for y_backprop, x, x_backprop. Either "NHWC" (default) or "NCHW". If not specified, defaults to "NHWC"
func FusedBatchNormGradV3Epsilon ¶
func FusedBatchNormGradV3Epsilon(value float32) FusedBatchNormGradV3Attr
FusedBatchNormGradV3Epsilon sets the optional epsilon attribute to value.
value: A small float number added to the variance of x. If not specified, defaults to 0.0001
func FusedBatchNormGradV3IsTraining ¶
func FusedBatchNormGradV3IsTraining(value bool) FusedBatchNormGradV3Attr
FusedBatchNormGradV3IsTraining sets the optional is_training attribute to value.
value: A bool value to indicate the operation is for training (default) or inference. If not specified, defaults to true
type FusedBatchNormV2Attr ¶
type FusedBatchNormV2Attr func(optionalAttr)
FusedBatchNormV2Attr is an optional argument to FusedBatchNormV2.
func FusedBatchNormV2DataFormat ¶
func FusedBatchNormV2DataFormat(value string) FusedBatchNormV2Attr
FusedBatchNormV2DataFormat sets the optional data_format attribute to value.
value: The data format for x and y. Either "NHWC" (default) or "NCHW". If not specified, defaults to "NHWC"
func FusedBatchNormV2Epsilon ¶
func FusedBatchNormV2Epsilon(value float32) FusedBatchNormV2Attr
FusedBatchNormV2Epsilon sets the optional epsilon attribute to value.
value: A small float number added to the variance of x. If not specified, defaults to 0.0001
func FusedBatchNormV2ExponentialAvgFactor ¶
func FusedBatchNormV2ExponentialAvgFactor(value float32) FusedBatchNormV2Attr
FusedBatchNormV2ExponentialAvgFactor sets the optional exponential_avg_factor attribute to value. If not specified, defaults to 1
func FusedBatchNormV2IsTraining ¶
func FusedBatchNormV2IsTraining(value bool) FusedBatchNormV2Attr
FusedBatchNormV2IsTraining sets the optional is_training attribute to value.
value: A bool value to indicate the operation is for training (default) or inference. If not specified, defaults to true
type FusedBatchNormV3Attr ¶
type FusedBatchNormV3Attr func(optionalAttr)
FusedBatchNormV3Attr is an optional argument to FusedBatchNormV3.
func FusedBatchNormV3DataFormat ¶
func FusedBatchNormV3DataFormat(value string) FusedBatchNormV3Attr
FusedBatchNormV3DataFormat sets the optional data_format attribute to value.
value: The data format for x and y. Either "NHWC" (default) or "NCHW". If not specified, defaults to "NHWC"
func FusedBatchNormV3Epsilon ¶
func FusedBatchNormV3Epsilon(value float32) FusedBatchNormV3Attr
FusedBatchNormV3Epsilon sets the optional epsilon attribute to value.
value: A small float number added to the variance of x. If not specified, defaults to 0.0001
func FusedBatchNormV3ExponentialAvgFactor ¶
func FusedBatchNormV3ExponentialAvgFactor(value float32) FusedBatchNormV3Attr
FusedBatchNormV3ExponentialAvgFactor sets the optional exponential_avg_factor attribute to value. If not specified, defaults to 1
func FusedBatchNormV3IsTraining ¶
func FusedBatchNormV3IsTraining(value bool) FusedBatchNormV3Attr
FusedBatchNormV3IsTraining sets the optional is_training attribute to value.
value: A bool value to indicate the operation is for training (default) or inference. If not specified, defaults to true
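A hedged sketch of a FusedBatchNormV3 call using these attributes; the x/scale/offset/mean/variance inputs and six outputs are assumptions based on the op's usual signature, and the empty mean/variance tensors reflect training mode:

	s := NewScope()
	x := Placeholder(s, tf.Float) // NHWC input
	scale := Const(s, []float32{1, 1, 1})
	offset := Const(s, []float32{0, 0, 0})
	mean := Const(s, []float32{})     // empty when is_training is true
	variance := Const(s, []float32{}) // empty when is_training is true
	y, batchMean, batchVar, r1, r2, r3 := FusedBatchNormV3(s, x, scale, offset, mean, variance,
		FusedBatchNormV3DataFormat("NHWC"),
		FusedBatchNormV3Epsilon(0.001),
		FusedBatchNormV3IsTraining(true))
	_, _, _, _, _, _ = y, batchMean, batchVar, r1, r2, r3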
type FusedResizeAndPadConv2DAttr ¶
type FusedResizeAndPadConv2DAttr func(optionalAttr)
FusedResizeAndPadConv2DAttr is an optional argument to FusedResizeAndPadConv2D.
func FusedResizeAndPadConv2DResizeAlignCorners ¶
func FusedResizeAndPadConv2DResizeAlignCorners(value bool) FusedResizeAndPadConv2DAttr
FusedResizeAndPadConv2DResizeAlignCorners sets the optional resize_align_corners attribute to value.
value: If true, the centers of the 4 corner pixels of the input and output tensors are aligned, preserving the values at the corner pixels. If not specified, defaults to false
type GatherAttr ¶
type GatherAttr func(optionalAttr)
GatherAttr is an optional argument to Gather.
func GatherValidateIndices ¶
func GatherValidateIndices(value bool) GatherAttr
GatherValidateIndices sets the optional validate_indices attribute to value. If not specified, defaults to true
type GatherNdAttr ¶ added in v0.8.2
type GatherNdAttr func(optionalAttr)
GatherNdAttr is an optional argument to GatherNd.
func GatherNdBadIndicesPolicy ¶ added in v0.8.2
func GatherNdBadIndicesPolicy(value string) GatherNdAttr
GatherNdBadIndicesPolicy sets the optional bad_indices_policy attribute to value. If not specified, defaults to ""
type GatherV2Attr ¶
type GatherV2Attr func(optionalAttr)
GatherV2Attr is an optional argument to GatherV2.
func GatherV2BatchDims ¶
func GatherV2BatchDims(value int64) GatherV2Attr
GatherV2BatchDims sets the optional batch_dims attribute to value. If not specified, defaults to 0
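For instance, batch_dims defaults to 0, which is plain gathering along the given axis; a minimal sketch:

	s := NewScope()
	params := Const(s, [][]float32{{1, 2}, {3, 4}, {5, 6}})
	indices := Const(s, []int32{2, 0})
	axis := Const(s, int32(0))
	rows := GatherV2(s, params, indices, axis,
		GatherV2BatchDims(0)) // explicit, same as the default
	_ = rows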
type GenerateBoundingBoxProposalsAttr ¶
type GenerateBoundingBoxProposalsAttr func(optionalAttr)
GenerateBoundingBoxProposalsAttr is an optional argument to GenerateBoundingBoxProposals.
func GenerateBoundingBoxProposalsPostNmsTopn ¶
func GenerateBoundingBoxProposalsPostNmsTopn(value int64) GenerateBoundingBoxProposalsAttr
GenerateBoundingBoxProposalsPostNmsTopn sets the optional post_nms_topn attribute to value.
value: An integer. Maximum number of rois in the output. If not specified, defaults to 300
type GenerateVocabRemappingAttr ¶
type GenerateVocabRemappingAttr func(optionalAttr)
GenerateVocabRemappingAttr is an optional argument to GenerateVocabRemapping.
func GenerateVocabRemappingOldVocabSize ¶
func GenerateVocabRemappingOldVocabSize(value int64) GenerateVocabRemappingAttr
GenerateVocabRemappingOldVocabSize sets the optional old_vocab_size attribute to value.
value: Number of entries in the old vocab file to consider. If -1, use the entire old vocabulary. If not specified, defaults to -1
REQUIRES: value >= -1
type HashTableV2Attr ¶
type HashTableV2Attr func(optionalAttr)
HashTableV2Attr is an optional argument to HashTableV2.
func HashTableV2Container ¶
func HashTableV2Container(value string) HashTableV2Attr
HashTableV2Container sets the optional container attribute to value.
value: If non-empty, this table is placed in the given container. Otherwise, a default container is used. If not specified, defaults to ""
func HashTableV2SharedName ¶
func HashTableV2SharedName(value string) HashTableV2Attr
HashTableV2SharedName sets the optional shared_name attribute to value.
value: If non-empty, this table is shared under the given name across multiple sessions. If not specified, defaults to ""
func HashTableV2UseNodeNameSharing ¶
func HashTableV2UseNodeNameSharing(value bool) HashTableV2Attr
HashTableV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
value: If true and shared_name is empty, the table is shared using the node name. If not specified, defaults to false
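A short sketch of creating a table that is shared across sessions via shared_name (the names here are illustrative):

	s := NewScope()
	table := HashTableV2(s, tf.String, tf.Int64,
		HashTableV2Container(""),             // default container
		HashTableV2SharedName("vocab_table"), // share under this name
		HashTableV2UseNodeNameSharing(false))
	_ = table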
type HistogramFixedWidthAttr ¶
type HistogramFixedWidthAttr func(optionalAttr)
HistogramFixedWidthAttr is an optional argument to HistogramFixedWidth.
func HistogramFixedWidthDtype ¶
func HistogramFixedWidthDtype(value tf.DataType) HistogramFixedWidthAttr
HistogramFixedWidthDtype sets the optional dtype attribute to value. If not specified, defaults to DT_INT32
type IRFFT2DAttr ¶
type IRFFT2DAttr func(optionalAttr)
IRFFT2DAttr is an optional argument to IRFFT2D.
func IRFFT2DTreal ¶
func IRFFT2DTreal(value tf.DataType) IRFFT2DAttr
IRFFT2DTreal sets the optional Treal attribute to value. If not specified, defaults to DT_FLOAT
type IRFFT3DAttr ¶
type IRFFT3DAttr func(optionalAttr)
IRFFT3DAttr is an optional argument to IRFFT3D.
func IRFFT3DTreal ¶
func IRFFT3DTreal(value tf.DataType) IRFFT3DAttr
IRFFT3DTreal sets the optional Treal attribute to value. If not specified, defaults to DT_FLOAT
type IRFFTAttr ¶
type IRFFTAttr func(optionalAttr)
IRFFTAttr is an optional argument to IRFFT.
func IRFFTTreal ¶
func IRFFTTreal(value tf.DataType) IRFFTAttr
IRFFTTreal sets the optional Treal attribute to value. If not specified, defaults to DT_FLOAT
type IRFFTNDAttr ¶ added in v0.7.0
type IRFFTNDAttr func(optionalAttr)
IRFFTNDAttr is an optional argument to IRFFTND.
func IRFFTNDTreal ¶ added in v0.7.0
func IRFFTNDTreal(value tf.DataType) IRFFTNDAttr
IRFFTNDTreal sets the optional Treal attribute to value. If not specified, defaults to DT_FLOAT
type IdentityReaderV2Attr ¶
type IdentityReaderV2Attr func(optionalAttr)
IdentityReaderV2Attr is an optional argument to IdentityReaderV2.
func IdentityReaderV2Container ¶
func IdentityReaderV2Container(value string) IdentityReaderV2Attr
IdentityReaderV2Container sets the optional container attribute to value.
value: If non-empty, this reader is placed in the given container. Otherwise, a default container is used. If not specified, defaults to ""
func IdentityReaderV2SharedName ¶
func IdentityReaderV2SharedName(value string) IdentityReaderV2Attr
IdentityReaderV2SharedName sets the optional shared_name attribute to value.
value: If non-empty, this reader is named in the given bucket with this shared_name. Otherwise, the node name is used instead. If not specified, defaults to ""
type IgnoreErrorsDatasetAttr ¶
type IgnoreErrorsDatasetAttr func(optionalAttr)
IgnoreErrorsDatasetAttr is an optional argument to IgnoreErrorsDataset.
func IgnoreErrorsDatasetLogWarning ¶
func IgnoreErrorsDatasetLogWarning(value bool) IgnoreErrorsDatasetAttr
IgnoreErrorsDatasetLogWarning sets the optional log_warning attribute to value. If not specified, defaults to false
type ImageProjectiveTransformV2Attr ¶
type ImageProjectiveTransformV2Attr func(optionalAttr)
ImageProjectiveTransformV2Attr is an optional argument to ImageProjectiveTransformV2.
func ImageProjectiveTransformV2FillMode ¶
func ImageProjectiveTransformV2FillMode(value string) ImageProjectiveTransformV2Attr
ImageProjectiveTransformV2FillMode sets the optional fill_mode attribute to value.
value: Fill mode, "REFLECT", "WRAP", or "CONSTANT". If not specified, defaults to "CONSTANT"
type ImageProjectiveTransformV3Attr ¶
type ImageProjectiveTransformV3Attr func(optionalAttr)
ImageProjectiveTransformV3Attr is an optional argument to ImageProjectiveTransformV3.
func ImageProjectiveTransformV3FillMode ¶
func ImageProjectiveTransformV3FillMode(value string) ImageProjectiveTransformV3Attr
ImageProjectiveTransformV3FillMode sets the optional fill_mode attribute to value.
value: Fill mode, "REFLECT", "WRAP", "CONSTANT", or "NEAREST". If not specified, defaults to "CONSTANT"
type ImageSummaryAttr ¶
type ImageSummaryAttr func(optionalAttr)
ImageSummaryAttr is an optional argument to ImageSummary.
func ImageSummaryBadColor ¶
func ImageSummaryBadColor(value tf.Tensor) ImageSummaryAttr
ImageSummaryBadColor sets the optional bad_color attribute to value.
value: Color to use for pixels with non-finite values. If not specified, defaults to {dtype:DT_UINT8 tensor_shape:{dim:{size:4}} int_val:255 int_val:0 int_val:0 int_val:255}
func ImageSummaryMaxImages ¶
func ImageSummaryMaxImages(value int64) ImageSummaryAttr
ImageSummaryMaxImages sets the optional max_images attribute to value.
value: Max number of batch elements to generate images for. If not specified, defaults to 3
REQUIRES: value >= 1
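For example, capping the number of rendered batch elements (the tag string and the input layout are assumptions):

	s := NewScope()
	images := Placeholder(s, tf.Float) // [batch, height, width, channels]
	summary := ImageSummary(s,
		Const(s, "training_inputs"), // tag
		images,
		ImageSummaryMaxImages(10))
	_ = summary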
type InfeedEnqueueAttr ¶
type InfeedEnqueueAttr func(optionalAttr)
InfeedEnqueueAttr is an optional argument to InfeedEnqueue.
func InfeedEnqueueDeviceOrdinal ¶
func InfeedEnqueueDeviceOrdinal(value int64) InfeedEnqueueAttr
InfeedEnqueueDeviceOrdinal sets the optional device_ordinal attribute to value.
value: The TPU device to use. This should be -1 when the Op is running on a TPU device, and >= 0 when the Op is running on the CPU device. If not specified, defaults to -1
func InfeedEnqueueLayout ¶
func InfeedEnqueueLayout(value []int64) InfeedEnqueueAttr
InfeedEnqueueLayout sets the optional layout attribute to value.
value: A vector holding the requested layout in minor-to-major sequence. If a layout attribute is passed, but its values are all -1, the layout will be computed by the infeed operation. If not specified, defaults to {}
func InfeedEnqueueShape ¶
func InfeedEnqueueShape(value tf.Shape) InfeedEnqueueAttr
InfeedEnqueueShape sets the optional shape attribute to value.
value: The shape of the tensor. If not specified, defaults to {}
type InfeedEnqueuePrelinearizedBufferAttr ¶
type InfeedEnqueuePrelinearizedBufferAttr func(optionalAttr)
InfeedEnqueuePrelinearizedBufferAttr is an optional argument to InfeedEnqueuePrelinearizedBuffer.
func InfeedEnqueuePrelinearizedBufferDeviceOrdinal ¶
func InfeedEnqueuePrelinearizedBufferDeviceOrdinal(value int64) InfeedEnqueuePrelinearizedBufferAttr
InfeedEnqueuePrelinearizedBufferDeviceOrdinal sets the optional device_ordinal attribute to value.
value: The TPU device to use. This should be -1 when the Op is running on a TPU device, and >= 0 when the Op is running on the CPU device. If not specified, defaults to -1
type InfeedEnqueueTupleAttr ¶
type InfeedEnqueueTupleAttr func(optionalAttr)
InfeedEnqueueTupleAttr is an optional argument to InfeedEnqueueTuple.
func InfeedEnqueueTupleDeviceOrdinal ¶
func InfeedEnqueueTupleDeviceOrdinal(value int64) InfeedEnqueueTupleAttr
InfeedEnqueueTupleDeviceOrdinal sets the optional device_ordinal attribute to value.
value: The TPU device to use. This should be -1 when the Op is running on a TPU device, and >= 0 when the Op is running on the CPU device. If not specified, defaults to -1
func InfeedEnqueueTupleLayouts ¶
func InfeedEnqueueTupleLayouts(value []int64) InfeedEnqueueTupleAttr
InfeedEnqueueTupleLayouts sets the optional layouts attribute to value.
value: A vector holding the requested layout in minor-to-major sequence for all the tuple shapes, in the order the shapes appear in the "shapes" input. The layout elements for a sub-shape can be set to -1, in which case the corresponding layout will be computed by the infeed operation. If not specified, defaults to {}
type InitializeTableFromTextFileV2Attr ¶
type InitializeTableFromTextFileV2Attr func(optionalAttr)
InitializeTableFromTextFileV2Attr is an optional argument to InitializeTableFromTextFileV2.
func InitializeTableFromTextFileV2Delimiter ¶
func InitializeTableFromTextFileV2Delimiter(value string) InitializeTableFromTextFileV2Attr
InitializeTableFromTextFileV2Delimiter sets the optional delimiter attribute to value.
value: Delimiter to separate fields in a line. If not specified, defaults to "\t"
func InitializeTableFromTextFileV2Offset ¶
func InitializeTableFromTextFileV2Offset(value int64) InitializeTableFromTextFileV2Attr
InitializeTableFromTextFileV2Offset sets the optional offset attribute to value. If not specified, defaults to 0
func InitializeTableFromTextFileV2VocabSize ¶
func InitializeTableFromTextFileV2VocabSize(value int64) InitializeTableFromTextFileV2Attr
InitializeTableFromTextFileV2VocabSize sets the optional vocab_size attribute to value.
value: Number of elements of the file, use -1 if unknown. If not specified, defaults to -1
REQUIRES: value >= -1
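A hedged sketch of initializing a table from a tab-delimited file; the table handle, file name, and column indices are illustrative, and the op is assumed to return a *tf.Operation to be run once for initialization:

	s := NewScope()
	table := HashTableV2(s, tf.String, tf.Int64)
	init := InitializeTableFromTextFileV2(s,
		table,
		Const(s, "vocab.tsv"), // hypothetical file
		0,                     // key_index: key from column 0
		1,                     // value_index: value from column 1
		InitializeTableFromTextFileV2Delimiter("\t"),
		InitializeTableFromTextFileV2VocabSize(-1)) // size unknown
	_ = init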
type IsTPUEmbeddingInitializedAttr ¶
type IsTPUEmbeddingInitializedAttr func(optionalAttr)
IsTPUEmbeddingInitializedAttr is an optional argument to IsTPUEmbeddingInitialized.
func IsTPUEmbeddingInitializedConfig ¶
func IsTPUEmbeddingInitializedConfig(value string) IsTPUEmbeddingInitializedAttr
IsTPUEmbeddingInitializedConfig sets the optional config attribute to value. If not specified, defaults to ""
type IsotonicRegressionAttr ¶
type IsotonicRegressionAttr func(optionalAttr)
IsotonicRegressionAttr is an optional argument to IsotonicRegression.
func IsotonicRegressionOutputDtype ¶
func IsotonicRegressionOutputDtype(value tf.DataType) IsotonicRegressionAttr
IsotonicRegressionOutputDtype sets the optional output_dtype attribute to value.
value: Dtype of output. If not specified, defaults to DT_FLOAT
type IteratorFromStringHandleAttr ¶
type IteratorFromStringHandleAttr func(optionalAttr)
IteratorFromStringHandleAttr is an optional argument to IteratorFromStringHandle.
func IteratorFromStringHandleOutputShapes ¶
func IteratorFromStringHandleOutputShapes(value []tf.Shape) IteratorFromStringHandleAttr
IteratorFromStringHandleOutputShapes sets the optional output_shapes attribute to value.
value: If specified, defines the shape of each tuple component in an element produced by the resulting iterator. If not specified, defaults to {}
REQUIRES: len(value) >= 0
func IteratorFromStringHandleOutputTypes ¶
func IteratorFromStringHandleOutputTypes(value []tf.DataType) IteratorFromStringHandleAttr
IteratorFromStringHandleOutputTypes sets the optional output_types attribute to value.
value: If specified, defines the type of each tuple component in an element produced by the resulting iterator. If not specified, defaults to {}
REQUIRES: len(value) >= 0
type LRNAttr ¶
type LRNAttr func(optionalAttr)
LRNAttr is an optional argument to LRN.
func LRNAlpha ¶
func LRNAlpha(value float32) LRNAttr
LRNAlpha sets the optional alpha attribute to value.
value: A scale factor, usually positive. If not specified, defaults to 1
func LRNBeta ¶
func LRNBeta(value float32) LRNAttr
LRNBeta sets the optional beta attribute to value.
value: An exponent. If not specified, defaults to 0.5
func LRNBias ¶
func LRNBias(value float32) LRNAttr
LRNBias sets the optional bias attribute to value.
value: An offset (usually positive to avoid dividing by 0). If not specified, defaults to 1
func LRNDepthRadius ¶
func LRNDepthRadius(value int64) LRNAttr
LRNDepthRadius sets the optional depth_radius attribute to value.
value: 0-D. Half-width of the 1-D normalization window. If not specified, defaults to 5
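For example, a sketch that overrides the defaults above on a 4-D input:

	s := NewScope()
	input := Placeholder(s, tf.Float) // 4-D, e.g. [batch, height, width, channels]
	normalized := LRN(s, input,
		LRNDepthRadius(2),
		LRNBias(1.0),
		LRNAlpha(0.0001),
		LRNBeta(0.75))
	_ = normalized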
type LRNGradAttr ¶
type LRNGradAttr func(optionalAttr)
LRNGradAttr is an optional argument to LRNGrad.
func LRNGradAlpha ¶
func LRNGradAlpha(value float32) LRNGradAttr
LRNGradAlpha sets the optional alpha attribute to value.
value: A scale factor, usually positive. If not specified, defaults to 1
func LRNGradBeta ¶
func LRNGradBeta(value float32) LRNGradAttr
LRNGradBeta sets the optional beta attribute to value.
value: An exponent. If not specified, defaults to 0.5
func LRNGradBias ¶
func LRNGradBias(value float32) LRNGradAttr
LRNGradBias sets the optional bias attribute to value.
value: An offset (usually > 0 to avoid dividing by 0). If not specified, defaults to 1
func LRNGradDepthRadius ¶
func LRNGradDepthRadius(value int64) LRNGradAttr
LRNGradDepthRadius sets the optional depth_radius attribute to value.
value: A depth radius. If not specified, defaults to 5
type LSTMBlockCellAttr ¶
type LSTMBlockCellAttr func(optionalAttr)
LSTMBlockCellAttr is an optional argument to LSTMBlockCell.
func LSTMBlockCellCellClip ¶
func LSTMBlockCellCellClip(value float32) LSTMBlockCellAttr
LSTMBlockCellCellClip sets the optional cell_clip attribute to value.
value: Value to clip the 'cs' value to. If not specified, defaults to 3
func LSTMBlockCellForgetBias ¶
func LSTMBlockCellForgetBias(value float32) LSTMBlockCellAttr
LSTMBlockCellForgetBias sets the optional forget_bias attribute to value.
value: The forget gate bias. If not specified, defaults to 1
func LSTMBlockCellUsePeephole ¶
func LSTMBlockCellUsePeephole(value bool) LSTMBlockCellAttr
LSTMBlockCellUsePeephole sets the optional use_peephole attribute to value.
value: Whether to use peephole weights. If not specified, defaults to false
type LeakyReluAttr ¶
type LeakyReluAttr func(optionalAttr)
LeakyReluAttr is an optional argument to LeakyRelu.
func LeakyReluAlpha ¶
func LeakyReluAlpha(value float32) LeakyReluAttr
LeakyReluAlpha sets the optional alpha attribute to value. If not specified, defaults to 0.2
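For example, a sketch using a smaller negative slope than the 0.2 default:

	s := NewScope()
	features := Placeholder(s, tf.Float)
	activations := LeakyRelu(s, features, LeakyReluAlpha(0.01))
	_ = activations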
type LeakyReluGradAttr ¶
type LeakyReluGradAttr func(optionalAttr)
LeakyReluGradAttr is an optional argument to LeakyReluGrad.
func LeakyReluGradAlpha ¶
func LeakyReluGradAlpha(value float32) LeakyReluGradAttr
LeakyReluGradAlpha sets the optional alpha attribute to value. If not specified, defaults to 0.2
type LearnedUnigramCandidateSamplerAttr ¶
type LearnedUnigramCandidateSamplerAttr func(optionalAttr)
LearnedUnigramCandidateSamplerAttr is an optional argument to LearnedUnigramCandidateSampler.
func LearnedUnigramCandidateSamplerSeed ¶
func LearnedUnigramCandidateSamplerSeed(value int64) LearnedUnigramCandidateSamplerAttr
LearnedUnigramCandidateSamplerSeed sets the optional seed attribute to value.
value: If either seed or seed2 are set to be non-zero, the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed. If not specified, defaults to 0
func LearnedUnigramCandidateSamplerSeed2 ¶
func LearnedUnigramCandidateSamplerSeed2(value int64) LearnedUnigramCandidateSamplerAttr
LearnedUnigramCandidateSamplerSeed2 sets the optional seed2 attribute to value.
value: A second seed to avoid seed collision. If not specified, defaults to 0
type ListDatasetAttr ¶ added in v0.2.0
type ListDatasetAttr func(optionalAttr)
ListDatasetAttr is an optional argument to ListDataset.
func ListDatasetMetadata ¶ added in v0.2.0
func ListDatasetMetadata(value string) ListDatasetAttr
ListDatasetMetadata sets the optional metadata attribute to value. If not specified, defaults to ""
type ListDiffAttr ¶
type ListDiffAttr func(optionalAttr)
ListDiffAttr is an optional argument to ListDiff.
func ListDiffOutIdx ¶
func ListDiffOutIdx(value tf.DataType) ListDiffAttr
ListDiffOutIdx sets the optional out_idx attribute to value. If not specified, defaults to DT_INT32
type LoadAndRemapMatrixAttr ¶
type LoadAndRemapMatrixAttr func(optionalAttr)
LoadAndRemapMatrixAttr is an optional argument to LoadAndRemapMatrix.
func LoadAndRemapMatrixMaxRowsInMemory ¶
func LoadAndRemapMatrixMaxRowsInMemory(value int64) LoadAndRemapMatrixAttr
LoadAndRemapMatrixMaxRowsInMemory sets the optional max_rows_in_memory attribute to value.
value: The maximum number of rows to load from the checkpoint at once. If less than or equal to 0, the entire matrix will be loaded into memory. Setting this arg trades increased disk reads for lower memory usage. If not specified, defaults to -1
type LoadTPUEmbeddingADAMParametersAttr ¶
type LoadTPUEmbeddingADAMParametersAttr func(optionalAttr)
LoadTPUEmbeddingADAMParametersAttr is an optional argument to LoadTPUEmbeddingADAMParameters.
func LoadTPUEmbeddingADAMParametersConfig ¶
func LoadTPUEmbeddingADAMParametersConfig(value string) LoadTPUEmbeddingADAMParametersAttr
LoadTPUEmbeddingADAMParametersConfig sets the optional config attribute to value. If not specified, defaults to ""
func LoadTPUEmbeddingADAMParametersTableId ¶
func LoadTPUEmbeddingADAMParametersTableId(value int64) LoadTPUEmbeddingADAMParametersAttr
LoadTPUEmbeddingADAMParametersTableId sets the optional table_id attribute to value. If not specified, defaults to -1
func LoadTPUEmbeddingADAMParametersTableName ¶
func LoadTPUEmbeddingADAMParametersTableName(value string) LoadTPUEmbeddingADAMParametersAttr
LoadTPUEmbeddingADAMParametersTableName sets the optional table_name attribute to value. If not specified, defaults to ""
type LoadTPUEmbeddingAdadeltaParametersAttr ¶
type LoadTPUEmbeddingAdadeltaParametersAttr func(optionalAttr)
LoadTPUEmbeddingAdadeltaParametersAttr is an optional argument to LoadTPUEmbeddingAdadeltaParameters.
func LoadTPUEmbeddingAdadeltaParametersConfig ¶
func LoadTPUEmbeddingAdadeltaParametersConfig(value string) LoadTPUEmbeddingAdadeltaParametersAttr
LoadTPUEmbeddingAdadeltaParametersConfig sets the optional config attribute to value. If not specified, defaults to ""
func LoadTPUEmbeddingAdadeltaParametersTableId ¶
func LoadTPUEmbeddingAdadeltaParametersTableId(value int64) LoadTPUEmbeddingAdadeltaParametersAttr
LoadTPUEmbeddingAdadeltaParametersTableId sets the optional table_id attribute to value. If not specified, defaults to -1
func LoadTPUEmbeddingAdadeltaParametersTableName ¶
func LoadTPUEmbeddingAdadeltaParametersTableName(value string) LoadTPUEmbeddingAdadeltaParametersAttr
LoadTPUEmbeddingAdadeltaParametersTableName sets the optional table_name attribute to value. If not specified, defaults to ""
type LoadTPUEmbeddingAdagradMomentumParametersAttr ¶
type LoadTPUEmbeddingAdagradMomentumParametersAttr func(optionalAttr)
LoadTPUEmbeddingAdagradMomentumParametersAttr is an optional argument to LoadTPUEmbeddingAdagradMomentumParameters.
func LoadTPUEmbeddingAdagradMomentumParametersConfig ¶
func LoadTPUEmbeddingAdagradMomentumParametersConfig(value string) LoadTPUEmbeddingAdagradMomentumParametersAttr
LoadTPUEmbeddingAdagradMomentumParametersConfig sets the optional config attribute to value. If not specified, defaults to ""
func LoadTPUEmbeddingAdagradMomentumParametersTableId ¶
func LoadTPUEmbeddingAdagradMomentumParametersTableId(value int64) LoadTPUEmbeddingAdagradMomentumParametersAttr
LoadTPUEmbeddingAdagradMomentumParametersTableId sets the optional table_id attribute to value. If not specified, defaults to -1
func LoadTPUEmbeddingAdagradMomentumParametersTableName ¶
func LoadTPUEmbeddingAdagradMomentumParametersTableName(value string) LoadTPUEmbeddingAdagradMomentumParametersAttr
LoadTPUEmbeddingAdagradMomentumParametersTableName sets the optional table_name attribute to value. If not specified, defaults to ""
type LoadTPUEmbeddingAdagradParametersAttr ¶
type LoadTPUEmbeddingAdagradParametersAttr func(optionalAttr)
LoadTPUEmbeddingAdagradParametersAttr is an optional argument to LoadTPUEmbeddingAdagradParameters.
func LoadTPUEmbeddingAdagradParametersConfig ¶
func LoadTPUEmbeddingAdagradParametersConfig(value string) LoadTPUEmbeddingAdagradParametersAttr
LoadTPUEmbeddingAdagradParametersConfig sets the optional config attribute to value. If not specified, defaults to ""
func LoadTPUEmbeddingAdagradParametersTableId ¶
func LoadTPUEmbeddingAdagradParametersTableId(value int64) LoadTPUEmbeddingAdagradParametersAttr
LoadTPUEmbeddingAdagradParametersTableId sets the optional table_id attribute to value. If not specified, defaults to -1
func LoadTPUEmbeddingAdagradParametersTableName ¶
func LoadTPUEmbeddingAdagradParametersTableName(value string) LoadTPUEmbeddingAdagradParametersAttr
LoadTPUEmbeddingAdagradParametersTableName sets the optional table_name attribute to value. If not specified, defaults to ""
type LoadTPUEmbeddingCenteredRMSPropParametersAttr ¶
type LoadTPUEmbeddingCenteredRMSPropParametersAttr func(optionalAttr)
LoadTPUEmbeddingCenteredRMSPropParametersAttr is an optional argument to LoadTPUEmbeddingCenteredRMSPropParameters.
func LoadTPUEmbeddingCenteredRMSPropParametersConfig ¶
func LoadTPUEmbeddingCenteredRMSPropParametersConfig(value string) LoadTPUEmbeddingCenteredRMSPropParametersAttr
LoadTPUEmbeddingCenteredRMSPropParametersConfig sets the optional config attribute to value. If not specified, defaults to ""
func LoadTPUEmbeddingCenteredRMSPropParametersTableId ¶
func LoadTPUEmbeddingCenteredRMSPropParametersTableId(value int64) LoadTPUEmbeddingCenteredRMSPropParametersAttr
LoadTPUEmbeddingCenteredRMSPropParametersTableId sets the optional table_id attribute to value. If not specified, defaults to -1
func LoadTPUEmbeddingCenteredRMSPropParametersTableName ¶
func LoadTPUEmbeddingCenteredRMSPropParametersTableName(value string) LoadTPUEmbeddingCenteredRMSPropParametersAttr
LoadTPUEmbeddingCenteredRMSPropParametersTableName sets the optional table_name attribute to value. If not specified, defaults to ""
type LoadTPUEmbeddingFTRLParametersAttr ¶
type LoadTPUEmbeddingFTRLParametersAttr func(optionalAttr)
LoadTPUEmbeddingFTRLParametersAttr is an optional argument to LoadTPUEmbeddingFTRLParameters.
func LoadTPUEmbeddingFTRLParametersConfig ¶
func LoadTPUEmbeddingFTRLParametersConfig(value string) LoadTPUEmbeddingFTRLParametersAttr
LoadTPUEmbeddingFTRLParametersConfig sets the optional config attribute to value. If not specified, defaults to ""
func LoadTPUEmbeddingFTRLParametersTableId ¶
func LoadTPUEmbeddingFTRLParametersTableId(value int64) LoadTPUEmbeddingFTRLParametersAttr
LoadTPUEmbeddingFTRLParametersTableId sets the optional table_id attribute to value. If not specified, defaults to -1
func LoadTPUEmbeddingFTRLParametersTableName ¶
func LoadTPUEmbeddingFTRLParametersTableName(value string) LoadTPUEmbeddingFTRLParametersAttr
LoadTPUEmbeddingFTRLParametersTableName sets the optional table_name attribute to value. If not specified, defaults to ""
type LoadTPUEmbeddingFrequencyEstimatorParametersAttr ¶
type LoadTPUEmbeddingFrequencyEstimatorParametersAttr func(optionalAttr)
LoadTPUEmbeddingFrequencyEstimatorParametersAttr is an optional argument to LoadTPUEmbeddingFrequencyEstimatorParameters.
func LoadTPUEmbeddingFrequencyEstimatorParametersConfig ¶
func LoadTPUEmbeddingFrequencyEstimatorParametersConfig(value string) LoadTPUEmbeddingFrequencyEstimatorParametersAttr
LoadTPUEmbeddingFrequencyEstimatorParametersConfig sets the optional config attribute to value. If not specified, defaults to ""
func LoadTPUEmbeddingFrequencyEstimatorParametersTableId ¶
func LoadTPUEmbeddingFrequencyEstimatorParametersTableId(value int64) LoadTPUEmbeddingFrequencyEstimatorParametersAttr
LoadTPUEmbeddingFrequencyEstimatorParametersTableId sets the optional table_id attribute to value. If not specified, defaults to -1
func LoadTPUEmbeddingFrequencyEstimatorParametersTableName ¶
func LoadTPUEmbeddingFrequencyEstimatorParametersTableName(value string) LoadTPUEmbeddingFrequencyEstimatorParametersAttr
LoadTPUEmbeddingFrequencyEstimatorParametersTableName sets the optional table_name attribute to value. If not specified, defaults to ""
type LoadTPUEmbeddingMDLAdagradLightParametersAttr ¶
type LoadTPUEmbeddingMDLAdagradLightParametersAttr func(optionalAttr)
LoadTPUEmbeddingMDLAdagradLightParametersAttr is an optional argument to LoadTPUEmbeddingMDLAdagradLightParameters.
func LoadTPUEmbeddingMDLAdagradLightParametersConfig ¶
func LoadTPUEmbeddingMDLAdagradLightParametersConfig(value string) LoadTPUEmbeddingMDLAdagradLightParametersAttr
LoadTPUEmbeddingMDLAdagradLightParametersConfig sets the optional config attribute to value. If not specified, defaults to ""
func LoadTPUEmbeddingMDLAdagradLightParametersTableId ¶
func LoadTPUEmbeddingMDLAdagradLightParametersTableId(value int64) LoadTPUEmbeddingMDLAdagradLightParametersAttr
LoadTPUEmbeddingMDLAdagradLightParametersTableId sets the optional table_id attribute to value. If not specified, defaults to -1
func LoadTPUEmbeddingMDLAdagradLightParametersTableName ¶
func LoadTPUEmbeddingMDLAdagradLightParametersTableName(value string) LoadTPUEmbeddingMDLAdagradLightParametersAttr
LoadTPUEmbeddingMDLAdagradLightParametersTableName sets the optional table_name attribute to value. If not specified, defaults to ""
type LoadTPUEmbeddingMomentumParametersAttr ¶
type LoadTPUEmbeddingMomentumParametersAttr func(optionalAttr)
LoadTPUEmbeddingMomentumParametersAttr is an optional argument to LoadTPUEmbeddingMomentumParameters.
func LoadTPUEmbeddingMomentumParametersConfig ¶
func LoadTPUEmbeddingMomentumParametersConfig(value string) LoadTPUEmbeddingMomentumParametersAttr
LoadTPUEmbeddingMomentumParametersConfig sets the optional config attribute to value. If not specified, defaults to ""
func LoadTPUEmbeddingMomentumParametersTableId ¶
func LoadTPUEmbeddingMomentumParametersTableId(value int64) LoadTPUEmbeddingMomentumParametersAttr
LoadTPUEmbeddingMomentumParametersTableId sets the optional table_id attribute to value. If not specified, defaults to -1
func LoadTPUEmbeddingMomentumParametersTableName ¶
func LoadTPUEmbeddingMomentumParametersTableName(value string) LoadTPUEmbeddingMomentumParametersAttr
LoadTPUEmbeddingMomentumParametersTableName sets the optional table_name attribute to value. If not specified, defaults to ""
type LoadTPUEmbeddingProximalAdagradParametersAttr ¶
type LoadTPUEmbeddingProximalAdagradParametersAttr func(optionalAttr)
LoadTPUEmbeddingProximalAdagradParametersAttr is an optional argument to LoadTPUEmbeddingProximalAdagradParameters.
func LoadTPUEmbeddingProximalAdagradParametersConfig ¶
func LoadTPUEmbeddingProximalAdagradParametersConfig(value string) LoadTPUEmbeddingProximalAdagradParametersAttr
LoadTPUEmbeddingProximalAdagradParametersConfig sets the optional config attribute to value. If not specified, defaults to ""
func LoadTPUEmbeddingProximalAdagradParametersTableId ¶
func LoadTPUEmbeddingProximalAdagradParametersTableId(value int64) LoadTPUEmbeddingProximalAdagradParametersAttr
LoadTPUEmbeddingProximalAdagradParametersTableId sets the optional table_id attribute to value. If not specified, defaults to -1
func LoadTPUEmbeddingProximalAdagradParametersTableName ¶
func LoadTPUEmbeddingProximalAdagradParametersTableName(value string) LoadTPUEmbeddingProximalAdagradParametersAttr
LoadTPUEmbeddingProximalAdagradParametersTableName sets the optional table_name attribute to value. If not specified, defaults to ""
type LoadTPUEmbeddingRMSPropParametersAttr ¶
type LoadTPUEmbeddingRMSPropParametersAttr func(optionalAttr)
LoadTPUEmbeddingRMSPropParametersAttr is an optional argument to LoadTPUEmbeddingRMSPropParameters.
func LoadTPUEmbeddingRMSPropParametersConfig ¶
func LoadTPUEmbeddingRMSPropParametersConfig(value string) LoadTPUEmbeddingRMSPropParametersAttr
LoadTPUEmbeddingRMSPropParametersConfig sets the optional config attribute to value. If not specified, defaults to ""
func LoadTPUEmbeddingRMSPropParametersTableId ¶
func LoadTPUEmbeddingRMSPropParametersTableId(value int64) LoadTPUEmbeddingRMSPropParametersAttr
LoadTPUEmbeddingRMSPropParametersTableId sets the optional table_id attribute to value. If not specified, defaults to -1
func LoadTPUEmbeddingRMSPropParametersTableName ¶
func LoadTPUEmbeddingRMSPropParametersTableName(value string) LoadTPUEmbeddingRMSPropParametersAttr
LoadTPUEmbeddingRMSPropParametersTableName sets the optional table_name attribute to value. If not specified, defaults to ""
type LoadTPUEmbeddingStochasticGradientDescentParametersAttr ¶
type LoadTPUEmbeddingStochasticGradientDescentParametersAttr func(optionalAttr)
LoadTPUEmbeddingStochasticGradientDescentParametersAttr is an optional argument to LoadTPUEmbeddingStochasticGradientDescentParameters.
func LoadTPUEmbeddingStochasticGradientDescentParametersConfig ¶
func LoadTPUEmbeddingStochasticGradientDescentParametersConfig(value string) LoadTPUEmbeddingStochasticGradientDescentParametersAttr
LoadTPUEmbeddingStochasticGradientDescentParametersConfig sets the optional config attribute to value. If not specified, defaults to ""
func LoadTPUEmbeddingStochasticGradientDescentParametersTableId ¶
func LoadTPUEmbeddingStochasticGradientDescentParametersTableId(value int64) LoadTPUEmbeddingStochasticGradientDescentParametersAttr
LoadTPUEmbeddingStochasticGradientDescentParametersTableId sets the optional table_id attribute to value. If not specified, defaults to -1
func LoadTPUEmbeddingStochasticGradientDescentParametersTableName ¶
func LoadTPUEmbeddingStochasticGradientDescentParametersTableName(value string) LoadTPUEmbeddingStochasticGradientDescentParametersAttr
LoadTPUEmbeddingStochasticGradientDescentParametersTableName sets the optional table_name attribute to value. If not specified, defaults to ""
type LogUniformCandidateSamplerAttr ¶
type LogUniformCandidateSamplerAttr func(optionalAttr)
LogUniformCandidateSamplerAttr is an optional argument to LogUniformCandidateSampler.
func LogUniformCandidateSamplerSeed ¶
func LogUniformCandidateSamplerSeed(value int64) LogUniformCandidateSamplerAttr
LogUniformCandidateSamplerSeed sets the optional seed attribute to value.
value: If either seed or seed2 are set to be non-zero, the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed. If not specified, defaults to 0
func LogUniformCandidateSamplerSeed2 ¶
func LogUniformCandidateSamplerSeed2(value int64) LogUniformCandidateSamplerAttr
LogUniformCandidateSamplerSeed2 sets the optional seed2 attribute to value.
value: A second seed to avoid seed collision. If not specified, defaults to 0
type LowerBoundAttr ¶
type LowerBoundAttr func(optionalAttr)
LowerBoundAttr is an optional argument to LowerBound.
func LowerBoundOutType ¶
func LowerBoundOutType(value tf.DataType) LowerBoundAttr
LowerBoundOutType sets the optional out_type attribute to value. If not specified, defaults to DT_INT32
type LuAttr ¶
type LuAttr func(optionalAttr)
LuAttr is an optional argument to Lu.
func LuOutputIdxType ¶
func LuOutputIdxType(value tf.DataType) LuAttr
LuOutputIdxType sets the optional output_idx_type attribute to value. If not specified, defaults to DT_INT32
type MapClearAttr ¶
type MapClearAttr func(optionalAttr)
MapClearAttr is an optional argument to MapClear.
func MapClearCapacity ¶
func MapClearCapacity(value int64) MapClearAttr
MapClearCapacity sets the optional capacity attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func MapClearContainer ¶
func MapClearContainer(value string) MapClearAttr
MapClearContainer sets the optional container attribute to value. If not specified, defaults to ""
func MapClearMemoryLimit ¶
func MapClearMemoryLimit(value int64) MapClearAttr
MapClearMemoryLimit sets the optional memory_limit attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func MapClearSharedName ¶
func MapClearSharedName(value string) MapClearAttr
MapClearSharedName sets the optional shared_name attribute to value. If not specified, defaults to ""
type MapIncompleteSizeAttr ¶
type MapIncompleteSizeAttr func(optionalAttr)
MapIncompleteSizeAttr is an optional argument to MapIncompleteSize.
func MapIncompleteSizeCapacity ¶
func MapIncompleteSizeCapacity(value int64) MapIncompleteSizeAttr
MapIncompleteSizeCapacity sets the optional capacity attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func MapIncompleteSizeContainer ¶
func MapIncompleteSizeContainer(value string) MapIncompleteSizeAttr
MapIncompleteSizeContainer sets the optional container attribute to value. If not specified, defaults to ""
func MapIncompleteSizeMemoryLimit ¶
func MapIncompleteSizeMemoryLimit(value int64) MapIncompleteSizeAttr
MapIncompleteSizeMemoryLimit sets the optional memory_limit attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func MapIncompleteSizeSharedName ¶
func MapIncompleteSizeSharedName(value string) MapIncompleteSizeAttr
MapIncompleteSizeSharedName sets the optional shared_name attribute to value. If not specified, defaults to ""
type MapPeekAttr ¶
type MapPeekAttr func(optionalAttr)
MapPeekAttr is an optional argument to MapPeek.
func MapPeekCapacity ¶
func MapPeekCapacity(value int64) MapPeekAttr
MapPeekCapacity sets the optional capacity attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func MapPeekContainer ¶
func MapPeekContainer(value string) MapPeekAttr
MapPeekContainer sets the optional container attribute to value. If not specified, defaults to ""
func MapPeekMemoryLimit ¶
func MapPeekMemoryLimit(value int64) MapPeekAttr
MapPeekMemoryLimit sets the optional memory_limit attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func MapPeekSharedName ¶
func MapPeekSharedName(value string) MapPeekAttr
MapPeekSharedName sets the optional shared_name attribute to value. If not specified, defaults to ""
type MapSizeAttr ¶
type MapSizeAttr func(optionalAttr)
MapSizeAttr is an optional argument to MapSize.
func MapSizeCapacity ¶
func MapSizeCapacity(value int64) MapSizeAttr
MapSizeCapacity sets the optional capacity attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func MapSizeContainer ¶
func MapSizeContainer(value string) MapSizeAttr
MapSizeContainer sets the optional container attribute to value. If not specified, defaults to ""
func MapSizeMemoryLimit ¶
func MapSizeMemoryLimit(value int64) MapSizeAttr
MapSizeMemoryLimit sets the optional memory_limit attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func MapSizeSharedName ¶
func MapSizeSharedName(value string) MapSizeAttr
MapSizeSharedName sets the optional shared_name attribute to value. If not specified, defaults to ""
type MapStageAttr ¶
type MapStageAttr func(optionalAttr)
MapStageAttr is an optional argument to MapStage.
func MapStageCapacity ¶
func MapStageCapacity(value int64) MapStageAttr
MapStageCapacity sets the optional capacity attribute to value.
value: Maximum number of elements in the Staging Area. If > 0, inserts on the container will block when the capacity is reached. If not specified, defaults to 0
REQUIRES: value >= 0
func MapStageContainer ¶
func MapStageContainer(value string) MapStageAttr
MapStageContainer sets the optional container attribute to value.
value: If non-empty, this queue is placed in the given container. Otherwise, a default container is used. If not specified, defaults to ""
func MapStageMemoryLimit ¶
func MapStageMemoryLimit(value int64) MapStageAttr
MapStageMemoryLimit sets the optional memory_limit attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func MapStageSharedName ¶
func MapStageSharedName(value string) MapStageAttr
MapStageSharedName sets the optional shared_name attribute to value.
value: It is necessary to match this name to the matching Unstage Op. If not specified, defaults to ""
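A hedged sketch of staging a value under an int64 key with a bounded staging area; the required key/indices/values/dtypes arguments are assumptions based on the op's usual signature, and the shared_name must match the corresponding unstage op:

	s := NewScope()
	key := Const(s, int64(0))
	indices := Const(s, []int32{0})
	values := []tf.Output{Placeholder(s, tf.Float)}
	stage := MapStage(s, key, indices, values,
		[]tf.DataType{tf.Float}, // dtypes of the staged values
		MapStageCapacity(32),    // block inserts once 32 elements are staged
		MapStageSharedName("staging_area"))
	_ = stage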
type MapUnstageAttr ¶
type MapUnstageAttr func(optionalAttr)
MapUnstageAttr is an optional argument to MapUnstage.
func MapUnstageCapacity ¶
func MapUnstageCapacity(value int64) MapUnstageAttr
MapUnstageCapacity sets the optional capacity attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func MapUnstageContainer ¶
func MapUnstageContainer(value string) MapUnstageAttr
MapUnstageContainer sets the optional container attribute to value. If not specified, defaults to ""
func MapUnstageMemoryLimit ¶
func MapUnstageMemoryLimit(value int64) MapUnstageAttr
MapUnstageMemoryLimit sets the optional memory_limit attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func MapUnstageSharedName ¶
func MapUnstageSharedName(value string) MapUnstageAttr
MapUnstageSharedName sets the optional shared_name attribute to value. If not specified, defaults to ""
type MapUnstageNoKeyAttr ¶
type MapUnstageNoKeyAttr func(optionalAttr)
MapUnstageNoKeyAttr is an optional argument to MapUnstageNoKey.
func MapUnstageNoKeyCapacity ¶
func MapUnstageNoKeyCapacity(value int64) MapUnstageNoKeyAttr
MapUnstageNoKeyCapacity sets the optional capacity attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func MapUnstageNoKeyContainer ¶
func MapUnstageNoKeyContainer(value string) MapUnstageNoKeyAttr
MapUnstageNoKeyContainer sets the optional container attribute to value. If not specified, defaults to ""
func MapUnstageNoKeyMemoryLimit ¶
func MapUnstageNoKeyMemoryLimit(value int64) MapUnstageNoKeyAttr
MapUnstageNoKeyMemoryLimit sets the optional memory_limit attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func MapUnstageNoKeySharedName ¶
func MapUnstageNoKeySharedName(value string) MapUnstageNoKeyAttr
MapUnstageNoKeySharedName sets the optional shared_name attribute to value. If not specified, defaults to ""
type MatMulAttr ¶
type MatMulAttr func(optionalAttr)
MatMulAttr is an optional argument to MatMul.
func MatMulGradA ¶ added in v0.8.0
func MatMulGradA(value bool) MatMulAttr
MatMulGradA sets the optional grad_a attribute to value. If not specified, defaults to false
func MatMulGradB ¶ added in v0.8.0
func MatMulGradB(value bool) MatMulAttr
MatMulGradB sets the optional grad_b attribute to value. If not specified, defaults to false
func MatMulTransposeA ¶
func MatMulTransposeA(value bool) MatMulAttr
MatMulTransposeA sets the optional transpose_a attribute to value.
value: If true, "a" is transposed before multiplication. If not specified, defaults to false
func MatMulTransposeB ¶
func MatMulTransposeB(value bool) MatMulAttr
MatMulTransposeB sets the optional transpose_b attribute to value.
value: If true, "b" is transposed before multiplication. If not specified, defaults to false
type MatrixDiagPartV3Attr ¶
type MatrixDiagPartV3Attr func(optionalAttr)
MatrixDiagPartV3Attr is an optional argument to MatrixDiagPartV3.
func MatrixDiagPartV3Align ¶
func MatrixDiagPartV3Align(value string) MatrixDiagPartV3Attr
MatrixDiagPartV3Align sets the optional align attribute to value.
value: Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is a string specifying how superdiagonals and subdiagonals should be aligned, respectively. There are four possible alignments: "RIGHT_LEFT" (default), "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals to the right (left-pads the row) and subdiagonals to the left (right-pads the row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is the opposite alignment. If not specified, defaults to "RIGHT_LEFT"
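A short, hedged sketch of requesting a non-default alignment. The MatrixDiagPartV3 positional arguments (input, k, padding_value) are assumed from the attribute descriptions and the package's conventions:

s := NewScope()
input := Const(s, [][]float32{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}})
k := Const(s, []int32{-1, 1})   // extract the subdiagonal through the superdiagonal
padding := Const(s, float32(0)) // pad shorter diagonals with zeros
diag := MatrixDiagPartV3(s, input, k, padding,
    MatrixDiagPartV3Align("LEFT_RIGHT")) // cuSPARSE-style alignment instead of the LAPACK default
if s.Err() != nil {
    panic(s.Err())
}
_ = diag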
type MatrixDiagV3Attr ¶
type MatrixDiagV3Attr func(optionalAttr)
MatrixDiagV3Attr is an optional argument to MatrixDiagV3.
func MatrixDiagV3Align ¶
func MatrixDiagV3Align(value string) MatrixDiagV3Attr
MatrixDiagV3Align sets the optional align attribute to value.
value: Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is a string specifying how superdiagonals and subdiagonals should be aligned, respectively. There are four possible alignments: "RIGHT_LEFT" (default), "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals to the right (left-pads the row) and subdiagonals to the left (right-pads the row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is the opposite alignment. If not specified, defaults to "RIGHT_LEFT"
type MatrixInverseAttr ¶
type MatrixInverseAttr func(optionalAttr)
MatrixInverseAttr is an optional argument to MatrixInverse.
func MatrixInverseAdjoint ¶
func MatrixInverseAdjoint(value bool) MatrixInverseAttr
MatrixInverseAdjoint sets the optional adjoint attribute to value. If not specified, defaults to false
type MatrixSetDiagV3Attr ¶
type MatrixSetDiagV3Attr func(optionalAttr)
MatrixSetDiagV3Attr is an optional argument to MatrixSetDiagV3.
func MatrixSetDiagV3Align ¶
func MatrixSetDiagV3Align(value string) MatrixSetDiagV3Attr
MatrixSetDiagV3Align sets the optional align attribute to value.
value: Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is a string specifying how superdiagonals and subdiagonals should be aligned, respectively. There are four possible alignments: "RIGHT_LEFT" (default), "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals to the right (left-pads the row) and subdiagonals to the left (right-pads the row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is the opposite alignment. If not specified, defaults to "RIGHT_LEFT"
type MatrixSolveAttr ¶
type MatrixSolveAttr func(optionalAttr)
MatrixSolveAttr is an optional argument to MatrixSolve.
func MatrixSolveAdjoint ¶
func MatrixSolveAdjoint(value bool) MatrixSolveAttr
MatrixSolveAdjoint sets the optional adjoint attribute to value.
value: Boolean indicating whether to solve with `matrix` or its (block-wise) adjoint. If not specified, defaults to false
type MatrixSolveLsAttr ¶
type MatrixSolveLsAttr func(optionalAttr)
MatrixSolveLsAttr is an optional argument to MatrixSolveLs.
func MatrixSolveLsFast ¶
func MatrixSolveLsFast(value bool) MatrixSolveLsAttr
MatrixSolveLsFast sets the optional fast attribute to value. If not specified, defaults to true
type MatrixTriangularSolveAttr ¶
type MatrixTriangularSolveAttr func(optionalAttr)
MatrixTriangularSolveAttr is an optional argument to MatrixTriangularSolve.
func MatrixTriangularSolveAdjoint ¶
func MatrixTriangularSolveAdjoint(value bool) MatrixTriangularSolveAttr
MatrixTriangularSolveAdjoint sets the optional adjoint attribute to value.
value: Boolean indicating whether to solve with `matrix` or its (block-wise) adjoint. Equivalent to scipy.linalg.solve_triangular (NumPy compatibility). If not specified, defaults to false
func MatrixTriangularSolveLower ¶
func MatrixTriangularSolveLower(value bool) MatrixTriangularSolveAttr
MatrixTriangularSolveLower sets the optional lower attribute to value.
value: Boolean indicating whether the innermost matrices in `matrix` are lower or upper triangular. If not specified, defaults to true
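A minimal sketch of solving a lower-triangular system with these options; the MatrixTriangularSolve positional arguments (matrix, rhs) are assumed from the package's conventions:

s := NewScope()
matrix := Const(s, [][]float32{{3, 0}, {2, 1}})
rhs := Const(s, [][]float32{{6}, {8}})
x := MatrixTriangularSolve(s, matrix, rhs,
    MatrixTriangularSolveLower(true),    // treat matrix as lower triangular (the default)
    MatrixTriangularSolveAdjoint(false)) // solve with matrix itself, not its adjoint
if s.Err() != nil {
    panic(s.Err())
}
_ = x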
type MaxAttr ¶
type MaxAttr func(optionalAttr)
MaxAttr is an optional argument to Max.
func MaxKeepDims ¶
func MaxKeepDims(value bool) MaxAttr
MaxKeepDims sets the optional keep_dims attribute to value.
value: If true, retain reduced dimensions with length 1. If not specified, defaults to false
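A minimal sketch of a reduction that keeps the reduced axis; the Max positional arguments (input, axis) are assumed to mirror the other reduction ops in this package:

s := NewScope()
input := Const(s, [][]float32{{1, 5}, {3, 2}})
axis := Const(s, int32(1))
m := Max(s, input, axis, MaxKeepDims(true)) // row-wise maxima with shape [2, 1] instead of [2]
if s.Err() != nil {
    panic(s.Err())
}
_ = m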
type MaxPool3DAttr ¶
type MaxPool3DAttr func(optionalAttr)
MaxPool3DAttr is an optional argument to MaxPool3D.
func MaxPool3DDataFormat ¶
func MaxPool3DDataFormat(value string) MaxPool3DAttr
MaxPool3DDataFormat sets the optional data_format attribute to value.
value: The data format of the input and output data. With the default format "NDHWC", the data is stored in the order of:
[batch, in_depth, in_height, in_width, in_channels].
Alternatively, the format could be "NCDHW", the data storage order is:
[batch, in_channels, in_depth, in_height, in_width].
If not specified, defaults to "NDHWC"
type MaxPool3DGradAttr ¶
type MaxPool3DGradAttr func(optionalAttr)
MaxPool3DGradAttr is an optional argument to MaxPool3DGrad.
func MaxPool3DGradDataFormat ¶
func MaxPool3DGradDataFormat(value string) MaxPool3DGradAttr
MaxPool3DGradDataFormat sets the optional data_format attribute to value.
value: The data format of the input and output data. With the default format "NDHWC", the data is stored in the order of:
[batch, in_depth, in_height, in_width, in_channels].
Alternatively, the format could be "NCDHW", the data storage order is:
[batch, in_channels, in_depth, in_height, in_width].
If not specified, defaults to "NDHWC"
type MaxPool3DGradGradAttr ¶
type MaxPool3DGradGradAttr func(optionalAttr)
MaxPool3DGradGradAttr is an optional argument to MaxPool3DGradGrad.
func MaxPool3DGradGradDataFormat ¶
func MaxPool3DGradGradDataFormat(value string) MaxPool3DGradGradAttr
MaxPool3DGradGradDataFormat sets the optional data_format attribute to value.
value: The data format of the input and output data. With the default format "NDHWC", the data is stored in the order of:
[batch, in_depth, in_height, in_width, in_channels].
Alternatively, the format could be "NCDHW", the data storage order is:
[batch, in_channels, in_depth, in_height, in_width].
If not specified, defaults to "NDHWC"
type MaxPoolAttr ¶
type MaxPoolAttr func(optionalAttr)
MaxPoolAttr is an optional argument to MaxPool.
func MaxPoolDataFormat ¶
func MaxPoolDataFormat(value string) MaxPoolAttr
MaxPoolDataFormat sets the optional data_format attribute to value.
value: Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order is:
[batch, in_channels, in_height, in_width].
If not specified, defaults to "NHWC"
func MaxPoolExplicitPaddings ¶
func MaxPoolExplicitPaddings(value []int64) MaxPoolAttr
MaxPoolExplicitPaddings sets the optional explicit_paddings attribute to value. If not specified, defaults to {}
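A hedged sketch of 2x2 max pooling with an explicit data format; the MaxPool positional arguments (input, ksize, strides, padding) are assumed from the package's conventions:

s := NewScope()
images := Placeholder(s, tf.Float) // e.g. a [batch, height, width, channels] tensor
pooled := MaxPool(s, images, []int64{1, 2, 2, 1}, []int64{1, 2, 2, 1}, "VALID",
    MaxPoolDataFormat("NHWC")) // the default layout; "NCHW" would select channels-first data
if s.Err() != nil {
    panic(s.Err())
}
_ = pooled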
type MaxPoolGradAttr ¶
type MaxPoolGradAttr func(optionalAttr)
MaxPoolGradAttr is an optional argument to MaxPoolGrad.
func MaxPoolGradDataFormat ¶
func MaxPoolGradDataFormat(value string) MaxPoolGradAttr
MaxPoolGradDataFormat sets the optional data_format attribute to value.
value: Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order is:
[batch, in_channels, in_height, in_width].
If not specified, defaults to "NHWC"
func MaxPoolGradExplicitPaddings ¶
func MaxPoolGradExplicitPaddings(value []int64) MaxPoolGradAttr
MaxPoolGradExplicitPaddings sets the optional explicit_paddings attribute to value. If not specified, defaults to {}
type MaxPoolGradGradAttr ¶
type MaxPoolGradGradAttr func(optionalAttr)
MaxPoolGradGradAttr is an optional argument to MaxPoolGradGrad.
func MaxPoolGradGradDataFormat ¶
func MaxPoolGradGradDataFormat(value string) MaxPoolGradGradAttr
MaxPoolGradGradDataFormat sets the optional data_format attribute to value.
value: Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order is:
[batch, in_channels, in_height, in_width].
If not specified, defaults to "NHWC"
type MaxPoolGradGradV2Attr ¶
type MaxPoolGradGradV2Attr func(optionalAttr)
MaxPoolGradGradV2Attr is an optional argument to MaxPoolGradGradV2.
func MaxPoolGradGradV2DataFormat ¶
func MaxPoolGradGradV2DataFormat(value string) MaxPoolGradGradV2Attr
MaxPoolGradGradV2DataFormat sets the optional data_format attribute to value.
value: Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order is:
[batch, in_channels, in_height, in_width].
If not specified, defaults to "NHWC"
type MaxPoolGradGradWithArgmaxAttr ¶
type MaxPoolGradGradWithArgmaxAttr func(optionalAttr)
MaxPoolGradGradWithArgmaxAttr is an optional argument to MaxPoolGradGradWithArgmax.
func MaxPoolGradGradWithArgmaxIncludeBatchInIndex ¶
func MaxPoolGradGradWithArgmaxIncludeBatchInIndex(value bool) MaxPoolGradGradWithArgmaxAttr
MaxPoolGradGradWithArgmaxIncludeBatchInIndex sets the optional include_batch_in_index attribute to value.
value: Whether to include batch dimension in flattened index of `argmax`. If not specified, defaults to false
type MaxPoolGradV2Attr ¶
type MaxPoolGradV2Attr func(optionalAttr)
MaxPoolGradV2Attr is an optional argument to MaxPoolGradV2.
func MaxPoolGradV2DataFormat ¶
func MaxPoolGradV2DataFormat(value string) MaxPoolGradV2Attr
MaxPoolGradV2DataFormat sets the optional data_format attribute to value.
value: Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order is:
[batch, in_channels, in_height, in_width].
If not specified, defaults to "NHWC"
type MaxPoolGradWithArgmaxAttr ¶
type MaxPoolGradWithArgmaxAttr func(optionalAttr)
MaxPoolGradWithArgmaxAttr is an optional argument to MaxPoolGradWithArgmax.
func MaxPoolGradWithArgmaxIncludeBatchInIndex ¶
func MaxPoolGradWithArgmaxIncludeBatchInIndex(value bool) MaxPoolGradWithArgmaxAttr
MaxPoolGradWithArgmaxIncludeBatchInIndex sets the optional include_batch_in_index attribute to value.
value: Whether to include batch dimension in flattened index of `argmax`. If not specified, defaults to false
type MaxPoolV2Attr ¶
type MaxPoolV2Attr func(optionalAttr)
MaxPoolV2Attr is an optional argument to MaxPoolV2.
func MaxPoolV2DataFormat ¶
func MaxPoolV2DataFormat(value string) MaxPoolV2Attr
MaxPoolV2DataFormat sets the optional data_format attribute to value.
value: Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order is:
[batch, in_channels, in_height, in_width].
If not specified, defaults to "NHWC"
type MaxPoolWithArgmaxAttr ¶
type MaxPoolWithArgmaxAttr func(optionalAttr)
MaxPoolWithArgmaxAttr is an optional argument to MaxPoolWithArgmax.
func MaxPoolWithArgmaxIncludeBatchInIndex ¶
func MaxPoolWithArgmaxIncludeBatchInIndex(value bool) MaxPoolWithArgmaxAttr
MaxPoolWithArgmaxIncludeBatchInIndex sets the optional include_batch_in_index attribute to value.
value: Whether to include batch dimension in flattened index of `argmax`. If not specified, defaults to false
func MaxPoolWithArgmaxTargmax ¶
func MaxPoolWithArgmaxTargmax(value tf.DataType) MaxPoolWithArgmaxAttr
MaxPoolWithArgmaxTargmax sets the optional Targmax attribute to value. If not specified, defaults to DT_INT64
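A hedged sketch of pooling while also returning argmax indices; the MaxPoolWithArgmax positional arguments (input, ksize, strides, padding) and the two results are assumed from the package's conventions:

s := NewScope()
images := Placeholder(s, tf.Float) // [batch, height, width, channels]
out, argmax := MaxPoolWithArgmax(s, images, []int64{1, 2, 2, 1}, []int64{1, 2, 2, 1}, "SAME",
    MaxPoolWithArgmaxTargmax(tf.Int32),         // 32-bit indices instead of the DT_INT64 default
    MaxPoolWithArgmaxIncludeBatchInIndex(true)) // fold the batch dimension into the flattened index
if s.Err() != nil {
    panic(s.Err())
}
_, _ = out, argmax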
type MeanAttr ¶
type MeanAttr func(optionalAttr)
MeanAttr is an optional argument to Mean.
func MeanKeepDims ¶
func MeanKeepDims(value bool) MeanAttr
MeanKeepDims sets the optional keep_dims attribute to value.
value: If true, retain reduced dimensions with length 1. If not specified, defaults to false
type MergeDedupDataAttr ¶ added in v0.5.0
type MergeDedupDataAttr func(optionalAttr)
MergeDedupDataAttr is an optional argument to MergeDedupData.
func MergeDedupDataConfig ¶ added in v0.5.0
func MergeDedupDataConfig(value string) MergeDedupDataAttr
MergeDedupDataConfig sets the optional config attribute to value. If not specified, defaults to ""
type MergeV2CheckpointsAttr ¶
type MergeV2CheckpointsAttr func(optionalAttr)
MergeV2CheckpointsAttr is an optional argument to MergeV2Checkpoints.
func MergeV2CheckpointsAllowMissingFiles ¶ added in v0.2.0
func MergeV2CheckpointsAllowMissingFiles(value bool) MergeV2CheckpointsAttr
MergeV2CheckpointsAllowMissingFiles sets the optional allow_missing_files attribute to value.
value: see above. If not specified, defaults to false
func MergeV2CheckpointsDeleteOldDirs ¶
func MergeV2CheckpointsDeleteOldDirs(value bool) MergeV2CheckpointsAttr
MergeV2CheckpointsDeleteOldDirs sets the optional delete_old_dirs attribute to value.
value: see above. If not specified, defaults to true
type MfccAttr ¶
type MfccAttr func(optionalAttr)
MfccAttr is an optional argument to Mfcc.
func MfccDctCoefficientCount ¶
func MfccDctCoefficientCount(value int64) MfccAttr
MfccDctCoefficientCount sets the optional dct_coefficient_count attribute to value.
value: How many output channels to produce per time slice. If not specified, defaults to 13
func MfccFilterbankChannelCount ¶
func MfccFilterbankChannelCount(value int64) MfccAttr
MfccFilterbankChannelCount sets the optional filterbank_channel_count attribute to value.
value: Resolution of the Mel bank used internally. If not specified, defaults to 40
func MfccLowerFrequencyLimit ¶
func MfccLowerFrequencyLimit(value float32) MfccAttr
MfccLowerFrequencyLimit sets the optional lower_frequency_limit attribute to value.
value: The lowest frequency to use when calculating the cepstrum. If not specified, defaults to 20
func MfccUpperFrequencyLimit ¶
func MfccUpperFrequencyLimit(value float32) MfccAttr
MfccUpperFrequencyLimit sets the optional upper_frequency_limit attribute to value.
value: The highest frequency to use when calculating the cepstrum. If not specified, defaults to 4000
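A hedged sketch of computing MFCCs with non-default band limits; the Mfcc positional arguments (spectrogram, sample_rate) are assumed from the package's conventions:

s := NewScope()
spectrogram := Placeholder(s, tf.Float) // e.g. the output of an audio spectrogram op
sampleRate := Const(s, int32(16000))
mfcc := Mfcc(s, spectrogram, sampleRate,
    MfccLowerFrequencyLimit(80),   // ignore content below 80 Hz
    MfccUpperFrequencyLimit(7600), // and above 7.6 kHz
    MfccFilterbankChannelCount(40),
    MfccDctCoefficientCount(13))
if s.Err() != nil {
    panic(s.Err())
}
_ = mfcc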
type MinAttr ¶
type MinAttr func(optionalAttr)
MinAttr is an optional argument to Min.
func MinKeepDims ¶
func MinKeepDims(value bool) MinAttr
MinKeepDims sets the optional keep_dims attribute to value.
value: If true, retain reduced dimensions with length 1. If not specified, defaults to false
type ModelDatasetAttr ¶
type ModelDatasetAttr func(optionalAttr)
ModelDatasetAttr is an optional argument to ModelDataset.
func ModelDatasetAlgorithm ¶
func ModelDatasetAlgorithm(value int64) ModelDatasetAttr
ModelDatasetAlgorithm sets the optional algorithm attribute to value. If not specified, defaults to 0
func ModelDatasetCpuBudget ¶
func ModelDatasetCpuBudget(value int64) ModelDatasetAttr
ModelDatasetCpuBudget sets the optional cpu_budget attribute to value. If not specified, defaults to 0
func ModelDatasetRamBudget ¶
func ModelDatasetRamBudget(value int64) ModelDatasetAttr
ModelDatasetRamBudget sets the optional ram_budget attribute to value. If not specified, defaults to 0
type MultiDeviceIteratorFromStringHandleAttr ¶
type MultiDeviceIteratorFromStringHandleAttr func(optionalAttr)
MultiDeviceIteratorFromStringHandleAttr is an optional argument to MultiDeviceIteratorFromStringHandle.
func MultiDeviceIteratorFromStringHandleOutputShapes ¶
func MultiDeviceIteratorFromStringHandleOutputShapes(value []tf.Shape) MultiDeviceIteratorFromStringHandleAttr
MultiDeviceIteratorFromStringHandleOutputShapes sets the optional output_shapes attribute to value.
value: The list of shapes being produced. If not specified, defaults to {}
REQUIRES: len(value) >= 0
func MultiDeviceIteratorFromStringHandleOutputTypes ¶
func MultiDeviceIteratorFromStringHandleOutputTypes(value []tf.DataType) MultiDeviceIteratorFromStringHandleAttr
MultiDeviceIteratorFromStringHandleOutputTypes sets the optional output_types attribute to value.
value: The type list for the return values. If not specified, defaults to {}
REQUIRES: len(value) >= 0
type MultinomialAttr ¶
type MultinomialAttr func(optionalAttr)
MultinomialAttr is an optional argument to Multinomial.
func MultinomialOutputDtype ¶
func MultinomialOutputDtype(value tf.DataType) MultinomialAttr
MultinomialOutputDtype sets the optional output_dtype attribute to value. If not specified, defaults to DT_INT64
func MultinomialSeed ¶
func MultinomialSeed(value int64) MultinomialAttr
MultinomialSeed sets the optional seed attribute to value.
value: If either seed or seed2 is set to be non-zero, the internal random number generator is seeded by the given seed. Otherwise, a random seed is used. If not specified, defaults to 0
func MultinomialSeed2 ¶
func MultinomialSeed2(value int64) MultinomialAttr
MultinomialSeed2 sets the optional seed2 attribute to value.
value: A second seed to avoid seed collision. If not specified, defaults to 0
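A minimal sketch of reproducible sampling; the Multinomial positional arguments (logits, num_samples) are assumed from the package's conventions:

s := NewScope()
logits := Const(s, [][]float32{{0.1, 0.5, 0.4}}) // [batch, num_classes] unnormalized log-probabilities
numSamples := Const(s, int32(5))
samples := Multinomial(s, logits, numSamples,
    MultinomialSeed(42),              // non-zero seed makes the draws reproducible
    MultinomialOutputDtype(tf.Int32)) // int32 class indices instead of the DT_INT64 default
if s.Err() != nil {
    panic(s.Err())
}
_ = samples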
type MutableDenseHashTableV2Attr ¶
type MutableDenseHashTableV2Attr func(optionalAttr)
MutableDenseHashTableV2Attr is an optional argument to MutableDenseHashTableV2.
func MutableDenseHashTableV2Container ¶
func MutableDenseHashTableV2Container(value string) MutableDenseHashTableV2Attr
MutableDenseHashTableV2Container sets the optional container attribute to value.
value: If non-empty, this table is placed in the given container. Otherwise, a default container is used. If not specified, defaults to ""
func MutableDenseHashTableV2InitialNumBuckets ¶
func MutableDenseHashTableV2InitialNumBuckets(value int64) MutableDenseHashTableV2Attr
MutableDenseHashTableV2InitialNumBuckets sets the optional initial_num_buckets attribute to value.
value: The initial number of hash table buckets. Must be a power of 2. If not specified, defaults to 131072
func MutableDenseHashTableV2MaxLoadFactor ¶
func MutableDenseHashTableV2MaxLoadFactor(value float32) MutableDenseHashTableV2Attr
MutableDenseHashTableV2MaxLoadFactor sets the optional max_load_factor attribute to value.
value: The maximum ratio between number of entries and number of buckets before growing the table. Must be between 0 and 1. If not specified, defaults to 0.8
func MutableDenseHashTableV2SharedName ¶
func MutableDenseHashTableV2SharedName(value string) MutableDenseHashTableV2Attr
MutableDenseHashTableV2SharedName sets the optional shared_name attribute to value.
value: If non-empty, this table is shared under the given name across multiple sessions. If not specified, defaults to ""
func MutableDenseHashTableV2UseNodeNameSharing ¶
func MutableDenseHashTableV2UseNodeNameSharing(value bool) MutableDenseHashTableV2Attr
MutableDenseHashTableV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value. If not specified, defaults to false
func MutableDenseHashTableV2ValueShape ¶
func MutableDenseHashTableV2ValueShape(value tf.Shape) MutableDenseHashTableV2Attr
MutableDenseHashTableV2ValueShape sets the optional value_shape attribute to value.
value: The shape of each value. If not specified, defaults to {}
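A hedged sketch of creating a shared dense hash table; the MutableDenseHashTableV2 positional arguments (empty_key, deleted_key, value_dtype) are assumed from the package's conventions:

s := NewScope()
emptyKey := Const(s, int64(-1))   // key reserved to mark empty buckets
deletedKey := Const(s, int64(-2)) // key reserved to mark deleted buckets
table := MutableDenseHashTableV2(s, emptyKey, deletedKey, tf.Float,
    MutableDenseHashTableV2InitialNumBuckets(1<<17), // must be a power of 2
    MutableDenseHashTableV2MaxLoadFactor(0.75),
    MutableDenseHashTableV2SharedName("embedding_table")) // share across sessions under this name
if s.Err() != nil {
    panic(s.Err())
}
_ = table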
type MutableHashTableOfTensorsV2Attr ¶
type MutableHashTableOfTensorsV2Attr func(optionalAttr)
MutableHashTableOfTensorsV2Attr is an optional argument to MutableHashTableOfTensorsV2.
func MutableHashTableOfTensorsV2Container ¶
func MutableHashTableOfTensorsV2Container(value string) MutableHashTableOfTensorsV2Attr
MutableHashTableOfTensorsV2Container sets the optional container attribute to value.
value: If non-empty, this table is placed in the given container. Otherwise, a default container is used. If not specified, defaults to ""
func MutableHashTableOfTensorsV2SharedName ¶
func MutableHashTableOfTensorsV2SharedName(value string) MutableHashTableOfTensorsV2Attr
MutableHashTableOfTensorsV2SharedName sets the optional shared_name attribute to value.
value: If non-empty, this table is shared under the given name across multiple sessions. If not specified, defaults to ""
func MutableHashTableOfTensorsV2UseNodeNameSharing ¶
func MutableHashTableOfTensorsV2UseNodeNameSharing(value bool) MutableHashTableOfTensorsV2Attr
MutableHashTableOfTensorsV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value. If not specified, defaults to false
func MutableHashTableOfTensorsV2ValueShape ¶
func MutableHashTableOfTensorsV2ValueShape(value tf.Shape) MutableHashTableOfTensorsV2Attr
MutableHashTableOfTensorsV2ValueShape sets the optional value_shape attribute to value. If not specified, defaults to {}
type MutableHashTableV2Attr ¶
type MutableHashTableV2Attr func(optionalAttr)
MutableHashTableV2Attr is an optional argument to MutableHashTableV2.
func MutableHashTableV2Container ¶
func MutableHashTableV2Container(value string) MutableHashTableV2Attr
MutableHashTableV2Container sets the optional container attribute to value.
value: If non-empty, this table is placed in the given container. Otherwise, a default container is used. If not specified, defaults to ""
func MutableHashTableV2SharedName ¶
func MutableHashTableV2SharedName(value string) MutableHashTableV2Attr
MutableHashTableV2SharedName sets the optional shared_name attribute to value.
value: If non-empty, this table is shared under the given name across multiple sessions. If not specified, defaults to ""
func MutableHashTableV2UseNodeNameSharing ¶
func MutableHashTableV2UseNodeNameSharing(value bool) MutableHashTableV2Attr
MutableHashTableV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
value: If true and shared_name is empty, the table is shared using the node name. If not specified, defaults to false
type MutexV2Attr ¶
type MutexV2Attr func(optionalAttr)
MutexV2Attr is an optional argument to MutexV2.
func MutexV2Container ¶
func MutexV2Container(value string) MutexV2Attr
MutexV2Container sets the optional container attribute to value.
value: If non-empty, this variable is placed in the given container. Otherwise, a default container is used. If not specified, defaults to ""
func MutexV2SharedName ¶
func MutexV2SharedName(value string) MutexV2Attr
MutexV2SharedName sets the optional shared_name attribute to value.
value: If non-empty, this variable is named in the given bucket with this shared_name. Otherwise, the node name is used instead. If not specified, defaults to ""
type NonDeterministicIntsAttr ¶
type NonDeterministicIntsAttr func(optionalAttr)
NonDeterministicIntsAttr is an optional argument to NonDeterministicInts.
func NonDeterministicIntsDtype ¶
func NonDeterministicIntsDtype(value tf.DataType) NonDeterministicIntsAttr
NonDeterministicIntsDtype sets the optional dtype attribute to value.
value: The type of the output. If not specified, defaults to DT_INT64
type NonMaxSuppressionAttr ¶
type NonMaxSuppressionAttr func(optionalAttr)
NonMaxSuppressionAttr is an optional argument to NonMaxSuppression.
func NonMaxSuppressionIouThreshold ¶
func NonMaxSuppressionIouThreshold(value float32) NonMaxSuppressionAttr
NonMaxSuppressionIouThreshold sets the optional iou_threshold attribute to value.
value: A float representing the threshold for deciding whether boxes overlap too much with respect to IOU. If not specified, defaults to 0.5
type NonMaxSuppressionV4Attr ¶
type NonMaxSuppressionV4Attr func(optionalAttr)
NonMaxSuppressionV4Attr is an optional argument to NonMaxSuppressionV4.
func NonMaxSuppressionV4PadToMaxOutputSize ¶
func NonMaxSuppressionV4PadToMaxOutputSize(value bool) NonMaxSuppressionV4Attr
NonMaxSuppressionV4PadToMaxOutputSize sets the optional pad_to_max_output_size attribute to value.
value: If true, the output `selected_indices` is padded to be of length `max_output_size`. If not specified, defaults to false
type NonMaxSuppressionV5Attr ¶
type NonMaxSuppressionV5Attr func(optionalAttr)
NonMaxSuppressionV5Attr is an optional argument to NonMaxSuppressionV5.
func NonMaxSuppressionV5PadToMaxOutputSize ¶
func NonMaxSuppressionV5PadToMaxOutputSize(value bool) NonMaxSuppressionV5Attr
NonMaxSuppressionV5PadToMaxOutputSize sets the optional pad_to_max_output_size attribute to value.
value: If true, the output `selected_indices` is padded to be of length `max_output_size`. If not specified, defaults to false
type NotEqualAttr ¶
type NotEqualAttr func(optionalAttr)
NotEqualAttr is an optional argument to NotEqual.
func NotEqualIncompatibleShapeError ¶
func NotEqualIncompatibleShapeError(value bool) NotEqualAttr
NotEqualIncompatibleShapeError sets the optional incompatible_shape_error attribute to value. If not specified, defaults to true
type NthElementAttr ¶
type NthElementAttr func(optionalAttr)
NthElementAttr is an optional argument to NthElement.
func NthElementReverse ¶
func NthElementReverse(value bool) NthElementAttr
NthElementReverse sets the optional reverse attribute to value.
value: When set to True, find the nth-largest value in the vector rather than the nth-smallest. If not specified, defaults to false
type OneHotAttr ¶
type OneHotAttr func(optionalAttr)
OneHotAttr is an optional argument to OneHot.
func OneHotAxis ¶
func OneHotAxis(value int64) OneHotAttr
OneHotAxis sets the optional axis attribute to value.
value: The axis to fill (default: -1, a new inner-most axis). If not specified, defaults to -1
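A minimal sketch of one-hot encoding along a chosen axis; the OneHot positional arguments (indices, depth, on_value, off_value) are assumed from the package's conventions:

s := NewScope()
indices := Const(s, []int32{0, 2, 1})
depth := Const(s, int32(3))
onValue := Const(s, float32(1))
offValue := Const(s, float32(0))
oneHot := OneHot(s, indices, depth, onValue, offValue,
    OneHotAxis(0)) // fill along axis 0 instead of the default new inner-most axis
if s.Err() != nil {
    panic(s.Err())
}
_ = oneHot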
type OptimizeDatasetAttr ¶
type OptimizeDatasetAttr func(optionalAttr)
OptimizeDatasetAttr is an optional argument to OptimizeDataset.
func OptimizeDatasetOptimizationConfigs ¶
func OptimizeDatasetOptimizationConfigs(value []string) OptimizeDatasetAttr
OptimizeDatasetOptimizationConfigs sets the optional optimization_configs attribute to value. If not specified, defaults to {}
type OptimizeDatasetV2Attr ¶
type OptimizeDatasetV2Attr func(optionalAttr)
OptimizeDatasetV2Attr is an optional argument to OptimizeDatasetV2.
func OptimizeDatasetV2OptimizationConfigs ¶
func OptimizeDatasetV2OptimizationConfigs(value []string) OptimizeDatasetV2Attr
OptimizeDatasetV2OptimizationConfigs sets the optional optimization_configs attribute to value. If not specified, defaults to {}
type OptionsDatasetAttr ¶
type OptionsDatasetAttr func(optionalAttr)
OptionsDatasetAttr is an optional argument to OptionsDataset.
func OptionsDatasetMetadata ¶
func OptionsDatasetMetadata(value string) OptionsDatasetAttr
OptionsDatasetMetadata sets the optional metadata attribute to value. If not specified, defaults to ""
type OrderedMapClearAttr ¶
type OrderedMapClearAttr func(optionalAttr)
OrderedMapClearAttr is an optional argument to OrderedMapClear.
func OrderedMapClearCapacity ¶
func OrderedMapClearCapacity(value int64) OrderedMapClearAttr
OrderedMapClearCapacity sets the optional capacity attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func OrderedMapClearContainer ¶
func OrderedMapClearContainer(value string) OrderedMapClearAttr
OrderedMapClearContainer sets the optional container attribute to value. If not specified, defaults to ""
func OrderedMapClearMemoryLimit ¶
func OrderedMapClearMemoryLimit(value int64) OrderedMapClearAttr
OrderedMapClearMemoryLimit sets the optional memory_limit attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func OrderedMapClearSharedName ¶
func OrderedMapClearSharedName(value string) OrderedMapClearAttr
OrderedMapClearSharedName sets the optional shared_name attribute to value. If not specified, defaults to ""
type OrderedMapIncompleteSizeAttr ¶
type OrderedMapIncompleteSizeAttr func(optionalAttr)
OrderedMapIncompleteSizeAttr is an optional argument to OrderedMapIncompleteSize.
func OrderedMapIncompleteSizeCapacity ¶
func OrderedMapIncompleteSizeCapacity(value int64) OrderedMapIncompleteSizeAttr
OrderedMapIncompleteSizeCapacity sets the optional capacity attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func OrderedMapIncompleteSizeContainer ¶
func OrderedMapIncompleteSizeContainer(value string) OrderedMapIncompleteSizeAttr
OrderedMapIncompleteSizeContainer sets the optional container attribute to value. If not specified, defaults to ""
func OrderedMapIncompleteSizeMemoryLimit ¶
func OrderedMapIncompleteSizeMemoryLimit(value int64) OrderedMapIncompleteSizeAttr
OrderedMapIncompleteSizeMemoryLimit sets the optional memory_limit attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func OrderedMapIncompleteSizeSharedName ¶
func OrderedMapIncompleteSizeSharedName(value string) OrderedMapIncompleteSizeAttr
OrderedMapIncompleteSizeSharedName sets the optional shared_name attribute to value. If not specified, defaults to ""
type OrderedMapPeekAttr ¶
type OrderedMapPeekAttr func(optionalAttr)
OrderedMapPeekAttr is an optional argument to OrderedMapPeek.
func OrderedMapPeekCapacity ¶
func OrderedMapPeekCapacity(value int64) OrderedMapPeekAttr
OrderedMapPeekCapacity sets the optional capacity attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func OrderedMapPeekContainer ¶
func OrderedMapPeekContainer(value string) OrderedMapPeekAttr
OrderedMapPeekContainer sets the optional container attribute to value. If not specified, defaults to ""
func OrderedMapPeekMemoryLimit ¶
func OrderedMapPeekMemoryLimit(value int64) OrderedMapPeekAttr
OrderedMapPeekMemoryLimit sets the optional memory_limit attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func OrderedMapPeekSharedName ¶
func OrderedMapPeekSharedName(value string) OrderedMapPeekAttr
OrderedMapPeekSharedName sets the optional shared_name attribute to value. If not specified, defaults to ""
type OrderedMapSizeAttr ¶
type OrderedMapSizeAttr func(optionalAttr)
OrderedMapSizeAttr is an optional argument to OrderedMapSize.
func OrderedMapSizeCapacity ¶
func OrderedMapSizeCapacity(value int64) OrderedMapSizeAttr
OrderedMapSizeCapacity sets the optional capacity attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func OrderedMapSizeContainer ¶
func OrderedMapSizeContainer(value string) OrderedMapSizeAttr
OrderedMapSizeContainer sets the optional container attribute to value. If not specified, defaults to ""
func OrderedMapSizeMemoryLimit ¶
func OrderedMapSizeMemoryLimit(value int64) OrderedMapSizeAttr
OrderedMapSizeMemoryLimit sets the optional memory_limit attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func OrderedMapSizeSharedName ¶
func OrderedMapSizeSharedName(value string) OrderedMapSizeAttr
OrderedMapSizeSharedName sets the optional shared_name attribute to value. If not specified, defaults to ""
type OrderedMapStageAttr ¶
type OrderedMapStageAttr func(optionalAttr)
OrderedMapStageAttr is an optional argument to OrderedMapStage.
func OrderedMapStageCapacity ¶
func OrderedMapStageCapacity(value int64) OrderedMapStageAttr
OrderedMapStageCapacity sets the optional capacity attribute to value.
value: Maximum number of elements in the Staging Area. If > 0, inserts on the container will block when the capacity is reached. If not specified, defaults to 0
REQUIRES: value >= 0
func OrderedMapStageContainer ¶
func OrderedMapStageContainer(value string) OrderedMapStageAttr
OrderedMapStageContainer sets the optional container attribute to value.
value: If non-empty, this queue is placed in the given container. Otherwise, a default container is used. If not specified, defaults to ""
func OrderedMapStageMemoryLimit ¶
func OrderedMapStageMemoryLimit(value int64) OrderedMapStageAttr
OrderedMapStageMemoryLimit sets the optional memory_limit attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func OrderedMapStageSharedName ¶
func OrderedMapStageSharedName(value string) OrderedMapStageAttr
OrderedMapStageSharedName sets the optional shared_name attribute to value.
value: It is necessary to match this name to the matching Unstage Op. If not specified, defaults to ""
type OrderedMapUnstageAttr ¶
type OrderedMapUnstageAttr func(optionalAttr)
OrderedMapUnstageAttr is an optional argument to OrderedMapUnstage.
func OrderedMapUnstageCapacity ¶
func OrderedMapUnstageCapacity(value int64) OrderedMapUnstageAttr
OrderedMapUnstageCapacity sets the optional capacity attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func OrderedMapUnstageContainer ¶
func OrderedMapUnstageContainer(value string) OrderedMapUnstageAttr
OrderedMapUnstageContainer sets the optional container attribute to value. If not specified, defaults to ""
func OrderedMapUnstageMemoryLimit ¶
func OrderedMapUnstageMemoryLimit(value int64) OrderedMapUnstageAttr
OrderedMapUnstageMemoryLimit sets the optional memory_limit attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func OrderedMapUnstageSharedName ¶
func OrderedMapUnstageSharedName(value string) OrderedMapUnstageAttr
OrderedMapUnstageSharedName sets the optional shared_name attribute to value. If not specified, defaults to ""
type OrderedMapUnstageNoKeyAttr ¶
type OrderedMapUnstageNoKeyAttr func(optionalAttr)
OrderedMapUnstageNoKeyAttr is an optional argument to OrderedMapUnstageNoKey.
func OrderedMapUnstageNoKeyCapacity ¶
func OrderedMapUnstageNoKeyCapacity(value int64) OrderedMapUnstageNoKeyAttr
OrderedMapUnstageNoKeyCapacity sets the optional capacity attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func OrderedMapUnstageNoKeyContainer ¶
func OrderedMapUnstageNoKeyContainer(value string) OrderedMapUnstageNoKeyAttr
OrderedMapUnstageNoKeyContainer sets the optional container attribute to value. If not specified, defaults to ""
func OrderedMapUnstageNoKeyMemoryLimit ¶
func OrderedMapUnstageNoKeyMemoryLimit(value int64) OrderedMapUnstageNoKeyAttr
OrderedMapUnstageNoKeyMemoryLimit sets the optional memory_limit attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func OrderedMapUnstageNoKeySharedName ¶
func OrderedMapUnstageNoKeySharedName(value string) OrderedMapUnstageNoKeyAttr
OrderedMapUnstageNoKeySharedName sets the optional shared_name attribute to value. If not specified, defaults to ""
type OutfeedDequeueAttr ¶
type OutfeedDequeueAttr func(optionalAttr)
OutfeedDequeueAttr is an optional argument to OutfeedDequeue.
func OutfeedDequeueDeviceOrdinal ¶
func OutfeedDequeueDeviceOrdinal(value int64) OutfeedDequeueAttr
OutfeedDequeueDeviceOrdinal sets the optional device_ordinal attribute to value.
value: The TPU device to use. This should be -1 when the Op is running on a TPU device, and >= 0 when the Op is running on the CPU device. If not specified, defaults to -1
type OutfeedDequeueTupleAttr ¶
type OutfeedDequeueTupleAttr func(optionalAttr)
OutfeedDequeueTupleAttr is an optional argument to OutfeedDequeueTuple.
func OutfeedDequeueTupleDeviceOrdinal ¶
func OutfeedDequeueTupleDeviceOrdinal(value int64) OutfeedDequeueTupleAttr
OutfeedDequeueTupleDeviceOrdinal sets the optional device_ordinal attribute to value.
value: The TPU device to use. This should be -1 when the Op is running on a TPU device, and >= 0 when the Op is running on the CPU device. If not specified, defaults to -1
type PaddedBatchDatasetAttr ¶
type PaddedBatchDatasetAttr func(optionalAttr)
PaddedBatchDatasetAttr is an optional argument to PaddedBatchDataset.
func PaddedBatchDatasetMetadata ¶
func PaddedBatchDatasetMetadata(value string) PaddedBatchDatasetAttr
PaddedBatchDatasetMetadata sets the optional metadata attribute to value. If not specified, defaults to ""
type PaddedBatchDatasetV2Attr ¶
type PaddedBatchDatasetV2Attr func(optionalAttr)
PaddedBatchDatasetV2Attr is an optional argument to PaddedBatchDatasetV2.
func PaddedBatchDatasetV2Metadata ¶
func PaddedBatchDatasetV2Metadata(value string) PaddedBatchDatasetV2Attr
PaddedBatchDatasetV2Metadata sets the optional metadata attribute to value. If not specified, defaults to ""
func PaddedBatchDatasetV2ParallelCopy ¶
func PaddedBatchDatasetV2ParallelCopy(value bool) PaddedBatchDatasetV2Attr
PaddedBatchDatasetV2ParallelCopy sets the optional parallel_copy attribute to value. If not specified, defaults to false
type PaddingFIFOQueueV2Attr ¶
type PaddingFIFOQueueV2Attr func(optionalAttr)
PaddingFIFOQueueV2Attr is an optional argument to PaddingFIFOQueueV2.
func PaddingFIFOQueueV2Capacity ¶
func PaddingFIFOQueueV2Capacity(value int64) PaddingFIFOQueueV2Attr
PaddingFIFOQueueV2Capacity sets the optional capacity attribute to value.
value: The upper bound on the number of elements in this queue. Negative numbers mean no limit. If not specified, defaults to -1
func PaddingFIFOQueueV2Container ¶
func PaddingFIFOQueueV2Container(value string) PaddingFIFOQueueV2Attr
PaddingFIFOQueueV2Container sets the optional container attribute to value.
value: If non-empty, this queue is placed in the given container. Otherwise, a default container is used. If not specified, defaults to ""
func PaddingFIFOQueueV2Shapes ¶
func PaddingFIFOQueueV2Shapes(value []tf.Shape) PaddingFIFOQueueV2Attr
PaddingFIFOQueueV2Shapes sets the optional shapes attribute to value.
value: The shape of each component in a value. The length of this attr must be either 0 or the same as the length of component_types. Shapes of fixed rank but variable size are allowed by setting any shape dimension to -1. In this case, the inputs' shape may vary along the given dimension, and DequeueMany will pad the given dimension with zeros up to the maximum shape of all elements in the given batch. If the length of this attr is 0, different queue elements may have different ranks and shapes, but only one element may be dequeued at a time. If not specified, defaults to {}
REQUIRES: len(value) >= 0
func PaddingFIFOQueueV2SharedName ¶
func PaddingFIFOQueueV2SharedName(value string) PaddingFIFOQueueV2Attr
PaddingFIFOQueueV2SharedName sets the optional shared_name attribute to value.
value: If non-empty, this queue will be shared under the given name across multiple sessions. If not specified, defaults to ""
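A hedged sketch of a padding FIFO queue with a variable-length component; the PaddingFIFOQueueV2 positional argument (component_types) and the use of tf.MakeShape with -1 for an unknown dimension are assumptions:

s := NewScope()
queue := PaddingFIFOQueueV2(s, []tf.DataType{tf.Float},
    PaddingFIFOQueueV2Capacity(32),
    PaddingFIFOQueueV2Shapes([]tf.Shape{tf.MakeShape(-1, 128)}), // variable first dimension, padded on dequeue
    PaddingFIFOQueueV2SharedName("padded_batches"))              // share across sessions under this name
if s.Err() != nil {
    panic(s.Err())
}
_ = queue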
type ParameterizedTruncatedNormalAttr ¶
type ParameterizedTruncatedNormalAttr func(optionalAttr)
ParameterizedTruncatedNormalAttr is an optional argument to ParameterizedTruncatedNormal.
func ParameterizedTruncatedNormalSeed ¶
func ParameterizedTruncatedNormalSeed(value int64) ParameterizedTruncatedNormalAttr
ParameterizedTruncatedNormalSeed sets the optional seed attribute to value.
value: If either `seed` or `seed2` are set to be non-zero, the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed. If not specified, defaults to 0
func ParameterizedTruncatedNormalSeed2 ¶
func ParameterizedTruncatedNormalSeed2(value int64) ParameterizedTruncatedNormalAttr
ParameterizedTruncatedNormalSeed2 sets the optional seed2 attribute to value.
value: A second seed to avoid seed collision. If not specified, defaults to 0
type ParseExampleDatasetAttr ¶
type ParseExampleDatasetAttr func(optionalAttr)
ParseExampleDatasetAttr is an optional argument to ParseExampleDataset.
func ParseExampleDatasetRaggedKeys ¶
func ParseExampleDatasetRaggedKeys(value []string) ParseExampleDatasetAttr
ParseExampleDatasetRaggedKeys sets the optional ragged_keys attribute to value. If not specified, defaults to {}
REQUIRES: len(value) >= 0
func ParseExampleDatasetRaggedSplitTypes ¶
func ParseExampleDatasetRaggedSplitTypes(value []tf.DataType) ParseExampleDatasetAttr
ParseExampleDatasetRaggedSplitTypes sets the optional ragged_split_types attribute to value. If not specified, defaults to {}
REQUIRES: len(value) >= 0
func ParseExampleDatasetRaggedValueTypes ¶
func ParseExampleDatasetRaggedValueTypes(value []tf.DataType) ParseExampleDatasetAttr
ParseExampleDatasetRaggedValueTypes sets the optional ragged_value_types attribute to value. If not specified, defaults to {}
REQUIRES: len(value) >= 0
func ParseExampleDatasetSloppy ¶
func ParseExampleDatasetSloppy(value bool) ParseExampleDatasetAttr
ParseExampleDatasetSloppy sets the optional sloppy attribute to value. If not specified, defaults to false
type ParseExampleDatasetV2Attr ¶
type ParseExampleDatasetV2Attr func(optionalAttr)
ParseExampleDatasetV2Attr is an optional argument to ParseExampleDatasetV2.
func ParseExampleDatasetV2Deterministic ¶
func ParseExampleDatasetV2Deterministic(value string) ParseExampleDatasetV2Attr
ParseExampleDatasetV2Deterministic sets the optional deterministic attribute to value.
value: A string indicating the op-level determinism to use. Deterministic controls whether the dataset is allowed to return elements out of order if the next element to be returned isn't available, but a later element is. Options are "true", "false", and "default". "default" indicates that determinism should be decided by the `experimental_deterministic` parameter of `tf.data.Options`. If not specified, defaults to "default"
func ParseExampleDatasetV2RaggedKeys ¶
func ParseExampleDatasetV2RaggedKeys(value []string) ParseExampleDatasetV2Attr
ParseExampleDatasetV2RaggedKeys sets the optional ragged_keys attribute to value. If not specified, defaults to {}
REQUIRES: len(value) >= 0
func ParseExampleDatasetV2RaggedSplitTypes ¶
func ParseExampleDatasetV2RaggedSplitTypes(value []tf.DataType) ParseExampleDatasetV2Attr
ParseExampleDatasetV2RaggedSplitTypes sets the optional ragged_split_types attribute to value. If not specified, defaults to {}
REQUIRES: len(value) >= 0
func ParseExampleDatasetV2RaggedValueTypes ¶
func ParseExampleDatasetV2RaggedValueTypes(value []tf.DataType) ParseExampleDatasetV2Attr
ParseExampleDatasetV2RaggedValueTypes sets the optional ragged_value_types attribute to value. If not specified, defaults to {}
REQUIRES: len(value) >= 0
type ParseSequenceExampleAttr ¶
type ParseSequenceExampleAttr func(optionalAttr)
ParseSequenceExampleAttr is an optional argument to ParseSequenceExample.
func ParseSequenceExampleContextDenseShapes ¶
func ParseSequenceExampleContextDenseShapes(value []tf.Shape) ParseSequenceExampleAttr
ParseSequenceExampleContextDenseShapes sets the optional context_dense_shapes attribute to value.
value: A list of Ncontext_dense shapes; the shapes of data in each context Feature given in context_dense_keys. The number of elements in the Feature corresponding to context_dense_key[j] must always equal context_dense_shapes[j].NumEntries(). The shape of context_dense_values[j] will match context_dense_shapes[j]. If not specified, defaults to {}
REQUIRES: len(value) >= 0
func ParseSequenceExampleContextSparseTypes ¶
func ParseSequenceExampleContextSparseTypes(value []tf.DataType) ParseSequenceExampleAttr
ParseSequenceExampleContextSparseTypes sets the optional context_sparse_types attribute to value.
value: A list of Ncontext_sparse types; the data types of data in each context Feature given in context_sparse_keys. Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), DT_INT64 (Int64List), and DT_STRING (BytesList). If not specified, defaults to {}
REQUIRES: len(value) >= 0
func ParseSequenceExampleFeatureListDenseShapes ¶
func ParseSequenceExampleFeatureListDenseShapes(value []tf.Shape) ParseSequenceExampleAttr
ParseSequenceExampleFeatureListDenseShapes sets the optional feature_list_dense_shapes attribute to value.
value: A list of Nfeature_list_dense shapes; the shapes of data in each FeatureList given in feature_list_dense_keys. The shape of each Feature in the FeatureList corresponding to feature_list_dense_key[j] must always equal feature_list_dense_shapes[j].NumEntries(). If not specified, defaults to {}
REQUIRES: len(value) >= 0
func ParseSequenceExampleFeatureListDenseTypes ¶
func ParseSequenceExampleFeatureListDenseTypes(value []tf.DataType) ParseSequenceExampleAttr
ParseSequenceExampleFeatureListDenseTypes sets the optional feature_list_dense_types attribute to value. If not specified, defaults to {}
REQUIRES: len(value) >= 0
func ParseSequenceExampleFeatureListSparseTypes ¶
func ParseSequenceExampleFeatureListSparseTypes(value []tf.DataType) ParseSequenceExampleAttr
ParseSequenceExampleFeatureListSparseTypes sets the optional feature_list_sparse_types attribute to value.
value: A list of Nfeature_list_sparse types; the data types of data in each FeatureList given in feature_list_sparse_keys. Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), DT_INT64 (Int64List), and DT_STRING (BytesList). If not specified, defaults to {}
REQUIRES: len(value) >= 0
func ParseSequenceExampleNcontextDense ¶
func ParseSequenceExampleNcontextDense(value int64) ParseSequenceExampleAttr
ParseSequenceExampleNcontextDense sets the optional Ncontext_dense attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func ParseSequenceExampleNcontextSparse ¶
func ParseSequenceExampleNcontextSparse(value int64) ParseSequenceExampleAttr
ParseSequenceExampleNcontextSparse sets the optional Ncontext_sparse attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func ParseSequenceExampleNfeatureListDense ¶
func ParseSequenceExampleNfeatureListDense(value int64) ParseSequenceExampleAttr
ParseSequenceExampleNfeatureListDense sets the optional Nfeature_list_dense attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func ParseSequenceExampleNfeatureListSparse ¶
func ParseSequenceExampleNfeatureListSparse(value int64) ParseSequenceExampleAttr
ParseSequenceExampleNfeatureListSparse sets the optional Nfeature_list_sparse attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
type ParseSequenceExampleV2Attr ¶
type ParseSequenceExampleV2Attr func(optionalAttr)
ParseSequenceExampleV2Attr is an optional argument to ParseSequenceExampleV2.
func ParseSequenceExampleV2ContextDenseShapes ¶
func ParseSequenceExampleV2ContextDenseShapes(value []tf.Shape) ParseSequenceExampleV2Attr
ParseSequenceExampleV2ContextDenseShapes sets the optional context_dense_shapes attribute to value.
value: A list of Ncontext_dense shapes; the shapes of data in each context Feature given in context_dense_keys. The number of elements in the Feature corresponding to context_dense_key[j] must always equal context_dense_shapes[j].NumEntries(). The shape of context_dense_values[j] will match context_dense_shapes[j]. If not specified, defaults to {}
REQUIRES: len(value) >= 0
func ParseSequenceExampleV2ContextRaggedSplitTypes ¶
func ParseSequenceExampleV2ContextRaggedSplitTypes(value []tf.DataType) ParseSequenceExampleV2Attr
ParseSequenceExampleV2ContextRaggedSplitTypes sets the optional context_ragged_split_types attribute to value.
value: RaggedTensor.row_split dtypes for the ragged context features. If not specified, defaults to {}
REQUIRES: len(value) >= 0
func ParseSequenceExampleV2ContextRaggedValueTypes ¶
func ParseSequenceExampleV2ContextRaggedValueTypes(value []tf.DataType) ParseSequenceExampleV2Attr
ParseSequenceExampleV2ContextRaggedValueTypes sets the optional context_ragged_value_types attribute to value.
value: RaggedTensor.value dtypes for the ragged context features. If not specified, defaults to {}
REQUIRES: len(value) >= 0
func ParseSequenceExampleV2ContextSparseTypes ¶
func ParseSequenceExampleV2ContextSparseTypes(value []tf.DataType) ParseSequenceExampleV2Attr
ParseSequenceExampleV2ContextSparseTypes sets the optional context_sparse_types attribute to value.
value: A list of Ncontext_sparse types; the data types of data in each context Feature given in context_sparse_keys. Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), DT_INT64 (Int64List), and DT_STRING (BytesList). If not specified, defaults to {}
REQUIRES: len(value) >= 0
func ParseSequenceExampleV2FeatureListDenseShapes ¶
func ParseSequenceExampleV2FeatureListDenseShapes(value []tf.Shape) ParseSequenceExampleV2Attr
ParseSequenceExampleV2FeatureListDenseShapes sets the optional feature_list_dense_shapes attribute to value.
value: A list of Nfeature_list_dense shapes; the shapes of data in each FeatureList given in feature_list_dense_keys. The shape of each Feature in the FeatureList corresponding to feature_list_dense_key[j] must always equal feature_list_dense_shapes[j].NumEntries(). If not specified, defaults to {}
REQUIRES: len(value) >= 0
func ParseSequenceExampleV2FeatureListDenseTypes ¶
func ParseSequenceExampleV2FeatureListDenseTypes(value []tf.DataType) ParseSequenceExampleV2Attr
ParseSequenceExampleV2FeatureListDenseTypes sets the optional feature_list_dense_types attribute to value. If not specified, defaults to {}
REQUIRES: len(value) >= 0
func ParseSequenceExampleV2FeatureListRaggedSplitTypes ¶
func ParseSequenceExampleV2FeatureListRaggedSplitTypes(value []tf.DataType) ParseSequenceExampleV2Attr
ParseSequenceExampleV2FeatureListRaggedSplitTypes sets the optional feature_list_ragged_split_types attribute to value.
value: RaggedTensor.row_split dtypes for the ragged FeatureList features. If not specified, defaults to {}
REQUIRES: len(value) >= 0
func ParseSequenceExampleV2FeatureListRaggedValueTypes ¶
func ParseSequenceExampleV2FeatureListRaggedValueTypes(value []tf.DataType) ParseSequenceExampleV2Attr
ParseSequenceExampleV2FeatureListRaggedValueTypes sets the optional feature_list_ragged_value_types attribute to value.
value: RaggedTensor.value dtypes for the ragged FeatureList features. If not specified, defaults to {}
REQUIRES: len(value) >= 0
func ParseSequenceExampleV2FeatureListSparseTypes ¶
func ParseSequenceExampleV2FeatureListSparseTypes(value []tf.DataType) ParseSequenceExampleV2Attr
ParseSequenceExampleV2FeatureListSparseTypes sets the optional feature_list_sparse_types attribute to value.
value: A list of Nfeature_list_sparse types; the data types of data in each FeatureList given in feature_list_sparse_keys. Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), DT_INT64 (Int64List), and DT_STRING (BytesList). If not specified, defaults to {}
REQUIRES: len(value) >= 0
func ParseSequenceExampleV2NcontextSparse ¶
func ParseSequenceExampleV2NcontextSparse(value int64) ParseSequenceExampleV2Attr
ParseSequenceExampleV2NcontextSparse sets the optional Ncontext_sparse attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func ParseSequenceExampleV2NfeatureListDense ¶
func ParseSequenceExampleV2NfeatureListDense(value int64) ParseSequenceExampleV2Attr
ParseSequenceExampleV2NfeatureListDense sets the optional Nfeature_list_dense attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func ParseSequenceExampleV2NfeatureListSparse ¶
func ParseSequenceExampleV2NfeatureListSparse(value int64) ParseSequenceExampleV2Attr
ParseSequenceExampleV2NfeatureListSparse sets the optional Nfeature_list_sparse attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
type ParseSingleSequenceExampleAttr ¶
type ParseSingleSequenceExampleAttr func(optionalAttr)
ParseSingleSequenceExampleAttr is an optional argument to ParseSingleSequenceExample.
func ParseSingleSequenceExampleContextDenseShapes ¶
func ParseSingleSequenceExampleContextDenseShapes(value []tf.Shape) ParseSingleSequenceExampleAttr
ParseSingleSequenceExampleContextDenseShapes sets the optional context_dense_shapes attribute to value.
value: A list of Ncontext_dense shapes; the shapes of data in each context Feature given in context_dense_keys. The number of elements in the Feature corresponding to context_dense_key[j] must always equal context_dense_shapes[j].NumEntries(). The shape of context_dense_values[j] will match context_dense_shapes[j]. If not specified, defaults to {}
REQUIRES: len(value) >= 0
func ParseSingleSequenceExampleContextSparseTypes ¶
func ParseSingleSequenceExampleContextSparseTypes(value []tf.DataType) ParseSingleSequenceExampleAttr
ParseSingleSequenceExampleContextSparseTypes sets the optional context_sparse_types attribute to value.
value: A list of Ncontext_sparse types; the data types of data in each context Feature given in context_sparse_keys. Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), DT_INT64 (Int64List), and DT_STRING (BytesList). If not specified, defaults to {}
REQUIRES: len(value) >= 0
func ParseSingleSequenceExampleFeatureListDenseShapes ¶
func ParseSingleSequenceExampleFeatureListDenseShapes(value []tf.Shape) ParseSingleSequenceExampleAttr
ParseSingleSequenceExampleFeatureListDenseShapes sets the optional feature_list_dense_shapes attribute to value.
value: A list of Nfeature_list_dense shapes; the shapes of data in each FeatureList given in feature_list_dense_keys. The shape of each Feature in the FeatureList corresponding to feature_list_dense_key[j] must always equal feature_list_dense_shapes[j].NumEntries(). If not specified, defaults to {}
REQUIRES: len(value) >= 0
func ParseSingleSequenceExampleFeatureListDenseTypes ¶
func ParseSingleSequenceExampleFeatureListDenseTypes(value []tf.DataType) ParseSingleSequenceExampleAttr
ParseSingleSequenceExampleFeatureListDenseTypes sets the optional feature_list_dense_types attribute to value. If not specified, defaults to {}
REQUIRES: len(value) >= 0
func ParseSingleSequenceExampleFeatureListSparseTypes ¶
func ParseSingleSequenceExampleFeatureListSparseTypes(value []tf.DataType) ParseSingleSequenceExampleAttr
ParseSingleSequenceExampleFeatureListSparseTypes sets the optional feature_list_sparse_types attribute to value.
value: A list of Nfeature_list_sparse types; the data types of data in each FeatureList given in feature_list_sparse_keys. Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), DT_INT64 (Int64List), and DT_STRING (BytesList). If not specified, defaults to {}
REQUIRES: len(value) >= 0
type PlaceholderAttr ¶
type PlaceholderAttr func(optionalAttr)
PlaceholderAttr is an optional argument to Placeholder.
func PlaceholderShape ¶
func PlaceholderShape(value tf.Shape) PlaceholderAttr
PlaceholderShape sets the optional shape attribute to value.
value: (Optional) The shape of the tensor. If the shape has 0 dimensions, the shape is unconstrained. If not specified, defaults to {unknown_rank:true}
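A minimal sketch of constraining a placeholder's shape; the use of tf.MakeShape with -1 for an unknown dimension is an assumption:

s := NewScope()
x := Placeholder(s, tf.Float,
    PlaceholderShape(tf.MakeShape(-1, 784))) // rank-2 input with 784 columns and an unknown row count
if s.Err() != nil {
    panic(s.Err())
}
_ = x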
type PrefetchDatasetAttr ¶
type PrefetchDatasetAttr func(optionalAttr)
PrefetchDatasetAttr is an optional argument to PrefetchDataset.
func PrefetchDatasetBufferSizeMin ¶
func PrefetchDatasetBufferSizeMin(value int64) PrefetchDatasetAttr
PrefetchDatasetBufferSizeMin sets the optional buffer_size_min attribute to value. If not specified, defaults to 0
func PrefetchDatasetLegacyAutotune ¶
func PrefetchDatasetLegacyAutotune(value bool) PrefetchDatasetAttr
PrefetchDatasetLegacyAutotune sets the optional legacy_autotune attribute to value. If not specified, defaults to true
func PrefetchDatasetMetadata ¶
func PrefetchDatasetMetadata(value string) PrefetchDatasetAttr
PrefetchDatasetMetadata sets the optional metadata attribute to value. If not specified, defaults to ""
func PrefetchDatasetSlackPeriod ¶
func PrefetchDatasetSlackPeriod(value int64) PrefetchDatasetAttr
PrefetchDatasetSlackPeriod sets the optional slack_period attribute to value. If not specified, defaults to 0
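A sketch of wiring these attributes into a PrefetchDataset node. The argument orders assumed here (RangeDataset: start, stop, step, output types, output shapes; PrefetchDataset: input dataset, buffer size, output types, output shapes) come from the generated wrappers and may differ between versions; a buffer_size of -1 is assumed to request autotuning.
s := NewScope()
types := []tf.DataType{tf.Int64}
shapes := []tf.Shape{tf.ScalarShape()}
rng := RangeDataset(s, Const(s, int64(0)), Const(s, int64(100)), Const(s, int64(1)), types, shapes)
// Autotuned prefetching with a lower bound of 2 buffered elements and the
// legacy autotuner disabled.
ds := PrefetchDataset(s, rng, Const(s, int64(-1)), types, shapes,
	PrefetchDatasetBufferSizeMin(2),
	PrefetchDatasetLegacyAutotune(false))
_ = ds
if s.Err() != nil {
	panic(s.Err())
}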
type PrelinearizeAttr ¶
type PrelinearizeAttr func(optionalAttr)
PrelinearizeAttr is an optional argument to Prelinearize.
func PrelinearizeLayout ¶
func PrelinearizeLayout(value []int64) PrelinearizeAttr
PrelinearizeLayout sets the optional layout attribute to value.
value: A vector holding the requested layout in minor-to-major sequence. If a layout attribute is passed but its values are all -1 the layout will be computed by the infeed operation. If not specified, defaults to {}
func PrelinearizeShape ¶
func PrelinearizeShape(value tf.Shape) PrelinearizeAttr
PrelinearizeShape sets the optional shape attribute to value.
value: The shape of the tensor. If not specified, defaults to {}
type PrelinearizeTupleAttr ¶
type PrelinearizeTupleAttr func(optionalAttr)
PrelinearizeTupleAttr is an optional argument to PrelinearizeTuple.
func PrelinearizeTupleLayouts ¶
func PrelinearizeTupleLayouts(value []int64) PrelinearizeTupleAttr
PrelinearizeTupleLayouts sets the optional layouts attribute to value.
value: A vector holding the requested layout in minor-to-major sequence for all the tuple shapes in the order the shapes appear in the "shapes" input. The layout elements for a sub-shape can be set to -1 in which case the corresponding layout will be computed by the infeed operation. If not specified, defaults to {}
type PreventGradientAttr ¶
type PreventGradientAttr func(optionalAttr)
PreventGradientAttr is an optional argument to PreventGradient.
func PreventGradientMessage ¶
func PreventGradientMessage(value string) PreventGradientAttr
PreventGradientMessage sets the optional message attribute to value.
value: Will be printed in the error when anyone tries to differentiate this operation. If not specified, defaults to ""
type PrintAttr ¶
type PrintAttr func(optionalAttr)
PrintAttr is an optional argument to Print.
func PrintFirstN ¶
func PrintFirstN(value int64) PrintAttr
PrintFirstN sets the optional first_n attribute to value.
value: Only log `first_n` number of times. -1 disables logging. If not specified, defaults to -1
func PrintMessage ¶
func PrintMessage(value string) PrintAttr
PrintMessage sets the optional message attribute to value.
value: A string, prefix of the error message. If not specified, defaults to ""
func PrintSummarize ¶
func PrintSummarize(value int64) PrintAttr
PrintSummarize sets the optional summarize attribute to value.
value: Only print this many entries of each tensor. If not specified, defaults to 3
type PrintV2Attr ¶
type PrintV2Attr func(optionalAttr)
PrintV2Attr is an optional argument to PrintV2.
func PrintV2End ¶
func PrintV2End(value string) PrintV2Attr
PrintV2End sets the optional end attribute to value. If not specified, defaults to "\n"
func PrintV2OutputStream ¶
func PrintV2OutputStream(value string) PrintV2Attr
PrintV2OutputStream sets the optional output_stream attribute to value.
value: A string specifying the output stream or logging level to print to. If not specified, defaults to "stderr"
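A minimal sketch of using these attributes with the PrintV2 wrapper, assumed to take a scope, a string tensor to print, and optional PrintV2Attr values, and to return a *tf.Operation. Here the output is sent to stdout and the trailing newline is suppressed.
s := NewScope()
msg := Const(s, "step finished")
// The returned operation must be passed as a target to Session.Run for the
// print side effect to happen.
printOp := PrintV2(s, msg, PrintV2OutputStream("stdout"), PrintV2End(""))
_ = printOp
if s.Err() != nil {
	panic(s.Err())
}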
type PriorityQueueV2Attr ¶
type PriorityQueueV2Attr func(optionalAttr)
PriorityQueueV2Attr is an optional argument to PriorityQueueV2.
func PriorityQueueV2Capacity ¶
func PriorityQueueV2Capacity(value int64) PriorityQueueV2Attr
PriorityQueueV2Capacity sets the optional capacity attribute to value.
value: The upper bound on the number of elements in this queue. Negative numbers mean no limit. If not specified, defaults to -1
func PriorityQueueV2ComponentTypes ¶
func PriorityQueueV2ComponentTypes(value []tf.DataType) PriorityQueueV2Attr
PriorityQueueV2ComponentTypes sets the optional component_types attribute to value.
value: The type of each component in a value. If not specified, defaults to {}
REQUIRES: len(value) >= 0
func PriorityQueueV2Container ¶
func PriorityQueueV2Container(value string) PriorityQueueV2Attr
PriorityQueueV2Container sets the optional container attribute to value.
value: If non-empty, this queue is placed in the given container. Otherwise, a default container is used. If not specified, defaults to ""
func PriorityQueueV2SharedName ¶
func PriorityQueueV2SharedName(value string) PriorityQueueV2Attr
PriorityQueueV2SharedName sets the optional shared_name attribute to value.
value: If non-empty, this queue will be shared under the given name across multiple sessions. If not specified, defaults to ""
type ProdAttr ¶
type ProdAttr func(optionalAttr)
ProdAttr is an optional argument to Prod.
func ProdKeepDims ¶
func ProdKeepDims(value bool) ProdAttr
ProdKeepDims sets the optional keep_dims attribute to value.
value: If true, retain reduced dimensions with length 1. If not specified, defaults to false
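For example, keeping the reduced dimension when taking a product over axis 1; a sketch assuming the Prod wrapper takes a scope, the input, an axis tensor, and optional ProdAttr values.
s := NewScope()
x := Const(s, [][]float32{{1, 2, 3}, {4, 5, 6}})
// Reduce over columns but keep a length-1 axis so the result stays rank 2.
p := Prod(s, x, Const(s, int32(1)), ProdKeepDims(true))
if s.Err() != nil {
	panic(s.Err())
}
fmt.Println(p.Shape()) // expected [2, 1]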
type QrAttr ¶
type QrAttr func(optionalAttr)
QrAttr is an optional argument to Qr.
func QrFullMatrices ¶
func QrFullMatrices(value bool) QrAttr
QrFullMatrices sets the optional full_matrices attribute to value.
value: If true, compute full-sized `q` and `r`. If false (the default), compute only the leading `P` columns of `q`. If not specified, defaults to false
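A sketch of requesting the full decomposition, assuming the Qr wrapper returns the q and r factors for a matrix input.
s := NewScope()
a := Const(s, [][]float32{{1, 2}, {3, 4}, {5, 6}})
// With full_matrices=true, q is 3x3 and r is 3x2 rather than the reduced
// 3x2 / 2x2 factors.
q, r := Qr(s, a, QrFullMatrices(true))
_, _ = q, r
if s.Err() != nil {
	panic(s.Err())
}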
type QuantizeAndDequantizeAttr ¶
type QuantizeAndDequantizeAttr func(optionalAttr)
QuantizeAndDequantizeAttr is an optional argument to QuantizeAndDequantize.
func QuantizeAndDequantizeInputMax ¶
func QuantizeAndDequantizeInputMax(value float32) QuantizeAndDequantizeAttr
QuantizeAndDequantizeInputMax sets the optional input_max attribute to value. If not specified, defaults to 0
func QuantizeAndDequantizeInputMin ¶
func QuantizeAndDequantizeInputMin(value float32) QuantizeAndDequantizeAttr
QuantizeAndDequantizeInputMin sets the optional input_min attribute to value. If not specified, defaults to 0
func QuantizeAndDequantizeNumBits ¶
func QuantizeAndDequantizeNumBits(value int64) QuantizeAndDequantizeAttr
QuantizeAndDequantizeNumBits sets the optional num_bits attribute to value. If not specified, defaults to 8
func QuantizeAndDequantizeRangeGiven ¶
func QuantizeAndDequantizeRangeGiven(value bool) QuantizeAndDequantizeAttr
QuantizeAndDequantizeRangeGiven sets the optional range_given attribute to value. If not specified, defaults to false
func QuantizeAndDequantizeSignedInput ¶
func QuantizeAndDequantizeSignedInput(value bool) QuantizeAndDequantizeAttr
QuantizeAndDequantizeSignedInput sets the optional signed_input attribute to value. If not specified, defaults to true
type QuantizeAndDequantizeV2Attr ¶
type QuantizeAndDequantizeV2Attr func(optionalAttr)
QuantizeAndDequantizeV2Attr is an optional argument to QuantizeAndDequantizeV2.
func QuantizeAndDequantizeV2Axis ¶
func QuantizeAndDequantizeV2Axis(value int64) QuantizeAndDequantizeV2Attr
QuantizeAndDequantizeV2Axis sets the optional axis attribute to value.
value: If specified, this axis is treated as a channel or slice axis, and a separate quantization range is used for each channel or slice along this axis. If not specified, defaults to -1
func QuantizeAndDequantizeV2NarrowRange ¶
func QuantizeAndDequantizeV2NarrowRange(value bool) QuantizeAndDequantizeV2Attr
QuantizeAndDequantizeV2NarrowRange sets the optional narrow_range attribute to value.
value: If True, then the absolute value of the quantized minimum value is the same as the quantized maximum value, instead of 1 greater. i.e. for 8 bit quantization, the minimum value is -127 instead of -128. If not specified, defaults to false
func QuantizeAndDequantizeV2NumBits ¶
func QuantizeAndDequantizeV2NumBits(value int64) QuantizeAndDequantizeV2Attr
QuantizeAndDequantizeV2NumBits sets the optional num_bits attribute to value.
value: The bitwidth of the quantization. If not specified, defaults to 8
func QuantizeAndDequantizeV2RangeGiven ¶
func QuantizeAndDequantizeV2RangeGiven(value bool) QuantizeAndDequantizeV2Attr
QuantizeAndDequantizeV2RangeGiven sets the optional range_given attribute to value.
value: Whether the range is given or should be determined from the `input` tensor. If not specified, defaults to false
func QuantizeAndDequantizeV2RoundMode ¶
func QuantizeAndDequantizeV2RoundMode(value string) QuantizeAndDequantizeV2Attr
QuantizeAndDequantizeV2RoundMode sets the optional round_mode attribute to value.
value: The 'round_mode' attribute controls which rounding tie-breaking algorithm is used when rounding float values to their quantized equivalents. The following rounding modes are currently supported:
- HALF_TO_EVEN: this is the default round_mode.
- HALF_UP: round towards positive infinity. In this mode 7.5 rounds up to 8 and -7.5 rounds up to -7.
If not specified, defaults to "HALF_TO_EVEN"
func QuantizeAndDequantizeV2SignedInput ¶
func QuantizeAndDequantizeV2SignedInput(value bool) QuantizeAndDequantizeV2Attr
QuantizeAndDequantizeV2SignedInput sets the optional signed_input attribute to value.
value: Whether the quantization is signed or unsigned. (Actually this parameter should have been called `signed_output`.) If not specified, defaults to true
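Putting a few of these attributes together: a sketch assuming the QuantizeAndDequantizeV2 wrapper takes a scope, the input, and the input_min/input_max tensors before the optional attrs.
s := NewScope()
x := Const(s, []float32{-1.5, -0.4, 0.2, 1.8})
// Use the provided [-2, 2] range, 8-bit signed quantization with a symmetric
// (narrow) range, and HALF_UP tie-breaking.
y := QuantizeAndDequantizeV2(s, x, Const(s, float32(-2)), Const(s, float32(2)),
	QuantizeAndDequantizeV2RangeGiven(true),
	QuantizeAndDequantizeV2NumBits(8),
	QuantizeAndDequantizeV2NarrowRange(true),
	QuantizeAndDequantizeV2RoundMode("HALF_UP"))
_ = y
if s.Err() != nil {
	panic(s.Err())
}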
type QuantizeAndDequantizeV3Attr ¶
type QuantizeAndDequantizeV3Attr func(optionalAttr)
QuantizeAndDequantizeV3Attr is an optional argument to QuantizeAndDequantizeV3.
func QuantizeAndDequantizeV3Axis ¶
func QuantizeAndDequantizeV3Axis(value int64) QuantizeAndDequantizeV3Attr
QuantizeAndDequantizeV3Axis sets the optional axis attribute to value. If not specified, defaults to -1
func QuantizeAndDequantizeV3NarrowRange ¶
func QuantizeAndDequantizeV3NarrowRange(value bool) QuantizeAndDequantizeV3Attr
QuantizeAndDequantizeV3NarrowRange sets the optional narrow_range attribute to value. If not specified, defaults to false
func QuantizeAndDequantizeV3RangeGiven ¶
func QuantizeAndDequantizeV3RangeGiven(value bool) QuantizeAndDequantizeV3Attr
QuantizeAndDequantizeV3RangeGiven sets the optional range_given attribute to value. If not specified, defaults to true
func QuantizeAndDequantizeV3SignedInput ¶
func QuantizeAndDequantizeV3SignedInput(value bool) QuantizeAndDequantizeV3Attr
QuantizeAndDequantizeV3SignedInput sets the optional signed_input attribute to value. If not specified, defaults to true
type QuantizeAndDequantizeV4Attr ¶
type QuantizeAndDequantizeV4Attr func(optionalAttr)
QuantizeAndDequantizeV4Attr is an optional argument to QuantizeAndDequantizeV4.
func QuantizeAndDequantizeV4Axis ¶
func QuantizeAndDequantizeV4Axis(value int64) QuantizeAndDequantizeV4Attr
QuantizeAndDequantizeV4Axis sets the optional axis attribute to value.
value: If specified, this axis is treated as a channel or slice axis, and a separate quantization range is used for each channel or slice along this axis. If not specified, defaults to -1
func QuantizeAndDequantizeV4NarrowRange ¶
func QuantizeAndDequantizeV4NarrowRange(value bool) QuantizeAndDequantizeV4Attr
QuantizeAndDequantizeV4NarrowRange sets the optional narrow_range attribute to value.
value: If True, then the absolute value of the quantized minimum value is the same as the quantized maximum value, instead of 1 greater. i.e. for 8 bit quantization, the minimum value is -127 instead of -128. If not specified, defaults to false
func QuantizeAndDequantizeV4NumBits ¶
func QuantizeAndDequantizeV4NumBits(value int64) QuantizeAndDequantizeV4Attr
QuantizeAndDequantizeV4NumBits sets the optional num_bits attribute to value.
value: The bitwidth of the quantization. If not specified, defaults to 8
func QuantizeAndDequantizeV4RangeGiven ¶
func QuantizeAndDequantizeV4RangeGiven(value bool) QuantizeAndDequantizeV4Attr
QuantizeAndDequantizeV4RangeGiven sets the optional range_given attribute to value.
value: Whether the range is given or should be determined from the `input` tensor. If not specified, defaults to false
func QuantizeAndDequantizeV4RoundMode ¶
func QuantizeAndDequantizeV4RoundMode(value string) QuantizeAndDequantizeV4Attr
QuantizeAndDequantizeV4RoundMode sets the optional round_mode attribute to value.
value: The 'round_mode' attribute controls which rounding tie-breaking algorithm is used when rounding float values to their quantized equivalents. The following rounding modes are currently supported:
- HALF_TO_EVEN: this is the default round_mode.
- HALF_UP: round towards positive infinity. In this mode 7.5 rounds up to 8 and -7.5 rounds up to -7.
If not specified, defaults to "HALF_TO_EVEN"
func QuantizeAndDequantizeV4SignedInput ¶
func QuantizeAndDequantizeV4SignedInput(value bool) QuantizeAndDequantizeV4Attr
QuantizeAndDequantizeV4SignedInput sets the optional signed_input attribute to value.
value: Whether the quantization is signed or unsigned. (Actually this parameter should have been called `signed_output`.) If not specified, defaults to true
type QuantizeAndDequantizeV4GradAttr ¶
type QuantizeAndDequantizeV4GradAttr func(optionalAttr)
QuantizeAndDequantizeV4GradAttr is an optional argument to QuantizeAndDequantizeV4Grad.
func QuantizeAndDequantizeV4GradAxis ¶
func QuantizeAndDequantizeV4GradAxis(value int64) QuantizeAndDequantizeV4GradAttr
QuantizeAndDequantizeV4GradAxis sets the optional axis attribute to value. If not specified, defaults to -1
type QuantizeV2Attr ¶
type QuantizeV2Attr func(optionalAttr)
QuantizeV2Attr is an optional argument to QuantizeV2.
func QuantizeV2Axis ¶
func QuantizeV2Axis(value int64) QuantizeV2Attr
QuantizeV2Axis sets the optional axis attribute to value. If not specified, defaults to -1
func QuantizeV2EnsureMinimumRange ¶
func QuantizeV2EnsureMinimumRange(value float32) QuantizeV2Attr
QuantizeV2EnsureMinimumRange sets the optional ensure_minimum_range attribute to value. If not specified, defaults to 0.01
func QuantizeV2Mode ¶
func QuantizeV2Mode(value string) QuantizeV2Attr
QuantizeV2Mode sets the optional mode attribute to value. If not specified, defaults to "MIN_COMBINED"
func QuantizeV2NarrowRange ¶
func QuantizeV2NarrowRange(value bool) QuantizeV2Attr
QuantizeV2NarrowRange sets the optional narrow_range attribute to value. If not specified, defaults to false
func QuantizeV2RoundMode ¶
func QuantizeV2RoundMode(value string) QuantizeV2Attr
QuantizeV2RoundMode sets the optional round_mode attribute to value. If not specified, defaults to "HALF_AWAY_FROM_ZERO"
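A sketch of quantizing a float tensor to quint8 in SCALED mode. The wrapper is assumed to take a scope, the input, the min_range and max_range tensors, and the output data type before the optional attrs, and to return the quantized tensor plus the actual range used.
s := NewScope()
x := Const(s, []float32{0.0, 0.5, 1.0})
q, qMin, qMax := QuantizeV2(s, x, Const(s, float32(0)), Const(s, float32(1)), tf.Quint8,
	QuantizeV2Mode("SCALED"),
	QuantizeV2RoundMode("HALF_AWAY_FROM_ZERO"),
	QuantizeV2EnsureMinimumRange(0.01))
_, _, _ = q, qMin, qMax
if s.Err() != nil {
	panic(s.Err())
}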
type QuantizedAddAttr ¶
type QuantizedAddAttr func(optionalAttr)
QuantizedAddAttr is an optional argument to QuantizedAdd.
func QuantizedAddToutput ¶
func QuantizedAddToutput(value tf.DataType) QuantizedAddAttr
QuantizedAddToutput sets the optional Toutput attribute to value. If not specified, defaults to DT_QINT32
type QuantizedConv2DAttr ¶
type QuantizedConv2DAttr func(optionalAttr)
QuantizedConv2DAttr is an optional argument to QuantizedConv2D.
func QuantizedConv2DDilations ¶
func QuantizedConv2DDilations(value []int64) QuantizedConv2DAttr
QuantizedConv2DDilations sets the optional dilations attribute to value.
value: 1-D tensor of length 4. The dilation factor for each dimension of `input`. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions must be 1. If not specified, defaults to {i:1 i:1 i:1 i:1}
func QuantizedConv2DOutType ¶
func QuantizedConv2DOutType(value tf.DataType) QuantizedConv2DAttr
QuantizedConv2DOutType sets the optional out_type attribute to value. If not specified, defaults to DT_QINT32
type QuantizedConv2DPerChannelAttr ¶
type QuantizedConv2DPerChannelAttr func(optionalAttr)
QuantizedConv2DPerChannelAttr is an optional argument to QuantizedConv2DPerChannel.
func QuantizedConv2DPerChannelDilations ¶
func QuantizedConv2DPerChannelDilations(value []int64) QuantizedConv2DPerChannelAttr
QuantizedConv2DPerChannelDilations sets the optional dilations attribute to value.
value: list of dilation values. If not specified, defaults to {i:1 i:1 i:1 i:1}
func QuantizedConv2DPerChannelOutType ¶
func QuantizedConv2DPerChannelOutType(value tf.DataType) QuantizedConv2DPerChannelAttr
QuantizedConv2DPerChannelOutType sets the optional out_type attribute to value.
value: The quantized type of output tensor that needs to be converted. If not specified, defaults to DT_QINT32
type QuantizedDepthwiseConv2DAttr ¶
type QuantizedDepthwiseConv2DAttr func(optionalAttr)
QuantizedDepthwiseConv2DAttr is an optional argument to QuantizedDepthwiseConv2D.
func QuantizedDepthwiseConv2DDilations ¶
func QuantizedDepthwiseConv2DDilations(value []int64) QuantizedDepthwiseConv2DAttr
QuantizedDepthwiseConv2DDilations sets the optional dilations attribute to value.
value: List of dilation values. If not specified, defaults to {i:1 i:1 i:1 i:1}
func QuantizedDepthwiseConv2DOutType ¶
func QuantizedDepthwiseConv2DOutType(value tf.DataType) QuantizedDepthwiseConv2DAttr
QuantizedDepthwiseConv2DOutType sets the optional out_type attribute to value.
value: The type of the output. If not specified, defaults to DT_QINT32
type QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr ¶
type QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr func(optionalAttr)
QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr is an optional argument to QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.
func QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeDilations ¶
func QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeDilations(value []int64) QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr
QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeDilations sets the optional dilations attribute to value.
value: List of dilation values. If not specified, defaults to {i:1 i:1 i:1 i:1}
func QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeOutType ¶
func QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeOutType(value tf.DataType) QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr
QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeOutType sets the optional out_type attribute to value.
value: The type of the output. If not specified, defaults to DT_QUINT8
func QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizePaddingList ¶
func QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizePaddingList(value []int64) QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr
QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizePaddingList sets the optional padding_list attribute to value. If not specified, defaults to {}
type QuantizedDepthwiseConv2DWithBiasAndReluAttr ¶
type QuantizedDepthwiseConv2DWithBiasAndReluAttr func(optionalAttr)
QuantizedDepthwiseConv2DWithBiasAndReluAttr is an optional argument to QuantizedDepthwiseConv2DWithBiasAndRelu.
func QuantizedDepthwiseConv2DWithBiasAndReluDilations ¶
func QuantizedDepthwiseConv2DWithBiasAndReluDilations(value []int64) QuantizedDepthwiseConv2DWithBiasAndReluAttr
QuantizedDepthwiseConv2DWithBiasAndReluDilations sets the optional dilations attribute to value.
value: List of dilation values. If not specified, defaults to {i:1 i:1 i:1 i:1}
func QuantizedDepthwiseConv2DWithBiasAndReluOutType ¶
func QuantizedDepthwiseConv2DWithBiasAndReluOutType(value tf.DataType) QuantizedDepthwiseConv2DWithBiasAndReluAttr
QuantizedDepthwiseConv2DWithBiasAndReluOutType sets the optional out_type attribute to value.
value: The type of the output. If not specified, defaults to DT_QINT32
func QuantizedDepthwiseConv2DWithBiasAndReluPaddingList ¶
func QuantizedDepthwiseConv2DWithBiasAndReluPaddingList(value []int64) QuantizedDepthwiseConv2DWithBiasAndReluAttr
QuantizedDepthwiseConv2DWithBiasAndReluPaddingList sets the optional padding_list attribute to value. If not specified, defaults to {}
type QuantizedDepthwiseConv2DWithBiasAttr ¶
type QuantizedDepthwiseConv2DWithBiasAttr func(optionalAttr)
QuantizedDepthwiseConv2DWithBiasAttr is an optional argument to QuantizedDepthwiseConv2DWithBias.
func QuantizedDepthwiseConv2DWithBiasDilations ¶
func QuantizedDepthwiseConv2DWithBiasDilations(value []int64) QuantizedDepthwiseConv2DWithBiasAttr
QuantizedDepthwiseConv2DWithBiasDilations sets the optional dilations attribute to value.
value: List of dilation values. If not specified, defaults to {i:1 i:1 i:1 i:1}
func QuantizedDepthwiseConv2DWithBiasOutType ¶
func QuantizedDepthwiseConv2DWithBiasOutType(value tf.DataType) QuantizedDepthwiseConv2DWithBiasAttr
QuantizedDepthwiseConv2DWithBiasOutType sets the optional out_type attribute to value.
value: The type of the output. If not specified, defaults to DT_QINT32
type QuantizedInstanceNormAttr ¶
type QuantizedInstanceNormAttr func(optionalAttr)
QuantizedInstanceNormAttr is an optional argument to QuantizedInstanceNorm.
func QuantizedInstanceNormGivenYMax ¶
func QuantizedInstanceNormGivenYMax(value float32) QuantizedInstanceNormAttr
QuantizedInstanceNormGivenYMax sets the optional given_y_max attribute to value.
value: Output in `y_max` if `output_range_given` is True. If not specified, defaults to 0
func QuantizedInstanceNormGivenYMin ¶
func QuantizedInstanceNormGivenYMin(value float32) QuantizedInstanceNormAttr
QuantizedInstanceNormGivenYMin sets the optional given_y_min attribute to value.
value: Output in `y_min` if `output_range_given` is True. If not specified, defaults to 0
func QuantizedInstanceNormMinSeparation ¶
func QuantizedInstanceNormMinSeparation(value float32) QuantizedInstanceNormAttr
QuantizedInstanceNormMinSeparation sets the optional min_separation attribute to value.
value: Minimum value of `y_max - y_min`. If not specified, defaults to 0.001
func QuantizedInstanceNormOutputRangeGiven ¶
func QuantizedInstanceNormOutputRangeGiven(value bool) QuantizedInstanceNormAttr
QuantizedInstanceNormOutputRangeGiven sets the optional output_range_given attribute to value.
value: If True, `given_y_min` and `given_y_max` are used as the output range. Otherwise, the implementation computes the output range. If not specified, defaults to false
func QuantizedInstanceNormVarianceEpsilon ¶
func QuantizedInstanceNormVarianceEpsilon(value float32) QuantizedInstanceNormAttr
QuantizedInstanceNormVarianceEpsilon sets the optional variance_epsilon attribute to value.
value: A small float number to avoid dividing by 0. If not specified, defaults to 1e-05
type QuantizedMatMulAttr ¶
type QuantizedMatMulAttr func(optionalAttr)
QuantizedMatMulAttr is an optional argument to QuantizedMatMul.
func QuantizedMatMulTactivation ¶
func QuantizedMatMulTactivation(value tf.DataType) QuantizedMatMulAttr
QuantizedMatMulTactivation sets the optional Tactivation attribute to value.
value: The type of output produced by activation function following this operation. If not specified, defaults to DT_QUINT8
func QuantizedMatMulToutput ¶
func QuantizedMatMulToutput(value tf.DataType) QuantizedMatMulAttr
QuantizedMatMulToutput sets the optional Toutput attribute to value. If not specified, defaults to DT_QINT32
func QuantizedMatMulTransposeA ¶
func QuantizedMatMulTransposeA(value bool) QuantizedMatMulAttr
QuantizedMatMulTransposeA sets the optional transpose_a attribute to value.
value: If true, `a` is transposed before multiplication. If not specified, defaults to false
func QuantizedMatMulTransposeB ¶
func QuantizedMatMulTransposeB(value bool) QuantizedMatMulAttr
QuantizedMatMulTransposeB sets the optional transpose_b attribute to value.
value: If true, `b` is transposed before multiplication. If not specified, defaults to false
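A sketch combining QuantizeV2 (above) with QuantizedMatMul. The argument order assumed here is the two quantized inputs followed by their min/max ranges, with the qint32 product and its range returned.
s := NewScope()
a := Const(s, [][]float32{{1, 2}, {3, 4}})
b := Const(s, [][]float32{{5, 6}, {7, 8}})
aq, aMin, aMax := QuantizeV2(s, a, Const(s, float32(0)), Const(s, float32(4)), tf.Quint8)
bq, bMin, bMax := QuantizeV2(s, b, Const(s, float32(0)), Const(s, float32(8)), tf.Quint8)
// Multiply a by b transposed; the accumulator type stays at the qint32 default.
out, outMin, outMax := QuantizedMatMul(s, aq, bq, aMin, aMax, bMin, bMax,
	QuantizedMatMulTransposeB(true),
	QuantizedMatMulToutput(tf.Qint32))
_, _, _ = out, outMin, outMax
if s.Err() != nil {
	panic(s.Err())
}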
type QuantizedMatMulWithBiasAndReluAndRequantizeAttr ¶
type QuantizedMatMulWithBiasAndReluAndRequantizeAttr func(optionalAttr)
QuantizedMatMulWithBiasAndReluAndRequantizeAttr is an optional argument to QuantizedMatMulWithBiasAndReluAndRequantize.
func QuantizedMatMulWithBiasAndReluAndRequantizeInputQuantMode ¶
func QuantizedMatMulWithBiasAndReluAndRequantizeInputQuantMode(value string) QuantizedMatMulWithBiasAndReluAndRequantizeAttr
QuantizedMatMulWithBiasAndReluAndRequantizeInputQuantMode sets the optional input_quant_mode attribute to value.
value: Input data quantization mode. Either MIN_FIRST (the default) or SCALED. If not specified, defaults to "MIN_FIRST"
func QuantizedMatMulWithBiasAndReluAndRequantizeToutput ¶
func QuantizedMatMulWithBiasAndReluAndRequantizeToutput(value tf.DataType) QuantizedMatMulWithBiasAndReluAndRequantizeAttr
QuantizedMatMulWithBiasAndReluAndRequantizeToutput sets the optional Toutput attribute to value. If not specified, defaults to DT_QUINT8
func QuantizedMatMulWithBiasAndReluAndRequantizeTransposeA ¶
func QuantizedMatMulWithBiasAndReluAndRequantizeTransposeA(value bool) QuantizedMatMulWithBiasAndReluAndRequantizeAttr
QuantizedMatMulWithBiasAndReluAndRequantizeTransposeA sets the optional transpose_a attribute to value.
value: If true, `a` is transposed before multiplication. If not specified, defaults to false
func QuantizedMatMulWithBiasAndReluAndRequantizeTransposeB ¶
func QuantizedMatMulWithBiasAndReluAndRequantizeTransposeB(value bool) QuantizedMatMulWithBiasAndReluAndRequantizeAttr
QuantizedMatMulWithBiasAndReluAndRequantizeTransposeB sets the optional transpose_b attribute to value.
value: If true, `b` is transposed before multiplication. If not specified, defaults to false
type QuantizedMatMulWithBiasAndReluAttr ¶
type QuantizedMatMulWithBiasAndReluAttr func(optionalAttr)
QuantizedMatMulWithBiasAndReluAttr is an optional argument to QuantizedMatMulWithBiasAndRelu.
func QuantizedMatMulWithBiasAndReluInputQuantMode ¶
func QuantizedMatMulWithBiasAndReluInputQuantMode(value string) QuantizedMatMulWithBiasAndReluAttr
QuantizedMatMulWithBiasAndReluInputQuantMode sets the optional input_quant_mode attribute to value.
value: Input data quantization mode. Either MIN_FIRST (the default) or SCALED. If not specified, defaults to "MIN_FIRST"
func QuantizedMatMulWithBiasAndReluToutput ¶
func QuantizedMatMulWithBiasAndReluToutput(value tf.DataType) QuantizedMatMulWithBiasAndReluAttr
QuantizedMatMulWithBiasAndReluToutput sets the optional Toutput attribute to value. If not specified, defaults to DT_QINT32
func QuantizedMatMulWithBiasAndReluTransposeA ¶
func QuantizedMatMulWithBiasAndReluTransposeA(value bool) QuantizedMatMulWithBiasAndReluAttr
QuantizedMatMulWithBiasAndReluTransposeA sets the optional transpose_a attribute to value.
value: If true, `a` is transposed before multiplication. If not specified, defaults to false
func QuantizedMatMulWithBiasAndReluTransposeB ¶
func QuantizedMatMulWithBiasAndReluTransposeB(value bool) QuantizedMatMulWithBiasAndReluAttr
QuantizedMatMulWithBiasAndReluTransposeB sets the optional transpose_b attribute to value.
value: If true, `b` is transposed before multiplication. If not specified, defaults to false
type QuantizedMatMulWithBiasAttr ¶
type QuantizedMatMulWithBiasAttr func(optionalAttr)
QuantizedMatMulWithBiasAttr is an optional argument to QuantizedMatMulWithBias.
func QuantizedMatMulWithBiasInputQuantMode ¶
func QuantizedMatMulWithBiasInputQuantMode(value string) QuantizedMatMulWithBiasAttr
QuantizedMatMulWithBiasInputQuantMode sets the optional input_quant_mode attribute to value.
value: Input data quantization mode. Either MIN_FIRST (the default) or SCALED. If not specified, defaults to "MIN_FIRST"
func QuantizedMatMulWithBiasToutput ¶
func QuantizedMatMulWithBiasToutput(value tf.DataType) QuantizedMatMulWithBiasAttr
QuantizedMatMulWithBiasToutput sets the optional Toutput attribute to value. If not specified, defaults to DT_QINT32
func QuantizedMatMulWithBiasTransposeA ¶
func QuantizedMatMulWithBiasTransposeA(value bool) QuantizedMatMulWithBiasAttr
QuantizedMatMulWithBiasTransposeA sets the optional transpose_a attribute to value.
value: If true, `a` is transposed before multiplication. If not specified, defaults to false
func QuantizedMatMulWithBiasTransposeB ¶
func QuantizedMatMulWithBiasTransposeB(value bool) QuantizedMatMulWithBiasAttr
QuantizedMatMulWithBiasTransposeB sets the optional transpose_b attribute to value.
value: If true, `b` is transposed before multiplication. If not specified, defaults to false
type QuantizedMulAttr ¶
type QuantizedMulAttr func(optionalAttr)
QuantizedMulAttr is an optional argument to QuantizedMul.
func QuantizedMulToutput ¶
func QuantizedMulToutput(value tf.DataType) QuantizedMulAttr
QuantizedMulToutput sets the optional Toutput attribute to value. If not specified, defaults to DT_QINT32
type QuantizedRelu6Attr ¶
type QuantizedRelu6Attr func(optionalAttr)
QuantizedRelu6Attr is an optional argument to QuantizedRelu6.
func QuantizedRelu6OutType ¶
func QuantizedRelu6OutType(value tf.DataType) QuantizedRelu6Attr
QuantizedRelu6OutType sets the optional out_type attribute to value. If not specified, defaults to DT_QUINT8
type QuantizedReluAttr ¶
type QuantizedReluAttr func(optionalAttr)
QuantizedReluAttr is an optional argument to QuantizedRelu.
func QuantizedReluOutType ¶
func QuantizedReluOutType(value tf.DataType) QuantizedReluAttr
QuantizedReluOutType sets the optional out_type attribute to value. If not specified, defaults to DT_QUINT8
type QuantizedReluXAttr ¶
type QuantizedReluXAttr func(optionalAttr)
QuantizedReluXAttr is an optional argument to QuantizedReluX.
func QuantizedReluXOutType ¶
func QuantizedReluXOutType(value tf.DataType) QuantizedReluXAttr
QuantizedReluXOutType sets the optional out_type attribute to value. If not specified, defaults to DT_QUINT8
type QuantizedResizeBilinearAttr ¶
type QuantizedResizeBilinearAttr func(optionalAttr)
QuantizedResizeBilinearAttr is an optional argument to QuantizedResizeBilinear.
func QuantizedResizeBilinearAlignCorners ¶
func QuantizedResizeBilinearAlignCorners(value bool) QuantizedResizeBilinearAttr
QuantizedResizeBilinearAlignCorners sets the optional align_corners attribute to value.
value: If true, the centers of the 4 corner pixels of the input and output tensors are aligned, preserving the values at the corner pixels. Defaults to false. If not specified, defaults to false
func QuantizedResizeBilinearHalfPixelCenters ¶
func QuantizedResizeBilinearHalfPixelCenters(value bool) QuantizedResizeBilinearAttr
QuantizedResizeBilinearHalfPixelCenters sets the optional half_pixel_centers attribute to value. If not specified, defaults to false
type QueueCloseV2Attr ¶
type QueueCloseV2Attr func(optionalAttr)
QueueCloseV2Attr is an optional argument to QueueCloseV2.
func QueueCloseV2CancelPendingEnqueues ¶
func QueueCloseV2CancelPendingEnqueues(value bool) QueueCloseV2Attr
QueueCloseV2CancelPendingEnqueues sets the optional cancel_pending_enqueues attribute to value.
value: If true, all pending enqueue requests that are blocked on the given queue will be canceled. If not specified, defaults to false
type QueueDequeueManyV2Attr ¶
type QueueDequeueManyV2Attr func(optionalAttr)
QueueDequeueManyV2Attr is an optional argument to QueueDequeueManyV2.
func QueueDequeueManyV2TimeoutMs ¶
func QueueDequeueManyV2TimeoutMs(value int64) QueueDequeueManyV2Attr
QueueDequeueManyV2TimeoutMs sets the optional timeout_ms attribute to value.
value: If the queue has fewer than n elements, this operation will block for up to timeout_ms milliseconds. Note: This option is not supported yet. If not specified, defaults to -1
type QueueDequeueUpToV2Attr ¶
type QueueDequeueUpToV2Attr func(optionalAttr)
QueueDequeueUpToV2Attr is an optional argument to QueueDequeueUpToV2.
func QueueDequeueUpToV2TimeoutMs ¶
func QueueDequeueUpToV2TimeoutMs(value int64) QueueDequeueUpToV2Attr
QueueDequeueUpToV2TimeoutMs sets the optional timeout_ms attribute to value.
value: If the queue has fewer than n elements, this operation will block for up to timeout_ms milliseconds. Note: This option is not supported yet. If not specified, defaults to -1
type QueueDequeueV2Attr ¶
type QueueDequeueV2Attr func(optionalAttr)
QueueDequeueV2Attr is an optional argument to QueueDequeueV2.
func QueueDequeueV2TimeoutMs ¶
func QueueDequeueV2TimeoutMs(value int64) QueueDequeueV2Attr
QueueDequeueV2TimeoutMs sets the optional timeout_ms attribute to value.
value: If the queue is empty, this operation will block for up to timeout_ms milliseconds. Note: This option is not supported yet. If not specified, defaults to -1
type QueueEnqueueManyV2Attr ¶
type QueueEnqueueManyV2Attr func(optionalAttr)
QueueEnqueueManyV2Attr is an optional argument to QueueEnqueueManyV2.
func QueueEnqueueManyV2TimeoutMs ¶
func QueueEnqueueManyV2TimeoutMs(value int64) QueueEnqueueManyV2Attr
QueueEnqueueManyV2TimeoutMs sets the optional timeout_ms attribute to value.
value: If the queue is too full, this operation will block for up to timeout_ms milliseconds. Note: This option is not supported yet. If not specified, defaults to -1
type QueueEnqueueV2Attr ¶
type QueueEnqueueV2Attr func(optionalAttr)
QueueEnqueueV2Attr is an optional argument to QueueEnqueueV2.
func QueueEnqueueV2TimeoutMs ¶
func QueueEnqueueV2TimeoutMs(value int64) QueueEnqueueV2Attr
QueueEnqueueV2TimeoutMs sets the optional timeout_ms attribute to value.
value: If the queue is full, this operation will block for up to timeout_ms milliseconds. Note: This option is not supported yet. If not specified, defaults to -1
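A sketch tying the queue attributes together: a FIFOQueueV2 handle (assumed wrapper: scope, component types, optional attrs), an enqueue and a dequeue with the timeout attribute (which the entries above note is not yet supported; it is shown only for syntax), and a close that cancels pending enqueues.
s := NewScope()
q := FIFOQueueV2(s, []tf.DataType{tf.Float})
elem := Placeholder(s, tf.Float)
enq := QueueEnqueueV2(s, q, []tf.Output{elem}, QueueEnqueueV2TimeoutMs(5000))
deq := QueueDequeueV2(s, q, []tf.DataType{tf.Float}, QueueDequeueV2TimeoutMs(5000))
closer := QueueCloseV2(s, q, QueueCloseV2CancelPendingEnqueues(true))
_, _, _ = enq, deq, closer
if s.Err() != nil {
	panic(s.Err())
}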
type RFFT2DAttr ¶
type RFFT2DAttr func(optionalAttr)
RFFT2DAttr is an optional argument to RFFT2D.
func RFFT2DTcomplex ¶
func RFFT2DTcomplex(value tf.DataType) RFFT2DAttr
RFFT2DTcomplex sets the optional Tcomplex attribute to value. If not specified, defaults to DT_COMPLEX64
type RFFT3DAttr ¶
type RFFT3DAttr func(optionalAttr)
RFFT3DAttr is an optional argument to RFFT3D.
func RFFT3DTcomplex ¶
func RFFT3DTcomplex(value tf.DataType) RFFT3DAttr
RFFT3DTcomplex sets the optional Tcomplex attribute to value. If not specified, defaults to DT_COMPLEX64
type RFFTAttr ¶
type RFFTAttr func(optionalAttr)
RFFTAttr is an optional argument to RFFT.
func RFFTTcomplex ¶
func RFFTTcomplex(value tf.DataType) RFFTAttr
RFFTTcomplex sets the optional Tcomplex attribute to value. If not specified, defaults to DT_COMPLEX64
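A sketch of requesting a complex128 result from RFFT, assuming the wrapper takes a scope, a real float32 input, the fft_length vector, and optional attrs.
s := NewScope()
signal := Const(s, []float32{0, 1, 0, -1, 0, 1, 0, -1})
// fft_length is a length-1 int32 vector for the 1-D transform.
spectrum := RFFT(s, signal, Const(s, []int32{8}), RFFTTcomplex(tf.Complex128))
_ = spectrum
if s.Err() != nil {
	panic(s.Err())
}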
type RFFTNDAttr ¶ added in v0.7.0
type RFFTNDAttr func(optionalAttr)
RFFTNDAttr is an optional argument to RFFTND.
func RFFTNDTcomplex ¶ added in v0.7.0
func RFFTNDTcomplex(value tf.DataType) RFFTNDAttr
RFFTNDTcomplex sets the optional Tcomplex attribute to value. If not specified, defaults to DT_COMPLEX64
type RaggedBincountAttr ¶
type RaggedBincountAttr func(optionalAttr)
RaggedBincountAttr is an optional argument to RaggedBincount.
func RaggedBincountBinaryOutput ¶
func RaggedBincountBinaryOutput(value bool) RaggedBincountAttr
RaggedBincountBinaryOutput sets the optional binary_output attribute to value.
value: bool; Whether the kernel should count the appearance or number of occurrences. If not specified, defaults to false
type RaggedCountSparseOutputAttr ¶
type RaggedCountSparseOutputAttr func(optionalAttr)
RaggedCountSparseOutputAttr is an optional argument to RaggedCountSparseOutput.
func RaggedCountSparseOutputMaxlength ¶
func RaggedCountSparseOutputMaxlength(value int64) RaggedCountSparseOutputAttr
RaggedCountSparseOutputMaxlength sets the optional maxlength attribute to value.
value: Maximum value to count. Can be set to -1 for no maximum. If not specified, defaults to -1
REQUIRES: value >= -1
func RaggedCountSparseOutputMinlength ¶
func RaggedCountSparseOutputMinlength(value int64) RaggedCountSparseOutputAttr
RaggedCountSparseOutputMinlength sets the optional minlength attribute to value.
value: Minimum value to count. Can be set to -1 for no minimum. If not specified, defaults to -1
REQUIRES: value >= -1
type RaggedRangeAttr ¶
type RaggedRangeAttr func(optionalAttr)
RaggedRangeAttr is an optional argument to RaggedRange.
func RaggedRangeTsplits ¶
func RaggedRangeTsplits(value tf.DataType) RaggedRangeAttr
RaggedRangeTsplits sets the optional Tsplits attribute to value. If not specified, defaults to DT_INT64
type RaggedTensorFromVariantAttr ¶
type RaggedTensorFromVariantAttr func(optionalAttr)
RaggedTensorFromVariantAttr is an optional argument to RaggedTensorFromVariant.
func RaggedTensorFromVariantTsplits ¶
func RaggedTensorFromVariantTsplits(value tf.DataType) RaggedTensorFromVariantAttr
RaggedTensorFromVariantTsplits sets the optional Tsplits attribute to value. If not specified, defaults to DT_INT64
type RandomCropAttr ¶
type RandomCropAttr func(optionalAttr)
RandomCropAttr is an optional argument to RandomCrop.
func RandomCropSeed ¶
func RandomCropSeed(value int64) RandomCropAttr
RandomCropSeed sets the optional seed attribute to value.
value: If either seed or seed2 are set to be non-zero, the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed. If not specified, defaults to 0
func RandomCropSeed2 ¶
func RandomCropSeed2(value int64) RandomCropAttr
RandomCropSeed2 sets the optional seed2 attribute to value.
value: A second seed to avoid seed collision. If not specified, defaults to 0
type RandomDatasetAttr ¶
type RandomDatasetAttr func(optionalAttr)
RandomDatasetAttr is an optional argument to RandomDataset.
func RandomDatasetMetadata ¶
func RandomDatasetMetadata(value string) RandomDatasetAttr
RandomDatasetMetadata sets the optional metadata attribute to value. If not specified, defaults to ""
type RandomDatasetV2Attr ¶ added in v0.4.0
type RandomDatasetV2Attr func(optionalAttr)
RandomDatasetV2Attr is an optional argument to RandomDatasetV2.
func RandomDatasetV2Metadata ¶ added in v0.4.0
func RandomDatasetV2Metadata(value string) RandomDatasetV2Attr
RandomDatasetV2Metadata sets the optional metadata attribute to value. If not specified, defaults to ""
func RandomDatasetV2RerandomizeEachIteration ¶ added in v0.4.0
func RandomDatasetV2RerandomizeEachIteration(value bool) RandomDatasetV2Attr
RandomDatasetV2RerandomizeEachIteration sets the optional rerandomize_each_iteration attribute to value.
value: A boolean attribute to rerandomize the sequence of random numbers generated at each epoch. If not specified, defaults to false
type RandomGammaAttr ¶
type RandomGammaAttr func(optionalAttr)
RandomGammaAttr is an optional argument to RandomGamma.
func RandomGammaSeed ¶
func RandomGammaSeed(value int64) RandomGammaAttr
RandomGammaSeed sets the optional seed attribute to value.
value: If either `seed` or `seed2` are set to be non-zero, the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed. If not specified, defaults to 0
func RandomGammaSeed2 ¶
func RandomGammaSeed2(value int64) RandomGammaAttr
RandomGammaSeed2 sets the optional seed2 attribute to value.
value: A second seed to avoid seed collision. If not specified, defaults to 0
type RandomIndexShuffleAttr ¶ added in v0.3.0
type RandomIndexShuffleAttr func(optionalAttr)
RandomIndexShuffleAttr is an optional argument to RandomIndexShuffle.
func RandomIndexShuffleRounds ¶ added in v0.3.0
func RandomIndexShuffleRounds(value int64) RandomIndexShuffleAttr
RandomIndexShuffleRounds sets the optional rounds attribute to value.
value: The number of rounds to use in the block cipher. If not specified, defaults to 4
type RandomPoissonAttr ¶
type RandomPoissonAttr func(optionalAttr)
RandomPoissonAttr is an optional argument to RandomPoisson.
func RandomPoissonSeed ¶
func RandomPoissonSeed(value int64) RandomPoissonAttr
RandomPoissonSeed sets the optional seed attribute to value. If not specified, defaults to 0
func RandomPoissonSeed2 ¶
func RandomPoissonSeed2(value int64) RandomPoissonAttr
RandomPoissonSeed2 sets the optional seed2 attribute to value. If not specified, defaults to 0
type RandomPoissonV2Attr ¶
type RandomPoissonV2Attr func(optionalAttr)
RandomPoissonV2Attr is an optional argument to RandomPoissonV2.
func RandomPoissonV2Dtype ¶
func RandomPoissonV2Dtype(value tf.DataType) RandomPoissonV2Attr
RandomPoissonV2Dtype sets the optional dtype attribute to value. If not specified, defaults to DT_INT64
func RandomPoissonV2Seed ¶
func RandomPoissonV2Seed(value int64) RandomPoissonV2Attr
RandomPoissonV2Seed sets the optional seed attribute to value.
value: If either `seed` or `seed2` are set to be non-zero, the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed. If not specified, defaults to 0
func RandomPoissonV2Seed2 ¶
func RandomPoissonV2Seed2(value int64) RandomPoissonV2Attr
RandomPoissonV2Seed2 sets the optional seed2 attribute to value.
value: A second seed to avoid seed collision. If not specified, defaults to 0
type RandomShuffleAttr ¶
type RandomShuffleAttr func(optionalAttr)
RandomShuffleAttr is an optional argument to RandomShuffle.
func RandomShuffleSeed ¶
func RandomShuffleSeed(value int64) RandomShuffleAttr
RandomShuffleSeed sets the optional seed attribute to value.
value: If either `seed` or `seed2` are set to be non-zero, the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed. If not specified, defaults to 0
func RandomShuffleSeed2 ¶
func RandomShuffleSeed2(value int64) RandomShuffleAttr
RandomShuffleSeed2 sets the optional seed2 attribute to value.
value: A second seed to avoid seed collision. If not specified, defaults to 0
type RandomShuffleQueueV2Attr ¶
type RandomShuffleQueueV2Attr func(optionalAttr)
RandomShuffleQueueV2Attr is an optional argument to RandomShuffleQueueV2.
func RandomShuffleQueueV2Capacity ¶
func RandomShuffleQueueV2Capacity(value int64) RandomShuffleQueueV2Attr
RandomShuffleQueueV2Capacity sets the optional capacity attribute to value.
value: The upper bound on the number of elements in this queue. Negative numbers mean no limit. If not specified, defaults to -1
func RandomShuffleQueueV2Container ¶
func RandomShuffleQueueV2Container(value string) RandomShuffleQueueV2Attr
RandomShuffleQueueV2Container sets the optional container attribute to value.
value: If non-empty, this queue is placed in the given container. Otherwise, a default container is used. If not specified, defaults to ""
func RandomShuffleQueueV2MinAfterDequeue ¶
func RandomShuffleQueueV2MinAfterDequeue(value int64) RandomShuffleQueueV2Attr
RandomShuffleQueueV2MinAfterDequeue sets the optional min_after_dequeue attribute to value.
value: Dequeue will block unless there would be this many elements after the dequeue or the queue is closed. This ensures a minimum level of mixing of elements. If not specified, defaults to 0
func RandomShuffleQueueV2Seed ¶
func RandomShuffleQueueV2Seed(value int64) RandomShuffleQueueV2Attr
RandomShuffleQueueV2Seed sets the optional seed attribute to value.
value: If either seed or seed2 is set to be non-zero, the random number generator is seeded by the given seed. Otherwise, a random seed is used. If not specified, defaults to 0
func RandomShuffleQueueV2Seed2 ¶
func RandomShuffleQueueV2Seed2(value int64) RandomShuffleQueueV2Attr
RandomShuffleQueueV2Seed2 sets the optional seed2 attribute to value.
value: A second seed to avoid seed collision. If not specified, defaults to 0
func RandomShuffleQueueV2Shapes ¶
func RandomShuffleQueueV2Shapes(value []tf.Shape) RandomShuffleQueueV2Attr
RandomShuffleQueueV2Shapes sets the optional shapes attribute to value.
value: The shape of each component in a value. The length of this attr must be either 0 or the same as the length of component_types. If the length of this attr is 0, the shapes of queue elements are not constrained, and only one element may be dequeued at a time. If not specified, defaults to {}
REQUIRES: len(value) >= 0
func RandomShuffleQueueV2SharedName ¶
func RandomShuffleQueueV2SharedName(value string) RandomShuffleQueueV2Attr
RandomShuffleQueueV2SharedName sets the optional shared_name attribute to value.
value: If non-empty, this queue will be shared under the given name across multiple sessions. If not specified, defaults to ""
type RandomStandardNormalAttr ¶
type RandomStandardNormalAttr func(optionalAttr)
RandomStandardNormalAttr is an optional argument to RandomStandardNormal.
func RandomStandardNormalSeed ¶
func RandomStandardNormalSeed(value int64) RandomStandardNormalAttr
RandomStandardNormalSeed sets the optional seed attribute to value.
value: If either `seed` or `seed2` are set to be non-zero, the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed. If not specified, defaults to 0
func RandomStandardNormalSeed2 ¶
func RandomStandardNormalSeed2(value int64) RandomStandardNormalAttr
RandomStandardNormalSeed2 sets the optional seed2 attribute to value.
value: A second seed to avoid seed collision. If not specified, defaults to 0
type RandomUniformAttr ¶
type RandomUniformAttr func(optionalAttr)
RandomUniformAttr is an optional argument to RandomUniform.
func RandomUniformSeed ¶
func RandomUniformSeed(value int64) RandomUniformAttr
RandomUniformSeed sets the optional seed attribute to value.
value: If either `seed` or `seed2` are set to be non-zero, the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed. If not specified, defaults to 0
func RandomUniformSeed2 ¶
func RandomUniformSeed2(value int64) RandomUniformAttr
RandomUniformSeed2 sets the optional seed2 attribute to value.
value: A second seed to avoid seed collision. If not specified, defaults to 0
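A sketch of pinning both seeds so a RandomUniform node is reproducible, assuming the wrapper takes a scope, a shape tensor, the output dtype, and optional attrs.
s := NewScope()
// Fixing seed and seed2 makes the generated values deterministic across runs.
r := RandomUniform(s, Const(s, []int32{2, 3}), tf.Float,
	RandomUniformSeed(42),
	RandomUniformSeed2(7))
_ = r
if s.Err() != nil {
	panic(s.Err())
}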
type RandomUniformIntAttr ¶
type RandomUniformIntAttr func(optionalAttr)
RandomUniformIntAttr is an optional argument to RandomUniformInt.
func RandomUniformIntSeed ¶
func RandomUniformIntSeed(value int64) RandomUniformIntAttr
RandomUniformIntSeed sets the optional seed attribute to value.
value: If either `seed` or `seed2` are set to be non-zero, the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed. If not specified, defaults to 0
func RandomUniformIntSeed2 ¶
func RandomUniformIntSeed2(value int64) RandomUniformIntAttr
RandomUniformIntSeed2 sets the optional seed2 attribute to value.
value: A second seed to avoid seed collision. If not specified, defaults to 0
type RangeDatasetAttr ¶
type RangeDatasetAttr func(optionalAttr)
RangeDatasetAttr is an optional argument to RangeDataset.
func RangeDatasetMetadata ¶
func RangeDatasetMetadata(value string) RangeDatasetAttr
RangeDatasetMetadata sets the optional metadata attribute to value. If not specified, defaults to ""
func RangeDatasetReplicateOnSplit ¶ added in v0.2.0
func RangeDatasetReplicateOnSplit(value bool) RangeDatasetAttr
RangeDatasetReplicateOnSplit sets the optional replicate_on_split attribute to value. If not specified, defaults to false
type ReadVariableXlaSplitNDAttr ¶
type ReadVariableXlaSplitNDAttr func(optionalAttr)
ReadVariableXlaSplitNDAttr is an optional argument to ReadVariableXlaSplitND.
func ReadVariableXlaSplitNDPaddings ¶
func ReadVariableXlaSplitNDPaddings(value []int64) ReadVariableXlaSplitNDAttr
ReadVariableXlaSplitNDPaddings sets the optional paddings attribute to value.
value: Optional list of right paddings per dimension of input tensor to apply before splitting. This can be used to make a dimension evenly divisible. If not specified, defaults to {}
type RebatchDatasetAttr ¶
type RebatchDatasetAttr func(optionalAttr)
RebatchDatasetAttr is an optional argument to RebatchDataset.
func RebatchDatasetUseFallback ¶
func RebatchDatasetUseFallback(value bool) RebatchDatasetAttr
RebatchDatasetUseFallback sets the optional use_fallback attribute to value. If not specified, defaults to true
type RecordInputAttr ¶
type RecordInputAttr func(optionalAttr)
RecordInputAttr is an optional argument to RecordInput.
func RecordInputBatchSize ¶
func RecordInputBatchSize(value int64) RecordInputAttr
RecordInputBatchSize sets the optional batch_size attribute to value.
value: The batch size. If not specified, defaults to 32
func RecordInputCompressionType ¶
func RecordInputCompressionType(value string) RecordInputAttr
RecordInputCompressionType sets the optional compression_type attribute to value.
value: The type of compression for the file. Currently ZLIB and GZIP are supported. Defaults to none. If not specified, defaults to ""
func RecordInputFileBufferSize ¶
func RecordInputFileBufferSize(value int64) RecordInputAttr
RecordInputFileBufferSize sets the optional file_buffer_size attribute to value.
value: The size of the randomization shuffling buffer. If not specified, defaults to 10000
func RecordInputFileParallelism ¶
func RecordInputFileParallelism(value int64) RecordInputAttr
RecordInputFileParallelism sets the optional file_parallelism attribute to value.
value: How many sstables are opened and concurrently iterated over. If not specified, defaults to 16
func RecordInputFileRandomSeed ¶
func RecordInputFileRandomSeed(value int64) RecordInputAttr
RecordInputFileRandomSeed sets the optional file_random_seed attribute to value.
value: Random seeds used to produce randomized records. If not specified, defaults to 301
func RecordInputFileShuffleShiftRatio ¶
func RecordInputFileShuffleShiftRatio(value float32) RecordInputAttr
RecordInputFileShuffleShiftRatio sets the optional file_shuffle_shift_ratio attribute to value.
value: Shifts the list of files after the list is randomly shuffled. If not specified, defaults to 0
type RecvAttr ¶
type RecvAttr func(optionalAttr)
RecvAttr is an optional argument to Recv.
func RecvClientTerminated ¶
func RecvClientTerminated(value bool) RecvAttr
RecvClientTerminated sets the optional client_terminated attribute to value.
value: If set to true, this indicates that the node was added to the graph as a result of a client-side feed or fetch of Tensor data, in which case the corresponding send or recv is expected to be managed locally by the caller. If not specified, defaults to false
type ReduceJoinAttr ¶
type ReduceJoinAttr func(optionalAttr)
ReduceJoinAttr is an optional argument to ReduceJoin.
func ReduceJoinKeepDims ¶
func ReduceJoinKeepDims(value bool) ReduceJoinAttr
ReduceJoinKeepDims sets the optional keep_dims attribute to value.
value: If `True`, retain reduced dimensions with length `1`. If not specified, defaults to false
func ReduceJoinSeparator ¶
func ReduceJoinSeparator(value string) ReduceJoinAttr
ReduceJoinSeparator sets the optional separator attribute to value.
value: The separator to use when joining. If not specified, defaults to ""
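A sketch of joining strings along an axis with a custom separator while keeping the reduced dimension, assuming the ReduceJoin wrapper takes a scope, the string inputs, a reduction_indices tensor, and optional attrs.
s := NewScope()
words := Const(s, [][]string{{"a", "b"}, {"c", "d"}})
joined := ReduceJoin(s, words, Const(s, int32(1)),
	ReduceJoinSeparator("-"),
	ReduceJoinKeepDims(true))
_ = joined // expected values {{"a-b"}, {"c-d"}}
if s.Err() != nil {
	panic(s.Err())
}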
type RegexReplaceAttr ¶
type RegexReplaceAttr func(optionalAttr)
RegexReplaceAttr is an optional argument to RegexReplace.
func RegexReplaceReplaceGlobal ¶
func RegexReplaceReplaceGlobal(value bool) RegexReplaceAttr
RegexReplaceReplaceGlobal sets the optional replace_global attribute to value.
value: If True, the replacement is global (that is, all matches of the `pattern` regular expression in each input string are rewritten), otherwise the `rewrite` substitution is only made for the first `pattern` match. If not specified, defaults to true
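A sketch that rewrites only the first match in each string by turning replace_global off, assuming the RegexReplace wrapper takes a scope, the input, the pattern, the rewrite string, and optional attrs.
s := NewScope()
in := Const(s, []string{"one fish two fish"})
out := RegexReplace(s, in, Const(s, "fish"), Const(s, "cat"),
	RegexReplaceReplaceGlobal(false))
_ = out // expected "one cat two fish"
if s.Err() != nil {
	panic(s.Err())
}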
type RegisterDatasetAttr ¶
type RegisterDatasetAttr func(optionalAttr)
RegisterDatasetAttr is an optional argument to RegisterDataset.
func RegisterDatasetElementSpec ¶
func RegisterDatasetElementSpec(value string) RegisterDatasetAttr
RegisterDatasetElementSpec sets the optional element_spec attribute to value. If not specified, defaults to ""
func RegisterDatasetMetadata ¶
func RegisterDatasetMetadata(value string) RegisterDatasetAttr
RegisterDatasetMetadata sets the optional metadata attribute to value. If not specified, defaults to ""
type RegisterDatasetV2Attr ¶ added in v0.2.0
type RegisterDatasetV2Attr func(optionalAttr)
RegisterDatasetV2Attr is an optional argument to RegisterDatasetV2.
func RegisterDatasetV2ElementSpec ¶ added in v0.2.0
func RegisterDatasetV2ElementSpec(value string) RegisterDatasetV2Attr
RegisterDatasetV2ElementSpec sets the optional element_spec attribute to value. If not specified, defaults to ""
func RegisterDatasetV2Metadata ¶ added in v0.2.0
func RegisterDatasetV2Metadata(value string) RegisterDatasetV2Attr
RegisterDatasetV2Metadata sets the optional metadata attribute to value. If not specified, defaults to ""
func RegisterDatasetV2RequestedDatasetId ¶ added in v0.2.0
func RegisterDatasetV2RequestedDatasetId(value string) RegisterDatasetV2Attr
RegisterDatasetV2RequestedDatasetId sets the optional requested_dataset_id attribute to value. If not specified, defaults to ""
type RepeatDatasetAttr ¶
type RepeatDatasetAttr func(optionalAttr)
RepeatDatasetAttr is an optional argument to RepeatDataset.
func RepeatDatasetMetadata ¶
func RepeatDatasetMetadata(value string) RepeatDatasetAttr
RepeatDatasetMetadata sets the optional metadata attribute to value. If not specified, defaults to ""
type RequantizePerChannelAttr ¶
type RequantizePerChannelAttr func(optionalAttr)
RequantizePerChannelAttr is an optional argument to RequantizePerChannel.
func RequantizePerChannelOutType ¶
func RequantizePerChannelOutType(value tf.DataType) RequantizePerChannelAttr
RequantizePerChannelOutType sets the optional out_type attribute to value.
value: The quantized type of output tensor that needs to be converted. If not specified, defaults to DT_QUINT8
type ResizeAreaAttr ¶
type ResizeAreaAttr func(optionalAttr)
ResizeAreaAttr is an optional argument to ResizeArea.
func ResizeAreaAlignCorners ¶
func ResizeAreaAlignCorners(value bool) ResizeAreaAttr
ResizeAreaAlignCorners sets the optional align_corners attribute to value.
value: If true, the centers of the 4 corner pixels of the input and output tensors are aligned, preserving the values at the corner pixels. Defaults to false. If not specified, defaults to false
type ResizeBicubicAttr ¶
type ResizeBicubicAttr func(optionalAttr)
ResizeBicubicAttr is an optional argument to ResizeBicubic.
func ResizeBicubicAlignCorners ¶
func ResizeBicubicAlignCorners(value bool) ResizeBicubicAttr
ResizeBicubicAlignCorners sets the optional align_corners attribute to value.
value: If true, the centers of the 4 corner pixels of the input and output tensors are aligned, preserving the values at the corner pixels. Defaults to false. If not specified, defaults to false
func ResizeBicubicHalfPixelCenters ¶
func ResizeBicubicHalfPixelCenters(value bool) ResizeBicubicAttr
ResizeBicubicHalfPixelCenters sets the optional half_pixel_centers attribute to value. If not specified, defaults to false
type ResizeBicubicGradAttr ¶
type ResizeBicubicGradAttr func(optionalAttr)
ResizeBicubicGradAttr is an optional argument to ResizeBicubicGrad.
func ResizeBicubicGradAlignCorners ¶
func ResizeBicubicGradAlignCorners(value bool) ResizeBicubicGradAttr
ResizeBicubicGradAlignCorners sets the optional align_corners attribute to value.
value: If true, the centers of the 4 corner pixels of the input and grad tensors are aligned. Defaults to false. If not specified, defaults to false
func ResizeBicubicGradHalfPixelCenters ¶
func ResizeBicubicGradHalfPixelCenters(value bool) ResizeBicubicGradAttr
ResizeBicubicGradHalfPixelCenters sets the optional half_pixel_centers attribute to value. If not specified, defaults to false
type ResizeBilinearAttr ¶
type ResizeBilinearAttr func(optionalAttr)
ResizeBilinearAttr is an optional argument to ResizeBilinear.
func ResizeBilinearAlignCorners ¶
func ResizeBilinearAlignCorners(value bool) ResizeBilinearAttr
ResizeBilinearAlignCorners sets the optional align_corners attribute to value.
value: If true, the centers of the 4 corner pixels of the input and output tensors are aligned, preserving the values at the corner pixels. Defaults to false. If not specified, defaults to false
func ResizeBilinearHalfPixelCenters ¶
func ResizeBilinearHalfPixelCenters(value bool) ResizeBilinearAttr
ResizeBilinearHalfPixelCenters sets the optional half_pixel_centers attribute to value. If not specified, defaults to false
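A sketch resizing a batch of images with half-pixel centers enabled (the usual pairing is align_corners=false with half_pixel_centers=true), assuming the ResizeBilinear wrapper takes a scope, the images, a size tensor, and optional attrs.
s := NewScope()
images := Placeholder(s, tf.Float) // assumed to be fed an NHWC batch at run time
resized := ResizeBilinear(s, images, Const(s, []int32{224, 224}),
	ResizeBilinearAlignCorners(false),
	ResizeBilinearHalfPixelCenters(true))
_ = resized
if s.Err() != nil {
	panic(s.Err())
}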
type ResizeBilinearGradAttr ¶
type ResizeBilinearGradAttr func(optionalAttr)
ResizeBilinearGradAttr is an optional argument to ResizeBilinearGrad.
func ResizeBilinearGradAlignCorners ¶
func ResizeBilinearGradAlignCorners(value bool) ResizeBilinearGradAttr
ResizeBilinearGradAlignCorners sets the optional align_corners attribute to value.
value: If true, the centers of the 4 corner pixels of the input and grad tensors are aligned. Defaults to false. If not specified, defaults to false
func ResizeBilinearGradHalfPixelCenters ¶
func ResizeBilinearGradHalfPixelCenters(value bool) ResizeBilinearGradAttr
ResizeBilinearGradHalfPixelCenters sets the optional half_pixel_centers attribute to value. If not specified, defaults to false
type ResizeNearestNeighborAttr ¶
type ResizeNearestNeighborAttr func(optionalAttr)
ResizeNearestNeighborAttr is an optional argument to ResizeNearestNeighbor.
func ResizeNearestNeighborAlignCorners ¶
func ResizeNearestNeighborAlignCorners(value bool) ResizeNearestNeighborAttr
ResizeNearestNeighborAlignCorners sets the optional align_corners attribute to value.
value: If true, the centers of the 4 corner pixels of the input and output tensors are aligned, preserving the values at the corner pixels. Defaults to false. If not specified, defaults to false
func ResizeNearestNeighborHalfPixelCenters ¶
func ResizeNearestNeighborHalfPixelCenters(value bool) ResizeNearestNeighborAttr
ResizeNearestNeighborHalfPixelCenters sets the optional half_pixel_centers attribute to value. If not specified, defaults to false
type ResizeNearestNeighborGradAttr ¶
type ResizeNearestNeighborGradAttr func(optionalAttr)
ResizeNearestNeighborGradAttr is an optional argument to ResizeNearestNeighborGrad.
func ResizeNearestNeighborGradAlignCorners ¶
func ResizeNearestNeighborGradAlignCorners(value bool) ResizeNearestNeighborGradAttr
ResizeNearestNeighborGradAlignCorners sets the optional align_corners attribute to value.
value: If true, the centers of the 4 corner pixels of the input and grad tensors are aligned. If not specified, defaults to false
func ResizeNearestNeighborGradHalfPixelCenters ¶
func ResizeNearestNeighborGradHalfPixelCenters(value bool) ResizeNearestNeighborGradAttr
ResizeNearestNeighborGradHalfPixelCenters sets the optional half_pixel_centers attribute to value. If not specified, defaults to false
type ResourceApplyAdaMaxAttr ¶
type ResourceApplyAdaMaxAttr func(optionalAttr)
ResourceApplyAdaMaxAttr is an optional argument to ResourceApplyAdaMax.
func ResourceApplyAdaMaxUseLocking ¶
func ResourceApplyAdaMaxUseLocking(value bool) ResourceApplyAdaMaxAttr
ResourceApplyAdaMaxUseLocking sets the optional use_locking attribute to value.
value: If `True`, updating of the var, m, and v tensors will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
type ResourceApplyAdadeltaAttr ¶
type ResourceApplyAdadeltaAttr func(optionalAttr)
ResourceApplyAdadeltaAttr is an optional argument to ResourceApplyAdadelta.
func ResourceApplyAdadeltaUseLocking ¶
func ResourceApplyAdadeltaUseLocking(value bool) ResourceApplyAdadeltaAttr
ResourceApplyAdadeltaUseLocking sets the optional use_locking attribute to value.
value: If True, updating of the var, accum and update_accum tensors will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
type ResourceApplyAdagradAttr ¶
type ResourceApplyAdagradAttr func(optionalAttr)
ResourceApplyAdagradAttr is an optional argument to ResourceApplyAdagrad.
func ResourceApplyAdagradUpdateSlots ¶
func ResourceApplyAdagradUpdateSlots(value bool) ResourceApplyAdagradAttr
ResourceApplyAdagradUpdateSlots sets the optional update_slots attribute to value. If not specified, defaults to true
func ResourceApplyAdagradUseLocking ¶
func ResourceApplyAdagradUseLocking(value bool) ResourceApplyAdagradAttr
ResourceApplyAdagradUseLocking sets the optional use_locking attribute to value.
value: If `True`, updating of the var and accum tensors will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
type ResourceApplyAdagradDAAttr ¶
type ResourceApplyAdagradDAAttr func(optionalAttr)
ResourceApplyAdagradDAAttr is an optional argument to ResourceApplyAdagradDA.
func ResourceApplyAdagradDAUseLocking ¶
func ResourceApplyAdagradDAUseLocking(value bool) ResourceApplyAdagradDAAttr
ResourceApplyAdagradDAUseLocking sets the optional use_locking attribute to value.
value: If True, updating of the var and accum tensors will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
type ResourceApplyAdagradV2Attr ¶
type ResourceApplyAdagradV2Attr func(optionalAttr)
ResourceApplyAdagradV2Attr is an optional argument to ResourceApplyAdagradV2.
func ResourceApplyAdagradV2UpdateSlots ¶
func ResourceApplyAdagradV2UpdateSlots(value bool) ResourceApplyAdagradV2Attr
ResourceApplyAdagradV2UpdateSlots sets the optional update_slots attribute to value. If not specified, defaults to true
func ResourceApplyAdagradV2UseLocking ¶
func ResourceApplyAdagradV2UseLocking(value bool) ResourceApplyAdagradV2Attr
ResourceApplyAdagradV2UseLocking sets the optional use_locking attribute to value.
value: If `True`, updating of the var and accum tensors will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
type ResourceApplyAdamAttr ¶
type ResourceApplyAdamAttr func(optionalAttr)
ResourceApplyAdamAttr is an optional argument to ResourceApplyAdam.
func ResourceApplyAdamUseLocking ¶
func ResourceApplyAdamUseLocking(value bool) ResourceApplyAdamAttr
ResourceApplyAdamUseLocking sets the optional use_locking attribute to value.
value: If `True`, updating of the var, m, and v tensors will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
func ResourceApplyAdamUseNesterov ¶
func ResourceApplyAdamUseNesterov(value bool) ResourceApplyAdamAttr
ResourceApplyAdamUseNesterov sets the optional use_nesterov attribute to value.
value: If `True`, uses the nesterov update. If not specified, defaults to false
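A hedged sketch (not from the generated docs) of how these optimizer attributes are combined with the op itself. It assumes ResourceApplyAdam takes the var/m/v resource handles, the scalar hyperparameters, and the gradient in that order, and that VarHandleOp(scope, dtype, shape) creates the resource handles; all names and values below are illustrative:

    s := NewScope()
    v := VarHandleOp(s.SubScope("var"), tf.Float, tf.ScalarShape()) // variable handle
    m := VarHandleOp(s.SubScope("m"), tf.Float, tf.ScalarShape())   // first-moment slot
    u := VarHandleOp(s.SubScope("v"), tf.Float, tf.ScalarShape())   // second-moment slot
    grad := Placeholder(s, tf.Float)
    one := Const(s, float32(1))
    adam := ResourceApplyAdam(s, v, m, u,
        one, one,                  // beta1_power, beta2_power
        Const(s, float32(0.001)),  // lr
        Const(s, float32(0.9)),    // beta1
        Const(s, float32(0.999)),  // beta2
        Const(s, float32(1e-8)),   // epsilon
        grad,
        ResourceApplyAdamUseLocking(true),    // serialize updates to var, m, v
        ResourceApplyAdamUseNesterov(false))  // plain Adam update
    if s.Err() != nil {
        panic(s.Err())
    }
    _ = adam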
type ResourceApplyAdamWithAmsgradAttr ¶
type ResourceApplyAdamWithAmsgradAttr func(optionalAttr)
ResourceApplyAdamWithAmsgradAttr is an optional argument to ResourceApplyAdamWithAmsgrad.
func ResourceApplyAdamWithAmsgradUseLocking ¶
func ResourceApplyAdamWithAmsgradUseLocking(value bool) ResourceApplyAdamWithAmsgradAttr
ResourceApplyAdamWithAmsgradUseLocking sets the optional use_locking attribute to value.
value: If `True`, updating of the var, m, and v tensors will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
type ResourceApplyAddSignAttr ¶
type ResourceApplyAddSignAttr func(optionalAttr)
ResourceApplyAddSignAttr is an optional argument to ResourceApplyAddSign.
func ResourceApplyAddSignUseLocking ¶
func ResourceApplyAddSignUseLocking(value bool) ResourceApplyAddSignAttr
ResourceApplyAddSignUseLocking sets the optional use_locking attribute to value.
value: If `True`, updating of the var and m tensors is protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
type ResourceApplyCenteredRMSPropAttr ¶
type ResourceApplyCenteredRMSPropAttr func(optionalAttr)
ResourceApplyCenteredRMSPropAttr is an optional argument to ResourceApplyCenteredRMSProp.
func ResourceApplyCenteredRMSPropUseLocking ¶
func ResourceApplyCenteredRMSPropUseLocking(value bool) ResourceApplyCenteredRMSPropAttr
ResourceApplyCenteredRMSPropUseLocking sets the optional use_locking attribute to value.
value: If `True`, updating of the var, mg, ms, and mom tensors is protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
type ResourceApplyFtrlAttr ¶
type ResourceApplyFtrlAttr func(optionalAttr)
ResourceApplyFtrlAttr is an optional argument to ResourceApplyFtrl.
func ResourceApplyFtrlMultiplyLinearByLr ¶
func ResourceApplyFtrlMultiplyLinearByLr(value bool) ResourceApplyFtrlAttr
ResourceApplyFtrlMultiplyLinearByLr sets the optional multiply_linear_by_lr attribute to value. If not specified, defaults to false
func ResourceApplyFtrlUseLocking ¶
func ResourceApplyFtrlUseLocking(value bool) ResourceApplyFtrlAttr
ResourceApplyFtrlUseLocking sets the optional use_locking attribute to value.
value: If `True`, updating of the var and accum tensors will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
type ResourceApplyFtrlV2Attr ¶
type ResourceApplyFtrlV2Attr func(optionalAttr)
ResourceApplyFtrlV2Attr is an optional argument to ResourceApplyFtrlV2.
func ResourceApplyFtrlV2MultiplyLinearByLr ¶
func ResourceApplyFtrlV2MultiplyLinearByLr(value bool) ResourceApplyFtrlV2Attr
ResourceApplyFtrlV2MultiplyLinearByLr sets the optional multiply_linear_by_lr attribute to value. If not specified, defaults to false
func ResourceApplyFtrlV2UseLocking ¶
func ResourceApplyFtrlV2UseLocking(value bool) ResourceApplyFtrlV2Attr
ResourceApplyFtrlV2UseLocking sets the optional use_locking attribute to value.
value: If `True`, updating of the var and accum tensors will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
type ResourceApplyGradientDescentAttr ¶
type ResourceApplyGradientDescentAttr func(optionalAttr)
ResourceApplyGradientDescentAttr is an optional argument to ResourceApplyGradientDescent.
func ResourceApplyGradientDescentUseLocking ¶
func ResourceApplyGradientDescentUseLocking(value bool) ResourceApplyGradientDescentAttr
ResourceApplyGradientDescentUseLocking sets the optional use_locking attribute to value.
value: If `True`, the subtraction will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
type ResourceApplyKerasMomentumAttr ¶
type ResourceApplyKerasMomentumAttr func(optionalAttr)
ResourceApplyKerasMomentumAttr is an optional argument to ResourceApplyKerasMomentum.
func ResourceApplyKerasMomentumUseLocking ¶
func ResourceApplyKerasMomentumUseLocking(value bool) ResourceApplyKerasMomentumAttr
ResourceApplyKerasMomentumUseLocking sets the optional use_locking attribute to value.
value: If `True`, updating of the var and accum tensors will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
func ResourceApplyKerasMomentumUseNesterov ¶
func ResourceApplyKerasMomentumUseNesterov(value bool) ResourceApplyKerasMomentumAttr
ResourceApplyKerasMomentumUseNesterov sets the optional use_nesterov attribute to value.
value: If `True`, the tensor passed to compute grad will be var + momentum * accum, so in the end, the var you get is actually var + momentum * accum. If not specified, defaults to false
type ResourceApplyMomentumAttr ¶
type ResourceApplyMomentumAttr func(optionalAttr)
ResourceApplyMomentumAttr is an optional argument to ResourceApplyMomentum.
func ResourceApplyMomentumUseLocking ¶
func ResourceApplyMomentumUseLocking(value bool) ResourceApplyMomentumAttr
ResourceApplyMomentumUseLocking sets the optional use_locking attribute to value.
value: If `True`, updating of the var and accum tensors will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
func ResourceApplyMomentumUseNesterov ¶
func ResourceApplyMomentumUseNesterov(value bool) ResourceApplyMomentumAttr
ResourceApplyMomentumUseNesterov sets the optional use_nesterov attribute to value.
value: If `True`, the tensor passed to compute grad will be var - lr * momentum * accum, so in the end, the var you get is actually var - lr * momentum * accum. If not specified, defaults to false
type ResourceApplyPowerSignAttr ¶
type ResourceApplyPowerSignAttr func(optionalAttr)
ResourceApplyPowerSignAttr is an optional argument to ResourceApplyPowerSign.
func ResourceApplyPowerSignUseLocking ¶
func ResourceApplyPowerSignUseLocking(value bool) ResourceApplyPowerSignAttr
ResourceApplyPowerSignUseLocking sets the optional use_locking attribute to value.
value: If `True`, updating of the var and m tensors is protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
type ResourceApplyProximalAdagradAttr ¶
type ResourceApplyProximalAdagradAttr func(optionalAttr)
ResourceApplyProximalAdagradAttr is an optional argument to ResourceApplyProximalAdagrad.
func ResourceApplyProximalAdagradUseLocking ¶
func ResourceApplyProximalAdagradUseLocking(value bool) ResourceApplyProximalAdagradAttr
ResourceApplyProximalAdagradUseLocking sets the optional use_locking attribute to value.
value: If True, updating of the var and accum tensors will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
type ResourceApplyProximalGradientDescentAttr ¶
type ResourceApplyProximalGradientDescentAttr func(optionalAttr)
ResourceApplyProximalGradientDescentAttr is an optional argument to ResourceApplyProximalGradientDescent.
func ResourceApplyProximalGradientDescentUseLocking ¶
func ResourceApplyProximalGradientDescentUseLocking(value bool) ResourceApplyProximalGradientDescentAttr
ResourceApplyProximalGradientDescentUseLocking sets the optional use_locking attribute to value.
value: If True, the subtraction will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
type ResourceApplyRMSPropAttr ¶
type ResourceApplyRMSPropAttr func(optionalAttr)
ResourceApplyRMSPropAttr is an optional argument to ResourceApplyRMSProp.
func ResourceApplyRMSPropUseLocking ¶
func ResourceApplyRMSPropUseLocking(value bool) ResourceApplyRMSPropAttr
ResourceApplyRMSPropUseLocking sets the optional use_locking attribute to value.
value: If `True`, updating of the var, ms, and mom tensors is protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
type ResourceConditionalAccumulatorAttr ¶
type ResourceConditionalAccumulatorAttr func(optionalAttr)
ResourceConditionalAccumulatorAttr is an optional argument to ResourceConditionalAccumulator.
func ResourceConditionalAccumulatorContainer ¶
func ResourceConditionalAccumulatorContainer(value string) ResourceConditionalAccumulatorAttr
ResourceConditionalAccumulatorContainer sets the optional container attribute to value.
value: If non-empty, this accumulator is placed in the given container. Otherwise, a default container is used. If not specified, defaults to ""
func ResourceConditionalAccumulatorReductionType ¶
func ResourceConditionalAccumulatorReductionType(value string) ResourceConditionalAccumulatorAttr
ResourceConditionalAccumulatorReductionType sets the optional reduction_type attribute to value. If not specified, defaults to "MEAN"
func ResourceConditionalAccumulatorSharedName ¶
func ResourceConditionalAccumulatorSharedName(value string) ResourceConditionalAccumulatorAttr
ResourceConditionalAccumulatorSharedName sets the optional shared_name attribute to value.
value: If non-empty, this accumulator will be shared under the given name across multiple sessions. If not specified, defaults to ""
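A minimal sketch of the container/shared_name pattern, assuming ResourceConditionalAccumulator takes (scope, dtype, shape, optional ...) and returns a resource handle; the shared name "grad_acc" is purely illustrative:

    s := NewScope()
    acc := ResourceConditionalAccumulator(s, tf.Float, tf.MakeShape(10, 10),
        ResourceConditionalAccumulatorSharedName("grad_acc"), // share across sessions under this name
        ResourceConditionalAccumulatorReductionType("SUM"))   // sum gradients instead of averaging
    if s.Err() != nil {
        panic(s.Err())
    }
    _ = acc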
type ResourceGatherAttr ¶
type ResourceGatherAttr func(optionalAttr)
ResourceGatherAttr is an optional argument to ResourceGather.
func ResourceGatherBatchDims ¶
func ResourceGatherBatchDims(value int64) ResourceGatherAttr
ResourceGatherBatchDims sets the optional batch_dims attribute to value. If not specified, defaults to 0
func ResourceGatherValidateIndices ¶
func ResourceGatherValidateIndices(value bool) ResourceGatherAttr
ResourceGatherValidateIndices sets the optional validate_indices attribute to value. If not specified, defaults to true
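For example (a sketch only; it assumes ResourceGather takes the variable's resource handle, the indices, and the output dtype, and that VarHandleOp creates the handle):

    s := NewScope()
    table := VarHandleOp(s, tf.Float, tf.MakeShape(1000, 64)) // hypothetical embedding table
    ids := Placeholder(s, tf.Int64)                           // row indices to look up
    rows := ResourceGather(s, table, ids, tf.Float,
        ResourceGatherBatchDims(0),            // the default, shown explicitly
        ResourceGatherValidateIndices(true))
    if s.Err() != nil {
        panic(s.Err())
    }
    _ = rows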
type ResourceScatterNdAddAttr ¶
type ResourceScatterNdAddAttr func(optionalAttr)
ResourceScatterNdAddAttr is an optional argument to ResourceScatterNdAdd.
func ResourceScatterNdAddBadIndicesPolicy ¶ added in v0.8.2
func ResourceScatterNdAddBadIndicesPolicy(value string) ResourceScatterNdAddAttr
ResourceScatterNdAddBadIndicesPolicy sets the optional bad_indices_policy attribute to value. If not specified, defaults to ""
func ResourceScatterNdAddUseLocking ¶
func ResourceScatterNdAddUseLocking(value bool) ResourceScatterNdAddAttr
ResourceScatterNdAddUseLocking sets the optional use_locking attribute to value.
value: If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to true
type ResourceScatterNdSubAttr ¶
type ResourceScatterNdSubAttr func(optionalAttr)
ResourceScatterNdSubAttr is an optional argument to ResourceScatterNdSub.
func ResourceScatterNdSubBadIndicesPolicy ¶ added in v0.8.2
func ResourceScatterNdSubBadIndicesPolicy(value string) ResourceScatterNdSubAttr
ResourceScatterNdSubBadIndicesPolicy sets the optional bad_indices_policy attribute to value. If not specified, defaults to ""
func ResourceScatterNdSubUseLocking ¶
func ResourceScatterNdSubUseLocking(value bool) ResourceScatterNdSubAttr
ResourceScatterNdSubUseLocking sets the optional use_locking attribute to value.
value: If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to true
type ResourceScatterNdUpdateAttr ¶
type ResourceScatterNdUpdateAttr func(optionalAttr)
ResourceScatterNdUpdateAttr is an optional argument to ResourceScatterNdUpdate.
func ResourceScatterNdUpdateBadIndicesPolicy ¶ added in v0.8.2
func ResourceScatterNdUpdateBadIndicesPolicy(value string) ResourceScatterNdUpdateAttr
ResourceScatterNdUpdateBadIndicesPolicy sets the optional bad_indices_policy attribute to value. If not specified, defaults to ""
func ResourceScatterNdUpdateUseLocking ¶
func ResourceScatterNdUpdateUseLocking(value bool) ResourceScatterNdUpdateAttr
ResourceScatterNdUpdateUseLocking sets the optional use_locking attribute to value.
value: If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to true
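A sketch of how the three ResourceScatterNd* attrs above are used, taking ResourceScatterNdAdd as the example and assuming it accepts (scope, ref, indices, updates, optional ...):

    s := NewScope()
    ref := VarHandleOp(s, tf.Float, tf.MakeShape(8))   // resource variable of shape [8]
    indices := Const(s, [][]int32{{0}, {4}, {7}})      // positions to update
    updates := Const(s, []float32{1, 2, 3})            // values added at those positions
    addOp := ResourceScatterNdAdd(s, ref, indices, updates,
        ResourceScatterNdAddUseLocking(true))          // already the default; shown for clarity
    if s.Err() != nil {
        panic(s.Err())
    }
    _ = addOp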
type ResourceSparseApplyAdadeltaAttr ¶
type ResourceSparseApplyAdadeltaAttr func(optionalAttr)
ResourceSparseApplyAdadeltaAttr is an optional argument to ResourceSparseApplyAdadelta.
func ResourceSparseApplyAdadeltaUseLocking ¶
func ResourceSparseApplyAdadeltaUseLocking(value bool) ResourceSparseApplyAdadeltaAttr
ResourceSparseApplyAdadeltaUseLocking sets the optional use_locking attribute to value.
value: If True, updating of the var and accum tensors will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
type ResourceSparseApplyAdagradAttr ¶
type ResourceSparseApplyAdagradAttr func(optionalAttr)
ResourceSparseApplyAdagradAttr is an optional argument to ResourceSparseApplyAdagrad.
func ResourceSparseApplyAdagradUpdateSlots ¶
func ResourceSparseApplyAdagradUpdateSlots(value bool) ResourceSparseApplyAdagradAttr
ResourceSparseApplyAdagradUpdateSlots sets the optional update_slots attribute to value. If not specified, defaults to true
func ResourceSparseApplyAdagradUseLocking ¶
func ResourceSparseApplyAdagradUseLocking(value bool) ResourceSparseApplyAdagradAttr
ResourceSparseApplyAdagradUseLocking sets the optional use_locking attribute to value.
value: If `True`, updating of the var and accum tensors will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
type ResourceSparseApplyAdagradDAAttr ¶
type ResourceSparseApplyAdagradDAAttr func(optionalAttr)
ResourceSparseApplyAdagradDAAttr is an optional argument to ResourceSparseApplyAdagradDA.
func ResourceSparseApplyAdagradDAUseLocking ¶
func ResourceSparseApplyAdagradDAUseLocking(value bool) ResourceSparseApplyAdagradDAAttr
ResourceSparseApplyAdagradDAUseLocking sets the optional use_locking attribute to value.
value: If True, updating of the var and accum tensors will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
type ResourceSparseApplyAdagradV2Attr ¶
type ResourceSparseApplyAdagradV2Attr func(optionalAttr)
ResourceSparseApplyAdagradV2Attr is an optional argument to ResourceSparseApplyAdagradV2.
func ResourceSparseApplyAdagradV2UpdateSlots ¶
func ResourceSparseApplyAdagradV2UpdateSlots(value bool) ResourceSparseApplyAdagradV2Attr
ResourceSparseApplyAdagradV2UpdateSlots sets the optional update_slots attribute to value. If not specified, defaults to true
func ResourceSparseApplyAdagradV2UseLocking ¶
func ResourceSparseApplyAdagradV2UseLocking(value bool) ResourceSparseApplyAdagradV2Attr
ResourceSparseApplyAdagradV2UseLocking sets the optional use_locking attribute to value.
value: If `True`, updating of the var and accum tensors will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
type ResourceSparseApplyCenteredRMSPropAttr ¶
type ResourceSparseApplyCenteredRMSPropAttr func(optionalAttr)
ResourceSparseApplyCenteredRMSPropAttr is an optional argument to ResourceSparseApplyCenteredRMSProp.
func ResourceSparseApplyCenteredRMSPropUseLocking ¶
func ResourceSparseApplyCenteredRMSPropUseLocking(value bool) ResourceSparseApplyCenteredRMSPropAttr
ResourceSparseApplyCenteredRMSPropUseLocking sets the optional use_locking attribute to value.
value: If `True`, updating of the var, mg, ms, and mom tensors is protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
type ResourceSparseApplyFtrlAttr ¶
type ResourceSparseApplyFtrlAttr func(optionalAttr)
ResourceSparseApplyFtrlAttr is an optional argument to ResourceSparseApplyFtrl.
func ResourceSparseApplyFtrlMultiplyLinearByLr ¶
func ResourceSparseApplyFtrlMultiplyLinearByLr(value bool) ResourceSparseApplyFtrlAttr
ResourceSparseApplyFtrlMultiplyLinearByLr sets the optional multiply_linear_by_lr attribute to value. If not specified, defaults to false
func ResourceSparseApplyFtrlUseLocking ¶
func ResourceSparseApplyFtrlUseLocking(value bool) ResourceSparseApplyFtrlAttr
ResourceSparseApplyFtrlUseLocking sets the optional use_locking attribute to value.
value: If `True`, updating of the var and accum tensors will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
type ResourceSparseApplyFtrlV2Attr ¶
type ResourceSparseApplyFtrlV2Attr func(optionalAttr)
ResourceSparseApplyFtrlV2Attr is an optional argument to ResourceSparseApplyFtrlV2.
func ResourceSparseApplyFtrlV2MultiplyLinearByLr ¶
func ResourceSparseApplyFtrlV2MultiplyLinearByLr(value bool) ResourceSparseApplyFtrlV2Attr
ResourceSparseApplyFtrlV2MultiplyLinearByLr sets the optional multiply_linear_by_lr attribute to value. If not specified, defaults to false
func ResourceSparseApplyFtrlV2UseLocking ¶
func ResourceSparseApplyFtrlV2UseLocking(value bool) ResourceSparseApplyFtrlV2Attr
ResourceSparseApplyFtrlV2UseLocking sets the optional use_locking attribute to value.
value: If `True`, updating of the var and accum tensors will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
type ResourceSparseApplyKerasMomentumAttr ¶
type ResourceSparseApplyKerasMomentumAttr func(optionalAttr)
ResourceSparseApplyKerasMomentumAttr is an optional argument to ResourceSparseApplyKerasMomentum.
func ResourceSparseApplyKerasMomentumUseLocking ¶
func ResourceSparseApplyKerasMomentumUseLocking(value bool) ResourceSparseApplyKerasMomentumAttr
ResourceSparseApplyKerasMomentumUseLocking sets the optional use_locking attribute to value.
value: If `True`, updating of the var and accum tensors will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
func ResourceSparseApplyKerasMomentumUseNesterov ¶
func ResourceSparseApplyKerasMomentumUseNesterov(value bool) ResourceSparseApplyKerasMomentumAttr
ResourceSparseApplyKerasMomentumUseNesterov sets the optional use_nesterov attribute to value.
value: If `True`, the tensor passed to compute grad will be var + momentum * accum, so in the end, the var you get is actually var + momentum * accum. If not specified, defaults to false
type ResourceSparseApplyMomentumAttr ¶
type ResourceSparseApplyMomentumAttr func(optionalAttr)
ResourceSparseApplyMomentumAttr is an optional argument to ResourceSparseApplyMomentum.
func ResourceSparseApplyMomentumUseLocking ¶
func ResourceSparseApplyMomentumUseLocking(value bool) ResourceSparseApplyMomentumAttr
ResourceSparseApplyMomentumUseLocking sets the optional use_locking attribute to value.
value: If `True`, updating of the var and accum tensors will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
func ResourceSparseApplyMomentumUseNesterov ¶
func ResourceSparseApplyMomentumUseNesterov(value bool) ResourceSparseApplyMomentumAttr
ResourceSparseApplyMomentumUseNesterov sets the optional use_nesterov attribute to value.
value: If `True`, the tensor passed to compute grad will be var - lr * momentum * accum, so in the end, the var you get is actually var - lr * momentum * accum. If not specified, defaults to false
type ResourceSparseApplyProximalAdagradAttr ¶
type ResourceSparseApplyProximalAdagradAttr func(optionalAttr)
ResourceSparseApplyProximalAdagradAttr is an optional argument to ResourceSparseApplyProximalAdagrad.
func ResourceSparseApplyProximalAdagradUseLocking ¶
func ResourceSparseApplyProximalAdagradUseLocking(value bool) ResourceSparseApplyProximalAdagradAttr
ResourceSparseApplyProximalAdagradUseLocking sets the optional use_locking attribute to value.
value: If True, updating of the var and accum tensors will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
type ResourceSparseApplyProximalGradientDescentAttr ¶
type ResourceSparseApplyProximalGradientDescentAttr func(optionalAttr)
ResourceSparseApplyProximalGradientDescentAttr is an optional argument to ResourceSparseApplyProximalGradientDescent.
func ResourceSparseApplyProximalGradientDescentUseLocking ¶
func ResourceSparseApplyProximalGradientDescentUseLocking(value bool) ResourceSparseApplyProximalGradientDescentAttr
ResourceSparseApplyProximalGradientDescentUseLocking sets the optional use_locking attribute to value.
value: If True, the subtraction will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
type ResourceSparseApplyRMSPropAttr ¶
type ResourceSparseApplyRMSPropAttr func(optionalAttr)
ResourceSparseApplyRMSPropAttr is an optional argument to ResourceSparseApplyRMSProp.
func ResourceSparseApplyRMSPropUseLocking ¶
func ResourceSparseApplyRMSPropUseLocking(value bool) ResourceSparseApplyRMSPropAttr
ResourceSparseApplyRMSPropUseLocking sets the optional use_locking attribute to value.
value: If `True`, updating of the var, ms, and mom tensors is protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. If not specified, defaults to false
type ResourceStridedSliceAssignAttr ¶
type ResourceStridedSliceAssignAttr func(optionalAttr)
ResourceStridedSliceAssignAttr is an optional argument to ResourceStridedSliceAssign.
func ResourceStridedSliceAssignBeginMask ¶
func ResourceStridedSliceAssignBeginMask(value int64) ResourceStridedSliceAssignAttr
ResourceStridedSliceAssignBeginMask sets the optional begin_mask attribute to value. If not specified, defaults to 0
func ResourceStridedSliceAssignEllipsisMask ¶
func ResourceStridedSliceAssignEllipsisMask(value int64) ResourceStridedSliceAssignAttr
ResourceStridedSliceAssignEllipsisMask sets the optional ellipsis_mask attribute to value. If not specified, defaults to 0
func ResourceStridedSliceAssignEndMask ¶
func ResourceStridedSliceAssignEndMask(value int64) ResourceStridedSliceAssignAttr
ResourceStridedSliceAssignEndMask sets the optional end_mask attribute to value. If not specified, defaults to 0
func ResourceStridedSliceAssignNewAxisMask ¶
func ResourceStridedSliceAssignNewAxisMask(value int64) ResourceStridedSliceAssignAttr
ResourceStridedSliceAssignNewAxisMask sets the optional new_axis_mask attribute to value. If not specified, defaults to 0
func ResourceStridedSliceAssignShrinkAxisMask ¶
func ResourceStridedSliceAssignShrinkAxisMask(value int64) ResourceStridedSliceAssignAttr
ResourceStridedSliceAssignShrinkAxisMask sets the optional shrink_axis_mask attribute to value. If not specified, defaults to 0
type RestoreAttr ¶
type RestoreAttr func(optionalAttr)
RestoreAttr is an optional argument to Restore.
func RestorePreferredShard ¶
func RestorePreferredShard(value int64) RestoreAttr
RestorePreferredShard sets the optional preferred_shard attribute to value.
value: Index of file to open first if multiple files match `file_pattern`. If not specified, defaults to -1
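For instance (a sketch only; it assumes Restore takes a file pattern, a tensor name, and the dtype to restore, and the path and tensor name are hypothetical):

    s := NewScope()
    pattern := Const(s, "/tmp/model/ckpt-*") // hypothetical checkpoint file pattern
    name := Const(s, "weights")              // hypothetical tensor name inside the checkpoint
    w := Restore(s, pattern, name, tf.Float,
        RestorePreferredShard(0))            // open the first matching file first
    if s.Err() != nil {
        panic(s.Err())
    }
    _ = w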
type RestoreSliceAttr ¶
type RestoreSliceAttr func(optionalAttr)
RestoreSliceAttr is an optional argument to RestoreSlice.
func RestoreSlicePreferredShard ¶
func RestoreSlicePreferredShard(value int64) RestoreSliceAttr
RestoreSlicePreferredShard sets the optional preferred_shard attribute to value.
value: Index of file to open first if multiple files match `file_pattern`. See the documentation for `Restore`. If not specified, defaults to -1
type RetrieveTPUEmbeddingADAMParametersAttr ¶
type RetrieveTPUEmbeddingADAMParametersAttr func(optionalAttr)
RetrieveTPUEmbeddingADAMParametersAttr is an optional argument to RetrieveTPUEmbeddingADAMParameters.
func RetrieveTPUEmbeddingADAMParametersConfig ¶
func RetrieveTPUEmbeddingADAMParametersConfig(value string) RetrieveTPUEmbeddingADAMParametersAttr
RetrieveTPUEmbeddingADAMParametersConfig sets the optional config attribute to value. If not specified, defaults to ""
func RetrieveTPUEmbeddingADAMParametersTableId ¶
func RetrieveTPUEmbeddingADAMParametersTableId(value int64) RetrieveTPUEmbeddingADAMParametersAttr
RetrieveTPUEmbeddingADAMParametersTableId sets the optional table_id attribute to value. If not specified, defaults to -1
func RetrieveTPUEmbeddingADAMParametersTableName ¶
func RetrieveTPUEmbeddingADAMParametersTableName(value string) RetrieveTPUEmbeddingADAMParametersAttr
RetrieveTPUEmbeddingADAMParametersTableName sets the optional table_name attribute to value. If not specified, defaults to ""
type RetrieveTPUEmbeddingAdadeltaParametersAttr ¶
type RetrieveTPUEmbeddingAdadeltaParametersAttr func(optionalAttr)
RetrieveTPUEmbeddingAdadeltaParametersAttr is an optional argument to RetrieveTPUEmbeddingAdadeltaParameters.
func RetrieveTPUEmbeddingAdadeltaParametersConfig ¶
func RetrieveTPUEmbeddingAdadeltaParametersConfig(value string) RetrieveTPUEmbeddingAdadeltaParametersAttr
RetrieveTPUEmbeddingAdadeltaParametersConfig sets the optional config attribute to value. If not specified, defaults to ""
func RetrieveTPUEmbeddingAdadeltaParametersTableId ¶
func RetrieveTPUEmbeddingAdadeltaParametersTableId(value int64) RetrieveTPUEmbeddingAdadeltaParametersAttr
RetrieveTPUEmbeddingAdadeltaParametersTableId sets the optional table_id attribute to value. If not specified, defaults to -1
func RetrieveTPUEmbeddingAdadeltaParametersTableName ¶
func RetrieveTPUEmbeddingAdadeltaParametersTableName(value string) RetrieveTPUEmbeddingAdadeltaParametersAttr
RetrieveTPUEmbeddingAdadeltaParametersTableName sets the optional table_name attribute to value. If not specified, defaults to ""
type RetrieveTPUEmbeddingAdagradMomentumParametersAttr ¶
type RetrieveTPUEmbeddingAdagradMomentumParametersAttr func(optionalAttr)
RetrieveTPUEmbeddingAdagradMomentumParametersAttr is an optional argument to RetrieveTPUEmbeddingAdagradMomentumParameters.
func RetrieveTPUEmbeddingAdagradMomentumParametersConfig ¶
func RetrieveTPUEmbeddingAdagradMomentumParametersConfig(value string) RetrieveTPUEmbeddingAdagradMomentumParametersAttr
RetrieveTPUEmbeddingAdagradMomentumParametersConfig sets the optional config attribute to value. If not specified, defaults to ""
func RetrieveTPUEmbeddingAdagradMomentumParametersTableId ¶
func RetrieveTPUEmbeddingAdagradMomentumParametersTableId(value int64) RetrieveTPUEmbeddingAdagradMomentumParametersAttr
RetrieveTPUEmbeddingAdagradMomentumParametersTableId sets the optional table_id attribute to value. If not specified, defaults to -1
func RetrieveTPUEmbeddingAdagradMomentumParametersTableName ¶
func RetrieveTPUEmbeddingAdagradMomentumParametersTableName(value string) RetrieveTPUEmbeddingAdagradMomentumParametersAttr
RetrieveTPUEmbeddingAdagradMomentumParametersTableName sets the optional table_name attribute to value. If not specified, defaults to ""
type RetrieveTPUEmbeddingAdagradParametersAttr ¶
type RetrieveTPUEmbeddingAdagradParametersAttr func(optionalAttr)
RetrieveTPUEmbeddingAdagradParametersAttr is an optional argument to RetrieveTPUEmbeddingAdagradParameters.
func RetrieveTPUEmbeddingAdagradParametersConfig ¶
func RetrieveTPUEmbeddingAdagradParametersConfig(value string) RetrieveTPUEmbeddingAdagradParametersAttr
RetrieveTPUEmbeddingAdagradParametersConfig sets the optional config attribute to value. If not specified, defaults to ""
func RetrieveTPUEmbeddingAdagradParametersTableId ¶
func RetrieveTPUEmbeddingAdagradParametersTableId(value int64) RetrieveTPUEmbeddingAdagradParametersAttr
RetrieveTPUEmbeddingAdagradParametersTableId sets the optional table_id attribute to value. If not specified, defaults to -1
func RetrieveTPUEmbeddingAdagradParametersTableName ¶
func RetrieveTPUEmbeddingAdagradParametersTableName(value string) RetrieveTPUEmbeddingAdagradParametersAttr
RetrieveTPUEmbeddingAdagradParametersTableName sets the optional table_name attribute to value. If not specified, defaults to ""
type RetrieveTPUEmbeddingCenteredRMSPropParametersAttr ¶
type RetrieveTPUEmbeddingCenteredRMSPropParametersAttr func(optionalAttr)
RetrieveTPUEmbeddingCenteredRMSPropParametersAttr is an optional argument to RetrieveTPUEmbeddingCenteredRMSPropParameters.
func RetrieveTPUEmbeddingCenteredRMSPropParametersConfig ¶
func RetrieveTPUEmbeddingCenteredRMSPropParametersConfig(value string) RetrieveTPUEmbeddingCenteredRMSPropParametersAttr
RetrieveTPUEmbeddingCenteredRMSPropParametersConfig sets the optional config attribute to value. If not specified, defaults to ""
func RetrieveTPUEmbeddingCenteredRMSPropParametersTableId ¶
func RetrieveTPUEmbeddingCenteredRMSPropParametersTableId(value int64) RetrieveTPUEmbeddingCenteredRMSPropParametersAttr
RetrieveTPUEmbeddingCenteredRMSPropParametersTableId sets the optional table_id attribute to value. If not specified, defaults to -1
func RetrieveTPUEmbeddingCenteredRMSPropParametersTableName ¶
func RetrieveTPUEmbeddingCenteredRMSPropParametersTableName(value string) RetrieveTPUEmbeddingCenteredRMSPropParametersAttr
RetrieveTPUEmbeddingCenteredRMSPropParametersTableName sets the optional table_name attribute to value. If not specified, defaults to ""
type RetrieveTPUEmbeddingFTRLParametersAttr ¶
type RetrieveTPUEmbeddingFTRLParametersAttr func(optionalAttr)
RetrieveTPUEmbeddingFTRLParametersAttr is an optional argument to RetrieveTPUEmbeddingFTRLParameters.
func RetrieveTPUEmbeddingFTRLParametersConfig ¶
func RetrieveTPUEmbeddingFTRLParametersConfig(value string) RetrieveTPUEmbeddingFTRLParametersAttr
RetrieveTPUEmbeddingFTRLParametersConfig sets the optional config attribute to value. If not specified, defaults to ""
func RetrieveTPUEmbeddingFTRLParametersTableId ¶
func RetrieveTPUEmbeddingFTRLParametersTableId(value int64) RetrieveTPUEmbeddingFTRLParametersAttr
RetrieveTPUEmbeddingFTRLParametersTableId sets the optional table_id attribute to value. If not specified, defaults to -1
func RetrieveTPUEmbeddingFTRLParametersTableName ¶
func RetrieveTPUEmbeddingFTRLParametersTableName(value string) RetrieveTPUEmbeddingFTRLParametersAttr
RetrieveTPUEmbeddingFTRLParametersTableName sets the optional table_name attribute to value. If not specified, defaults to ""
type RetrieveTPUEmbeddingFrequencyEstimatorParametersAttr ¶
type RetrieveTPUEmbeddingFrequencyEstimatorParametersAttr func(optionalAttr)
RetrieveTPUEmbeddingFrequencyEstimatorParametersAttr is an optional argument to RetrieveTPUEmbeddingFrequencyEstimatorParameters.
func RetrieveTPUEmbeddingFrequencyEstimatorParametersConfig ¶
func RetrieveTPUEmbeddingFrequencyEstimatorParametersConfig(value string) RetrieveTPUEmbeddingFrequencyEstimatorParametersAttr
RetrieveTPUEmbeddingFrequencyEstimatorParametersConfig sets the optional config attribute to value. If not specified, defaults to ""
func RetrieveTPUEmbeddingFrequencyEstimatorParametersTableId ¶
func RetrieveTPUEmbeddingFrequencyEstimatorParametersTableId(value int64) RetrieveTPUEmbeddingFrequencyEstimatorParametersAttr
RetrieveTPUEmbeddingFrequencyEstimatorParametersTableId sets the optional table_id attribute to value. If not specified, defaults to -1
func RetrieveTPUEmbeddingFrequencyEstimatorParametersTableName ¶
func RetrieveTPUEmbeddingFrequencyEstimatorParametersTableName(value string) RetrieveTPUEmbeddingFrequencyEstimatorParametersAttr
RetrieveTPUEmbeddingFrequencyEstimatorParametersTableName sets the optional table_name attribute to value. If not specified, defaults to ""
type RetrieveTPUEmbeddingMDLAdagradLightParametersAttr ¶
type RetrieveTPUEmbeddingMDLAdagradLightParametersAttr func(optionalAttr)
RetrieveTPUEmbeddingMDLAdagradLightParametersAttr is an optional argument to RetrieveTPUEmbeddingMDLAdagradLightParameters.
func RetrieveTPUEmbeddingMDLAdagradLightParametersConfig ¶
func RetrieveTPUEmbeddingMDLAdagradLightParametersConfig(value string) RetrieveTPUEmbeddingMDLAdagradLightParametersAttr
RetrieveTPUEmbeddingMDLAdagradLightParametersConfig sets the optional config attribute to value. If not specified, defaults to ""
func RetrieveTPUEmbeddingMDLAdagradLightParametersTableId ¶
func RetrieveTPUEmbeddingMDLAdagradLightParametersTableId(value int64) RetrieveTPUEmbeddingMDLAdagradLightParametersAttr
RetrieveTPUEmbeddingMDLAdagradLightParametersTableId sets the optional table_id attribute to value. If not specified, defaults to -1
func RetrieveTPUEmbeddingMDLAdagradLightParametersTableName ¶
func RetrieveTPUEmbeddingMDLAdagradLightParametersTableName(value string) RetrieveTPUEmbeddingMDLAdagradLightParametersAttr
RetrieveTPUEmbeddingMDLAdagradLightParametersTableName sets the optional table_name attribute to value. If not specified, defaults to ""
type RetrieveTPUEmbeddingMomentumParametersAttr ¶
type RetrieveTPUEmbeddingMomentumParametersAttr func(optionalAttr)
RetrieveTPUEmbeddingMomentumParametersAttr is an optional argument to RetrieveTPUEmbeddingMomentumParameters.
func RetrieveTPUEmbeddingMomentumParametersConfig ¶
func RetrieveTPUEmbeddingMomentumParametersConfig(value string) RetrieveTPUEmbeddingMomentumParametersAttr
RetrieveTPUEmbeddingMomentumParametersConfig sets the optional config attribute to value. If not specified, defaults to ""
func RetrieveTPUEmbeddingMomentumParametersTableId ¶
func RetrieveTPUEmbeddingMomentumParametersTableId(value int64) RetrieveTPUEmbeddingMomentumParametersAttr
RetrieveTPUEmbeddingMomentumParametersTableId sets the optional table_id attribute to value. If not specified, defaults to -1
func RetrieveTPUEmbeddingMomentumParametersTableName ¶
func RetrieveTPUEmbeddingMomentumParametersTableName(value string) RetrieveTPUEmbeddingMomentumParametersAttr
RetrieveTPUEmbeddingMomentumParametersTableName sets the optional table_name attribute to value. If not specified, defaults to ""
type RetrieveTPUEmbeddingProximalAdagradParametersAttr ¶
type RetrieveTPUEmbeddingProximalAdagradParametersAttr func(optionalAttr)
RetrieveTPUEmbeddingProximalAdagradParametersAttr is an optional argument to RetrieveTPUEmbeddingProximalAdagradParameters.
func RetrieveTPUEmbeddingProximalAdagradParametersConfig ¶
func RetrieveTPUEmbeddingProximalAdagradParametersConfig(value string) RetrieveTPUEmbeddingProximalAdagradParametersAttr
RetrieveTPUEmbeddingProximalAdagradParametersConfig sets the optional config attribute to value. If not specified, defaults to ""
func RetrieveTPUEmbeddingProximalAdagradParametersTableId ¶
func RetrieveTPUEmbeddingProximalAdagradParametersTableId(value int64) RetrieveTPUEmbeddingProximalAdagradParametersAttr
RetrieveTPUEmbeddingProximalAdagradParametersTableId sets the optional table_id attribute to value. If not specified, defaults to -1
func RetrieveTPUEmbeddingProximalAdagradParametersTableName ¶
func RetrieveTPUEmbeddingProximalAdagradParametersTableName(value string) RetrieveTPUEmbeddingProximalAdagradParametersAttr
RetrieveTPUEmbeddingProximalAdagradParametersTableName sets the optional table_name attribute to value. If not specified, defaults to ""
type RetrieveTPUEmbeddingRMSPropParametersAttr ¶
type RetrieveTPUEmbeddingRMSPropParametersAttr func(optionalAttr)
RetrieveTPUEmbeddingRMSPropParametersAttr is an optional argument to RetrieveTPUEmbeddingRMSPropParameters.
func RetrieveTPUEmbeddingRMSPropParametersConfig ¶
func RetrieveTPUEmbeddingRMSPropParametersConfig(value string) RetrieveTPUEmbeddingRMSPropParametersAttr
RetrieveTPUEmbeddingRMSPropParametersConfig sets the optional config attribute to value. If not specified, defaults to ""
func RetrieveTPUEmbeddingRMSPropParametersTableId ¶
func RetrieveTPUEmbeddingRMSPropParametersTableId(value int64) RetrieveTPUEmbeddingRMSPropParametersAttr
RetrieveTPUEmbeddingRMSPropParametersTableId sets the optional table_id attribute to value. If not specified, defaults to -1
func RetrieveTPUEmbeddingRMSPropParametersTableName ¶
func RetrieveTPUEmbeddingRMSPropParametersTableName(value string) RetrieveTPUEmbeddingRMSPropParametersAttr
RetrieveTPUEmbeddingRMSPropParametersTableName sets the optional table_name attribute to value. If not specified, defaults to ""
type RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr ¶
type RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr func(optionalAttr)
RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr is an optional argument to RetrieveTPUEmbeddingStochasticGradientDescentParameters.
func RetrieveTPUEmbeddingStochasticGradientDescentParametersConfig ¶
func RetrieveTPUEmbeddingStochasticGradientDescentParametersConfig(value string) RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr
RetrieveTPUEmbeddingStochasticGradientDescentParametersConfig sets the optional config attribute to value. If not specified, defaults to ""
func RetrieveTPUEmbeddingStochasticGradientDescentParametersTableId ¶
func RetrieveTPUEmbeddingStochasticGradientDescentParametersTableId(value int64) RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr
RetrieveTPUEmbeddingStochasticGradientDescentParametersTableId sets the optional table_id attribute to value. If not specified, defaults to -1
func RetrieveTPUEmbeddingStochasticGradientDescentParametersTableName ¶
func RetrieveTPUEmbeddingStochasticGradientDescentParametersTableName(value string) RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr
RetrieveTPUEmbeddingStochasticGradientDescentParametersTableName sets the optional table_name attribute to value. If not specified, defaults to ""
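All of the RetrieveTPUEmbedding*Parameters attrs above follow the same Config/TableId/TableName pattern. A sketch using the Adagrad variant, assuming it takes (scope, num_shards, shard_id, optional ...) and returns the parameter and accumulator tensors; the table name is hypothetical:

    s := NewScope()
    params, accums := RetrieveTPUEmbeddingAdagradParameters(s,
        1, 0, // num_shards, shard_id
        RetrieveTPUEmbeddingAdagradParametersTableName("emb_table_0")) // identify the table by name rather than id
    if s.Err() != nil {
        panic(s.Err())
    }
    _, _ = params, accums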
type ReverseSequenceAttr ¶
type ReverseSequenceAttr func(optionalAttr)
ReverseSequenceAttr is an optional argument to ReverseSequence.
func ReverseSequenceBatchDim ¶
func ReverseSequenceBatchDim(value int64) ReverseSequenceAttr
ReverseSequenceBatchDim sets the optional batch_dim attribute to value.
value: The dimension along which reversal is performed. If not specified, defaults to 0
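A sketch, assuming ReverseSequence takes (scope, input, seq_lengths, seq_dim, optional ...):

    s := NewScope()
    input := Placeholder(s, tf.Float)             // e.g. shape [batch, time, features]
    lengths := Placeholder(s, tf.Int64)           // per-example valid lengths
    out := ReverseSequence(s, input, lengths, 1,  // reverse along the time dimension
        ReverseSequenceBatchDim(0))               // batch dimension is 0 (the default)
    if s.Err() != nil {
        panic(s.Err())
    }
    _ = out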
type SampleDistortedBoundingBoxAttr ¶
type SampleDistortedBoundingBoxAttr func(optionalAttr)
SampleDistortedBoundingBoxAttr is an optional argument to SampleDistortedBoundingBox.
func SampleDistortedBoundingBoxAreaRange ¶
func SampleDistortedBoundingBoxAreaRange(value []float32) SampleDistortedBoundingBoxAttr
SampleDistortedBoundingBoxAreaRange sets the optional area_range attribute to value.
value: The cropped area of the image must contain a fraction of the supplied image within this range. If not specified, defaults to {f:0.05 f:1}
func SampleDistortedBoundingBoxAspectRatioRange ¶
func SampleDistortedBoundingBoxAspectRatioRange(value []float32) SampleDistortedBoundingBoxAttr
SampleDistortedBoundingBoxAspectRatioRange sets the optional aspect_ratio_range attribute to value.
value: The cropped area of the image must have an aspect ratio = width / height within this range. If not specified, defaults to {f:0.75 f:1.33}
func SampleDistortedBoundingBoxMaxAttempts ¶
func SampleDistortedBoundingBoxMaxAttempts(value int64) SampleDistortedBoundingBoxAttr
SampleDistortedBoundingBoxMaxAttempts sets the optional max_attempts attribute to value.
value: Number of attempts at generating a cropped region of the image of the specified constraints. After `max_attempts` failures, return the entire image. If not specified, defaults to 100
func SampleDistortedBoundingBoxMinObjectCovered ¶
func SampleDistortedBoundingBoxMinObjectCovered(value float32) SampleDistortedBoundingBoxAttr
SampleDistortedBoundingBoxMinObjectCovered sets the optional min_object_covered attribute to value.
value: The cropped area of the image must contain at least this fraction of any bounding box supplied. The value of this parameter should be non-negative. In the case of 0, the cropped area does not need to overlap any of the bounding boxes supplied. If not specified, defaults to 0.1
func SampleDistortedBoundingBoxSeed ¶
func SampleDistortedBoundingBoxSeed(value int64) SampleDistortedBoundingBoxAttr
SampleDistortedBoundingBoxSeed sets the optional seed attribute to value.
value: If either `seed` or `seed2` are set to non-zero, the random number generator is seeded by the given `seed`. Otherwise, it is seeded by a random seed. If not specified, defaults to 0
func SampleDistortedBoundingBoxSeed2 ¶
func SampleDistortedBoundingBoxSeed2(value int64) SampleDistortedBoundingBoxAttr
SampleDistortedBoundingBoxSeed2 sets the optional seed2 attribute to value.
value: A second seed to avoid seed collision. If not specified, defaults to 0
func SampleDistortedBoundingBoxUseImageIfNoBoundingBoxes ¶
func SampleDistortedBoundingBoxUseImageIfNoBoundingBoxes(value bool) SampleDistortedBoundingBoxAttr
SampleDistortedBoundingBoxUseImageIfNoBoundingBoxes sets the optional use_image_if_no_bounding_boxes attribute to value.
value: Controls behavior if no bounding boxes are supplied. If true, assume an implicit bounding box covering the whole input. If false, raise an error. If not specified, defaults to false
type SampleDistortedBoundingBoxV2Attr ¶
type SampleDistortedBoundingBoxV2Attr func(optionalAttr)
SampleDistortedBoundingBoxV2Attr is an optional argument to SampleDistortedBoundingBoxV2.
func SampleDistortedBoundingBoxV2AreaRange ¶
func SampleDistortedBoundingBoxV2AreaRange(value []float32) SampleDistortedBoundingBoxV2Attr
SampleDistortedBoundingBoxV2AreaRange sets the optional area_range attribute to value.
value: The cropped area of the image must contain a fraction of the supplied image within this range. If not specified, defaults to {f:0.05 f:1}
func SampleDistortedBoundingBoxV2AspectRatioRange ¶
func SampleDistortedBoundingBoxV2AspectRatioRange(value []float32) SampleDistortedBoundingBoxV2Attr
SampleDistortedBoundingBoxV2AspectRatioRange sets the optional aspect_ratio_range attribute to value.
value: The cropped area of the image must have an aspect ratio = width / height within this range. If not specified, defaults to {f:0.75 f:1.33}
func SampleDistortedBoundingBoxV2MaxAttempts ¶
func SampleDistortedBoundingBoxV2MaxAttempts(value int64) SampleDistortedBoundingBoxV2Attr
SampleDistortedBoundingBoxV2MaxAttempts sets the optional max_attempts attribute to value.
value: Number of attempts at generating a cropped region of the image of the specified constraints. After `max_attempts` failures, return the entire image. If not specified, defaults to 100
func SampleDistortedBoundingBoxV2Seed ¶
func SampleDistortedBoundingBoxV2Seed(value int64) SampleDistortedBoundingBoxV2Attr
SampleDistortedBoundingBoxV2Seed sets the optional seed attribute to value.
value: If either `seed` or `seed2` are set to non-zero, the random number generator is seeded by the given `seed`. Otherwise, it is seeded by a random seed. If not specified, defaults to 0
func SampleDistortedBoundingBoxV2Seed2 ¶
func SampleDistortedBoundingBoxV2Seed2(value int64) SampleDistortedBoundingBoxV2Attr
SampleDistortedBoundingBoxV2Seed2 sets the optional seed2 attribute to value.
value: A second seed to avoid seed collision. If not specified, defaults to 0
func SampleDistortedBoundingBoxV2UseImageIfNoBoundingBoxes ¶
func SampleDistortedBoundingBoxV2UseImageIfNoBoundingBoxes(value bool) SampleDistortedBoundingBoxV2Attr
SampleDistortedBoundingBoxV2UseImageIfNoBoundingBoxes sets the optional use_image_if_no_bounding_boxes attribute to value.
value: Controls behavior if no bounding boxes are supplied. If true, assume an implicit bounding box covering the whole input. If false, raise an error. If not specified, defaults to false
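A sketch combining several of these attributes, assuming SampleDistortedBoundingBoxV2 takes (scope, image_size, bounding_boxes, min_object_covered, optional ...) and returns begin, size, and bboxes:

    s := NewScope()
    imageSize := Const(s, []int32{480, 640, 3})
    boxes := Placeholder(s, tf.Float)        // normalized boxes, shape [batch, N, 4]
    minCovered := Const(s, float32(0.5))
    begin, size, bbox := SampleDistortedBoundingBoxV2(s, imageSize, boxes, minCovered,
        SampleDistortedBoundingBoxV2AreaRange([]float32{0.1, 1.0}),
        SampleDistortedBoundingBoxV2MaxAttempts(200),
        SampleDistortedBoundingBoxV2UseImageIfNoBoundingBoxes(true))
    if s.Err() != nil {
        panic(s.Err())
    }
    _, _, _ = begin, size, bbox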
type ScatterNdAttr ¶ added in v0.8.2
type ScatterNdAttr func(optionalAttr)
ScatterNdAttr is an optional argument to ScatterNd.
func ScatterNdBadIndicesPolicy ¶ added in v0.8.2
func ScatterNdBadIndicesPolicy(value string) ScatterNdAttr
ScatterNdBadIndicesPolicy sets the optional bad_indices_policy attribute to value. If not specified, defaults to ""
type ScatterNdNonAliasingAddAttr ¶ added in v0.8.2
type ScatterNdNonAliasingAddAttr func(optionalAttr)
ScatterNdNonAliasingAddAttr is an optional argument to ScatterNdNonAliasingAdd.
func ScatterNdNonAliasingAddBadIndicesPolicy ¶ added in v0.8.2
func ScatterNdNonAliasingAddBadIndicesPolicy(value string) ScatterNdNonAliasingAddAttr
ScatterNdNonAliasingAddBadIndicesPolicy sets the optional bad_indices_policy attribute to value. If not specified, defaults to ""
type Scope ¶
type Scope struct {
// contains filtered or unexported fields
}
Scope encapsulates common operation properties when building a Graph.
A Scope object (and its derivatives, e.g., those obtained from Scope.SubScope) acts as a builder for graphs, allowing common properties (such as a name prefix) to be specified for multiple operations being added to the graph.
A Scope object and all its derivatives (e.g., obtained from Scope.SubScope) are not safe for concurrent use by multiple goroutines.
func NewScopeWithGraph ¶
NewScopeWithGraph creates a Scope initialized with the Graph that is passed in.
func (*Scope) AddOperation ¶
AddOperation adds the operation to the Graph managed by s.
If there is a name prefix associated with s (such as if s was created by a call to SubScope), then this prefix will be applied to the name of the operation being added. See also Graph.AddOperation.
func (*Scope) Err ¶
Err returns the error, if any, encountered during the construction of the Graph managed by s.
Once Err returns a non-nil error, all future calls will do the same, indicating that the scope should be discarded as the graph could not be constructed.
func (*Scope) Finalize ¶
Finalize returns the Graph on which this scope operates and renders s unusable. If there was an error during graph construction, that error is returned instead.
func (*Scope) SubScope ¶
SubScope returns a new Scope which will cause all operations added to the graph to be namespaced with 'namespace'. If namespace collides with an existing namespace within the scope, then a suffix will be added.
Example ¶
var (
    s  = NewScope()
    c1 = Const(s.SubScope("x"), int64(1))
    c2 = Const(s.SubScope("x"), int64(1))
)
if s.Err() != nil {
    panic(s.Err())
}
fmt.Println(c1.Op.Name(), c2.Op.Name())
Output: x/Const x_1/Const
func (*Scope) UpdateErr ¶
UpdateErr is used to notify Scope of any graph construction errors while creating the operation op.
func (*Scope) WithControlDependencies ¶
WithControlDependencies returns a new Scope which will cause all operations added to the graph to execute only after all the provided operations have executed first (in addition to any other control dependencies in s).
func (*Scope) WithDevice ¶
WithDevice returns a new Scope which will cause all operations added to the graph to execute on devices that match the provided device specification.
For example, WithDevice("/device:GPU:0") will cause operations added to the graph to execute on GPU #0.
An empty string removes any device restrictions.
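A sketch combining the Scope helpers above; the device string is illustrative and Neg is used only as a stand-in op:

    s := NewScope()
    a := Const(s, int64(1))
    gpu := s.WithDevice("/device:GPU:0")    // ops added via gpu are pinned to GPU #0
    b := Neg(gpu, a)
    deps := s.WithControlDependencies(b.Op) // c is computed only after b's operation has run
    c := Const(deps, int64(2))
    if s.Err() != nil {
        panic(s.Err())
    }
    _ = c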
type SdcaOptimizerAttr ¶
type SdcaOptimizerAttr func(optionalAttr)
SdcaOptimizerAttr is an optional argument to SdcaOptimizer.
func SdcaOptimizerAdaptative ¶
func SdcaOptimizerAdaptative(value bool) SdcaOptimizerAttr
SdcaOptimizerAdaptative sets the optional adaptative attribute to value.
value: Whether to use Adaptive SDCA for the inner loop. If not specified, defaults to true
type SdcaOptimizerV2Attr ¶
type SdcaOptimizerV2Attr func(optionalAttr)
SdcaOptimizerV2Attr is an optional argument to SdcaOptimizerV2.
func SdcaOptimizerV2Adaptive ¶
func SdcaOptimizerV2Adaptive(value bool) SdcaOptimizerV2Attr
SdcaOptimizerV2Adaptive sets the optional adaptive attribute to value.
value: Whether to use Adaptive SDCA for the inner loop. If not specified, defaults to true
type SelfAdjointEigV2Attr ¶
type SelfAdjointEigV2Attr func(optionalAttr)
SelfAdjointEigV2Attr is an optional argument to SelfAdjointEigV2.
func SelfAdjointEigV2ComputeV ¶
func SelfAdjointEigV2ComputeV(value bool) SelfAdjointEigV2Attr
SelfAdjointEigV2ComputeV sets the optional compute_v attribute to value.
value: If `True` then eigenvectors will be computed and returned in `v`. Otherwise, only the eigenvalues will be computed. If not specified, defaults to true
type SendAttr ¶
type SendAttr func(optionalAttr)
SendAttr is an optional argument to Send.
func SendClientTerminated ¶
func SendClientTerminated(value bool) SendAttr
SendClientTerminated sets the optional client_terminated attribute to value.
value: If set to true, this indicates that the node was added to the graph as a result of a client-side feed or fetch of Tensor data, in which case the corresponding send or recv is expected to be managed locally by the caller. If not specified, defaults to false
type SerializeIteratorAttr ¶
type SerializeIteratorAttr func(optionalAttr)
SerializeIteratorAttr is an optional argument to SerializeIterator.
func SerializeIteratorExternalStatePolicy ¶
func SerializeIteratorExternalStatePolicy(value int64) SerializeIteratorAttr
SerializeIteratorExternalStatePolicy sets the optional external_state_policy attribute to value. If not specified, defaults to 0
type SerializeManySparseAttr ¶
type SerializeManySparseAttr func(optionalAttr)
SerializeManySparseAttr is an optional argument to SerializeManySparse.
func SerializeManySparseOutType ¶
func SerializeManySparseOutType(value tf.DataType) SerializeManySparseAttr
SerializeManySparseOutType sets the optional out_type attribute to value.
value: The `dtype` to use for serialization; the supported types are `string` (default) and `variant`. If not specified, defaults to DT_STRING
type SerializeSparseAttr ¶
type SerializeSparseAttr func(optionalAttr)
SerializeSparseAttr is an optional argument to SerializeSparse.
func SerializeSparseOutType ¶
func SerializeSparseOutType(value tf.DataType) SerializeSparseAttr
SerializeSparseOutType sets the optional out_type attribute to value.
value: The `dtype` to use for serialization; the supported types are `string` (default) and `variant`. If not specified, defaults to DT_STRING
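A sketch, assuming SerializeSparse takes the indices, values, and dense shape of a SparseTensor; the out_type is set explicitly to the string default just to show the mechanism (pass the variant dtype to get `variant` serialization instead):

    s := NewScope()
    indices := Const(s, [][]int64{{0, 0}, {1, 2}}) // coordinates of the non-zero entries
    values := Const(s, []float32{1.5, 2.5})
    shape := Const(s, []int64{3, 4})               // dense shape of the SparseTensor
    serialized := SerializeSparse(s, indices, values, shape,
        SerializeSparseOutType(tf.String))         // the default; DT_VARIANT is the alternative
    if s.Err() != nil {
        panic(s.Err())
    }
    _ = serialized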
type SetSizeAttr ¶
type SetSizeAttr func(optionalAttr)
SetSizeAttr is an optional argument to SetSize.
func SetSizeValidateIndices ¶
func SetSizeValidateIndices(value bool) SetSizeAttr
SetSizeValidateIndices sets the optional validate_indices attribute to value. If not specified, defaults to true
type ShapeAttr ¶
type ShapeAttr func(optionalAttr)
ShapeAttr is an optional argument to Shape.
func ShapeOutType ¶
func ShapeOutType(value tf.DataType) ShapeAttr
ShapeOutType sets the optional out_type attribute to value. If not specified, defaults to DT_INT32
type ShapeNAttr ¶
type ShapeNAttr func(optionalAttr)
ShapeNAttr is an optional argument to ShapeN.
func ShapeNOutType ¶
func ShapeNOutType(value tf.DataType) ShapeNAttr
ShapeNOutType sets the optional out_type attribute to value. If not specified, defaults to DT_INT32
type ShardDatasetAttr ¶
type ShardDatasetAttr func(optionalAttr)
ShardDatasetAttr is an optional argument to ShardDataset.
func ShardDatasetMetadata ¶
func ShardDatasetMetadata(value string) ShardDatasetAttr
ShardDatasetMetadata sets the optional metadata attribute to value. If not specified, defaults to ""
func ShardDatasetRequireNonEmpty ¶
func ShardDatasetRequireNonEmpty(value bool) ShardDatasetAttr
ShardDatasetRequireNonEmpty sets the optional require_non_empty attribute to value. If not specified, defaults to false
type ShuffleAndRepeatDatasetAttr ¶
type ShuffleAndRepeatDatasetAttr func(optionalAttr)
ShuffleAndRepeatDatasetAttr is an optional argument to ShuffleAndRepeatDataset.
func ShuffleAndRepeatDatasetMetadata ¶
func ShuffleAndRepeatDatasetMetadata(value string) ShuffleAndRepeatDatasetAttr
ShuffleAndRepeatDatasetMetadata sets the optional metadata attribute to value. If not specified, defaults to ""
func ShuffleAndRepeatDatasetReshuffleEachIteration ¶
func ShuffleAndRepeatDatasetReshuffleEachIteration(value bool) ShuffleAndRepeatDatasetAttr
ShuffleAndRepeatDatasetReshuffleEachIteration sets the optional reshuffle_each_iteration attribute to value. If not specified, defaults to true
type ShuffleDatasetAttr ¶
type ShuffleDatasetAttr func(optionalAttr)
ShuffleDatasetAttr is an optional argument to ShuffleDataset.
func ShuffleDatasetMetadata ¶
func ShuffleDatasetMetadata(value string) ShuffleDatasetAttr
ShuffleDatasetMetadata sets the optional metadata attribute to value. If not specified, defaults to ""
func ShuffleDatasetReshuffleEachIteration ¶
func ShuffleDatasetReshuffleEachIteration(value bool) ShuffleDatasetAttr
ShuffleDatasetReshuffleEachIteration sets the optional reshuffle_each_iteration attribute to value.
value: If true, each iterator over this dataset will be given a different pseudorandomly generated seed, based on a sequence seeded by the `seed` and `seed2` inputs. If false, each iterator will be given the same seed, and repeated iteration over this dataset will yield the exact same sequence of results. If not specified, defaults to true
type SizeAttr ¶
type SizeAttr func(optionalAttr)
SizeAttr is an optional argument to Size.
func SizeOutType ¶
func SizeOutType(value tf.DataType) SizeAttr
SizeOutType sets the optional out_type attribute to value. If not specified, defaults to DT_INT32
type SkipDatasetAttr ¶
type SkipDatasetAttr func(optionalAttr)
SkipDatasetAttr is an optional argument to SkipDataset.
func SkipDatasetMetadata ¶
func SkipDatasetMetadata(value string) SkipDatasetAttr
SkipDatasetMetadata sets the optional metadata attribute to value. If not specified, defaults to ""
type SkipgramAttr ¶
type SkipgramAttr func(optionalAttr)
SkipgramAttr is an optional argument to Skipgram.
func SkipgramMinCount ¶
func SkipgramMinCount(value int64) SkipgramAttr
SkipgramMinCount sets the optional min_count attribute to value.
value: The minimum number of word occurrences for it to be included in the vocabulary. If not specified, defaults to 5
func SkipgramSubsample ¶
func SkipgramSubsample(value float32) SkipgramAttr
SkipgramSubsample sets the optional subsample attribute to value.
value: Threshold for word occurrence. Words that appear with higher frequency will be randomly down-sampled. Set to 0 to disable. If not specified, defaults to 0.001
func SkipgramWindowSize ¶
func SkipgramWindowSize(value int64) SkipgramAttr
SkipgramWindowSize sets the optional window_size attribute to value.
value: The number of words to predict to the left and right of the target. If not specified, defaults to 5
type SlidingWindowDatasetAttr ¶
type SlidingWindowDatasetAttr func(optionalAttr)
SlidingWindowDatasetAttr is an optional argument to SlidingWindowDataset.
func SlidingWindowDatasetDropRemainder ¶
func SlidingWindowDatasetDropRemainder(value bool) SlidingWindowDatasetAttr
SlidingWindowDatasetDropRemainder sets the optional drop_remainder attribute to value. If not specified, defaults to true
type SnapshotDatasetAttr ¶
type SnapshotDatasetAttr func(optionalAttr)
SnapshotDatasetAttr is an optional argument to SnapshotDataset.
func SnapshotDatasetCompression ¶
func SnapshotDatasetCompression(value string) SnapshotDatasetAttr
SnapshotDatasetCompression sets the optional compression attribute to value. If not specified, defaults to ""
func SnapshotDatasetMode ¶
func SnapshotDatasetMode(value string) SnapshotDatasetAttr
SnapshotDatasetMode sets the optional mode attribute to value. If not specified, defaults to "auto"
func SnapshotDatasetNumReaderThreads ¶
func SnapshotDatasetNumReaderThreads(value int64) SnapshotDatasetAttr
SnapshotDatasetNumReaderThreads sets the optional num_reader_threads attribute to value. If not specified, defaults to 1
func SnapshotDatasetNumWriterThreads ¶
func SnapshotDatasetNumWriterThreads(value int64) SnapshotDatasetAttr
SnapshotDatasetNumWriterThreads sets the optional num_writer_threads attribute to value. If not specified, defaults to 1
func SnapshotDatasetPendingSnapshotExpirySeconds ¶
func SnapshotDatasetPendingSnapshotExpirySeconds(value int64) SnapshotDatasetAttr
SnapshotDatasetPendingSnapshotExpirySeconds sets the optional pending_snapshot_expiry_seconds attribute to value. If not specified, defaults to 86400
func SnapshotDatasetReaderBufferSize ¶
func SnapshotDatasetReaderBufferSize(value int64) SnapshotDatasetAttr
SnapshotDatasetReaderBufferSize sets the optional reader_buffer_size attribute to value. If not specified, defaults to 1
func SnapshotDatasetReaderPathPrefix ¶
func SnapshotDatasetReaderPathPrefix(value string) SnapshotDatasetAttr
SnapshotDatasetReaderPathPrefix sets the optional reader_path_prefix attribute to value. If not specified, defaults to ""
func SnapshotDatasetSeed ¶
func SnapshotDatasetSeed(value int64) SnapshotDatasetAttr
SnapshotDatasetSeed sets the optional seed attribute to value. If not specified, defaults to 0
func SnapshotDatasetSeed2 ¶
func SnapshotDatasetSeed2(value int64) SnapshotDatasetAttr
SnapshotDatasetSeed2 sets the optional seed2 attribute to value. If not specified, defaults to 0
func SnapshotDatasetShardSizeBytes ¶
func SnapshotDatasetShardSizeBytes(value int64) SnapshotDatasetAttr
SnapshotDatasetShardSizeBytes sets the optional shard_size_bytes attribute to value. If not specified, defaults to 10737418240
func SnapshotDatasetShuffleOnRead ¶
func SnapshotDatasetShuffleOnRead(value bool) SnapshotDatasetAttr
SnapshotDatasetShuffleOnRead sets the optional shuffle_on_read attribute to value. If not specified, defaults to false
func SnapshotDatasetSnapshotName ¶
func SnapshotDatasetSnapshotName(value string) SnapshotDatasetAttr
SnapshotDatasetSnapshotName sets the optional snapshot_name attribute to value. If not specified, defaults to ""
func SnapshotDatasetWriterBufferSize ¶
func SnapshotDatasetWriterBufferSize(value int64) SnapshotDatasetAttr
SnapshotDatasetWriterBufferSize sets the optional writer_buffer_size attribute to value. If not specified, defaults to 1
func SnapshotDatasetWriterPathPrefix ¶
func SnapshotDatasetWriterPathPrefix(value string) SnapshotDatasetAttr
SnapshotDatasetWriterPathPrefix sets the optional writer_path_prefix attribute to value. If not specified, defaults to ""
type SobolSampleAttr ¶
type SobolSampleAttr func(optionalAttr)
SobolSampleAttr is an optional argument to SobolSample.
func SobolSampleDtype ¶
func SobolSampleDtype(value tf.DataType) SobolSampleAttr
SobolSampleDtype sets the optional dtype attribute to value.
value: The type of the sample. One of: `float32` or `float64`. If not specified, defaults to DT_FLOAT
type SpaceToDepthAttr ¶
type SpaceToDepthAttr func(optionalAttr)
SpaceToDepthAttr is an optional argument to SpaceToDepth.
func SpaceToDepthDataFormat ¶
func SpaceToDepthDataFormat(value string) SpaceToDepthAttr
SpaceToDepthDataFormat sets the optional data_format attribute to value. If not specified, defaults to "NHWC"
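A hedged sketch of passing the data format explicitly, assuming SpaceToDepth takes (scope, input, block_size, optional attrs):

s := NewScope()
images := Placeholder(s, tf.Float) // an NHWC image batch, fed at run time
// Rearrange 2x2 spatial blocks into the depth dimension, stating the layout explicitly.
out := SpaceToDepth(s, images, 2, SpaceToDepthDataFormat("NHWC"))
if s.Err() != nil {
	panic(s.Err())
}
_ = out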
type SparseBincountAttr ¶
type SparseBincountAttr func(optionalAttr)
SparseBincountAttr is an optional argument to SparseBincount.
func SparseBincountBinaryOutput ¶
func SparseBincountBinaryOutput(value bool) SparseBincountAttr
SparseBincountBinaryOutput sets the optional binary_output attribute to value.
value: bool; if true, the output records only whether each value appears (binary output) rather than its number of occurrences. If not specified, defaults to false
type SparseCountSparseOutputAttr ¶
type SparseCountSparseOutputAttr func(optionalAttr)
SparseCountSparseOutputAttr is an optional argument to SparseCountSparseOutput.
func SparseCountSparseOutputMaxlength ¶
func SparseCountSparseOutputMaxlength(value int64) SparseCountSparseOutputAttr
SparseCountSparseOutputMaxlength sets the optional maxlength attribute to value.
value: Maximum value to count. Can be set to -1 for no maximum. If not specified, defaults to -1
REQUIRES: value >= -1
func SparseCountSparseOutputMinlength ¶
func SparseCountSparseOutputMinlength(value int64) SparseCountSparseOutputAttr
SparseCountSparseOutputMinlength sets the optional minlength attribute to value.
value: Minimum value to count. Can be set to -1 for no minimum. If not specified, defaults to -1
REQUIRES: value >= -1
type SparseMatMulAttr ¶
type SparseMatMulAttr func(optionalAttr)
SparseMatMulAttr is an optional argument to SparseMatMul.
func SparseMatMulAIsSparse ¶
func SparseMatMulAIsSparse(value bool) SparseMatMulAttr
SparseMatMulAIsSparse sets the optional a_is_sparse attribute to value. If not specified, defaults to false
func SparseMatMulBIsSparse ¶
func SparseMatMulBIsSparse(value bool) SparseMatMulAttr
SparseMatMulBIsSparse sets the optional b_is_sparse attribute to value. If not specified, defaults to false
func SparseMatMulTransposeA ¶
func SparseMatMulTransposeA(value bool) SparseMatMulAttr
SparseMatMulTransposeA sets the optional transpose_a attribute to value. If not specified, defaults to false
func SparseMatMulTransposeB ¶
func SparseMatMulTransposeB(value bool) SparseMatMulAttr
SparseMatMulTransposeB sets the optional transpose_b attribute to value. If not specified, defaults to false
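Putting these attributes together, a sketch assuming the SparseMatMul signature SparseMatMul(scope, a, b, optional ...SparseMatMulAttr):

s := NewScope()
a := Placeholder(s, tf.Float) // dense matrix
b := Placeholder(s, tf.Float) // dense but mostly-zero matrix
// Hint that b is sparse and multiply by its transpose.
prod := SparseMatMul(s, a, b, SparseMatMulBIsSparse(true), SparseMatMulTransposeB(true))
if s.Err() != nil {
	panic(s.Err())
}
_ = prod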
type SparseMatrixMatMulAttr ¶
type SparseMatrixMatMulAttr func(optionalAttr)
SparseMatrixMatMulAttr is an optional argument to SparseMatrixMatMul.
func SparseMatrixMatMulAdjointA ¶
func SparseMatrixMatMulAdjointA(value bool) SparseMatrixMatMulAttr
SparseMatrixMatMulAdjointA sets the optional adjoint_a attribute to value.
value: Indicates whether `a` should be conjugate-transposed. If not specified, defaults to false
func SparseMatrixMatMulAdjointB ¶
func SparseMatrixMatMulAdjointB(value bool) SparseMatrixMatMulAttr
SparseMatrixMatMulAdjointB sets the optional adjoint_b attribute to value.
value: Indicates whether `b` should be conjugate-transposed. If not specified, defaults to false
func SparseMatrixMatMulConjugateOutput ¶
func SparseMatrixMatMulConjugateOutput(value bool) SparseMatrixMatMulAttr
SparseMatrixMatMulConjugateOutput sets the optional conjugate_output attribute to value.
value: Conjugates the product of `a` and `b`. If not specified, defaults to false
func SparseMatrixMatMulTransposeA ¶
func SparseMatrixMatMulTransposeA(value bool) SparseMatrixMatMulAttr
SparseMatrixMatMulTransposeA sets the optional transpose_a attribute to value.
value: Indicates whether `a` should be transposed. If not specified, defaults to false
func SparseMatrixMatMulTransposeB ¶
func SparseMatrixMatMulTransposeB(value bool) SparseMatrixMatMulAttr
SparseMatrixMatMulTransposeB sets the optional transpose_b attribute to value.
value: Indicates whether `b` should be transposed. If not specified, defaults to false
func SparseMatrixMatMulTransposeOutput ¶
func SparseMatrixMatMulTransposeOutput(value bool) SparseMatrixMatMulAttr
SparseMatrixMatMulTransposeOutput sets the optional transpose_output attribute to value.
value: Transposes the product of `a` and `b`. If not specified, defaults to false
type SparseMatrixSparseMatMulAttr ¶
type SparseMatrixSparseMatMulAttr func(optionalAttr)
SparseMatrixSparseMatMulAttr is an optional argument to SparseMatrixSparseMatMul.
func SparseMatrixSparseMatMulAdjointA ¶
func SparseMatrixSparseMatMulAdjointA(value bool) SparseMatrixSparseMatMulAttr
SparseMatrixSparseMatMulAdjointA sets the optional adjoint_a attribute to value.
value: Indicates whether `a` should be conjugate-transposed. If not specified, defaults to false
func SparseMatrixSparseMatMulAdjointB ¶
func SparseMatrixSparseMatMulAdjointB(value bool) SparseMatrixSparseMatMulAttr
SparseMatrixSparseMatMulAdjointB sets the optional adjoint_b attribute to value.
value: Indicates whether `b` should be conjugate-transposed. If not specified, defaults to false
func SparseMatrixSparseMatMulTransposeA ¶
func SparseMatrixSparseMatMulTransposeA(value bool) SparseMatrixSparseMatMulAttr
SparseMatrixSparseMatMulTransposeA sets the optional transpose_a attribute to value.
value: Indicates whether `a` should be transposed. If not specified, defaults to false
func SparseMatrixSparseMatMulTransposeB ¶
func SparseMatrixSparseMatMulTransposeB(value bool) SparseMatrixSparseMatMulAttr
SparseMatrixSparseMatMulTransposeB sets the optional transpose_b attribute to value.
value: Indicates whether `b` should be transposed. If not specified, defaults to false
type SparseMatrixTransposeAttr ¶
type SparseMatrixTransposeAttr func(optionalAttr)
SparseMatrixTransposeAttr is an optional argument to SparseMatrixTranspose.
func SparseMatrixTransposeConjugate ¶
func SparseMatrixTransposeConjugate(value bool) SparseMatrixTransposeAttr
SparseMatrixTransposeConjugate sets the optional conjugate attribute to value.
value: Indicates whether `input` should be conjugated. If not specified, defaults to false
type SparseReduceMaxAttr ¶
type SparseReduceMaxAttr func(optionalAttr)
SparseReduceMaxAttr is an optional argument to SparseReduceMax.
func SparseReduceMaxKeepDims ¶
func SparseReduceMaxKeepDims(value bool) SparseReduceMaxAttr
SparseReduceMaxKeepDims sets the optional keep_dims attribute to value.
value: If true, retain reduced dimensions with length 1. If not specified, defaults to false
type SparseReduceMaxSparseAttr ¶
type SparseReduceMaxSparseAttr func(optionalAttr)
SparseReduceMaxSparseAttr is an optional argument to SparseReduceMaxSparse.
func SparseReduceMaxSparseKeepDims ¶
func SparseReduceMaxSparseKeepDims(value bool) SparseReduceMaxSparseAttr
SparseReduceMaxSparseKeepDims sets the optional keep_dims attribute to value.
value: If true, retain reduced dimensions with length 1. If not specified, defaults to false
type SparseReduceSumAttr ¶
type SparseReduceSumAttr func(optionalAttr)
SparseReduceSumAttr is an optional argument to SparseReduceSum.
func SparseReduceSumKeepDims ¶
func SparseReduceSumKeepDims(value bool) SparseReduceSumAttr
SparseReduceSumKeepDims sets the optional keep_dims attribute to value.
value: If true, retain reduced dimensions with length 1. If not specified, defaults to false
type SparseReduceSumSparseAttr ¶
type SparseReduceSumSparseAttr func(optionalAttr)
SparseReduceSumSparseAttr is an optional argument to SparseReduceSumSparse.
func SparseReduceSumSparseKeepDims ¶
func SparseReduceSumSparseKeepDims(value bool) SparseReduceSumSparseAttr
SparseReduceSumSparseKeepDims sets the optional keep_dims attribute to value.
value: If true, retain reduced dimensions with length 1. If not specified, defaults to false
type SparseSegmentMeanAttr ¶ added in v0.7.0
type SparseSegmentMeanAttr func(optionalAttr)
SparseSegmentMeanAttr is an optional argument to SparseSegmentMean.
func SparseSegmentMeanSparseGradient ¶ added in v0.7.0
func SparseSegmentMeanSparseGradient(value bool) SparseSegmentMeanAttr
SparseSegmentMeanSparseGradient sets the optional sparse_gradient attribute to value. If not specified, defaults to false
type SparseSegmentMeanWithNumSegmentsAttr ¶ added in v0.7.0
type SparseSegmentMeanWithNumSegmentsAttr func(optionalAttr)
SparseSegmentMeanWithNumSegmentsAttr is an optional argument to SparseSegmentMeanWithNumSegments.
func SparseSegmentMeanWithNumSegmentsSparseGradient ¶ added in v0.7.0
func SparseSegmentMeanWithNumSegmentsSparseGradient(value bool) SparseSegmentMeanWithNumSegmentsAttr
SparseSegmentMeanWithNumSegmentsSparseGradient sets the optional sparse_gradient attribute to value. If not specified, defaults to false
type SparseSegmentSqrtNAttr ¶ added in v0.7.0
type SparseSegmentSqrtNAttr func(optionalAttr)
SparseSegmentSqrtNAttr is an optional argument to SparseSegmentSqrtN.
func SparseSegmentSqrtNSparseGradient ¶ added in v0.7.0
func SparseSegmentSqrtNSparseGradient(value bool) SparseSegmentSqrtNAttr
SparseSegmentSqrtNSparseGradient sets the optional sparse_gradient attribute to value. If not specified, defaults to false
type SparseSegmentSqrtNWithNumSegmentsAttr ¶ added in v0.7.0
type SparseSegmentSqrtNWithNumSegmentsAttr func(optionalAttr)
SparseSegmentSqrtNWithNumSegmentsAttr is an optional argument to SparseSegmentSqrtNWithNumSegments.
func SparseSegmentSqrtNWithNumSegmentsSparseGradient ¶ added in v0.7.0
func SparseSegmentSqrtNWithNumSegmentsSparseGradient(value bool) SparseSegmentSqrtNWithNumSegmentsAttr
SparseSegmentSqrtNWithNumSegmentsSparseGradient sets the optional sparse_gradient attribute to value. If not specified, defaults to false
type SparseSegmentSumAttr ¶ added in v0.7.0
type SparseSegmentSumAttr func(optionalAttr)
SparseSegmentSumAttr is an optional argument to SparseSegmentSum.
func SparseSegmentSumSparseGradient ¶ added in v0.7.0
func SparseSegmentSumSparseGradient(value bool) SparseSegmentSumAttr
SparseSegmentSumSparseGradient sets the optional sparse_gradient attribute to value. If not specified, defaults to false
type SparseSegmentSumWithNumSegmentsAttr ¶ added in v0.7.0
type SparseSegmentSumWithNumSegmentsAttr func(optionalAttr)
SparseSegmentSumWithNumSegmentsAttr is an optional argument to SparseSegmentSumWithNumSegments.
func SparseSegmentSumWithNumSegmentsSparseGradient ¶ added in v0.7.0
func SparseSegmentSumWithNumSegmentsSparseGradient(value bool) SparseSegmentSumWithNumSegmentsAttr
SparseSegmentSumWithNumSegmentsSparseGradient sets the optional sparse_gradient attribute to value. If not specified, defaults to false
type SparseTensorDenseMatMulAttr ¶
type SparseTensorDenseMatMulAttr func(optionalAttr)
SparseTensorDenseMatMulAttr is an optional argument to SparseTensorDenseMatMul.
func SparseTensorDenseMatMulAdjointA ¶
func SparseTensorDenseMatMulAdjointA(value bool) SparseTensorDenseMatMulAttr
SparseTensorDenseMatMulAdjointA sets the optional adjoint_a attribute to value.
value: Use the adjoint of A in the matrix multiply. If A is complex, this is transpose(conj(A)). Otherwise it's transpose(A). If not specified, defaults to false
func SparseTensorDenseMatMulAdjointB ¶
func SparseTensorDenseMatMulAdjointB(value bool) SparseTensorDenseMatMulAttr
SparseTensorDenseMatMulAdjointB sets the optional adjoint_b attribute to value.
value: Use the adjoint of B in the matrix multiply. If B is complex, this is transpose(conj(B)). Otherwise it's transpose(B). If not specified, defaults to false
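As a sketch only, assuming SparseTensorDenseMatMul takes the COO components a_indices, a_values, a_shape followed by the dense matrix b:

s := NewScope()
aIndices := Const(s, [][]int64{{0, 0}, {1, 2}}) // SparseTensor A in coordinate form
aValues := Const(s, []float32{1, 2})
aShape := Const(s, []int64{2, 3})
b := Placeholder(s, tf.Float)
// Multiply A by the adjoint of B (its transpose for real-valued B).
prod := SparseTensorDenseMatMul(s, aIndices, aValues, aShape, b, SparseTensorDenseMatMulAdjointB(true))
if s.Err() != nil {
	panic(s.Err())
}
_ = prod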
type SparseToDenseAttr ¶
type SparseToDenseAttr func(optionalAttr)
SparseToDenseAttr is an optional argument to SparseToDense.
func SparseToDenseValidateIndices ¶
func SparseToDenseValidateIndices(value bool) SparseToDenseAttr
SparseToDenseValidateIndices sets the optional validate_indices attribute to value.
value: If true, indices are checked to make sure they are sorted in lexicographic order and that there are no repeats. If not specified, defaults to true
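A minimal sketch, assuming SparseToDense takes (sparse_indices, output_shape, sparse_values, default_value) followed by optional attrs:

s := NewScope()
indices := Const(s, [][]int64{{0, 1}, {2, 3}}) // positions to fill, already sorted and unique
outputShape := Const(s, []int64{3, 4})
values := Const(s, []float32{10, 20})
defaultValue := Const(s, float32(0))
// Validation can be skipped here because the indices above are known to be sorted and unique.
dense := SparseToDense(s, indices, outputShape, values, defaultValue, SparseToDenseValidateIndices(false))
if s.Err() != nil {
	panic(s.Err())
}
_ = dense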
type SparseToSparseSetOperationAttr ¶
type SparseToSparseSetOperationAttr func(optionalAttr)
SparseToSparseSetOperationAttr is an optional argument to SparseToSparseSetOperation.
func SparseToSparseSetOperationValidateIndices ¶
func SparseToSparseSetOperationValidateIndices(value bool) SparseToSparseSetOperationAttr
SparseToSparseSetOperationValidateIndices sets the optional validate_indices attribute to value. If not specified, defaults to true
type SplitDedupDataAttr ¶ added in v0.5.0
type SplitDedupDataAttr func(optionalAttr)
SplitDedupDataAttr is an optional argument to SplitDedupData.
func SplitDedupDataConfig ¶ added in v0.5.0
func SplitDedupDataConfig(value string) SplitDedupDataAttr
SplitDedupDataConfig sets the optional config attribute to value. If not specified, defaults to ""
type SqueezeAttr ¶
type SqueezeAttr func(optionalAttr)
SqueezeAttr is an optional argument to Squeeze.
func SqueezeAxis ¶
func SqueezeAxis(value []int64) SqueezeAttr
SqueezeAxis sets the optional axis attribute to value.
value: If specified, only squeezes the dimensions listed. The dimension index starts at 0. It is an error to squeeze a dimension that is not 1. Must be in the range `[-rank(input), rank(input))`. If not specified, defaults to {}
REQUIRES: len(value) >= 0
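For example, a sketch assuming the usual Squeeze(scope, input, optional ...SqueezeAttr) signature:

s := NewScope()
x := Const(s, [][]float32{{1, 2, 3}}) // shape [1, 3]
// Squeeze only dimension 0; any other size-1 dimensions would be kept.
y := Squeeze(s, x, SqueezeAxis([]int64{0})) // shape [3]
if s.Err() != nil {
	panic(s.Err())
}
_ = y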
type StackPushV2Attr ¶
type StackPushV2Attr func(optionalAttr)
StackPushV2Attr is an optional argument to StackPushV2.
func StackPushV2SwapMemory ¶
func StackPushV2SwapMemory(value bool) StackPushV2Attr
StackPushV2SwapMemory sets the optional swap_memory attribute to value.
value: Swap `elem` to CPU memory. If not specified, defaults to false
type StackV2Attr ¶
type StackV2Attr func(optionalAttr)
StackV2Attr is an optional argument to StackV2.
func StackV2StackName ¶
func StackV2StackName(value string) StackV2Attr
StackV2StackName sets the optional stack_name attribute to value.
value: Overrides the name used for the temporary stack resource. Default value is the name of the 'Stack' op (which is guaranteed unique). If not specified, defaults to ""
type StageAttr ¶
type StageAttr func(optionalAttr)
StageAttr is an optional argument to Stage.
func StageCapacity ¶
func StageCapacity(value int64) StageAttr
StageCapacity sets the optional capacity attribute to value.
value: Maximum number of elements in the Staging Area. If > 0, inserts on the container will block when the capacity is reached. If not specified, defaults to 0
REQUIRES: value >= 0
func StageContainer ¶
func StageContainer(value string) StageAttr
StageContainer sets the optional container attribute to value.
value: If non-empty, this queue is placed in the given container. Otherwise, a default container is used. If not specified, defaults to ""
func StageMemoryLimit ¶
func StageMemoryLimit(value int64) StageAttr
StageMemoryLimit sets the optional memory_limit attribute to value.
value: The maximum number of bytes allowed for Tensors in the Staging Area. If > 0, inserts will block until sufficient space is available. If not specified, defaults to 0
REQUIRES: value >= 0
func StageSharedName ¶
func StageSharedName(value string) StageAttr
StageSharedName sets the optional shared_name attribute to value.
value: It is necessary to match this name to the matching Unstage Op. If not specified, defaults to ""
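A hedged sketch of a bounded staging area, assuming Stage takes a []tf.Output of values plus optional attrs; "example_staging" is a made-up shared name that a matching Unstage op would have to reuse:

s := NewScope()
v := Const(s, []float32{1, 2, 3})
// Block inserts once 16 elements are buffered; the shared name is hypothetical.
stage := Stage(s, []tf.Output{v},
	StageCapacity(16),
	StageSharedName("example_staging"))
if s.Err() != nil {
	panic(s.Err())
}
_ = stage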
type StageClearAttr ¶
type StageClearAttr func(optionalAttr)
StageClearAttr is an optional argument to StageClear.
func StageClearCapacity ¶
func StageClearCapacity(value int64) StageClearAttr
StageClearCapacity sets the optional capacity attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func StageClearContainer ¶
func StageClearContainer(value string) StageClearAttr
StageClearContainer sets the optional container attribute to value. If not specified, defaults to ""
func StageClearMemoryLimit ¶
func StageClearMemoryLimit(value int64) StageClearAttr
StageClearMemoryLimit sets the optional memory_limit attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func StageClearSharedName ¶
func StageClearSharedName(value string) StageClearAttr
StageClearSharedName sets the optional shared_name attribute to value. If not specified, defaults to ""
type StagePeekAttr ¶
type StagePeekAttr func(optionalAttr)
StagePeekAttr is an optional argument to StagePeek.
func StagePeekCapacity ¶
func StagePeekCapacity(value int64) StagePeekAttr
StagePeekCapacity sets the optional capacity attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func StagePeekContainer ¶
func StagePeekContainer(value string) StagePeekAttr
StagePeekContainer sets the optional container attribute to value. If not specified, defaults to ""
func StagePeekMemoryLimit ¶
func StagePeekMemoryLimit(value int64) StagePeekAttr
StagePeekMemoryLimit sets the optional memory_limit attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func StagePeekSharedName ¶
func StagePeekSharedName(value string) StagePeekAttr
StagePeekSharedName sets the optional shared_name attribute to value. If not specified, defaults to ""
type StageSizeAttr ¶
type StageSizeAttr func(optionalAttr)
StageSizeAttr is an optional argument to StageSize.
func StageSizeCapacity ¶
func StageSizeCapacity(value int64) StageSizeAttr
StageSizeCapacity sets the optional capacity attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func StageSizeContainer ¶
func StageSizeContainer(value string) StageSizeAttr
StageSizeContainer sets the optional container attribute to value. If not specified, defaults to ""
func StageSizeMemoryLimit ¶
func StageSizeMemoryLimit(value int64) StageSizeAttr
StageSizeMemoryLimit sets the optional memory_limit attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func StageSizeSharedName ¶
func StageSizeSharedName(value string) StageSizeAttr
StageSizeSharedName sets the optional shared_name attribute to value. If not specified, defaults to ""
type StatefulStandardNormalAttr ¶
type StatefulStandardNormalAttr func(optionalAttr)
StatefulStandardNormalAttr is an optional argument to StatefulStandardNormal.
func StatefulStandardNormalDtype ¶
func StatefulStandardNormalDtype(value tf.DataType) StatefulStandardNormalAttr
StatefulStandardNormalDtype sets the optional dtype attribute to value.
value: The type of the output. If not specified, defaults to DT_FLOAT
type StatefulStandardNormalV2Attr ¶
type StatefulStandardNormalV2Attr func(optionalAttr)
StatefulStandardNormalV2Attr is an optional argument to StatefulStandardNormalV2.
func StatefulStandardNormalV2Dtype ¶
func StatefulStandardNormalV2Dtype(value tf.DataType) StatefulStandardNormalV2Attr
StatefulStandardNormalV2Dtype sets the optional dtype attribute to value.
value: The type of the output. If not specified, defaults to DT_FLOAT
type StatefulTruncatedNormalAttr ¶
type StatefulTruncatedNormalAttr func(optionalAttr)
StatefulTruncatedNormalAttr is an optional argument to StatefulTruncatedNormal.
func StatefulTruncatedNormalDtype ¶
func StatefulTruncatedNormalDtype(value tf.DataType) StatefulTruncatedNormalAttr
StatefulTruncatedNormalDtype sets the optional dtype attribute to value.
value: The type of the output. If not specified, defaults to DT_FLOAT
type StatefulUniformAttr ¶
type StatefulUniformAttr func(optionalAttr)
StatefulUniformAttr is an optional argument to StatefulUniform.
func StatefulUniformDtype ¶
func StatefulUniformDtype(value tf.DataType) StatefulUniformAttr
StatefulUniformDtype sets the optional dtype attribute to value.
value: The type of the output. If not specified, defaults to DT_FLOAT
type StatefulUniformFullIntAttr ¶
type StatefulUniformFullIntAttr func(optionalAttr)
StatefulUniformFullIntAttr is an optional argument to StatefulUniformFullInt.
func StatefulUniformFullIntDtype ¶
func StatefulUniformFullIntDtype(value tf.DataType) StatefulUniformFullIntAttr
StatefulUniformFullIntDtype sets the optional dtype attribute to value.
value: The type of the output. If not specified, defaults to DT_UINT64
type StatelessMultinomialAttr ¶
type StatelessMultinomialAttr func(optionalAttr)
StatelessMultinomialAttr is an optional argument to StatelessMultinomial.
func StatelessMultinomialOutputDtype ¶
func StatelessMultinomialOutputDtype(value tf.DataType) StatelessMultinomialAttr
StatelessMultinomialOutputDtype sets the optional output_dtype attribute to value. If not specified, defaults to DT_INT64
type StatelessRandomBinomialAttr ¶
type StatelessRandomBinomialAttr func(optionalAttr)
StatelessRandomBinomialAttr is an optional argument to StatelessRandomBinomial.
func StatelessRandomBinomialDtype ¶
func StatelessRandomBinomialDtype(value tf.DataType) StatelessRandomBinomialAttr
StatelessRandomBinomialDtype sets the optional dtype attribute to value.
value: The type of the output. If not specified, defaults to DT_INT64
type StatelessRandomNormalAttr ¶
type StatelessRandomNormalAttr func(optionalAttr)
StatelessRandomNormalAttr is an optional argument to StatelessRandomNormal.
func StatelessRandomNormalDtype ¶
func StatelessRandomNormalDtype(value tf.DataType) StatelessRandomNormalAttr
StatelessRandomNormalDtype sets the optional dtype attribute to value.
value: The type of the output. If not specified, defaults to DT_FLOAT
type StatelessRandomNormalV2Attr ¶
type StatelessRandomNormalV2Attr func(optionalAttr)
StatelessRandomNormalV2Attr is an optional argument to StatelessRandomNormalV2.
func StatelessRandomNormalV2Dtype ¶
func StatelessRandomNormalV2Dtype(value tf.DataType) StatelessRandomNormalV2Attr
StatelessRandomNormalV2Dtype sets the optional dtype attribute to value.
value: The type of the output. If not specified, defaults to DT_FLOAT
type StatelessRandomUniformAttr ¶
type StatelessRandomUniformAttr func(optionalAttr)
StatelessRandomUniformAttr is an optional argument to StatelessRandomUniform.
func StatelessRandomUniformDtype ¶
func StatelessRandomUniformDtype(value tf.DataType) StatelessRandomUniformAttr
StatelessRandomUniformDtype sets the optional dtype attribute to value.
value: The type of the output. If not specified, defaults to DT_FLOAT
type StatelessRandomUniformFullIntAttr ¶
type StatelessRandomUniformFullIntAttr func(optionalAttr)
StatelessRandomUniformFullIntAttr is an optional argument to StatelessRandomUniformFullInt.
func StatelessRandomUniformFullIntDtype ¶
func StatelessRandomUniformFullIntDtype(value tf.DataType) StatelessRandomUniformFullIntAttr
StatelessRandomUniformFullIntDtype sets the optional dtype attribute to value.
value: The type of the output. If not specified, defaults to DT_UINT64
type StatelessRandomUniformFullIntV2Attr ¶
type StatelessRandomUniformFullIntV2Attr func(optionalAttr)
StatelessRandomUniformFullIntV2Attr is an optional argument to StatelessRandomUniformFullIntV2.
func StatelessRandomUniformFullIntV2Dtype ¶
func StatelessRandomUniformFullIntV2Dtype(value tf.DataType) StatelessRandomUniformFullIntV2Attr
StatelessRandomUniformFullIntV2Dtype sets the optional dtype attribute to value.
value: The type of the output. If not specified, defaults to DT_UINT64
type StatelessRandomUniformV2Attr ¶
type StatelessRandomUniformV2Attr func(optionalAttr)
StatelessRandomUniformV2Attr is an optional argument to StatelessRandomUniformV2.
func StatelessRandomUniformV2Dtype ¶
func StatelessRandomUniformV2Dtype(value tf.DataType) StatelessRandomUniformV2Attr
StatelessRandomUniformV2Dtype sets the optional dtype attribute to value.
value: The type of the output. If not specified, defaults to DT_FLOAT
type StatelessSampleDistortedBoundingBoxAttr ¶
type StatelessSampleDistortedBoundingBoxAttr func(optionalAttr)
StatelessSampleDistortedBoundingBoxAttr is an optional argument to StatelessSampleDistortedBoundingBox.
func StatelessSampleDistortedBoundingBoxAreaRange ¶
func StatelessSampleDistortedBoundingBoxAreaRange(value []float32) StatelessSampleDistortedBoundingBoxAttr
StatelessSampleDistortedBoundingBoxAreaRange sets the optional area_range attribute to value.
value: The cropped area of the image must contain a fraction of the supplied image within this range. If not specified, defaults to {f:0.05 f:1}
func StatelessSampleDistortedBoundingBoxAspectRatioRange ¶
func StatelessSampleDistortedBoundingBoxAspectRatioRange(value []float32) StatelessSampleDistortedBoundingBoxAttr
StatelessSampleDistortedBoundingBoxAspectRatioRange sets the optional aspect_ratio_range attribute to value.
value: The cropped area of the image must have an aspect ratio = width / height within this range. If not specified, defaults to {f:0.75 f:1.33}
func StatelessSampleDistortedBoundingBoxMaxAttempts ¶
func StatelessSampleDistortedBoundingBoxMaxAttempts(value int64) StatelessSampleDistortedBoundingBoxAttr
StatelessSampleDistortedBoundingBoxMaxAttempts sets the optional max_attempts attribute to value.
value: Number of attempts at generating a cropped region of the image that satisfies the specified constraints. After `max_attempts` failures, return the entire image. If not specified, defaults to 100
func StatelessSampleDistortedBoundingBoxUseImageIfNoBoundingBoxes ¶
func StatelessSampleDistortedBoundingBoxUseImageIfNoBoundingBoxes(value bool) StatelessSampleDistortedBoundingBoxAttr
StatelessSampleDistortedBoundingBoxUseImageIfNoBoundingBoxes sets the optional use_image_if_no_bounding_boxes attribute to value.
value: Controls behavior if no bounding boxes supplied. If true, assume an implicit bounding box covering the whole input. If false, raise an error. If not specified, defaults to false
type StatelessTruncatedNormalAttr ¶
type StatelessTruncatedNormalAttr func(optionalAttr)
StatelessTruncatedNormalAttr is an optional argument to StatelessTruncatedNormal.
func StatelessTruncatedNormalDtype ¶
func StatelessTruncatedNormalDtype(value tf.DataType) StatelessTruncatedNormalAttr
StatelessTruncatedNormalDtype sets the optional dtype attribute to value.
value: The type of the output. If not specified, defaults to DT_FLOAT
type StatelessTruncatedNormalV2Attr ¶
type StatelessTruncatedNormalV2Attr func(optionalAttr)
StatelessTruncatedNormalV2Attr is an optional argument to StatelessTruncatedNormalV2.
func StatelessTruncatedNormalV2Dtype ¶
func StatelessTruncatedNormalV2Dtype(value tf.DataType) StatelessTruncatedNormalV2Attr
StatelessTruncatedNormalV2Dtype sets the optional dtype attribute to value.
value: The type of the output. If not specified, defaults to DT_FLOAT
type StaticRegexReplaceAttr ¶
type StaticRegexReplaceAttr func(optionalAttr)
StaticRegexReplaceAttr is an optional argument to StaticRegexReplace.
func StaticRegexReplaceReplaceGlobal ¶
func StaticRegexReplaceReplaceGlobal(value bool) StaticRegexReplaceAttr
StaticRegexReplaceReplaceGlobal sets the optional replace_global attribute to value.
value: If True, the replacement is global, otherwise the replacement is done only on the first match. If not specified, defaults to true
type StatsAggregatorHandleAttr ¶
type StatsAggregatorHandleAttr func(optionalAttr)
StatsAggregatorHandleAttr is an optional argument to StatsAggregatorHandle.
func StatsAggregatorHandleContainer ¶
func StatsAggregatorHandleContainer(value string) StatsAggregatorHandleAttr
StatsAggregatorHandleContainer sets the optional container attribute to value. If not specified, defaults to ""
func StatsAggregatorHandleSharedName ¶
func StatsAggregatorHandleSharedName(value string) StatsAggregatorHandleAttr
StatsAggregatorHandleSharedName sets the optional shared_name attribute to value. If not specified, defaults to ""
type StridedSliceAttr ¶
type StridedSliceAttr func(optionalAttr)
StridedSliceAttr is an optional argument to StridedSlice.
func StridedSliceBeginMask ¶
func StridedSliceBeginMask(value int64) StridedSliceAttr
StridedSliceBeginMask sets the optional begin_mask attribute to value.
value: a bitmask where a bit i being 1 means to ignore the begin value and instead use the largest interval possible. At runtime begin[i] will be replaced with `[0, n-1)` if `stride[i] > 0` or `[-1, n-1]` if `stride[i] < 0` If not specified, defaults to 0
func StridedSliceEllipsisMask ¶
func StridedSliceEllipsisMask(value int64) StridedSliceAttr
StridedSliceEllipsisMask sets the optional ellipsis_mask attribute to value.
value: a bitmask where bit `i` being 1 means the `i`th position is actually an ellipsis. One bit at most can be 1. If `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)` is provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis implicitly creates as many range specifications as necessary to fully specify the sliced range for every dimension. For example for a 4-dimensional tensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`. If not specified, defaults to 0
func StridedSliceEndMask ¶
func StridedSliceEndMask(value int64) StridedSliceAttr
StridedSliceEndMask sets the optional end_mask attribute to value.
value: analogous to `begin_mask` If not specified, defaults to 0
func StridedSliceNewAxisMask ¶
func StridedSliceNewAxisMask(value int64) StridedSliceAttr
StridedSliceNewAxisMask sets the optional new_axis_mask attribute to value.
value: a bitmask where bit `i` being 1 means the `i`th specification creates a new shape 1 dimension. For example `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor. If not specified, defaults to 0
func StridedSliceShrinkAxisMask ¶
func StridedSliceShrinkAxisMask(value int64) StridedSliceAttr
StridedSliceShrinkAxisMask sets the optional shrink_axis_mask attribute to value.
value: a bitmask where bit `i` implies that the `i`th specification should shrink the dimensionality. begin and end must imply a slice of size 1 in the dimension. For example in python one might do `foo[:, 3, :]` which would result in `shrink_axis_mask` being 2. If not specified, defaults to 0
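Combining the masks, a sketch assuming StridedSlice takes (input, begin, end, strides) tensors plus optional attrs:

s := NewScope()
x := Const(s, [][]float32{{1, 2, 3}, {4, 5, 6}})
begin := Const(s, []int32{0, 1})
end := Const(s, []int32{0, 3}) // end[0] is ignored because of the masks below
strides := Const(s, []int32{1, 1})
// Bit 0 set in both masks: take the full range of dimension 0 and columns 1:3 of dimension 1.
out := StridedSlice(s, x, begin, end, strides,
	StridedSliceBeginMask(1),
	StridedSliceEndMask(1))
if s.Err() != nil {
	panic(s.Err())
}
_ = out // [[2, 3], [5, 6]]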
type StridedSliceGradAttr ¶
type StridedSliceGradAttr func(optionalAttr)
StridedSliceGradAttr is an optional argument to StridedSliceGrad.
func StridedSliceGradBeginMask ¶
func StridedSliceGradBeginMask(value int64) StridedSliceGradAttr
StridedSliceGradBeginMask sets the optional begin_mask attribute to value. If not specified, defaults to 0
func StridedSliceGradEllipsisMask ¶
func StridedSliceGradEllipsisMask(value int64) StridedSliceGradAttr
StridedSliceGradEllipsisMask sets the optional ellipsis_mask attribute to value. If not specified, defaults to 0
func StridedSliceGradEndMask ¶
func StridedSliceGradEndMask(value int64) StridedSliceGradAttr
StridedSliceGradEndMask sets the optional end_mask attribute to value. If not specified, defaults to 0
func StridedSliceGradNewAxisMask ¶
func StridedSliceGradNewAxisMask(value int64) StridedSliceGradAttr
StridedSliceGradNewAxisMask sets the optional new_axis_mask attribute to value. If not specified, defaults to 0
func StridedSliceGradShrinkAxisMask ¶
func StridedSliceGradShrinkAxisMask(value int64) StridedSliceGradAttr
StridedSliceGradShrinkAxisMask sets the optional shrink_axis_mask attribute to value. If not specified, defaults to 0
type StringFormatAttr ¶
type StringFormatAttr func(optionalAttr)
StringFormatAttr is an optional argument to StringFormat.
func StringFormatPlaceholder ¶
func StringFormatPlaceholder(value string) StringFormatAttr
StringFormatPlaceholder sets the optional placeholder attribute to value.
value: A string; at each occurrence of this placeholder in the template, the summary of the next tensor input is inserted. If not specified, defaults to "%s"
func StringFormatSummarize ¶
func StringFormatSummarize(value int64) StringFormatAttr
StringFormatSummarize sets the optional summarize attribute to value.
value: When formatting the tensor summaries, print the first and last `summarize` entries of each tensor dimension. If not specified, defaults to 3
func StringFormatTemplate ¶
func StringFormatTemplate(value string) StringFormatAttr
StringFormatTemplate sets the optional template attribute to value.
value: A string, the template to format tensor summaries into. If not specified, defaults to "%s"
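Putting template, placeholder, and summarize together, a sketch assuming StringFormat takes a slice of inputs plus optional attrs:

s := NewScope()
x := Const(s, []int32{1, 2, 3, 4, 5, 6, 7})
// Only the first and last 2 entries of each dimension of x appear in the summary.
msg := StringFormat(s, []tf.Output{x},
	StringFormatTemplate("x is: %s"),
	StringFormatSummarize(2))
if s.Err() != nil {
	panic(s.Err())
}
_ = msg // scalar string such as "x is: [1 2 ... 6 7]"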
type StringJoinAttr ¶
type StringJoinAttr func(optionalAttr)
StringJoinAttr is an optional argument to StringJoin.
func StringJoinSeparator ¶
func StringJoinSeparator(value string) StringJoinAttr
StringJoinSeparator sets the optional separator attribute to value.
value: string, an optional join separator. If not specified, defaults to ""
type StringLengthAttr ¶
type StringLengthAttr func(optionalAttr)
StringLengthAttr is an optional argument to StringLength.
func StringLengthUnit ¶
func StringLengthUnit(value string) StringLengthAttr
StringLengthUnit sets the optional unit attribute to value.
value: The unit that is counted to compute string length. One of: `"BYTE"` (for the number of bytes in each string) or `"UTF8_CHAR"` (for the number of UTF-8 encoded Unicode code points in each string). Results are undefined if `unit=UTF8_CHAR` and the `input` strings do not contain structurally valid UTF-8. If not specified, defaults to "BYTE"
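For example, a sketch assuming StringLength(scope, input, optional ...StringLengthAttr):

s := NewScope()
strs := Const(s, []string{"héllo", "world"})
// Count Unicode code points rather than raw bytes, so "héllo" has length 5, not 6.
lengths := StringLength(s, strs, StringLengthUnit("UTF8_CHAR"))
if s.Err() != nil {
	panic(s.Err())
}
_ = lengths // [5, 5]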
type StringLowerAttr ¶
type StringLowerAttr func(optionalAttr)
StringLowerAttr is an optional argument to StringLower.
func StringLowerEncoding ¶
func StringLowerEncoding(value string) StringLowerAttr
StringLowerEncoding sets the optional encoding attribute to value.
value: Character encoding of `input`. Allowed values are '' and 'utf-8'. Value '' is interpreted as ASCII. If not specified, defaults to ""
type StringSplitAttr ¶
type StringSplitAttr func(optionalAttr)
StringSplitAttr is an optional argument to StringSplit.
func StringSplitSkipEmpty ¶
func StringSplitSkipEmpty(value bool) StringSplitAttr
StringSplitSkipEmpty sets the optional skip_empty attribute to value.
value: A `bool`. If `True`, skip the empty strings from the result. If not specified, defaults to true
type StringSplitV2Attr ¶
type StringSplitV2Attr func(optionalAttr)
StringSplitV2Attr is an optional argument to StringSplitV2.
func StringSplitV2Maxsplit ¶
func StringSplitV2Maxsplit(value int64) StringSplitV2Attr
StringSplitV2Maxsplit sets the optional maxsplit attribute to value.
value: An `int`. If `maxsplit > 0`, limits the number of splits in the result. If not specified, defaults to -1
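A minimal sketch, assuming StringSplitV2 takes (input, sep) and returns the (indices, values, shape) of the sparse result:

s := NewScope()
input := Const(s, []string{"a,b,c,d"})
sep := Const(s, ",")
// Split on "," at most once, yielding the pieces "a" and "b,c,d".
indices, values, shape := StringSplitV2(s, input, sep, StringSplitV2Maxsplit(1))
if s.Err() != nil {
	panic(s.Err())
}
_, _, _ = indices, values, shape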
type StringToNumberAttr ¶
type StringToNumberAttr func(optionalAttr)
StringToNumberAttr is an optional argument to StringToNumber.
func StringToNumberOutType ¶
func StringToNumberOutType(value tf.DataType) StringToNumberAttr
StringToNumberOutType sets the optional out_type attribute to value.
value: The numeric type to interpret each string in `string_tensor` as. If not specified, defaults to DT_FLOAT
type StringUpperAttr ¶
type StringUpperAttr func(optionalAttr)
StringUpperAttr is an optional argument to StringUpper.
func StringUpperEncoding ¶
func StringUpperEncoding(value string) StringUpperAttr
StringUpperEncoding sets the optional encoding attribute to value.
value: Character encoding of `input`. Allowed values are '' and 'utf-8'. Value '' is interpreted as ASCII. If not specified, defaults to ""
type SubstrAttr ¶
type SubstrAttr func(optionalAttr)
SubstrAttr is an optional argument to Substr.
func SubstrUnit ¶
func SubstrUnit(value string) SubstrAttr
SubstrUnit sets the optional unit attribute to value.
value: The unit that is used to create the substring. One of: `"BYTE"` (for defining position and length by bytes) or `"UTF8_CHAR"` (for the UTF-8 encoded Unicode code points). The default is `"BYTE"`. Results are undefined if `unit=UTF8_CHAR` and the `input` strings do not contain structurally valid UTF-8. If not specified, defaults to "BYTE"
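For example, a sketch assuming Substr(scope, input, pos, len, optional ...SubstrAttr):

s := NewScope()
input := Const(s, []string{"naïveté"})
pos := Const(s, []int32{0})
length := Const(s, []int32{5})
// Positions and lengths are measured in Unicode code points, not bytes.
out := Substr(s, input, pos, length, SubstrUnit("UTF8_CHAR"))
if s.Err() != nil {
	panic(s.Err())
}
_ = out // ["naïve"]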
type SumAttr ¶
type SumAttr func(optionalAttr)
SumAttr is an optional argument to Sum.
func SumKeepDims ¶
func SumKeepDims(value bool) SumAttr
SumKeepDims sets the optional keep_dims attribute to value.
value: If true, retain reduced dimensions with length 1. If not specified, defaults to false
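For example, a sketch assuming Sum(scope, input, axis, optional ...SumAttr):

s := NewScope()
x := Const(s, [][]float32{{1, 2}, {3, 4}})
axis := Const(s, int32(0))
// Keep the reduced dimension so the result has shape [1, 2] instead of [2].
total := Sum(s, x, axis, SumKeepDims(true))
if s.Err() != nil {
	panic(s.Err())
}
_ = total // [[4, 6]]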
type SvdAttr ¶
type SvdAttr func(optionalAttr)
SvdAttr is an optional argument to Svd.
func SvdComputeUv ¶
func SvdComputeUv(value bool) SvdAttr
SvdComputeUv sets the optional compute_uv attribute to value.
value: If true, left and right singular vectors will be computed and returned in `u` and `v`, respectively. If false, `u` and `v` are not set and should never be referenced. If not specified, defaults to true
func SvdFullMatrices ¶
func SvdFullMatrices(value bool) SvdAttr
SvdFullMatrices sets the optional full_matrices attribute to value.
value: If true, compute full-sized `u` and `v`. If false (the default), compute only the leading `P` singular vectors. Ignored if `compute_uv` is `False`. If not specified, defaults to false
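Both attributes together, a sketch assuming Svd returns (s, u, v) for a single input matrix:

sc := NewScope()
m := Const(sc, [][]float32{{1, 0}, {0, 2}, {0, 0}}) // 3x2 matrix
// Return u and v as well as the singular values; keep only the leading singular vectors.
sv, u, v := Svd(sc, m, SvdComputeUv(true), SvdFullMatrices(false))
if sc.Err() != nil {
	panic(sc.Err())
}
_, _, _ = sv, u, v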
type TFRecordDatasetAttr ¶
type TFRecordDatasetAttr func(optionalAttr)
TFRecordDatasetAttr is an optional argument to TFRecordDataset.
func TFRecordDatasetMetadata ¶
func TFRecordDatasetMetadata(value string) TFRecordDatasetAttr
TFRecordDatasetMetadata sets the optional metadata attribute to value. If not specified, defaults to ""
type TFRecordDatasetV2Attr ¶ added in v0.6.0
type TFRecordDatasetV2Attr func(optionalAttr)
TFRecordDatasetV2Attr is an optional argument to TFRecordDatasetV2.
func TFRecordDatasetV2Metadata ¶ added in v0.6.0
func TFRecordDatasetV2Metadata(value string) TFRecordDatasetV2Attr
TFRecordDatasetV2Metadata sets the optional metadata attribute to value. If not specified, defaults to ""
type TFRecordReaderV2Attr ¶
type TFRecordReaderV2Attr func(optionalAttr)
TFRecordReaderV2Attr is an optional argument to TFRecordReaderV2.
func TFRecordReaderV2CompressionType ¶
func TFRecordReaderV2CompressionType(value string) TFRecordReaderV2Attr
TFRecordReaderV2CompressionType sets the optional compression_type attribute to value. If not specified, defaults to ""
func TFRecordReaderV2Container ¶
func TFRecordReaderV2Container(value string) TFRecordReaderV2Attr
TFRecordReaderV2Container sets the optional container attribute to value.
value: If non-empty, this reader is placed in the given container. Otherwise, a default container is used. If not specified, defaults to ""
func TFRecordReaderV2SharedName ¶
func TFRecordReaderV2SharedName(value string) TFRecordReaderV2Attr
TFRecordReaderV2SharedName sets the optional shared_name attribute to value.
value: If non-empty, this reader is named in the given bucket with this shared_name. Otherwise, the node name is used instead. If not specified, defaults to ""
type TPUPartitionedInputAttr ¶
type TPUPartitionedInputAttr func(optionalAttr)
TPUPartitionedInputAttr is an optional argument to TPUPartitionedInput.
func TPUPartitionedInputPartitionDim ¶
func TPUPartitionedInputPartitionDim(value int64) TPUPartitionedInputAttr
TPUPartitionedInputPartitionDim sets the optional partition_dim attribute to value.
value: An integer describing which dimension is partitioned. -1 means those inputs are replicated. If not specified, defaults to 0
type TPUPartitionedInputV2Attr ¶ added in v0.4.0
type TPUPartitionedInputV2Attr func(optionalAttr)
TPUPartitionedInputV2Attr is an optional argument to TPUPartitionedInputV2.
func TPUPartitionedInputV2IsPacked ¶ added in v0.4.0
func TPUPartitionedInputV2IsPacked(value bool) TPUPartitionedInputV2Attr
TPUPartitionedInputV2IsPacked sets the optional is_packed attribute to value.
value: Indicates whether the input is a packed resource. If not specified, defaults to false
type TPUPartitionedOutputAttr ¶
type TPUPartitionedOutputAttr func(optionalAttr)
TPUPartitionedOutputAttr is an optional argument to TPUPartitionedOutput.
func TPUPartitionedOutputPartitionDim ¶
func TPUPartitionedOutputPartitionDim(value int64) TPUPartitionedOutputAttr
TPUPartitionedOutputPartitionDim sets the optional partition_dim attribute to value.
value: An integer describing which dimension is partitioned. If not specified, defaults to 0
type TPUReplicateMetadataAttr ¶
type TPUReplicateMetadataAttr func(optionalAttr)
TPUReplicateMetadataAttr is an optional argument to TPUReplicateMetadata.
func TPUReplicateMetadataAllowSoftPlacement ¶
func TPUReplicateMetadataAllowSoftPlacement(value bool) TPUReplicateMetadataAttr
TPUReplicateMetadataAllowSoftPlacement sets the optional allow_soft_placement attribute to value. If not specified, defaults to false
func TPUReplicateMetadataComputationShape ¶
func TPUReplicateMetadataComputationShape(value []int64) TPUReplicateMetadataAttr
TPUReplicateMetadataComputationShape sets the optional computation_shape attribute to value.
value: DEPRECATED. Use num_cores_per_replica instead. If not specified, defaults to {}
func TPUReplicateMetadataDeviceAssignment ¶
func TPUReplicateMetadataDeviceAssignment(value []int64) TPUReplicateMetadataAttr
TPUReplicateMetadataDeviceAssignment sets the optional device_assignment attribute to value.
value: The assignment of devices for the computation. If not specified, defaults to {}
func TPUReplicateMetadataHostComputeCore ¶
func TPUReplicateMetadataHostComputeCore(value []string) TPUReplicateMetadataAttr
TPUReplicateMetadataHostComputeCore sets the optional host_compute_core attribute to value. If not specified, defaults to {}
func TPUReplicateMetadataNumCoresPerReplica ¶
func TPUReplicateMetadataNumCoresPerReplica(value int64) TPUReplicateMetadataAttr
TPUReplicateMetadataNumCoresPerReplica sets the optional num_cores_per_replica attribute to value.
value: Number of cores per replica. Used for model parallelism. If not specified, defaults to 1
func TPUReplicateMetadataPaddingMap ¶
func TPUReplicateMetadataPaddingMap(value []string) TPUReplicateMetadataAttr
TPUReplicateMetadataPaddingMap sets the optional padding_map attribute to value. If not specified, defaults to {}
func TPUReplicateMetadataStepMarkerLocation ¶
func TPUReplicateMetadataStepMarkerLocation(value string) TPUReplicateMetadataAttr
TPUReplicateMetadataStepMarkerLocation sets the optional step_marker_location attribute to value. If not specified, defaults to "STEP_MARK_AT_ENTRY"
func TPUReplicateMetadataTopology ¶
func TPUReplicateMetadataTopology(value string) TPUReplicateMetadataAttr
TPUReplicateMetadataTopology sets the optional topology attribute to value.
value: TopologyProto indicating the topology of the TPU pod slice. If not specified, defaults to ""
func TPUReplicateMetadataTpuCompileOptionsProto ¶ added in v0.2.0
func TPUReplicateMetadataTpuCompileOptionsProto(value string) TPUReplicateMetadataAttr
TPUReplicateMetadataTpuCompileOptionsProto sets the optional tpu_compile_options_proto attribute to value. If not specified, defaults to ""
func TPUReplicateMetadataUseSpmdForXlaPartitioning ¶
func TPUReplicateMetadataUseSpmdForXlaPartitioning(value bool) TPUReplicateMetadataAttr
TPUReplicateMetadataUseSpmdForXlaPartitioning sets the optional use_spmd_for_xla_partitioning attribute to value. If not specified, defaults to false
func TPUReplicateMetadataUseTpu ¶
func TPUReplicateMetadataUseTpu(value bool) TPUReplicateMetadataAttr
TPUReplicateMetadataUseTpu sets the optional use_tpu attribute to value.
value: Whether to place the computation on the TPU. If not specified, defaults to true
type TPUReplicatedInputAttr ¶
type TPUReplicatedInputAttr func(optionalAttr)
TPUReplicatedInputAttr is an optional argument to TPUReplicatedInput.
func TPUReplicatedInputIndex ¶
func TPUReplicatedInputIndex(value int64) TPUReplicatedInputAttr
TPUReplicatedInputIndex sets the optional index attribute to value. If not specified, defaults to -1
func TPUReplicatedInputIsMirroredVariable ¶
func TPUReplicatedInputIsMirroredVariable(value bool) TPUReplicatedInputAttr
TPUReplicatedInputIsMirroredVariable sets the optional is_mirrored_variable attribute to value. If not specified, defaults to false
func TPUReplicatedInputIsPacked ¶
func TPUReplicatedInputIsPacked(value bool) TPUReplicatedInputAttr
TPUReplicatedInputIsPacked sets the optional is_packed attribute to value. If not specified, defaults to false
type TakeDatasetAttr ¶
type TakeDatasetAttr func(optionalAttr)
TakeDatasetAttr is an optional argument to TakeDataset.
func TakeDatasetMetadata ¶
func TakeDatasetMetadata(value string) TakeDatasetAttr
TakeDatasetMetadata sets the optional metadata attribute to value. If not specified, defaults to ""
type TakeManySparseFromTensorsMapAttr ¶
type TakeManySparseFromTensorsMapAttr func(optionalAttr)
TakeManySparseFromTensorsMapAttr is an optional argument to TakeManySparseFromTensorsMap.
func TakeManySparseFromTensorsMapContainer ¶
func TakeManySparseFromTensorsMapContainer(value string) TakeManySparseFromTensorsMapAttr
TakeManySparseFromTensorsMapContainer sets the optional container attribute to value.
value: The container name for the `SparseTensorsMap` read by this op. If not specified, defaults to ""
func TakeManySparseFromTensorsMapSharedName ¶
func TakeManySparseFromTensorsMapSharedName(value string) TakeManySparseFromTensorsMapAttr
TakeManySparseFromTensorsMapSharedName sets the optional shared_name attribute to value.
value: The shared name for the `SparseTensorsMap` read by this op. It should not be blank; rather the `shared_name` or unique Operation name of the Op that created the original `SparseTensorsMap` should be used. If not specified, defaults to ""
type TensorArrayConcatV2Attr ¶
type TensorArrayConcatV2Attr func(optionalAttr)
TensorArrayConcatV2Attr is an optional argument to TensorArrayConcatV2.
func TensorArrayConcatV2ElementShapeExcept0 ¶
func TensorArrayConcatV2ElementShapeExcept0(value tf.Shape) TensorArrayConcatV2Attr
TensorArrayConcatV2ElementShapeExcept0 sets the optional element_shape_except0 attribute to value. If not specified, defaults to {unknown_rank:true}
type TensorArrayConcatV3Attr ¶
type TensorArrayConcatV3Attr func(optionalAttr)
TensorArrayConcatV3Attr is an optional argument to TensorArrayConcatV3.
func TensorArrayConcatV3ElementShapeExcept0 ¶
func TensorArrayConcatV3ElementShapeExcept0(value tf.Shape) TensorArrayConcatV3Attr
TensorArrayConcatV3ElementShapeExcept0 sets the optional element_shape_except0 attribute to value.
value: The expected shape of an element, if known, excluding the first dimension. Used to validate the shapes of TensorArray elements. If this shape is not fully specified, concatenating zero-size TensorArrays is an error. If not specified, defaults to {unknown_rank:true}
type TensorArrayGatherV2Attr ¶
type TensorArrayGatherV2Attr func(optionalAttr)
TensorArrayGatherV2Attr is an optional argument to TensorArrayGatherV2.
func TensorArrayGatherV2ElementShape ¶
func TensorArrayGatherV2ElementShape(value tf.Shape) TensorArrayGatherV2Attr
TensorArrayGatherV2ElementShape sets the optional element_shape attribute to value. If not specified, defaults to {unknown_rank:true}
type TensorArrayGatherV3Attr ¶
type TensorArrayGatherV3Attr func(optionalAttr)
TensorArrayGatherV3Attr is an optional argument to TensorArrayGatherV3.
func TensorArrayGatherV3ElementShape ¶
func TensorArrayGatherV3ElementShape(value tf.Shape) TensorArrayGatherV3Attr
TensorArrayGatherV3ElementShape sets the optional element_shape attribute to value.
value: The expected shape of an element, if known. Used to validate the shapes of TensorArray elements. If this shape is not fully specified, gathering zero-size TensorArrays is an error. If not specified, defaults to {unknown_rank:true}
type TensorArrayV2Attr ¶
type TensorArrayV2Attr func(optionalAttr)
TensorArrayV2Attr is an optional argument to TensorArrayV2.
func TensorArrayV2ClearAfterRead ¶
func TensorArrayV2ClearAfterRead(value bool) TensorArrayV2Attr
TensorArrayV2ClearAfterRead sets the optional clear_after_read attribute to value. If not specified, defaults to true
func TensorArrayV2DynamicSize ¶
func TensorArrayV2DynamicSize(value bool) TensorArrayV2Attr
TensorArrayV2DynamicSize sets the optional dynamic_size attribute to value. If not specified, defaults to false
func TensorArrayV2ElementShape ¶
func TensorArrayV2ElementShape(value tf.Shape) TensorArrayV2Attr
TensorArrayV2ElementShape sets the optional element_shape attribute to value. If not specified, defaults to {unknown_rank:true}
func TensorArrayV2TensorArrayName ¶
func TensorArrayV2TensorArrayName(value string) TensorArrayV2Attr
TensorArrayV2TensorArrayName sets the optional tensor_array_name attribute to value. If not specified, defaults to ""
type TensorArrayV3Attr ¶
type TensorArrayV3Attr func(optionalAttr)
TensorArrayV3Attr is an optional argument to TensorArrayV3.
func TensorArrayV3ClearAfterRead ¶
func TensorArrayV3ClearAfterRead(value bool) TensorArrayV3Attr
TensorArrayV3ClearAfterRead sets the optional clear_after_read attribute to value.
value: If true (default), Tensors in the TensorArray are cleared after being read. This disables multiple read semantics but allows early release of memory. If not specified, defaults to true
func TensorArrayV3DynamicSize ¶
func TensorArrayV3DynamicSize(value bool) TensorArrayV3Attr
TensorArrayV3DynamicSize sets the optional dynamic_size attribute to value.
value: A boolean that determines whether writes to the TensorArray are allowed to grow the size. By default, this is not allowed. If not specified, defaults to false
func TensorArrayV3ElementShape ¶
func TensorArrayV3ElementShape(value tf.Shape) TensorArrayV3Attr
TensorArrayV3ElementShape sets the optional element_shape attribute to value.
value: The expected shape of an element, if known. Used to validate the shapes of TensorArray elements. If this shape is not fully specified, gathering zero-size TensorArrays is an error. If not specified, defaults to {unknown_rank:true}
func TensorArrayV3IdenticalElementShapes ¶
func TensorArrayV3IdenticalElementShapes(value bool) TensorArrayV3Attr
TensorArrayV3IdenticalElementShapes sets the optional identical_element_shapes attribute to value.
value: If true (default is false), then all elements in the TensorArray will be expected to have identical shapes. This allows certain behaviors, like dynamically checking for consistent shapes on write, and being able to fill in properly shaped zero tensors on stack -- even if the element_shape attribute is not fully defined. If not specified, defaults to false
func TensorArrayV3TensorArrayName ¶
func TensorArrayV3TensorArrayName(value string) TensorArrayV3Attr
TensorArrayV3TensorArrayName sets the optional tensor_array_name attribute to value.
value: Overrides the name used for the temporary tensor_array resource. Default value is the name of the 'TensorArray' op (which is guaranteed unique). If not specified, defaults to ""
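Tying these together, a sketch assuming TensorArrayV3 takes a size tensor and an element dtype and returns (handle, flow):

s := NewScope()
size := Const(s, int32(0))
// A growable float TensorArray whose elements keep their values after being read.
handle, flow := TensorArrayV3(s, size, tf.Float,
	TensorArrayV3DynamicSize(true),
	TensorArrayV3ClearAfterRead(false))
if s.Err() != nil {
	panic(s.Err())
}
_, _ = handle, flow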
type TensorDatasetAttr ¶
type TensorDatasetAttr func(optionalAttr)
TensorDatasetAttr is an optional argument to TensorDataset.
func TensorDatasetMetadata ¶
func TensorDatasetMetadata(value string) TensorDatasetAttr
TensorDatasetMetadata sets the optional metadata attribute to value. If not specified, defaults to ""
type TensorListConcatAttr ¶
type TensorListConcatAttr func(optionalAttr)
TensorListConcatAttr is an optional argument to TensorListConcat.
func TensorListConcatElementShape ¶
func TensorListConcatElementShape(value tf.Shape) TensorListConcatAttr
TensorListConcatElementShape sets the optional element_shape attribute to value. If not specified, defaults to {unknown_rank:true}
type TensorListSetItemAttr ¶ added in v0.5.0
type TensorListSetItemAttr func(optionalAttr)
TensorListSetItemAttr is an optional argument to TensorListSetItem.
func TensorListSetItemResizeIfIndexOutOfBounds ¶ added in v0.5.0
func TensorListSetItemResizeIfIndexOutOfBounds(value bool) TensorListSetItemAttr
TensorListSetItemResizeIfIndexOutOfBounds sets the optional resize_if_index_out_of_bounds attribute to value. If not specified, defaults to false
type TensorListStackAttr ¶
type TensorListStackAttr func(optionalAttr)
TensorListStackAttr is an optional argument to TensorListStack.
func TensorListStackNumElements ¶
func TensorListStackNumElements(value int64) TensorListStackAttr
TensorListStackNumElements sets the optional num_elements attribute to value. If not specified, defaults to -1
type TensorScatterAddAttr ¶ added in v0.8.2
type TensorScatterAddAttr func(optionalAttr)
TensorScatterAddAttr is an optional argument to TensorScatterAdd.
func TensorScatterAddBadIndicesPolicy ¶ added in v0.8.2
func TensorScatterAddBadIndicesPolicy(value string) TensorScatterAddAttr
TensorScatterAddBadIndicesPolicy sets the optional bad_indices_policy attribute to value. If not specified, defaults to ""
type TensorScatterMaxAttr ¶ added in v0.8.2
type TensorScatterMaxAttr func(optionalAttr)
TensorScatterMaxAttr is an optional argument to TensorScatterMax.
func TensorScatterMaxBadIndicesPolicy ¶ added in v0.8.2
func TensorScatterMaxBadIndicesPolicy(value string) TensorScatterMaxAttr
TensorScatterMaxBadIndicesPolicy sets the optional bad_indices_policy attribute to value. If not specified, defaults to ""
type TensorScatterSubAttr ¶ added in v0.8.2
type TensorScatterSubAttr func(optionalAttr)
TensorScatterSubAttr is an optional argument to TensorScatterSub.
func TensorScatterSubBadIndicesPolicy ¶ added in v0.8.2
func TensorScatterSubBadIndicesPolicy(value string) TensorScatterSubAttr
TensorScatterSubBadIndicesPolicy sets the optional bad_indices_policy attribute to value. If not specified, defaults to ""
type TensorScatterUpdateAttr ¶ added in v0.8.2
type TensorScatterUpdateAttr func(optionalAttr)
TensorScatterUpdateAttr is an optional argument to TensorScatterUpdate.
func TensorScatterUpdateBadIndicesPolicy ¶ added in v0.8.2
func TensorScatterUpdateBadIndicesPolicy(value string) TensorScatterUpdateAttr
TensorScatterUpdateBadIndicesPolicy sets the optional bad_indices_policy attribute to value. If not specified, defaults to ""
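A hedged sketch of setting a bad-indices policy on TensorScatterUpdate, assuming the op takes (scope, tensor, indices, updates, optional attributes); the policy string "IGNORE" is an assumption about the accepted values, so check the kernel documentation before relying on it.

s := NewScope()
tensor := Const(s, []float32{0, 0, 0, 0})
indices := Const(s, [][]int32{{1}, {3}})
updates := Const(s, []float32{9, 10})
out := TensorScatterUpdate(s, tensor, indices, updates,
    TensorScatterUpdateBadIndicesPolicy("IGNORE")) // assumed policy value; "" keeps the default behaviour
if s.Err() != nil {
    panic(s.Err())
}
_ = out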
type TensorSliceDatasetAttr ¶
type TensorSliceDatasetAttr func(optionalAttr)
TensorSliceDatasetAttr is an optional argument to TensorSliceDataset.
func TensorSliceDatasetIsFiles ¶
func TensorSliceDatasetIsFiles(value bool) TensorSliceDatasetAttr
TensorSliceDatasetIsFiles sets the optional is_files attribute to value. If not specified, defaults to false
func TensorSliceDatasetMetadata ¶
func TensorSliceDatasetMetadata(value string) TensorSliceDatasetAttr
TensorSliceDatasetMetadata sets the optional metadata attribute to value. If not specified, defaults to ""
func TensorSliceDatasetReplicateOnSplit ¶ added in v0.2.0
func TensorSliceDatasetReplicateOnSplit(value bool) TensorSliceDatasetAttr
TensorSliceDatasetReplicateOnSplit sets the optional replicate_on_split attribute to value. If not specified, defaults to false
type TensorStridedSliceUpdateAttr ¶
type TensorStridedSliceUpdateAttr func(optionalAttr)
TensorStridedSliceUpdateAttr is an optional argument to TensorStridedSliceUpdate.
func TensorStridedSliceUpdateBeginMask ¶
func TensorStridedSliceUpdateBeginMask(value int64) TensorStridedSliceUpdateAttr
TensorStridedSliceUpdateBeginMask sets the optional begin_mask attribute to value. If not specified, defaults to 0
func TensorStridedSliceUpdateEllipsisMask ¶
func TensorStridedSliceUpdateEllipsisMask(value int64) TensorStridedSliceUpdateAttr
TensorStridedSliceUpdateEllipsisMask sets the optional ellipsis_mask attribute to value. If not specified, defaults to 0
func TensorStridedSliceUpdateEndMask ¶
func TensorStridedSliceUpdateEndMask(value int64) TensorStridedSliceUpdateAttr
TensorStridedSliceUpdateEndMask sets the optional end_mask attribute to value. If not specified, defaults to 0
func TensorStridedSliceUpdateNewAxisMask ¶
func TensorStridedSliceUpdateNewAxisMask(value int64) TensorStridedSliceUpdateAttr
TensorStridedSliceUpdateNewAxisMask sets the optional new_axis_mask attribute to value. If not specified, defaults to 0
func TensorStridedSliceUpdateShrinkAxisMask ¶
func TensorStridedSliceUpdateShrinkAxisMask(value int64) TensorStridedSliceUpdateAttr
TensorStridedSliceUpdateShrinkAxisMask sets the optional shrink_axis_mask attribute to value. If not specified, defaults to 0
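A minimal sketch of the mask attributes, assuming TensorStridedSliceUpdate takes (scope, input, begin, end, strides, value, optional attributes); the constants are illustrative. Each mask is a bit field: bit i of begin_mask/end_mask tells the op to ignore begin[i]/end[i] and use the full range for that dimension.

s := NewScope()
input := Const(s, []float32{1, 2, 3, 4, 5})
begin := Const(s, []int32{1})
end := Const(s, []int32{4})
strides := Const(s, []int32{1})
value := Const(s, []float32{10, 20, 30}) // replaces input[1:4]
out := TensorStridedSliceUpdate(s, input, begin, end, strides, value,
    TensorStridedSliceUpdateBeginMask(0), // honour begin[0]; setting bit 0 would ignore it
    TensorStridedSliceUpdateEndMask(0))   // honour end[0]
if s.Err() != nil {
    panic(s.Err())
}
_ = out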
type TensorSummaryAttr ¶
type TensorSummaryAttr func(optionalAttr)
TensorSummaryAttr is an optional argument to TensorSummary.
func TensorSummaryDescription ¶
func TensorSummaryDescription(value string) TensorSummaryAttr
TensorSummaryDescription sets the optional description attribute to value.
value: A json-encoded SummaryDescription proto. If not specified, defaults to ""
func TensorSummaryDisplayName ¶
func TensorSummaryDisplayName(value string) TensorSummaryAttr
TensorSummaryDisplayName sets the optional display_name attribute to value.
value: An unused string. If not specified, defaults to ""
func TensorSummaryLabels ¶
func TensorSummaryLabels(value []string) TensorSummaryAttr
TensorSummaryLabels sets the optional labels attribute to value.
value: An unused list of strings. If not specified, defaults to {}
type TextLineDatasetAttr ¶
type TextLineDatasetAttr func(optionalAttr)
TextLineDatasetAttr is an optional argument to TextLineDataset.
func TextLineDatasetMetadata ¶
func TextLineDatasetMetadata(value string) TextLineDatasetAttr
TextLineDatasetMetadata sets the optional metadata attribute to value. If not specified, defaults to ""
type TextLineReaderV2Attr ¶
type TextLineReaderV2Attr func(optionalAttr)
TextLineReaderV2Attr is an optional argument to TextLineReaderV2.
func TextLineReaderV2Container ¶
func TextLineReaderV2Container(value string) TextLineReaderV2Attr
TextLineReaderV2Container sets the optional container attribute to value.
value: If non-empty, this reader is placed in the given container. Otherwise, a default container is used. If not specified, defaults to ""
func TextLineReaderV2SharedName ¶
func TextLineReaderV2SharedName(value string) TextLineReaderV2Attr
TextLineReaderV2SharedName sets the optional shared_name attribute to value.
value: If non-empty, this reader is named in the given bucket with this shared_name. Otherwise, the node name is used instead. If not specified, defaults to ""
func TextLineReaderV2SkipHeaderLines ¶
func TextLineReaderV2SkipHeaderLines(value int64) TextLineReaderV2Attr
TextLineReaderV2SkipHeaderLines sets the optional skip_header_lines attribute to value.
value: Number of lines to skip from the beginning of every file. If not specified, defaults to 0
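A minimal sketch, assuming TextLineReaderV2 takes only a scope plus optional attributes and returns a reader handle; the shared name is illustrative.

s := NewScope()
reader := TextLineReaderV2(s,
    TextLineReaderV2SkipHeaderLines(1),       // skip a one-line header in every file
    TextLineReaderV2SharedName("csv_reader")) // share the reader under a fixed name
if s.Err() != nil {
    panic(s.Err())
}
_ = reader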
type ThreadPoolHandleAttr ¶
type ThreadPoolHandleAttr func(optionalAttr)
ThreadPoolHandleAttr is an optional argument to ThreadPoolHandle.
func ThreadPoolHandleContainer ¶
func ThreadPoolHandleContainer(value string) ThreadPoolHandleAttr
ThreadPoolHandleContainer sets the optional container attribute to value. If not specified, defaults to ""
func ThreadPoolHandleMaxIntraOpParallelism ¶
func ThreadPoolHandleMaxIntraOpParallelism(value int64) ThreadPoolHandleAttr
ThreadPoolHandleMaxIntraOpParallelism sets the optional max_intra_op_parallelism attribute to value.
value: The maximum degree of parallelism to use within operations that execute on this threadpool. If not specified, defaults to 1
func ThreadPoolHandleSharedName ¶
func ThreadPoolHandleSharedName(value string) ThreadPoolHandleAttr
ThreadPoolHandleSharedName sets the optional shared_name attribute to value. If not specified, defaults to ""
type ThreadUnsafeUnigramCandidateSamplerAttr ¶
type ThreadUnsafeUnigramCandidateSamplerAttr func(optionalAttr)
ThreadUnsafeUnigramCandidateSamplerAttr is an optional argument to ThreadUnsafeUnigramCandidateSampler.
func ThreadUnsafeUnigramCandidateSamplerSeed ¶
func ThreadUnsafeUnigramCandidateSamplerSeed(value int64) ThreadUnsafeUnigramCandidateSamplerAttr
ThreadUnsafeUnigramCandidateSamplerSeed sets the optional seed attribute to value.
value: If either seed or seed2 is set to be non-zero, the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed. If not specified, defaults to 0
func ThreadUnsafeUnigramCandidateSamplerSeed2 ¶
func ThreadUnsafeUnigramCandidateSamplerSeed2(value int64) ThreadUnsafeUnigramCandidateSamplerAttr
ThreadUnsafeUnigramCandidateSamplerSeed2 sets the optional seed2 attribute to value.
value: A second seed to avoid seed collision. If not specified, defaults to 0
type TopKAttr ¶
type TopKAttr func(optionalAttr)
TopKAttr is an optional argument to TopK.
func TopKSorted ¶
func TopKSorted(value bool) TopKAttr
TopKSorted sets the optional sorted attribute to value.
value: If true the resulting `k` elements will be sorted by the values in descending order. If not specified, defaults to true
type TopKV2Attr ¶
type TopKV2Attr func(optionalAttr)
TopKV2Attr is an optional argument to TopKV2.
func TopKV2IndexType ¶ added in v0.5.0
func TopKV2IndexType(value tf.DataType) TopKV2Attr
TopKV2IndexType sets the optional index_type attribute to value. If not specified, defaults to DT_INT32
func TopKV2Sorted ¶
func TopKV2Sorted(value bool) TopKV2Attr
TopKV2Sorted sets the optional sorted attribute to value.
value: If true the resulting `k` elements will be sorted by the values in descending order. If not specified, defaults to true
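A minimal sketch of both TopKV2 attributes, assuming the usual (scope, input, k, optional attributes) convention with values and indices returned; the input is illustrative.

s := NewScope()
input := Const(s, []float32{5, 1, 4, 2, 3})
k := Const(s, int32(3))
values, indices := TopKV2(s, input, k,
    TopKV2Sorted(false),       // do not sort the top-k values
    TopKV2IndexType(tf.Int64)) // emit 64-bit indices instead of the default int32
if s.Err() != nil {
    panic(s.Err())
}
_, _ = values, indices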
type TridiagonalSolveAttr ¶
type TridiagonalSolveAttr func(optionalAttr)
TridiagonalSolveAttr is an optional argument to TridiagonalSolve.
func TridiagonalSolvePartialPivoting ¶
func TridiagonalSolvePartialPivoting(value bool) TridiagonalSolveAttr
TridiagonalSolvePartialPivoting sets the optional partial_pivoting attribute to value.
value: Whether to apply partial pivoting. Partial pivoting makes the procedure more stable, but slower. If not specified, defaults to true
func TridiagonalSolvePerturbSingular ¶
func TridiagonalSolvePerturbSingular(value bool) TridiagonalSolveAttr
TridiagonalSolvePerturbSingular sets the optional perturb_singular attribute to value. If not specified, defaults to false
type TruncatedNormalAttr ¶
type TruncatedNormalAttr func(optionalAttr)
TruncatedNormalAttr is an optional argument to TruncatedNormal.
func TruncatedNormalSeed ¶
func TruncatedNormalSeed(value int64) TruncatedNormalAttr
TruncatedNormalSeed sets the optional seed attribute to value.
value: If either `seed` or `seed2` is set to be non-zero, the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed. If not specified, defaults to 0
func TruncatedNormalSeed2 ¶
func TruncatedNormalSeed2(value int64) TruncatedNormalAttr
TruncatedNormalSeed2 sets the optional seed2 attribute to value.
value: A second seed to avoid seed collision. If not specified, defaults to 0
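A minimal sketch of fixing both seeds so the op produces the same values on every run, assuming TruncatedNormal takes (scope, shape, dtype, optional attributes); the seed values are arbitrary.

s := NewScope()
shape := Const(s, []int32{2, 3})
samples := TruncatedNormal(s, shape, tf.Float,
    TruncatedNormalSeed(42), // non-zero, so the generator is seeded deterministically
    TruncatedNormalSeed2(7)) // second seed to avoid collisions
if s.Err() != nil {
    panic(s.Err())
}
_ = samples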
type UnbatchAttr ¶
type UnbatchAttr func(optionalAttr)
UnbatchAttr is an optional argument to Unbatch.
func UnbatchContainer ¶
func UnbatchContainer(value string) UnbatchAttr
UnbatchContainer sets the optional container attribute to value. If not specified, defaults to ""
func UnbatchSharedName ¶
func UnbatchSharedName(value string) UnbatchAttr
UnbatchSharedName sets the optional shared_name attribute to value. If not specified, defaults to ""
type UnbatchDatasetAttr ¶
type UnbatchDatasetAttr func(optionalAttr)
UnbatchDatasetAttr is an optional argument to UnbatchDataset.
func UnbatchDatasetMetadata ¶
func UnbatchDatasetMetadata(value string) UnbatchDatasetAttr
UnbatchDatasetMetadata sets the optional metadata attribute to value. If not specified, defaults to ""
type UnbatchGradAttr ¶
type UnbatchGradAttr func(optionalAttr)
UnbatchGradAttr is an optional argument to UnbatchGrad.
func UnbatchGradContainer ¶
func UnbatchGradContainer(value string) UnbatchGradAttr
UnbatchGradContainer sets the optional container attribute to value. If not specified, defaults to ""
func UnbatchGradSharedName ¶
func UnbatchGradSharedName(value string) UnbatchGradAttr
UnbatchGradSharedName sets the optional shared_name attribute to value. If not specified, defaults to ""
type UnicodeDecodeAttr ¶
type UnicodeDecodeAttr func(optionalAttr)
UnicodeDecodeAttr is an optional argument to UnicodeDecode.
func UnicodeDecodeErrors ¶
func UnicodeDecodeErrors(value string) UnicodeDecodeAttr
UnicodeDecodeErrors sets the optional errors attribute to value.
value: Error handling policy when there is invalid formatting found in the input. The value of 'strict' will cause the operation to produce an InvalidArgument error on any invalid input formatting. A value of 'replace' (the default) will cause the operation to replace any invalid formatting in the input with the `replacement_char` codepoint. A value of 'ignore' will cause the operation to skip any invalid formatting in the input and produce no corresponding output character. If not specified, defaults to "replace"
func UnicodeDecodeReplaceControlCharacters ¶
func UnicodeDecodeReplaceControlCharacters(value bool) UnicodeDecodeAttr
UnicodeDecodeReplaceControlCharacters sets the optional replace_control_characters attribute to value.
value: Whether to replace the C0 control characters (00-1F) with the `replacement_char`. Default is false. If not specified, defaults to false
func UnicodeDecodeReplacementChar ¶
func UnicodeDecodeReplacementChar(value int64) UnicodeDecodeAttr
UnicodeDecodeReplacementChar sets the optional replacement_char attribute to value.
value: The replacement character codepoint to be used in place of any invalid formatting in the input when `errors='replace'`. Any valid Unicode codepoint may be used. The default is the Unicode replacement character, 0xFFFD (decimal 65533). If not specified, defaults to 65533
func UnicodeDecodeTsplits ¶
func UnicodeDecodeTsplits(value tf.DataType) UnicodeDecodeAttr
UnicodeDecodeTsplits sets the optional Tsplits attribute to value. If not specified, defaults to DT_INT64
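A minimal sketch combining the error policy, replacement character, and split dtype, assuming UnicodeDecode takes (scope, input, input_encoding, optional attributes) and returns row splits plus decoded codepoints; the strings are illustrative.

s := NewScope()
input := Const(s, []string{"héllo", "wörld"})
rowSplits, charValues := UnicodeDecode(s, input, "UTF-8",
    UnicodeDecodeErrors("replace"),       // substitute invalid bytes rather than failing
    UnicodeDecodeReplacementChar(0xFFFD), // use U+FFFD as the substitute codepoint
    UnicodeDecodeTsplits(tf.Int32))       // emit 32-bit row splits instead of int64
if s.Err() != nil {
    panic(s.Err())
}
_, _ = rowSplits, charValues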
type UnicodeDecodeWithOffsetsAttr ¶
type UnicodeDecodeWithOffsetsAttr func(optionalAttr)
UnicodeDecodeWithOffsetsAttr is an optional argument to UnicodeDecodeWithOffsets.
func UnicodeDecodeWithOffsetsErrors ¶
func UnicodeDecodeWithOffsetsErrors(value string) UnicodeDecodeWithOffsetsAttr
UnicodeDecodeWithOffsetsErrors sets the optional errors attribute to value.
value: Error handling policy when there is invalid formatting found in the input. The value of 'strict' will cause the operation to produce an InvalidArgument error on any invalid input formatting. A value of 'replace' (the default) will cause the operation to replace any invalid formatting in the input with the `replacement_char` codepoint. A value of 'ignore' will cause the operation to skip any invalid formatting in the input and produce no corresponding output character. If not specified, defaults to "replace"
func UnicodeDecodeWithOffsetsReplaceControlCharacters ¶
func UnicodeDecodeWithOffsetsReplaceControlCharacters(value bool) UnicodeDecodeWithOffsetsAttr
UnicodeDecodeWithOffsetsReplaceControlCharacters sets the optional replace_control_characters attribute to value.
value: Whether to replace the C0 control characters (00-1F) with the `replacement_char`. Default is false. If not specified, defaults to false
func UnicodeDecodeWithOffsetsReplacementChar ¶
func UnicodeDecodeWithOffsetsReplacementChar(value int64) UnicodeDecodeWithOffsetsAttr
UnicodeDecodeWithOffsetsReplacementChar sets the optional replacement_char attribute to value.
value: The replacement character codepoint to be used in place of any invalid formatting in the input when `errors='replace'`. Any valid Unicode codepoint may be used. The default is the Unicode replacement character, 0xFFFD (decimal 65533). If not specified, defaults to 65533
func UnicodeDecodeWithOffsetsTsplits ¶
func UnicodeDecodeWithOffsetsTsplits(value tf.DataType) UnicodeDecodeWithOffsetsAttr
UnicodeDecodeWithOffsetsTsplits sets the optional Tsplits attribute to value. If not specified, defaults to DT_INT64
type UnicodeEncodeAttr ¶
type UnicodeEncodeAttr func(optionalAttr)
UnicodeEncodeAttr is an optional argument to UnicodeEncode.
func UnicodeEncodeErrors ¶
func UnicodeEncodeErrors(value string) UnicodeEncodeAttr
UnicodeEncodeErrors sets the optional errors attribute to value.
value: Error handling policy when there is invalid formatting found in the input. The value of 'strict' will cause the operation to produce an InvalidArgument error on any invalid input formatting. A value of 'replace' (the default) will cause the operation to replace any invalid formatting in the input with the `replacement_char` codepoint. A value of 'ignore' will cause the operation to skip any invalid formatting in the input and produce no corresponding output character. If not specified, defaults to "replace"
func UnicodeEncodeReplacementChar ¶
func UnicodeEncodeReplacementChar(value int64) UnicodeEncodeAttr
UnicodeEncodeReplacementChar sets the optional replacement_char attribute to value.
value: The replacement character codepoint to be used in place of any invalid formatting in the input when `errors='replace'`. Any valid Unicode codepoint may be used. The default is the Unicode replacement character, 0xFFFD (decimal 65533). If not specified, defaults to 65533
type UnicodeTranscodeAttr ¶
type UnicodeTranscodeAttr func(optionalAttr)
UnicodeTranscodeAttr is an optional argument to UnicodeTranscode.
func UnicodeTranscodeErrors ¶
func UnicodeTranscodeErrors(value string) UnicodeTranscodeAttr
UnicodeTranscodeErrors sets the optional errors attribute to value.
value: Error handling policy when there is invalid formatting found in the input. The value of 'strict' will cause the operation to produce an InvalidArgument error on any invalid input formatting. A value of 'replace' (the default) will cause the operation to replace any invalid formatting in the input with the `replacement_char` codepoint. A value of 'ignore' will cause the operation to skip any invalid formatting in the input and produce no corresponding output character. If not specified, defaults to "replace"
func UnicodeTranscodeReplaceControlCharacters ¶
func UnicodeTranscodeReplaceControlCharacters(value bool) UnicodeTranscodeAttr
UnicodeTranscodeReplaceControlCharacters sets the optional replace_control_characters attribute to value.
value: Whether to replace the C0 control characters (00-1F) with the `replacement_char`. Default is false. If not specified, defaults to false
func UnicodeTranscodeReplacementChar ¶
func UnicodeTranscodeReplacementChar(value int64) UnicodeTranscodeAttr
UnicodeTranscodeReplacementChar sets the optional replacement_char attribute to value.
value: The replacement character codepoint to be used in place of any invalid formatting in the input when `errors='replace'`. Any valid Unicode codepoint may be used. The default is the Unicode replacement character, 0xFFFD (decimal 65533).
Note that for UTF-8, passing a replacement character expressible in 1 byte, such as ' ', will preserve string alignment to the source since invalid bytes will be replaced with a 1-byte replacement. For UTF-16-BE and UTF-16-LE, any 1 or 2 byte replacement character will preserve byte alignment to the source. If not specified, defaults to 65533
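A minimal sketch, assuming UnicodeTranscode takes (scope, input, input_encoding, output_encoding, optional attributes); the input strings (one of which contains a deliberately invalid byte) are illustrative.

s := NewScope()
input := Const(s, []string{"café", "bad\xffbyte"}) // second string contains an invalid UTF-8 byte
out := UnicodeTranscode(s, input, "UTF-8", "UTF-8",
    UnicodeTranscodeErrors("replace"),   // substitute invalid formatting instead of failing
    UnicodeTranscodeReplacementChar(32)) // codepoint for ' ': a 1-byte replacement that preserves alignment for UTF-8 output
if s.Err() != nil {
    panic(s.Err())
}
_ = out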
type UniformCandidateSamplerAttr ¶
type UniformCandidateSamplerAttr func(optionalAttr)
UniformCandidateSamplerAttr is an optional argument to UniformCandidateSampler.
func UniformCandidateSamplerSeed ¶
func UniformCandidateSamplerSeed(value int64) UniformCandidateSamplerAttr
UniformCandidateSamplerSeed sets the optional seed attribute to value.
value: If either seed or seed2 is set to be non-zero, the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed. If not specified, defaults to 0
func UniformCandidateSamplerSeed2 ¶
func UniformCandidateSamplerSeed2(value int64) UniformCandidateSamplerAttr
UniformCandidateSamplerSeed2 sets the optional seed2 attribute to value.
value: A second seed to avoid seed collision. If not specified, defaults to 0
type UniformDequantizeAttr ¶ added in v0.2.0
type UniformDequantizeAttr func(optionalAttr)
UniformDequantizeAttr is an optional argument to UniformDequantize.
func UniformDequantizeQuantizationAxis ¶ added in v0.2.0
func UniformDequantizeQuantizationAxis(value int64) UniformDequantizeAttr
UniformDequantizeQuantizationAxis sets the optional quantization_axis attribute to value.
value: Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. If set to -1 (default), this indicates per-tensor quantization. Otherwise, it must be set within range [0, input.dims()). If not specified, defaults to -1
type UniformQuantizeAttr ¶ added in v0.3.0
type UniformQuantizeAttr func(optionalAttr)
UniformQuantizeAttr is an optional argument to UniformQuantize.
func UniformQuantizeQuantizationAxis ¶ added in v0.3.0
func UniformQuantizeQuantizationAxis(value int64) UniformQuantizeAttr
UniformQuantizeQuantizationAxis sets the optional quantization_axis attribute to value.
value: Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. If set to -1 (default), this indicates per-tensor quantization. Otherwise, it must be set within range [0, input.dims()). If not specified, defaults to -1
type UniformQuantizedAddAttr ¶ added in v0.4.0
type UniformQuantizedAddAttr func(optionalAttr)
UniformQuantizedAddAttr is an optional argument to UniformQuantizedAdd.
func UniformQuantizedAddLhsQuantizationAxis ¶ added in v0.4.0
func UniformQuantizedAddLhsQuantizationAxis(value int64) UniformQuantizedAddAttr
UniformQuantizedAddLhsQuantizationAxis sets the optional lhs_quantization_axis attribute to value.
value: Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. If set to -1 (default), this indicates per-tensor quantization. For the `lhs`, only per-tensor quantization is supported. Thus, this must be set to -1. Other values will raise an error at OpKernel construction. If not specified, defaults to -1
func UniformQuantizedAddOutputQuantizationAxis ¶ added in v0.4.0
func UniformQuantizedAddOutputQuantizationAxis(value int64) UniformQuantizedAddAttr
UniformQuantizedAddOutputQuantizationAxis sets the optional output_quantization_axis attribute to value.
value: Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. If set to -1 (default), this indicates per-tensor quantization. For the `output`, only per-tensor quantization or per-channel quantization along `output_feature_dimension` is supported. Thus, this must be set to -1 or `dimension_numbers.output_feature_dimension`. Other values will raise an error at OpKernel construction. If not specified, defaults to -1
func UniformQuantizedAddRhsQuantizationAxis ¶ added in v0.4.0
func UniformQuantizedAddRhsQuantizationAxis(value int64) UniformQuantizedAddAttr
UniformQuantizedAddRhsQuantizationAxis sets the optional rhs_quantization_axis attribute to value.
value: Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. If set to -1 (default), this indicates per-tensor quantization. For the `rhs`, only per-tensor quantization or per-channel quantization along `kernel_output_feature_dimension` is supported. Thus, this must be set to -1 or `dimension_numbers.kernel_output_feature_dimension`. Other values will raise an error at OpKernel construction. If not specified, defaults to -1
type UniformQuantizedClipByValueAttr ¶ added in v0.3.0
type UniformQuantizedClipByValueAttr func(optionalAttr)
UniformQuantizedClipByValueAttr is an optional argument to UniformQuantizedClipByValue.
func UniformQuantizedClipByValueQuantizationAxis ¶ added in v0.3.0
func UniformQuantizedClipByValueQuantizationAxis(value int64) UniformQuantizedClipByValueAttr
UniformQuantizedClipByValueQuantizationAxis sets the optional quantization_axis attribute to value.
value: Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. If set to -1 (default), this indicates per-tensor quantization. Otherwise, it must be set within range [0, operand.dims()). If not specified, defaults to -1
type UniformQuantizedConvolutionAttr ¶ added in v0.4.0
type UniformQuantizedConvolutionAttr func(optionalAttr)
UniformQuantizedConvolutionAttr is an optional argument to UniformQuantizedConvolution.
func UniformQuantizedConvolutionBatchGroupCount ¶ added in v0.4.0
func UniformQuantizedConvolutionBatchGroupCount(value int64) UniformQuantizedConvolutionAttr
UniformQuantizedConvolutionBatchGroupCount sets the optional batch_group_count attribute to value.
value: The number of batch groups. Used for grouped filters. Must be a divisor of `output_feature`. If not specified, defaults to 1
func UniformQuantizedConvolutionDimensionNumbers ¶ added in v0.4.0
func UniformQuantizedConvolutionDimensionNumbers(value string) UniformQuantizedConvolutionAttr
UniformQuantizedConvolutionDimensionNumbers sets the optional dimension_numbers attribute to value.
value: Structure of dimension information for the convolution op. Must be an empty string (default) or a serialized string of `tensorflow.UniformQuantizedConvolutionDimensionNumbersAttr` proto. If empty string, the default is `("NCHW", "OIHW", "NCHW")` (for a 2D convolution). If not specified, defaults to ""
func UniformQuantizedConvolutionExplicitPadding ¶ added in v0.4.0
func UniformQuantizedConvolutionExplicitPadding(value []int64) UniformQuantizedConvolutionAttr
UniformQuantizedConvolutionExplicitPadding sets the optional explicit_padding attribute to value.
value: If `padding` is `"EXPLICIT"`, this must be set as a list indicating the explicit paddings at the start and end of each `lhs` spatial dimension. Otherwise, this must be empty.
If used, it must be a list of size `2 * (number of lhs spatial dimensions)`, where `(explicit_padding[2 * i], explicit_padding[2 * i + 1])` indicates `(start_padding, end_padding)` of `spatial_dimensions[i]`. If not specified, defaults to {}
func UniformQuantizedConvolutionFeatureGroupCount ¶ added in v0.4.0
func UniformQuantizedConvolutionFeatureGroupCount(value int64) UniformQuantizedConvolutionAttr
UniformQuantizedConvolutionFeatureGroupCount sets the optional feature_group_count attribute to value.
value: The number of feature groups. Used for grouped convolutions. Must be a divisor of both `lhs_feature` and `output_feature`. If not specified, defaults to 1
func UniformQuantizedConvolutionLhsDilation ¶ added in v0.4.0
func UniformQuantizedConvolutionLhsDilation(value []int64) UniformQuantizedConvolutionAttr
UniformQuantizedConvolutionLhsDilation sets the optional lhs_dilation attribute to value.
value: The dilation factor to apply in each spatial dimension of `lhs`. Must be an empty list (default) or a list of size (number of `lhs` spatial dimensions). If empty list, the dilation for each `lhs` spatial dimension is set to 1. If not specified, defaults to {}
func UniformQuantizedConvolutionLhsQuantizationAxis ¶ added in v0.4.0
func UniformQuantizedConvolutionLhsQuantizationAxis(value int64) UniformQuantizedConvolutionAttr
UniformQuantizedConvolutionLhsQuantizationAxis sets the optional lhs_quantization_axis attribute to value.
value: Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. If set to -1 (default), this indicates per-tensor quantization. For the `lhs`, only per-tensor quantization is supported. Thus, this must be set to -1. Other values will raise an error at OpKernel construction. If not specified, defaults to -1
func UniformQuantizedConvolutionOutputQuantizationAxis ¶ added in v0.4.0
func UniformQuantizedConvolutionOutputQuantizationAxis(value int64) UniformQuantizedConvolutionAttr
UniformQuantizedConvolutionOutputQuantizationAxis sets the optional output_quantization_axis attribute to value.
value: Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. If set to -1 (default), this indicates per-tensor quantization. For the `output`, only per-tensor quantization or per-channel quantization along `output_feature_dimension` is supported. Thus, this must be set to -1 or `dimension_numbers.output_feature_dimension`. Other values will raise an error at OpKernel construction. If not specified, defaults to -1
func UniformQuantizedConvolutionRhsDilation ¶ added in v0.4.0
func UniformQuantizedConvolutionRhsDilation(value []int64) UniformQuantizedConvolutionAttr
UniformQuantizedConvolutionRhsDilation sets the optional rhs_dilation attribute to value.
value: The dilation factor to apply in each spatial dimension of `rhs`. Must be an empty list (default) or a list of size (number of `rhs` spatial dimensions). If empty list, the dilation for each `rhs` spatial dimension is set to 1. If not specified, defaults to {}
func UniformQuantizedConvolutionRhsQuantizationAxis ¶ added in v0.4.0
func UniformQuantizedConvolutionRhsQuantizationAxis(value int64) UniformQuantizedConvolutionAttr
UniformQuantizedConvolutionRhsQuantizationAxis sets the optional rhs_quantization_axis attribute to value.
value: Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. If set to -1 (default), this indicates per-tensor quantization. For the `rhs`, only per-tensor quantization or per-channel quantization along `kernel_output_feature_dimension` is supported. Thus, this must be set to -1 or `dimension_numbers.kernel_output_feature_dimension`. Other values will raise an error at OpKernel construction. If not specified, defaults to -1
func UniformQuantizedConvolutionWindowStrides ¶ added in v0.4.0
func UniformQuantizedConvolutionWindowStrides(value []int64) UniformQuantizedConvolutionAttr
UniformQuantizedConvolutionWindowStrides sets the optional window_strides attribute to value.
value: The stride of the sliding window for each spatial dimension of `lhs`. Must be an empty list (default) or a list of size (number of spatial dimensions). If an empty list is provided, the stride for each spatial dimension is set to 1. If not specified, defaults to {}
type UniformQuantizedConvolutionHybridAttr ¶ added in v0.4.0
type UniformQuantizedConvolutionHybridAttr func(optionalAttr)
UniformQuantizedConvolutionHybridAttr is an optional argument to UniformQuantizedConvolutionHybrid.
func UniformQuantizedConvolutionHybridBatchGroupCount ¶ added in v0.4.0
func UniformQuantizedConvolutionHybridBatchGroupCount(value int64) UniformQuantizedConvolutionHybridAttr
UniformQuantizedConvolutionHybridBatchGroupCount sets the optional batch_group_count attribute to value.
value: The number of batch groups. Used for grouped filters. Must be a divisor of output_feature. If not specified, defaults to 1
func UniformQuantizedConvolutionHybridDimensionNumbers ¶ added in v0.4.0
func UniformQuantizedConvolutionHybridDimensionNumbers(value string) UniformQuantizedConvolutionHybridAttr
UniformQuantizedConvolutionHybridDimensionNumbers sets the optional dimension_numbers attribute to value.
value: Structure of dimension information for the convolution op. Must be an empty string (default) or a serialized string of tensorflow.UniformQuantizedConvolutionDimensionNumbersAttr proto. If empty string, the default is `("NCHW", "OIHW", "NCHW")` (for a 2D convolution). If not specified, defaults to ""
func UniformQuantizedConvolutionHybridExplicitPadding ¶ added in v0.4.0
func UniformQuantizedConvolutionHybridExplicitPadding(value []int64) UniformQuantizedConvolutionHybridAttr
UniformQuantizedConvolutionHybridExplicitPadding sets the optional explicit_padding attribute to value.
value: If the `padding` attribute is `"EXPLICIT"`, this must be set as a list indicating the explicit paddings at the start and end of each lhs spatial dimension. Otherwise, this attribute must be empty.
If used, it must be a list of size 2 * (number of lhs spatial dimensions), where (explicit_padding[2 * i], explicit_padding[2 * i + 1]) indicates the (start_padding, end_padding) of spatial_dimensions[i]. If not specified, defaults to {}
func UniformQuantizedConvolutionHybridFeatureGroupCount ¶ added in v0.4.0
func UniformQuantizedConvolutionHybridFeatureGroupCount(value int64) UniformQuantizedConvolutionHybridAttr
UniformQuantizedConvolutionHybridFeatureGroupCount sets the optional feature_group_count attribute to value.
value: The number of feature groups. Used for grouped convolutions. Must be a divisor of both lhs_feature and output_feature. If not specified, defaults to 1
func UniformQuantizedConvolutionHybridLhsDilation ¶ added in v0.4.0
func UniformQuantizedConvolutionHybridLhsDilation(value []int64) UniformQuantizedConvolutionHybridAttr
UniformQuantizedConvolutionHybridLhsDilation sets the optional lhs_dilation attribute to value.
value: The dilation factor to apply in each spatial dimension of `lhs`. Must be an empty list (default) or a list of size (number of lhs spatial dimensions). If empty list, the dilation for each lhs spatial dimension is set to 1. If not specified, defaults to {}
func UniformQuantizedConvolutionHybridRhsDilation ¶ added in v0.4.0
func UniformQuantizedConvolutionHybridRhsDilation(value []int64) UniformQuantizedConvolutionHybridAttr
UniformQuantizedConvolutionHybridRhsDilation sets the optional rhs_dilation attribute to value.
value: The dilation factor to apply in each spatial dimension of `rhs`. Must be an empty list (default) or a list of size (number of rhs spatial dimensions). If empty list, the dilation for each rhs spatial dimension is set to 1. If not specified, defaults to {}
func UniformQuantizedConvolutionHybridRhsQuantizationAxis ¶ added in v0.4.0
func UniformQuantizedConvolutionHybridRhsQuantizationAxis(value int64) UniformQuantizedConvolutionHybridAttr
UniformQuantizedConvolutionHybridRhsQuantizationAxis sets the optional rhs_quantization_axis attribute to value.
value: Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. If set to -1 (default), this indicates per-tensor quantization. For the `rhs`, only per-tensor quantization or per-channel quantization along kernel_output_feature_dimension is supported. Thus, this attribute must be set to -1 or `dimension_numbers.kernel_output_feature_dimension`. Other values will raise an error at OpKernel construction. If not specified, defaults to -1
func UniformQuantizedConvolutionHybridWindowStrides ¶ added in v0.4.0
func UniformQuantizedConvolutionHybridWindowStrides(value []int64) UniformQuantizedConvolutionHybridAttr
UniformQuantizedConvolutionHybridWindowStrides sets the optional window_strides attribute to value.
value: The stride of the sliding window for each spatial dimension of `lhs`. Must be an empty list (default) or a list of size (number of spatial dimensions). If an empty list is provided, the stride for each spatial dimension is set to 1. If not specified, defaults to {}
type UniformQuantizedDotAttr ¶ added in v0.3.0
type UniformQuantizedDotAttr func(optionalAttr)
UniformQuantizedDotAttr is an optional argument to UniformQuantizedDot.
func UniformQuantizedDotLhsQuantizationAxis ¶ added in v0.3.0
func UniformQuantizedDotLhsQuantizationAxis(value int64) UniformQuantizedDotAttr
UniformQuantizedDotLhsQuantizationAxis sets the optional lhs_quantization_axis attribute to value.
value: Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. If set to -1 (default), this indicates per-tensor quantization. For dot op lhs, only per-tensor quantization is supported. Thus, this attribute must be set to -1. Other values are rejected. If not specified, defaults to -1
func UniformQuantizedDotOutputQuantizationAxis ¶ added in v0.3.0
func UniformQuantizedDotOutputQuantizationAxis(value int64) UniformQuantizedDotAttr
UniformQuantizedDotOutputQuantizationAxis sets the optional output_quantization_axis attribute to value.
value: Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. If set to -1 (default), this indicates per-tensor quantization. For dot op output, only per-tensor quantization or per-channel quantization along dimension 1 is supported. Thus, this attribute must be set to -1 or 1. Other values are rejected. If not specified, defaults to -1
func UniformQuantizedDotRhsQuantizationAxis ¶ added in v0.3.0
func UniformQuantizedDotRhsQuantizationAxis(value int64) UniformQuantizedDotAttr
UniformQuantizedDotRhsQuantizationAxis sets the optional rhs_quantization_axis attribute to value.
value: Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. If set to -1 (default), this indicates per-tensor quantization. For dot op rhs, only per-tensor quantization or per-channel quantization along dimension 1 is supported. Thus, this attribute must be set to -1 or 1. Other values are rejected. If not specified, defaults to -1
type UniformQuantizedDotHybridAttr ¶ added in v0.2.0
type UniformQuantizedDotHybridAttr func(optionalAttr)
UniformQuantizedDotHybridAttr is an optional argument to UniformQuantizedDotHybrid.
func UniformQuantizedDotHybridRhsQuantizationAxis ¶ added in v0.2.0
func UniformQuantizedDotHybridRhsQuantizationAxis(value int64) UniformQuantizedDotHybridAttr
UniformQuantizedDotHybridRhsQuantizationAxis sets the optional rhs_quantization_axis attribute to value.
value: Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. If set to -1 (default), this indicates per-tensor quantization. For dot op rhs, only per-tensor quantization or per-channel quantization along dimension 1 is supported. Thus, this attribute must be set to -1 or 1. Other values are rejected. If not specified, defaults to -1
type UniformRequantizeAttr ¶ added in v0.3.0
type UniformRequantizeAttr func(optionalAttr)
UniformRequantizeAttr is an optional argument to UniformRequantize.
func UniformRequantizeInputQuantizationAxis ¶ added in v0.3.0
func UniformRequantizeInputQuantizationAxis(value int64) UniformRequantizeAttr
UniformRequantizeInputQuantizationAxis sets the optional input_quantization_axis attribute to value.
value: The quantization axis that was used when quantizing original data that `input` represents. Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension. If set to -1 (default), this indicates per-tensor quantization. Otherwise, it must be set within range [0, input.dims()). If not specified, defaults to -1
func UniformRequantizeOutputQuantizationAxis ¶ added in v0.3.0
func UniformRequantizeOutputQuantizationAxis(value int64) UniformRequantizeAttr
UniformRequantizeOutputQuantizationAxis sets the optional output_quantization_axis attribute to value.
value: The new quantization axis to use to quantize original data that `input` represents. If not specified, defaults to -1
type UniqueAttr ¶
type UniqueAttr func(optionalAttr)
UniqueAttr is an optional argument to Unique.
func UniqueOutIdx ¶
func UniqueOutIdx(value tf.DataType) UniqueAttr
UniqueOutIdx sets the optional out_idx attribute to value. If not specified, defaults to DT_INT32
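A minimal sketch, assuming Unique takes (scope, x, optional attributes) and returns the unique values plus an index vector; the input is illustrative.

s := NewScope()
x := Const(s, []int32{1, 1, 2, 4, 4, 4, 7})
y, idx := Unique(s, x,
    UniqueOutIdx(tf.Int64)) // emit int64 indices instead of the default int32
if s.Err() != nil {
    panic(s.Err())
}
_, _ = y, idx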
type UniqueDatasetAttr ¶
type UniqueDatasetAttr func(optionalAttr)
UniqueDatasetAttr is an optional argument to UniqueDataset.
func UniqueDatasetMetadata ¶
func UniqueDatasetMetadata(value string) UniqueDatasetAttr
UniqueDatasetMetadata sets the optional metadata attribute to value. If not specified, defaults to ""
type UniqueV2Attr ¶
type UniqueV2Attr func(optionalAttr)
UniqueV2Attr is an optional argument to UniqueV2.
func UniqueV2OutIdx ¶
func UniqueV2OutIdx(value tf.DataType) UniqueV2Attr
UniqueV2OutIdx sets the optional out_idx attribute to value. If not specified, defaults to DT_INT32
type UniqueWithCountsAttr ¶
type UniqueWithCountsAttr func(optionalAttr)
UniqueWithCountsAttr is an optional argument to UniqueWithCounts.
func UniqueWithCountsOutIdx ¶
func UniqueWithCountsOutIdx(value tf.DataType) UniqueWithCountsAttr
UniqueWithCountsOutIdx sets the optional out_idx attribute to value. If not specified, defaults to DT_INT32
type UniqueWithCountsV2Attr ¶
type UniqueWithCountsV2Attr func(optionalAttr)
UniqueWithCountsV2Attr is an optional argument to UniqueWithCountsV2.
func UniqueWithCountsV2OutIdx ¶
func UniqueWithCountsV2OutIdx(value tf.DataType) UniqueWithCountsV2Attr
UniqueWithCountsV2OutIdx sets the optional out_idx attribute to value. If not specified, defaults to DT_INT32
type UnpackAttr ¶
type UnpackAttr func(optionalAttr)
UnpackAttr is an optional argument to Unpack.
func UnpackAxis ¶
func UnpackAxis(value int64) UnpackAttr
UnpackAxis sets the optional axis attribute to value.
value: Dimension along which to unpack. Negative values wrap around, so the valid range is `[-R, R)`. If not specified, defaults to 0
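A minimal sketch of unpacking along a non-default axis, assuming Unpack takes (scope, value, num, optional attributes) and returns a slice of outputs; the matrix is illustrative.

s := NewScope()
value := Const(s, [][]float32{{1, 2, 3}, {4, 5, 6}}) // shape [2, 3]
outputs := Unpack(s, value, 3,
    UnpackAxis(1)) // unpack the 3 columns instead of the 2 rows
if s.Err() != nil {
    panic(s.Err())
}
_ = outputs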
type UnstageAttr ¶
type UnstageAttr func(optionalAttr)
UnstageAttr is an optional argument to Unstage.
func UnstageCapacity ¶
func UnstageCapacity(value int64) UnstageAttr
UnstageCapacity sets the optional capacity attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func UnstageContainer ¶
func UnstageContainer(value string) UnstageAttr
UnstageContainer sets the optional container attribute to value. If not specified, defaults to ""
func UnstageMemoryLimit ¶
func UnstageMemoryLimit(value int64) UnstageAttr
UnstageMemoryLimit sets the optional memory_limit attribute to value. If not specified, defaults to 0
REQUIRES: value >= 0
func UnstageSharedName ¶
func UnstageSharedName(value string) UnstageAttr
UnstageSharedName sets the optional shared_name attribute to value. If not specified, defaults to ""
type UpperBoundAttr ¶
type UpperBoundAttr func(optionalAttr)
UpperBoundAttr is an optional argument to UpperBound.
func UpperBoundOutType ¶
func UpperBoundOutType(value tf.DataType) UpperBoundAttr
UpperBoundOutType sets the optional out_type attribute to value. If not specified, defaults to DT_INT32
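A minimal sketch, assuming UpperBound takes (scope, sorted_inputs, values, optional attributes) and operates on 2-D [batch, N] inputs; the data is illustrative.

s := NewScope()
sortedInputs := Const(s, [][]float32{{1, 3, 5, 7, 9}})
values := Const(s, [][]float32{{4, 8}})
positions := UpperBound(s, sortedInputs, values,
    UpperBoundOutType(tf.Int64)) // return 64-bit positions instead of the default int32
if s.Err() != nil {
    panic(s.Err())
}
_ = positions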
type VarHandleOpAttr ¶
type VarHandleOpAttr func(optionalAttr)
VarHandleOpAttr is an optional argument to VarHandleOp.
func VarHandleOpAllowedDevices ¶
func VarHandleOpAllowedDevices(value []string) VarHandleOpAttr
VarHandleOpAllowedDevices sets the optional allowed_devices attribute to value.
value: DEPRECATED. The allowed devices containing the resource variable. Set when the output ResourceHandle represents a per-replica/partitioned resource variable. If not specified, defaults to {}
func VarHandleOpContainer ¶
func VarHandleOpContainer(value string) VarHandleOpAttr
VarHandleOpContainer sets the optional container attribute to value.
value: the container this variable is placed in. If not specified, defaults to ""
func VarHandleOpDebugName ¶ added in v0.7.0
func VarHandleOpDebugName(value string) VarHandleOpAttr
VarHandleOpDebugName sets the optional debug_name attribute to value.
value: the user-given name, which still applies in anonymous mode. If not specified, defaults to ""
func VarHandleOpSharedName ¶
func VarHandleOpSharedName(value string) VarHandleOpAttr
VarHandleOpSharedName sets the optional shared_name attribute to value.
value: the name by which this variable is referred to. If not specified, defaults to ""
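A minimal sketch of pinning a variable handle to a container and shared name so other sessions can locate the same resource, assuming VarHandleOp takes (scope, dtype, shape, optional attributes); the names are illustrative.

s := NewScope()
handle := VarHandleOp(s, tf.Float, tf.MakeShape(10, 10),
    VarHandleOpContainer("training"),      // container the variable is placed in
    VarHandleOpSharedName("weights"),      // name by which the variable is referred to
    VarHandleOpDebugName("weights_10x10")) // user-given name, kept even in anonymous mode
if s.Err() != nil {
    panic(s.Err())
}
_ = handle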
type VariableShapeAttr ¶
type VariableShapeAttr func(optionalAttr)
VariableShapeAttr is an optional argument to VariableShape.
func VariableShapeOutType ¶
func VariableShapeOutType(value tf.DataType) VariableShapeAttr
VariableShapeOutType sets the optional out_type attribute to value. If not specified, defaults to DT_INT32
type WholeFileReaderV2Attr ¶
type WholeFileReaderV2Attr func(optionalAttr)
WholeFileReaderV2Attr is an optional argument to WholeFileReaderV2.
func WholeFileReaderV2Container ¶
func WholeFileReaderV2Container(value string) WholeFileReaderV2Attr
WholeFileReaderV2Container sets the optional container attribute to value.
value: If non-empty, this reader is placed in the given container. Otherwise, a default container is used. If not specified, defaults to ""
func WholeFileReaderV2SharedName ¶
func WholeFileReaderV2SharedName(value string) WholeFileReaderV2Attr
WholeFileReaderV2SharedName sets the optional shared_name attribute to value.
value: If non-empty, this reader is named in the given bucket with this shared_name. Otherwise, the node name is used instead. If not specified, defaults to ""
type WindowDatasetAttr ¶
type WindowDatasetAttr func(optionalAttr)
WindowDatasetAttr is an optional argument to WindowDataset.
func WindowDatasetMetadata ¶
func WindowDatasetMetadata(value string) WindowDatasetAttr
WindowDatasetMetadata sets the optional metadata attribute to value. If not specified, defaults to ""
type WriteAudioSummaryAttr ¶
type WriteAudioSummaryAttr func(optionalAttr)
WriteAudioSummaryAttr is an optional argument to WriteAudioSummary.
func WriteAudioSummaryMaxOutputs ¶
func WriteAudioSummaryMaxOutputs(value int64) WriteAudioSummaryAttr
WriteAudioSummaryMaxOutputs sets the optional max_outputs attribute to value. If not specified, defaults to 3
REQUIRES: value >= 1
type WriteImageSummaryAttr ¶
type WriteImageSummaryAttr func(optionalAttr)
WriteImageSummaryAttr is an optional argument to WriteImageSummary.
func WriteImageSummaryMaxImages ¶
func WriteImageSummaryMaxImages(value int64) WriteImageSummaryAttr
WriteImageSummaryMaxImages sets the optional max_images attribute to value. If not specified, defaults to 3
REQUIRES: value >= 1
type XlaConcatNDAttr ¶
type XlaConcatNDAttr func(optionalAttr)
XlaConcatNDAttr is an optional argument to XlaConcatND.
func XlaConcatNDPaddings ¶
func XlaConcatNDPaddings(value []int64) XlaConcatNDAttr
XlaConcatNDPaddings sets the optional paddings attribute to value.
value: Optional list of right paddings per dimension to strip from the final merged tensor. These paddings must not exceed the dimension size of the merged result prior to stripping paddings. If not specified, defaults to {}
type XlaConvV2Attr ¶
type XlaConvV2Attr func(optionalAttr)
XlaConvV2Attr is an optional argument to XlaConvV2.
func XlaConvV2BatchGroupCount ¶
func XlaConvV2BatchGroupCount(value int64) XlaConvV2Attr
XlaConvV2BatchGroupCount sets the optional batch_group_count attribute to value.
value: number of batch groups or grouped filters. If not specified, defaults to 1
type XlaRngBitGeneratorAttr ¶
type XlaRngBitGeneratorAttr func(optionalAttr)
XlaRngBitGeneratorAttr is an optional argument to XlaRngBitGenerator.
func XlaRngBitGeneratorDtype ¶
func XlaRngBitGeneratorDtype(value tf.DataType) XlaRngBitGeneratorAttr
XlaRngBitGeneratorDtype sets the optional dtype attribute to value.
value: The type of the tensor. If not specified, defaults to DT_UINT64
type XlaShardingAttr ¶
type XlaShardingAttr func(optionalAttr)
XlaShardingAttr is an optional argument to XlaSharding.
func XlaShardingSharding ¶
func XlaShardingSharding(value string) XlaShardingAttr
XlaShardingSharding sets the optional sharding attribute to value. If not specified, defaults to ""
func XlaShardingUnspecifiedDims ¶
func XlaShardingUnspecifiedDims(value []int64) XlaShardingAttr
XlaShardingUnspecifiedDims sets the optional unspecified_dims attribute to value. If not specified, defaults to {}
type XlaSplitNDAttr ¶
type XlaSplitNDAttr func(optionalAttr)
XlaSplitNDAttr is an optional argument to XlaSplitND.
func XlaSplitNDPaddings ¶
func XlaSplitNDPaddings(value []int64) XlaSplitNDAttr
XlaSplitNDPaddings sets the optional paddings attribute to value.
value: Optional list of right paddings per dimension of input tensor to apply before splitting. This can be used to make a dimension evenly divisible. If not specified, defaults to {}
type XlaSpmdFullToShardShapeAttr ¶
type XlaSpmdFullToShardShapeAttr func(optionalAttr)
XlaSpmdFullToShardShapeAttr is an optional argument to XlaSpmdFullToShardShape.
func XlaSpmdFullToShardShapeDim ¶
func XlaSpmdFullToShardShapeDim(value int64) XlaSpmdFullToShardShapeAttr
XlaSpmdFullToShardShapeDim sets the optional dim attribute to value. If not specified, defaults to -1
func XlaSpmdFullToShardShapeUnspecifiedDims ¶
func XlaSpmdFullToShardShapeUnspecifiedDims(value []int64) XlaSpmdFullToShardShapeAttr
XlaSpmdFullToShardShapeUnspecifiedDims sets the optional unspecified_dims attribute to value. If not specified, defaults to {}
type XlaSpmdShardToFullShapeAttr ¶
type XlaSpmdShardToFullShapeAttr func(optionalAttr)
XlaSpmdShardToFullShapeAttr is an optional argument to XlaSpmdShardToFullShape.
func XlaSpmdShardToFullShapeDim ¶
func XlaSpmdShardToFullShapeDim(value int64) XlaSpmdShardToFullShapeAttr
XlaSpmdShardToFullShapeDim sets the optional dim attribute to value. If not specified, defaults to -1
func XlaSpmdShardToFullShapeUnspecifiedDims ¶
func XlaSpmdShardToFullShapeUnspecifiedDims(value []int64) XlaSpmdShardToFullShapeAttr
XlaSpmdShardToFullShapeUnspecifiedDims sets the optional unspecified_dims attribute to value. If not specified, defaults to {}
type ZipDatasetAttr ¶
type ZipDatasetAttr func(optionalAttr)
ZipDatasetAttr is an optional argument to ZipDataset.
func ZipDatasetMetadata ¶
func ZipDatasetMetadata(value string) ZipDatasetAttr
ZipDatasetMetadata sets the optional metadata attribute to value. If not specified, defaults to ""