diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 00000000000..4d5bc75f750
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,12 @@
+*.java text eol=lf
+*.kt text eol=lf
+*.cc text eol=lf
+*.h text eol=lf
+*.pom text eol=lf
+
+*.md text eol=lf
+*.sh text eol=lf
+
+*.pbtxt text eol=lf
+
+*.pb binary
\ No newline at end of file
diff --git a/tensorflow-core/tensorflow-core-api/pom.xml b/tensorflow-core/tensorflow-core-api/pom.xml
index 9f23757e83d..fe15687edbf 100644
--- a/tensorflow-core/tensorflow-core-api/pom.xml
+++ b/tensorflow-core/tensorflow-core-api/pom.xml
@@ -482,6 +482,40 @@
+
- * Any operation wrapper found in the classpath properly annotated as an{@link org.tensorflow.op.annotation.Operator @Operator} is exposed - * by this API or one of its subgroup. + * + *
Any operation wrapper found in the classpath properly annotated as an {@link + * org.tensorflow.op.annotation.Operator @Operator} is exposed by this API or one of its subgroups. + * *
Example usage: + * *
{@code
* try (Graph g = new Graph()) {
* Ops tf = Ops.create(g);
@@ -326,7 +328,7 @@
* Operand nine = tf.math.add(four, tf.constant(5));
* // Multi-result operations however offer methods to
* // select a particular result for use.
- * Operand result =
+ * Operand result =
* tf.math.add(tf.unique(s, a).y(), b);
* // Optional attributes
* tf.linalg.matMul(a, b, MatMul.transposeA(true));
@@ -365,20 +367,20 @@ public final class Ops {
public final SparseOps sparse;
- public final TpuOps tpu;
-
public final BitwiseOps bitwise;
+ public final TpuOps tpu;
+
public final MathOps math;
public final AudioOps audio;
public final SignalOps signal;
- public final QuantizationOps quantization;
-
public final TrainOps train;
+ public final QuantizationOps quantization;
+
private final Scope scope;
private Ops(Scope scope) {
@@ -396,20 +398,20 @@ private Ops(Scope scope) {
random = new RandomOps(this);
strings = new StringsOps(this);
sparse = new SparseOps(this);
- tpu = new TpuOps(this);
bitwise = new BitwiseOps(this);
+ tpu = new TpuOps(this);
math = new MathOps(this);
audio = new AudioOps(this);
signal = new SignalOps(this);
- quantization = new QuantizationOps(this);
train = new TrainOps(this);
+ quantization = new QuantizationOps(this);
}
/**
- * Raise a exception to abort the process when called.
- * If exit_without_error is true, the process will exit normally,
- * otherwise it will exit with a SIGABORT signal.
- * Returns nothing but an exception.
+ * Raise an exception to abort the process when called. If exit_without_error is true, the process
+ * will exit normally, otherwise it will exit with a SIGABORT signal.
+ *
+ *
Returns nothing but an exception.
*
* @param options carries optional attribute values
* @return a new instance of Abort
@@ -419,15 +421,13 @@ public Abort abort(Abort.Options... options) {
}
/**
- * Computes the "logical and" of elements across dimensions of a tensor.
- * Reduces {@code input} along the dimensions given in {@code axis}. Unless
- * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in
- * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are
- * retained with length 1.
+ * Computes the "logical and" of elements across dimensions of a tensor. Reduces {@code
+ * input} along the dimensions given in {@code axis}. Unless {@code keep_dims} is true, the rank
+ * of the tensor is reduced by 1 for each entry in {@code axis}. If {@code keep_dims} is true, the
+ * reduced dimensions are retained with length 1.
*
* @param input The tensor to reduce.
- * @param axis The dimensions to reduce. Must be in the range
- * {@code [-rank(input), rank(input))}.
+ * @param axis The dimensions to reduce. Must be in the range {@code [-rank(input), rank(input))}.
* @param options carries optional attribute values
* @return a new instance of All
*/
@@ -436,15 +436,13 @@ public All all(Operand input, Operand extends TNumber> axis, All.Option
}
/**
- * Computes the "logical or" of elements across dimensions of a tensor.
- * Reduces {@code input} along the dimensions given in {@code axis}. Unless
- * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in
- * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are
- * retained with length 1.
+ * Computes the "logical or" of elements across dimensions of a tensor. Reduces {@code
+ * input} along the dimensions given in {@code axis}. Unless {@code keep_dims} is true, the rank
+ * of the tensor is reduced by 1 for each entry in {@code axis}. If {@code keep_dims} is true, the
+ * reduced dimensions are retained with length 1.
*
* @param input The tensor to reduce.
- * @param axis The dimensions to reduce. Must be in the range
- * {@code [-rank(input), rank(input))}.
+ * @param axis The dimensions to reduce. Must be in the range {@code [-rank(input), rank(input))}.
* @param options carries optional attribute values
* @return a new instance of Any
*/
@@ -527,7 +525,7 @@ public Constant array(float... data) {
*
* @param charset charset for encoding/decoding strings bytes.
* @param data An array containing the values to put into the new constant. String elements are
- * sequences of bytes from the last array dimension.
+ * sequences of bytes from the last array dimension.
* @return the {@code String} constant
*/
public Constant array(Charset charset, String... data) {
@@ -535,24 +533,23 @@ public Constant array(Charset charset, String... data) {
}
/**
- * Asserts that the given condition is true.
- * If {@code condition} evaluates to false, print the list of tensors in {@code data}.
- * {@code summarize} determines how many entries of the tensors to print.
+ * Asserts that the given condition is true. If {@code condition} evaluates to false, print the
+ * list of tensors in {@code data}. {@code summarize} determines how many entries of the tensors
+ * to print.
*
* @param condition The condition to evaluate.
* @param data The tensors to print out when condition is false.
* @param options carries optional attribute values
* @return a new instance of AssertThat
*/
- public AssertThat assertThat(Operand condition, Iterable> data,
- AssertThat.Options... options) {
+ public AssertThat assertThat(
+ Operand condition, Iterable> data, AssertThat.Options... options) {
return AssertThat.create(scope, condition, data, options);
}
/**
- * Update 'ref' by assigning 'value' to it.
- * This operation outputs "ref" after the assignment is done.
- * This makes it easier to chain operations that need to use the reset value.
+ * Update 'ref' by assigning 'value' to it. This operation outputs "ref" after the
+ * assignment is done. This makes it easier to chain operations that need to use the reset value.
*
* @param data type for {@code output_ref} output
* @param ref Should be from a {@code Variable} node. May be uninitialized.
@@ -561,15 +558,14 @@ public AssertThat assertThat(Operand condition, Iterable> data
* @param data type for {@code Assign} output and operands
* @return a new instance of Assign
*/
- public Assign assign(Operand ref, Operand value,
- Assign.Options... options) {
+ public Assign assign(
+ Operand ref, Operand value, Assign.Options... options) {
return Assign.create(scope, ref, value, options);
}
/**
- * Update 'ref' by adding 'value' to it.
- * This operation outputs "ref" after the update is done.
- * This makes it easier to chain operations that need to use the reset value.
+ * Update 'ref' by adding 'value' to it. This operation outputs "ref" after the update
+ * is done. This makes it easier to chain operations that need to use the reset value.
*
* @param data type for {@code output_ref} output
* @param ref Should be from a {@code Variable} node.
@@ -578,29 +574,27 @@ public Assign assign(Operand ref, Operand value,
* @param data type for {@code AssignAdd} output and operands
* @return a new instance of AssignAdd
*/
- public AssignAdd assignAdd(Operand ref, Operand value,
- AssignAdd.Options... options) {
+ public AssignAdd assignAdd(
+ Operand ref, Operand value, AssignAdd.Options... options) {
return AssignAdd.create(scope, ref, value, options);
}
/**
- * Adds a value to the current value of a variable.
- * Any ReadVariableOp with a control dependency on this op is guaranteed to
- * see the incremented value or a subsequent newer one.
+ * Adds a value to the current value of a variable. Any ReadVariableOp with a control dependency
+ * on this op is guaranteed to see the incremented value or a subsequent newer one.
*
* @param resource handle to the resource in which to store the variable.
* @param value the value by which the variable will be incremented.
* @return a new instance of AssignAddVariableOp
*/
- public AssignAddVariableOp assignAddVariableOp(Operand extends TType> resource,
- Operand extends TType> value) {
+ public AssignAddVariableOp assignAddVariableOp(
+ Operand extends TType> resource, Operand extends TType> value) {
return AssignAddVariableOp.create(scope, resource, value);
}
/**
- * Update 'ref' by subtracting 'value' from it.
- * This operation outputs "ref" after the update is done.
- * This makes it easier to chain operations that need to use the reset value.
+ * Update 'ref' by subtracting 'value' from it. This operation outputs "ref" after the
+ * update is done. This makes it easier to chain operations that need to use the reset value.
*
* @param data type for {@code output_ref} output
* @param ref Should be from a {@code Variable} node.
@@ -609,48 +603,45 @@ public AssignAddVariableOp assignAddVariableOp(Operand extends TType> resource
* @param data type for {@code AssignSub} output and operands
* @return a new instance of AssignSub
*/
- public AssignSub assignSub(Operand ref, Operand value,
- AssignSub.Options... options) {
+ public AssignSub assignSub(
+ Operand ref, Operand value, AssignSub.Options... options) {
return AssignSub.create(scope, ref, value, options);
}
/**
- * Subtracts a value from the current value of a variable.
- * Any ReadVariableOp with a control dependency on this op is guaranteed to
- * see the decremented value or a subsequent newer one.
+ * Subtracts a value from the current value of a variable. Any ReadVariableOp with a control
+ * dependency on this op is guaranteed to see the decremented value or a subsequent newer one.
*
* @param resource handle to the resource in which to store the variable.
* @param value the value by which the variable will be decremented.
* @return a new instance of AssignSubVariableOp
*/
- public AssignSubVariableOp assignSubVariableOp(Operand extends TType> resource,
- Operand extends TType> value) {
+ public AssignSubVariableOp assignSubVariableOp(
+ Operand extends TType> resource, Operand extends TType> value) {
return AssignSubVariableOp.create(scope, resource, value);
}
/**
- * Assigns a new value to a variable.
- * Any ReadVariableOp with a control dependency on this op is guaranteed to return
- * this value or a subsequent newer value of the variable.
+ * Assigns a new value to a variable. Any ReadVariableOp with a control dependency on this op is
+ * guaranteed to return this value or a subsequent newer value of the variable.
*
* @param resource handle to the resource in which to store the variable.
* @param value the value to set the new tensor to use.
* @return a new instance of AssignVariableOp
*/
- public AssignVariableOp assignVariableOp(Operand extends TType> resource,
- Operand extends TType> value) {
+ public AssignVariableOp assignVariableOp(
+ Operand extends TType> resource, Operand extends TType> value) {
return AssignVariableOp.create(scope, resource, value);
}
/**
- * Defines a barrier that persists across different graph executions.
- * A barrier represents a key-value map, where each key is a string, and
- * each value is a tuple of tensors.
- * At runtime, the barrier contains 'complete' and 'incomplete'
- * elements. A complete element has defined tensors for all components of
- * its value tuple, and may be accessed using BarrierTakeMany. An
- * incomplete element has some undefined components in its value tuple,
- * and may be updated using BarrierInsertMany.
+ * Defines a barrier that persists across different graph executions. A barrier represents a
+ * key-value map, where each key is a string, and each value is a tuple of tensors.
+ *
+ *
At runtime, the barrier contains 'complete' and 'incomplete' elements. A complete element
+ * has defined tensors for all components of its value tuple, and may be accessed using
+ * BarrierTakeMany. An incomplete element has some undefined components in its value tuple, and
+ * may be updated using BarrierInsertMany.
*
* @param componentTypes The type of each component in a value.
* @param options carries optional attribute values
@@ -661,13 +652,12 @@ public Barrier barrier(List> componentTypes, Barrier.Opti
}
/**
- * Closes the given barrier.
- * This operation signals that no more new elements will be inserted in the
- * given barrier. Subsequent InsertMany that try to introduce a new key will fail.
- * Subsequent InsertMany operations that just add missing components to already
- * existing elements will continue to succeed. Subsequent TakeMany operations will
- * continue to succeed if sufficient completed elements remain in the barrier.
- * Subsequent TakeMany operations that would block will fail immediately.
+ * Closes the given barrier. This operation signals that no more new elements will be inserted in
+ * the given barrier. Subsequent InsertMany operations that try to introduce a new key will fail. Subsequent
+ * InsertMany operations that just add missing components to already existing elements will
+ * continue to succeed. Subsequent TakeMany operations will continue to succeed if sufficient
+ * completed elements remain in the barrier. Subsequent TakeMany operations that would block will
+ * fail immediately.
*
* @param handle The handle to a barrier.
* @param options carries optional attribute values
@@ -688,21 +678,23 @@ public BarrierIncompleteSize barrierIncompleteSize(Operand handle) {
}
/**
- * For each key, assigns the respective value to the specified component.
- * If a key is not found in the barrier, this operation will create a new
- * incomplete element. If a key is found in the barrier, and the element
- * already has a value at component_index, this operation will fail with
- * INVALID_ARGUMENT, and leave the barrier in an undefined state.
+ * For each key, assigns the respective value to the specified component. If a key is not found in
+ * the barrier, this operation will create a new incomplete element. If a key is found in the
+ * barrier, and the element already has a value at component_index, this operation will fail with
+ * INVALID_ARGUMENT, and leave the barrier in an undefined state.
*
* @param handle The handle to a barrier.
* @param keys A one-dimensional tensor of keys, with length n.
- * @param values An any-dimensional tensor of values, which are associated with the
- * respective keys. The 0th dimension must have length n.
+ * @param values An any-dimensional tensor of values, which are associated with the respective
+ * keys. The 0th dimension must have length n.
* @param componentIndex The component of the barrier elements that is being assigned.
* @return a new instance of BarrierInsertMany
*/
- public BarrierInsertMany barrierInsertMany(Operand handle, Operand keys,
- Operand extends TType> values, Long componentIndex) {
+ public BarrierInsertMany barrierInsertMany(
+ Operand handle,
+ Operand keys,
+ Operand extends TType> values,
+ Long componentIndex) {
return BarrierInsertMany.create(scope, handle, keys, values, componentIndex);
}
@@ -717,59 +709,58 @@ public BarrierReadySize barrierReadySize(Operand handle) {
}
/**
- * Takes the given number of completed elements from a barrier.
- * This operation concatenates completed-element component tensors along
- * the 0th dimension to make a single component tensor.
- * Elements come out of the barrier when they are complete, and in the order
- * in which they were placed into the barrier. The indices output provides
- * information about the batch in which each element was originally inserted
- * into the barrier.
+ * Takes the given number of completed elements from a barrier. This operation concatenates
+ * completed-element component tensors along the 0th dimension to make a single component tensor.
+ *
+ *
Elements come out of the barrier when they are complete, and in the order in which they were
+ * placed into the barrier. The indices output provides information about the batch in which each
+ * element was originally inserted into the barrier.
*
* @param handle The handle to a barrier.
- * @param numElements A single-element tensor containing the number of elements to
- * take.
+ * @param numElements A single-element tensor containing the number of elements to take.
* @param componentTypes The type of each component in a value.
* @param options carries optional attribute values
* @return a new instance of BarrierTakeMany
*/
- public BarrierTakeMany barrierTakeMany(Operand handle, Operand numElements,
- List> componentTypes, BarrierTakeMany.Options... options) {
+ public BarrierTakeMany barrierTakeMany(
+ Operand handle,
+ Operand numElements,
+ List> componentTypes,
+ BarrierTakeMany.Options... options) {
return BarrierTakeMany.create(scope, handle, numElements, componentTypes, options);
}
/**
- * Batches all input tensors nondeterministically.
- * When many instances of this Op are being run concurrently with the same
- * container/shared_name in the same device, some will output zero-shaped Tensors
- * and others will output Tensors of size up to max_batch_size.
- * All Tensors in in_tensors are batched together (so, for example, labels and
- * features should be batched with a single instance of this operation.
- *
Each invocation of batch emits an {@code id} scalar which will be used to identify
- * this particular invocation when doing unbatch or its gradient.
- *
Each op which emits a non-empty batch will also emit a non-empty batch_index
- * Tensor, which, is a [K, 3] matrix where each row contains the invocation's id,
- * start, and length of elements of each set of Tensors present in batched_tensors.
- *
Batched tensors are concatenated along the first dimension, and all tensors in
- * in_tensors must have the first dimension of the same size.
- *
in_tensors: The tensors to be batched.
- * num_batch_threads: Number of scheduling threads for processing batches of work.
- * Determines the number of batches processed in parallel.
- * max_batch_size: Batch sizes will never be bigger than this.
- * batch_timeout_micros: Maximum number of microseconds to wait before outputting
- * an incomplete batch.
- * allowed_batch_sizes: Optional list of allowed batch sizes. If left empty, does
- * nothing. Otherwise, supplies a list of batch sizes, causing the op to pad
- * batches up to one of those sizes. The entries must increase monotonically, and
- * the final entry must equal max_batch_size.
- * grad_timeout_micros: The timeout to use for the gradient. See Unbatch.
- * batched_tensors: Either empty tensors or a batch of concatenated Tensors.
- * batch_index: If out_tensors is non-empty, has information to invert it.
- * container: Controls the scope of sharing of this batch.
- * id: always contains a scalar with a unique ID for this invocation of Batch.
- * shared_name: Concurrently running instances of batch in the same device with the
- * same container and shared_name will batch their elements together. If left
- * empty, the op name will be used as the shared name.
- * T: the types of tensors to be batched.
+ * Batches all input tensors nondeterministically. When many instances of this Op are being run
+ * concurrently with the same container/shared_name in the same device, some will output
+ * zero-shaped Tensors and others will output Tensors of size up to max_batch_size.
+ *
+ *
All Tensors in in_tensors are batched together (so, for example, labels and features should
+ * be batched with a single instance of this operation.)
+ *
+ *
Each invocation of batch emits an {@code id} scalar which will be used to identify this
+ * particular invocation when doing unbatch or its gradient.
+ *
+ *
Each op which emits a non-empty batch will also emit a non-empty batch_index Tensor, which,
+ * is a [K, 3] matrix where each row contains the invocation's id, start, and length of elements
+ * of each set of Tensors present in batched_tensors.
+ *
+ *
Batched tensors are concatenated along the first dimension, and all tensors in in_tensors
+ * must have the first dimension of the same size.
+ *
+ *
in_tensors: The tensors to be batched. num_batch_threads: Number of scheduling threads for
+ * processing batches of work. Determines the number of batches processed in parallel.
+ * max_batch_size: Batch sizes will never be bigger than this. batch_timeout_micros: Maximum
+ * number of microseconds to wait before outputting an incomplete batch. allowed_batch_sizes:
+ * Optional list of allowed batch sizes. If left empty, does nothing. Otherwise, supplies a list
+ * of batch sizes, causing the op to pad batches up to one of those sizes. The entries must
+ * increase monotonically, and the final entry must equal max_batch_size. grad_timeout_micros: The
+ * timeout to use for the gradient. See Unbatch. batched_tensors: Either empty tensors or a batch
+ * of concatenated Tensors. batch_index: If out_tensors is non-empty, has information to invert
+ * it. container: Controls the scope of sharing of this batch. id: always contains a scalar with a
+ * unique ID for this invocation of Batch. shared_name: Concurrently running instances of batch in
+ * the same device with the same container and shared_name will batch their elements together. If
+ * left empty, the op name will be used as the shared name. T: the types of tensors to be batched.
*
* @param inTensors the inTensors value
* @param numBatchThreads the value of the numBatchThreads property
@@ -779,15 +770,28 @@ public BarrierTakeMany barrierTakeMany(Operand handle, Operand
* @param options carries optional attribute values
* @return a new instance of Batch
*/
- public Batch batch(Iterable> inTensors, Long numBatchThreads, Long maxBatchSize,
- Long batchTimeoutMicros, Long gradTimeoutMicros, Batch.Options... options) {
- return Batch.create(scope, inTensors, numBatchThreads, maxBatchSize, batchTimeoutMicros, gradTimeoutMicros, options);
+ public Batch batch(
+ Iterable> inTensors,
+ Long numBatchThreads,
+ Long maxBatchSize,
+ Long batchTimeoutMicros,
+ Long gradTimeoutMicros,
+ Batch.Options... options) {
+ return Batch.create(
+ scope,
+ inTensors,
+ numBatchThreads,
+ maxBatchSize,
+ batchTimeoutMicros,
+ gradTimeoutMicros,
+ options);
}
/**
- * Batches all the inputs tensors to the computation done by the function.
- * So, for example, in the following code
- *
+ * Batches all the inputs tensors to the computation done by the function. So, for example, in the
+ * following code
+ *
+ *
*
* # This input will be captured.
* y = tf.placeholder_with_default(1.0, shape=[])
@@ -807,238 +811,258 @@ public Batch batch(Iterable> inTensors, Long numBatchThreads, Long ma
* allowed_batch_sizes=[3, 10],
* batching_queue="")
*
- * If more than one session.run call is simultaneously trying to compute {@code b}
- * the values of {@code a} will be gathered, non-deterministically concatenated
- * along the first axis, and only one thread will run the computation.
- *
Assumes that all arguments of the function are Tensors which will be batched
- * along their first dimension.
- *
Arguments that are captured, are not batched. The session.run call which does
- * the concatenation, will use the values of the captured tensors available to it.
- * Therefore, typical uses of captured tensors should involve values which remain
- * unchanged across session.run calls. Inference is a good example of this.
- *
SparseTensor is not supported. The return value of the decorated function
- * must be a Tensor or a list/tuple of Tensors.
+ *
+ *
If more than one session.run call is simultaneously trying to compute {@code b} the values
+ * of {@code a} will be gathered, non-deterministically concatenated along the first axis, and
+ * only one thread will run the computation.
+ *
+ *
Assumes that all arguments of the function are Tensors which will be batched along their
+ * first dimension.
+ *
+ *
Arguments that are captured, are not batched. The session.run call which does the
+ * concatenation, will use the values of the captured tensors available to it. Therefore, typical
+ * uses of captured tensors should involve values which remain unchanged across session.run calls.
+ * Inference is a good example of this.
+ *
+ *
SparseTensor is not supported. The return value of the decorated function must be a Tensor
+ * or a list/tuple of Tensors.
*
* @param inTensors The tensors to be batched.
- * @param capturedTensors The tensors which are captured in the function, and don't need
- * to be batched.
+ * @param capturedTensors The tensors which are captured in the function, and don't need to be
+ * batched.
* @param f the value of the f property
- * @param numBatchThreads Number of scheduling threads for processing batches of work.
- * Determines the number of batches processed in parallel.
+ * @param numBatchThreads Number of scheduling threads for processing batches of work. Determines
+ * the number of batches processed in parallel.
* @param maxBatchSize Batch sizes will never be bigger than this.
- * @param batchTimeoutMicros Maximum number of microseconds to wait before outputting
- * an incomplete batch.
+ * @param batchTimeoutMicros Maximum number of microseconds to wait before outputting an
+ * incomplete batch.
* @param Tout the types of the output tensors.
* @param options carries optional attribute values
* @return a new instance of BatchFunction
*/
- public BatchFunction batchFunction(Iterable> inTensors,
- Iterable> capturedTensors, ConcreteFunction f, Long numBatchThreads,
- Long maxBatchSize, Long batchTimeoutMicros, List> Tout,
+ public BatchFunction batchFunction(
+ Iterable> inTensors,
+ Iterable> capturedTensors,
+ ConcreteFunction f,
+ Long numBatchThreads,
+ Long maxBatchSize,
+ Long batchTimeoutMicros,
+ List> Tout,
BatchFunction.Options... options) {
- return BatchFunction.create(scope, inTensors, capturedTensors, f, numBatchThreads, maxBatchSize, batchTimeoutMicros, Tout, options);
+ return BatchFunction.create(
+ scope,
+ inTensors,
+ capturedTensors,
+ f,
+ numBatchThreads,
+ maxBatchSize,
+ batchTimeoutMicros,
+ Tout,
+ options);
}
/**
- * BatchToSpace for 4-D tensors of type T.
- * This is a legacy version of the more general BatchToSpaceND.
- * Rearranges (permutes) data from batch into blocks of spatial data, followed by
- * cropping. This is the reverse transformation of SpaceToBatch. More specifically,
- * this op outputs a copy of the input tensor where values from the {@code batch}
- * dimension are moved in spatial blocks to the {@code height} and {@code width} dimensions,
- * followed by cropping along the {@code height} and {@code width} dimensions.
+ * BatchToSpace for 4-D tensors of type T. This is a legacy version of the more general
+ * BatchToSpaceND.
+ *
+ *
Rearranges (permutes) data from batch into blocks of spatial data, followed by cropping.
+ * This is the reverse transformation of SpaceToBatch. More specifically, this op outputs a copy
+ * of the input tensor where values from the {@code batch} dimension are moved in spatial blocks
+ * to the {@code height} and {@code width} dimensions, followed by cropping along the {@code
+ * height} and {@code width} dimensions.
*
* @param data type for {@code output} output
- * @param input 4-D tensor with shape
- * {@code [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, depth]}. Note that the batch size of the input tensor must be divisible by
- * {@code block_size * block_size}.
- * @param crops 2-D tensor of non-negative integers with shape {@code [2, 2]}. It specifies
- * how many elements to crop from the intermediate result across the spatial
- * dimensions as follows:
- *
+ * @param input 4-D tensor with shape {@code [batch*block_size*block_size, height_pad/block_size,
+ * width_pad/block_size, depth]}. Note that the batch size of the input tensor must be
+ * divisible by {@code block_size * block_size}.
+ * @param crops 2-D tensor of non-negative integers with shape {@code [2, 2]}. It specifies how
+ * many elements to crop from the intermediate result across the spatial dimensions as
+ * follows:
+ *
* crops = [[crop_top, crop_bottom], [crop_left, crop_right]]
*
+ *
* @param blockSize the value of the blockSize property
* @param data type for {@code BatchToSpace} output and operands
* @return a new instance of BatchToSpace
*/
- public BatchToSpace batchToSpace(Operand input,
- Operand extends TNumber> crops, Long blockSize) {
+ public BatchToSpace batchToSpace(
+ Operand input, Operand extends TNumber> crops, Long blockSize) {
return BatchToSpace.create(scope, input, crops, blockSize);
}
/**
- * BatchToSpace for N-D tensors of type T.
- * This operation reshapes the "batch" dimension 0 into {@code M + 1} dimensions of shape
- * {@code block_shape + [batch]}, interleaves these blocks back into the grid defined by
- * the spatial dimensions {@code [1, ..., M]}, to obtain a result with the same rank as
- * the input. The spatial dimensions of this intermediate result are then
- * optionally cropped according to {@code crops} to produce the output. This is the
- * reverse of SpaceToBatch. See below for a precise description.
+ * BatchToSpace for N-D tensors of type T. This operation reshapes the "batch" dimension
+ * 0 into {@code M + 1} dimensions of shape {@code block_shape + [batch]}, interleaves these
+ * blocks back into the grid defined by the spatial dimensions {@code [1, ..., M]}, to obtain a
+ * result with the same rank as the input. The spatial dimensions of this intermediate result are
+ * then optionally cropped according to {@code crops} to produce the output. This is the reverse
+ * of SpaceToBatch. See below for a precise description.
*
* @param data type for {@code output} output
* @param input N-D with shape {@code input_shape = [batch] + spatial_shape + remaining_shape},
- * where spatial_shape has M dimensions.
+ * where spatial_shape has M dimensions.
* @param blockShape 1-D with shape {@code [M]}, all values must be >= 1.
- * @param crops 2-D with shape {@code [M, 2]}, all values must be >= 0.
- * {@code crops[i] = [crop_start, crop_end]} specifies the amount to crop from input
- * dimension {@code i + 1}, which corresponds to spatial dimension {@code i}. It is
- * required that
- * {@code crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]}.
- * This operation is equivalent to the following steps:
- *
- * -
- *
Reshape {@code input} to {@code reshaped} of shape:
- * [block_shape[0], ..., block_shape[M-1],
- * batch / prod(block_shape),
- * input_shape[1], ..., input_shape[N-1]]
- *
- * -
- *
Permute dimensions of {@code reshaped} to produce {@code permuted} of shape
- * [batch / prod(block_shape),
- *
input_shape[1], block_shape[0],
- * ...,
- * input_shape[M], block_shape[M-1],
- *
input_shape[M+1], ..., input_shape[N-1]]
- *
- * -
- *
Reshape {@code permuted} to produce {@code reshaped_permuted} of shape
- * [batch / prod(block_shape),
- *
input_shape[1] * block_shape[0],
- * ...,
- * input_shape[M] * block_shape[M-1],
- *
input_shape[M+1],
- * ...,
- * input_shape[N-1]]
- *
- * -
- *
Crop the start and end of dimensions {@code [1, ..., M]} of
- * {@code reshaped_permuted} according to {@code crops} to produce the output of shape:
- * [batch / prod(block_shape),
- *
input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],
- * ...,
- * input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],
- *
input_shape[M+1], ..., input_shape[N-1]]
- *
- *
- * Some examples:
- *
(1) For the following input of shape {@code [4, 1, 1, 1]}, {@code block_shape = [2, 2]}, and
- * {@code crops = [[0, 0], [0, 0]]}:
- *
+ * @param crops 2-D with shape {@code [M, 2]}, all values must be >= 0. {@code crops[i] =
+ * [crop_start, crop_end]} specifies the amount to crop from input dimension {@code i + 1},
+ * which corresponds to spatial dimension {@code i}. It is required that {@code crop_start[i]
+ * + crop_end[i] <= block_shape[i] * input_shape[i + 1]}.
+ * This operation is equivalent to the following steps:
+ *
+ * -
+ *
Reshape {@code input} to {@code reshaped} of shape: [block_shape[0], ...,
+ * block_shape[M-1], batch / prod(block_shape), input_shape[1], ..., input_shape[N-1]]
+ *
-
+ *
Permute dimensions of {@code reshaped} to produce {@code permuted} of shape [batch
+ * / prod(block_shape),
+ *
input_shape[1], block_shape[0], ..., input_shape[M], block_shape[M-1],
+ *
input_shape[M+1], ..., input_shape[N-1]]
+ *
-
+ *
Reshape {@code permuted} to produce {@code reshaped_permuted} of shape [batch /
+ * prod(block_shape),
+ *
input_shape[1] * block_shape[0], ..., input_shape[M] * block_shape[M-1],
+ *
input_shape[M+1], ..., input_shape[N-1]]
+ *
-
+ *
Crop the start and end of dimensions {@code [1, ..., M]} of {@code
+ * reshaped_permuted} according to {@code crops} to produce the output of shape: [batch
+ * / prod(block_shape),
+ *
input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1], ..., input_shape[M] *
+ * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],
+ *
input_shape[M+1], ..., input_shape[N-1]]
+ *
+ * Some examples:
+ *
(1) For the following input of shape {@code [4, 1, 1, 1]}, {@code block_shape = [2, 2]},
+ * and {@code crops = [[0, 0], [0, 0]]}:
+ *
* [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
*
- * The output tensor has shape {@code [1, 2, 2, 1]} and value:
- *
+ * The output tensor has shape {@code [1, 2, 2, 1]} and value:
+ *
* x = [[[[1], [2]], [[3], [4]]]]
*
- * (2) For the following input of shape {@code [4, 1, 1, 3]}, {@code block_shape = [2, 2]}, and
- * {@code crops = [[0, 0], [0, 0]]}:
- *
+ * (2) For the following input of shape {@code [4, 1, 1, 3]}, {@code block_shape = [2, 2]},
+ * and {@code crops = [[0, 0], [0, 0]]}:
+ *
* [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
*
- * The output tensor has shape {@code [1, 2, 2, 3]} and value:
- *
+ * The output tensor has shape {@code [1, 2, 2, 3]} and value:
+ *
* x = [[[[1, 2, 3], [4, 5, 6]],
* [[7, 8, 9], [10, 11, 12]]]]
*
- * (3) For the following input of shape {@code [4, 2, 2, 1]}, {@code block_shape = [2, 2]}, and
- * {@code crops = [[0, 0], [0, 0]]}:
- *
+ * (3) For the following input of shape {@code [4, 2, 2, 1]}, {@code block_shape = [2, 2]},
+ * and {@code crops = [[0, 0], [0, 0]]}:
+ *
* x = [[[[1], [3]], [[9], [11]]],
* [[[2], [4]], [[10], [12]]],
* [[[5], [7]], [[13], [15]]],
* [[[6], [8]], [[14], [16]]]]
*
- * The output tensor has shape {@code [1, 4, 4, 1]} and value:
- *
+ * The output tensor has shape {@code [1, 4, 4, 1]} and value:
+ *
* x = [[[[1], [2], [3], [4]],
* [[5], [6], [7], [8]],
* [[9], [10], [11], [12]],
* [[13], [14], [15], [16]]]]
*
- * (4) For the following input of shape {@code [8, 1, 3, 1]}, {@code block_shape = [2, 2]}, and
- * {@code crops = [[0, 0], [2, 0]]}:
- *
+ * (4) For the following input of shape {@code [8, 1, 3, 1]}, {@code block_shape = [2, 2]},
+ * and {@code crops = [[0, 0], [2, 0]]}:
+ *
* x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
* [[[0], [2], [4]]], [[[0], [10], [12]]],
* [[[0], [5], [7]]], [[[0], [13], [15]]],
* [[[0], [6], [8]]], [[[0], [14], [16]]]]
*
- * The output tensor has shape {@code [2, 2, 4, 1]} and value:
- *
+ * The output tensor has shape {@code [2, 2, 4, 1]} and value:
+ *
* x = [[[[1], [2], [3], [4]],
* [[5], [6], [7], [8]]],
* [[[9], [10], [11], [12]],
* [[13], [14], [15], [16]]]]
*
+ *
* @param data type for {@code BatchToSpaceND} output and operands
* @return a new instance of BatchToSpaceNd
*/
- public BatchToSpaceNd batchToSpaceNd(Operand input,
- Operand extends TNumber> blockShape, Operand extends TNumber> crops) {
+ public BatchToSpaceNd batchToSpaceNd(
+ Operand input, Operand extends TNumber> blockShape, Operand extends TNumber> crops) {
return BatchToSpaceNd.create(scope, input, blockShape, crops);
}
/**
- * Bitcasts a tensor from one type to another without copying data.
- * Given a tensor {@code input}, this operation returns a tensor that has the same buffer
- * data as {@code input} with datatype {@code type}.
- * If the input datatype {@code T} is larger than the output datatype {@code type} then the
- * shape changes from [...] to [..., sizeof({@code T})/sizeof({@code type})].
- *
If {@code T} is smaller than {@code type}, the operator requires that the rightmost
- * dimension be equal to sizeof({@code type})/sizeof({@code T}). The shape then goes from
- * [..., sizeof({@code type})/sizeof({@code T})] to [...].
- *
tf.bitcast() and tf.cast() work differently when real dtype is casted as a complex dtype
- * (e.g. tf.complex64 or tf.complex128) as tf.cast() make imaginary part 0 while tf.bitcast()
- * gives module error.
- * For example,
- *
Example 1:
- *
- *
- *
- * a = [1., 2., 3.]
- * equality_bitcast = tf.bitcast(a, tf.complex128)
- * Traceback (most recent call last):
- * ...
- * InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast]
- * equality_cast = tf.cast(a, tf.complex128)
- * print(equality_cast)
- * tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128)
- *
- *
- *
- * Example 2:
- *
- *
- *
- * tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8)
- * <tf.Tensor: shape=(4,), dtype=uint8, numpy=array([255, 255, 255, 255], dtype=uint8)>
- *
- *
- *
- * Example 3:
- *
- *
- *
- * x = [1., 2., 3.]
- * y = [0., 2., 3.]
- * equality= tf.equal(x,y)
- * equality_cast = tf.cast(equality,tf.float32)
- * equality_bitcast = tf.bitcast(equality_cast,tf.uint8)
- * print(equality)
- * tf.Tensor([False True True], shape=(3,), dtype=bool)
- * print(equality_cast)
- * tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32)
- * print(equality_bitcast)
- * tf.Tensor(
- * [[ 0 0 0 0]
- * [ 0 0 128 63]
- * [ 0 0 128 63]], shape=(3, 4), dtype=uint8)
- *
- *
- *
- * NOTE: Bitcast is implemented as a low-level cast, so machines with different
- * endian orderings will give different results.
+ * Bitcasts a tensor from one type to another without copying data. Given a tensor {@code input},
+ * this operation returns a tensor that has the same buffer data as {@code input} with datatype
+ * {@code type}.
+ *
+ *
If the input datatype {@code T} is larger than the output datatype {@code type} then the
+ * shape changes from [...] to [..., sizeof({@code T})/sizeof({@code type})].
+ *
+ *
If {@code T} is smaller than {@code type}, the operator requires that the rightmost
+ * dimension be equal to sizeof({@code type})/sizeof({@code T}). The shape then goes from [...,
+ * sizeof({@code type})/sizeof({@code T})] to [...].
+ *
+ *
tf.bitcast() and tf.cast() work differently when real dtype is casted as a complex dtype
+ * (e.g. tf.complex64 or tf.complex128) as tf.cast() make imaginary part 0 while tf.bitcast()
+ * gives module error. For example,
+ *
+ *
Example 1:
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ * a = [1., 2., 3.] equality_bitcast = tf.bitcast(a, tf.complex128) Traceback (most recent call
+ * last): ... InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast] equality_cast =
+ * tf.cast(a, tf.complex128) print(equality_cast) tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,),
+ * dtype=complex128)
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ * Example 2:
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ * tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8) <tf.Tensor: shape=(4,),
+ * dtype=uint8, numpy=array([255, 255, 255, 255], dtype=uint8)>
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ * Example 3:
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ * x = [1., 2., 3.] y = [0., 2., 3.] equality= tf.equal(x,y) equality_cast =
+ * tf.cast(equality,tf.float32) equality_bitcast = tf.bitcast(equality_cast,tf.uint8)
+ * print(equality) tf.Tensor([False True True], shape=(3,), dtype=bool) print(equality_cast)
+ * tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32) print(equality_bitcast) tf.Tensor( [[ 0 0 0 0]
+ * [ 0 0 128 63] [ 0 0 128 63]], shape=(3, 4), dtype=uint8)
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ * NOTE: Bitcast is implemented as a low-level cast, so machines with different endian
+ * orderings will give different results.
*
* @param data type for {@code output} output
* @param input the input value
@@ -1051,47 +1075,49 @@ public Bitcast bitcast(Operand extends TType> input, Clas
}
/**
- * Apply boolean mask to tensor. Returns the flat array of each element corresponding to a {@code true} in the mask.
- *
- * Numpy equivalent is {@code tensor[mask]}.
- *
- * In general, {@code 0 < dim(mask) = K <= dim(tensor)}, and {@code mask}'s shape must match
- * the first K dimensions of {@code tensor}'s shape. We then have:
- * {@code booleanMask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]}
- * where {@code (i1,...,iK)} is the ith {@code true} entry of {@code mask} (row-major order).
- *
- * The {@code axis} could be used with {@code mask} to indicate the axis to mask from (it's 0 by default).
- * In that case, {@code axis + dim(mask) <= dim(tensor)} and {@code mask}'s shape must match
- * the first {@code axis + dim(mask)} dimensions of {@code tensor}'s shape.
+ * Apply boolean mask to tensor. Returns the flat array of each element corresponding to a {@code
+ * true} in the mask.
+ *
+ *
Numpy equivalent is {@code tensor[mask]}.
+ *
+ *
In general, {@code 0 < dim(mask) = K <= dim(tensor)}, and {@code mask}'s shape must match
+ * the first K dimensions of {@code tensor}'s shape. We then have: {@code booleanMask(tensor,
+ * mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]} where {@code (i1,...,iK)} is the ith {@code
+ * true} entry of {@code mask} (row-major order).
+ *
+ *
The {@code axis} could be used with {@code mask} to indicate the axis to mask from (it's 0
+ * by default). In that case, {@code axis + dim(mask) <= dim(tensor)} and {@code mask}'s shape
+ * must match the first {@code axis + dim(mask)} dimensions of {@code tensor}'s shape.
*
* @param tensor The tensor to mask.
* @param mask The mask to apply.
* @param options carries optional attributes values
* @return The masked tensor.
*/
- public Operand booleanMask(Operand tensor, Operand mask,
- BooleanMask.Options... options) {
+ public Operand booleanMask(
+ Operand tensor, Operand mask, BooleanMask.Options... options) {
return BooleanMask.create(scope, tensor, mask, options);
}
/**
- * Updates a tensor at the masked values, and returns the updated tensor. Does not mutate the input tensors. {@code
- * updates} will be broadcasted by default
- *
- * Numpy equivalent is `tensor[mask] = updates`.
- *
- * In general, {@code 0 < dim(mask) = K <= dim(tensor)}, and {@code mask}'s shape must match the first K dimensions of
- * {@code tensor}'s shape. We then have: {@code booleanMask(tensor, mask)[i, j1,...,jd] =
- * tensor[i1,...,iK,j1,...,jd]} where {@code (i1,...,iK)} is the ith {@code true} entry of {@code mask} (row-major
- * order).
- *
- * The {@code axis} could be used with {@code mask} to indicate the axis to mask from (it's 0 by default). In that
- * case, {@code axis + dim(mask) <= dim(tensor)} and {@code mask}'s shape must match the first {@code axis +
- * dim(mask)} dimensions of {@code tensor}'s shape.
- *
- * The shape of {@code updates} should be {@code [n, t_1, t_2, ...]} where {@code n} is the number of true values in
- * {@code mask} and {@code t_i} is the {@code i}th dimension of {@code tensor} after {@code axis} and {@code mask}.
- * {@code updates} will be broadcasted to this shape by default, which can be disabled using {@code options}.
+ * Updates a tensor at the masked values, and returns the updated tensor. Does not mutate the
+ * input tensors. {@code updates} will be broadcasted by default
+ *
+ *
Numpy equivalent is `tensor[mask] = updates`.
+ *
+ *
In general, {@code 0 < dim(mask) = K <= dim(tensor)}, and {@code mask}'s shape must match
+ * the first K dimensions of {@code tensor}'s shape. We then have: {@code booleanMask(tensor,
+ * mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]} where {@code (i1,...,iK)} is the ith {@code
+ * true} entry of {@code mask} (row-major order).
+ *
+ *
The {@code axis} could be used with {@code mask} to indicate the axis to mask from (it's 0
+ * by default). In that case, {@code axis + dim(mask) <= dim(tensor)} and {@code mask}'s shape
+ * must match the first {@code axis + dim(mask)} dimensions of {@code tensor}'s shape.
+ *
+ *
The shape of {@code updates} should be {@code [n, t_1, t_2, ...]} where {@code n} is the
+ * number of true values in {@code mask} and {@code t_i} is the {@code i}th dimension of {@code
+ * tensor} after {@code axis} and {@code mask}. {@code updates} will be broadcasted to this shape
+ * by default, which can be disabled using {@code options}.
*
* @param tensor The tensor to mask.
* @param mask The mask to apply.
@@ -1099,15 +1125,18 @@ public Operand booleanMask(Operand tensor, Operand Operand booleanMaskUpdate(Operand tensor, Operand mask,
- Operand updates, BooleanMaskUpdate.Options... options) {
+ public Operand booleanMaskUpdate(
+ Operand tensor,
+ Operand mask,
+ Operand updates,
+ BooleanMaskUpdate.Options... options) {
return BooleanMaskUpdate.create(scope, tensor, mask, updates, options);
}
/**
- * Return the shape of s0 op s1 with broadcast.
- * Given {@code s0} and {@code s1}, tensors that represent shapes, compute {@code r0}, the
- * broadcasted shape. {@code s0}, {@code s1} and {@code r0} are all integer vectors.
+ * Return the shape of s0 op s1 with broadcast. Given {@code s0} and {@code s1}, tensors that
+ * represent shapes, compute {@code r0}, the broadcasted shape. {@code s0}, {@code s1} and {@code
+ * r0} are all integer vectors.
*
* @param data type for {@code r0} output
* @param s0 the s0 value
@@ -1115,41 +1144,43 @@ public Operand booleanMaskUpdate(Operand tensor, Operand
* @param data type for {@code BroadcastArgs} output and operands
* @return a new instance of BroadcastDynamicShape
*/
- public BroadcastDynamicShape broadcastDynamicShape(Operand s0,
- Operand s1) {
+ public BroadcastDynamicShape broadcastDynamicShape(
+ Operand s0, Operand s1) {
return BroadcastDynamicShape.create(scope, s0, s1);
}
/**
- * Broadcast an array for a compatible shape.
- * Broadcasting is the process of making arrays to have compatible shapes
- * for arithmetic operations. Two shapes are compatible if for each
- * dimension pair they are either equal or one of them is one. When trying
- * to broadcast a Tensor to a shape, it starts with the trailing dimensions,
- * and works its way forward.
- * For example,
- *
- *
- *
- * x = tf.constant([1, 2, 3])
- * y = tf.broadcast_to(x, [3, 3])
- * print(y)
- * tf.Tensor(
- * [[1 2 3]
- * [1 2 3]
- * [1 2 3]], shape=(3, 3), dtype=int32)
- *
- *
- *
- * In the above example, the input Tensor with the shape of {@code [1, 3]}
- * is broadcasted to output Tensor with shape of {@code [3, 3]}.
- *
When doing broadcasted operations such as multiplying a tensor
- * by a scalar, broadcasting (usually) confers some time or space
- * benefit, as the broadcasted tensor is never materialized.
- *
However, {@code broadcast_to} does not carry with it any such benefits.
- * The newly-created tensor takes the full memory of the broadcasted
- * shape. (In a graph context, {@code broadcast_to} might be fused to
- * subsequent operation and then be optimized away, however.)
+ * Broadcast an array for a compatible shape. Broadcasting is the process of making arrays have
+ * compatible shapes for arithmetic operations. Two shapes are compatible if for each dimension
+ * pair they are either equal or one of them is one. When trying to broadcast a Tensor to a shape,
+ * it starts with the trailing dimensions, and works its way forward.
+ *
+ *
For example,
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ * x = tf.constant([1, 2, 3]) y = tf.broadcast_to(x, [3, 3]) print(y) tf.Tensor( [[1 2 3] [1 2
+ * 3] [1 2 3]], shape=(3, 3), dtype=int32)
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ * In the above example, the input Tensor with the shape of {@code [1, 3]} is broadcasted to
+ * output Tensor with shape of {@code [3, 3]}.
+ *
+ *
When doing broadcasted operations such as multiplying a tensor by a scalar, broadcasting
+ * (usually) confers some time or space benefit, as the broadcasted tensor is never materialized.
+ *
+ *
However, {@code broadcast_to} does not carry with it any such benefits. The newly-created
+ * tensor takes the full memory of the broadcasted shape. (In a graph context, {@code
+ * broadcast_to} might be fused to subsequent operation and then be optimized away, however.)
*
* @param data type for {@code output} output
* @param input A Tensor to broadcast.
@@ -1157,22 +1188,16 @@ public BroadcastDynamicShape broadcastDynamicShape(Operan
* @param data type for {@code BroadcastTo} output and operands
* @return a new instance of BroadcastTo
*/
- public BroadcastTo broadcastTo(Operand input,
- Operand extends TNumber> shape) {
+ public BroadcastTo broadcastTo(
+ Operand input, Operand extends TNumber> shape) {
return BroadcastTo.create(scope, input, shape);
}
/**
- * Bucketizes 'input' based on 'boundaries'.
- * For example, if the inputs are
- * boundaries = [0, 10, 100]
- * input = [[-5, 10000]
- * [150, 10]
- * [5, 100]]
- * then the output will be
- * output = [[0, 3]
- * [3, 2]
- * [1, 3]]
+ * Bucketizes 'input' based on 'boundaries'. For example, if the inputs are {@code boundaries =
+ * [0, 10, 100]} and {@code input = [[-5, 10000], [150, 10], [5, 100]]},
+ *
+ *
<p>then the output will be {@code output = [[0, 3], [3, 2], [1, 3]]}
*
* @param input Any shape of Tensor contains with int or float type.
* @param boundaries A sorted list of floats gives the boundary of the buckets.
@@ -1184,7 +1209,7 @@ public Bucketize bucketize(Operand extends TNumber> input, List boundar
/**
* Calls the function in an execution environment, adding its graph as a function if it isn't
- * already present. Only works for functions with a single input and output.
+ * already present. Only works for functions with a single input and output.
*
* @param argument the argument to the call
* @return the output of the function
@@ -1196,20 +1221,21 @@ public Operand> call(ConcreteFunction function, Operand> argument) {
/**
* Calls the function in an execution environment, adding its graph as a function if it isn't
- * already present. The inputs and outputs are keyed by the names set in the {@code Signature}.
+ * already present. The inputs and outputs are keyed by the names set in the {@code Signature}.
*
* @param arguments the arguments to the call
* @return the outputs of the function
* @see ConcreteFunction#call(Ops, Map)
*/
- public Map> call(ConcreteFunction function,
- Map> arguments) {
+ public Map> call(
+ ConcreteFunction function, Map> arguments) {
return Function.call(scope, function, arguments);
}
/**
* An n-way switch statement which calls a single branch function.
- *
+ *
+ *
* An n-way switch statement, implementing the following:
* ```
* switch (branch_index) {
@@ -1228,41 +1254,48 @@ public Map> call(ConcreteFunction function,
* ```
*
*
- * Selects between {@link StatefulCase} and {@link StatelessCase} based on the statefulness of the function arguments.
+ *
Selects between {@link StatefulCase} and {@link StatelessCase} based on the statefulness of
+ * the function arguments.
*
* @param branchIndex The branch selector, an int32 Tensor.
* @param input A list of input tensors passed to the branch function.
* @param Tout A list of output types.
- * @param branches
+ * @param branches
+ *
* A list of functions each of which takes 'inputs' and returns a list of
* tensors, whose types are the same as what every other branch returns.
*
+ *
* @param options carries optional attribute values
* @return a new instance of Case
*/
- public Case caseOp(Operand branchIndex, Iterable> input,
- List> Tout, List branches, Case.Options... options) {
+ public Case caseOp(
+ Operand branchIndex,
+ Iterable> input,
+ List> Tout,
+ List branches,
+ Case.Options... options) {
return Case.create(scope, branchIndex, input, Tout, branches, options);
}
/**
- * Clips tensor values to a specified min and max.
- * Given a tensor {@code t}, this operation returns a tensor of the same type and
- * shape as {@code t} with its values clipped to {@code clip_value_min} and {@code clip_value_max}.
- * Any values less than {@code clip_value_min} are set to {@code clip_value_min}. Any values
- * greater than {@code clip_value_max} are set to {@code clip_value_max}.
+ * Clips tensor values to a specified min and max. Given a tensor {@code t}, this operation
+ * returns a tensor of the same type and shape as {@code t} with its values clipped to {@code
+ * clip_value_min} and {@code clip_value_max}. Any values less than {@code clip_value_min} are set
+ * to {@code clip_value_min}. Any values greater than {@code clip_value_max} are set to {@code
+ * clip_value_max}.
*
* @param data type for {@code output} output
* @param t A {@code Tensor}.
- * @param clipValueMin A 0-D (scalar) {@code Tensor}, or a {@code Tensor} with the same shape
- * as {@code t}. The minimum value to clip by.
- * @param clipValueMax A 0-D (scalar) {@code Tensor}, or a {@code Tensor} with the same shape
- * as {@code t}. The maximum value to clip by.
+ * @param clipValueMin A 0-D (scalar) {@code Tensor}, or a {@code Tensor} with the same shape as
+ * {@code t}. The minimum value to clip by.
+ * @param clipValueMax A 0-D (scalar) {@code Tensor}, or a {@code Tensor} with the same shape as
+ * {@code t}. The maximum value to clip by.
* @param data type for {@code ClipByValue} output and operands
* @return a new instance of ClipByValue
*/
- public ClipByValue clipByValue(Operand t, Operand clipValueMin,
- Operand clipValueMax) {
+ public ClipByValue clipByValue(
+ Operand t, Operand clipValueMin, Operand clipValueMax) {
return ClipByValue.create(scope, t, clipValueMin, clipValueMax);
}
@@ -1270,15 +1303,15 @@ public ClipByValue clipByValue(Operand t, Operand cli
* Concatenates tensors along one dimension.
*
* @param data type for {@code output} output
- * @param values List of {@code N} Tensors to concatenate. Their ranks and types must match,
- * and their sizes must match in all dimensions except {@code concat_dim}.
- * @param axis 0-D. The dimension along which to concatenate. Must be in the
- * range [-rank(values), rank(values)).
+ * @param values List of {@code N} Tensors to concatenate. Their ranks and types must match, and
+ * their sizes must match in all dimensions except {@code concat_dim}.
+ * @param axis 0-D. The dimension along which to concatenate. Must be in the range [-rank(values),
+ * rank(values)).
* @param data type for {@code ConcatV2} output and operands
* @return a new instance of Concat
*/
- public Concat concat(Iterable> values,
- Operand extends TNumber> axis) {
+ public Concat concat(
+ Iterable> values, Operand extends TNumber> axis) {
return Concat.create(scope, values, axis);
}
@@ -1296,7 +1329,7 @@ public Constant constant(int data) {
* Creates a rank-3 constant of {@code double} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return a double constant
*/
public Constant constant(double[][][] data) {
@@ -1307,7 +1340,7 @@ public Constant constant(double[][][] data) {
* Creates a rank-5 constant of {@code byte} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return a byte constant
*/
public Constant constant(byte[][][][][] data) {
@@ -1316,7 +1349,7 @@ public Constant constant(byte[][][][][] data) {
/**
* Creates a constant of {@code String} elements that is a copy of a given n-dimensional array,
- * using the default UTF-8 encoding.
+ * using the default UTF-8 encoding.
*
* @param data an n-dimensional array of {@code String} elements.
* @return a string constant
@@ -1329,7 +1362,7 @@ public Constant constant(NdArray data) {
* Creates a rank-4 constant of {@code int} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return an integer constant
*/
public Constant constant(int[][][][] data) {
@@ -1350,7 +1383,7 @@ public Constant constant(byte data) {
* Creates a rank-2 constant of {@code long} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return a long constant
*/
public Constant constant(long[][] data) {
@@ -1361,7 +1394,7 @@ public Constant constant(long[][] data) {
* Creates a rank-6 constant of {@code float} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return a float constant
*/
public Constant constant(float[][][][][][] data) {
@@ -1372,7 +1405,7 @@ public Constant constant(float[][][][][][] data) {
* Creates a rank-6 constant of {@code boolean} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return a boolean constant
*/
public Constant constant(boolean[][][][][][] data) {
@@ -1383,7 +1416,7 @@ public Constant constant(boolean[][][][][][] data) {
* Creates a rank-4 constant of {@code boolean} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return a boolean constant
*/
public Constant constant(boolean[][][][] data) {
@@ -1394,7 +1427,7 @@ public Constant constant(boolean[][][][] data) {
* Creates a rank-3 constant of {@code float} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return a float constant
*/
public Constant constant(float[][][] data) {
@@ -1405,7 +1438,7 @@ public Constant constant(float[][][] data) {
* Creates a rank-5 constant of {@code float} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return a float constant
*/
public Constant constant(float[][][][][] data) {
@@ -1416,7 +1449,7 @@ public Constant constant(float[][][][][] data) {
* Creates a rank-5 constant of {@code long} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return a long constant
*/
public Constant constant(long[][][][][] data) {
@@ -1427,7 +1460,7 @@ public Constant constant(long[][][][][] data) {
* Creates a rank-1 constant of {@code int} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return an integer constant
*/
public Constant constant(int[] data) {
@@ -1438,7 +1471,7 @@ public Constant constant(int[] data) {
* Creates a rank-2 constant of {@code float} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return a float constant
*/
public Constant constant(float[][] data) {
@@ -1449,7 +1482,7 @@ public Constant constant(float[][] data) {
* Creates a rank-2 constant of {@code boolean} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return a boolean constant
*/
public Constant constant(boolean[][] data) {
@@ -1510,7 +1543,7 @@ public Constant constant(BooleanNdArray data) {
* Creates a rank-1 constant of {@code double} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return a double constant
*/
public Constant constant(double[] data) {
@@ -1531,7 +1564,7 @@ public Constant constant(LongNdArray data) {
* Creates a rank-1 constant of {@code float} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return a float constant
*/
public Constant constant(float[] data) {
@@ -1542,7 +1575,7 @@ public Constant constant(float[] data) {
* Creates a rank-3 constant of {@code long} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return a long constant
*/
public Constant constant(long[][][] data) {
@@ -1553,7 +1586,7 @@ public Constant constant(long[][][] data) {
* Creates a rank-3 constant of {@code boolean} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return a boolean constant
*/
public Constant constant(boolean[][][] data) {
@@ -1564,7 +1597,7 @@ public Constant constant(boolean[][][] data) {
* Creates a rank-1 constant of {@code byte} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return a byte constant
*/
public Constant constant(byte[] data) {
@@ -1575,7 +1608,7 @@ public Constant constant(byte[] data) {
* Creates a rank-3 constant of {@code int} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return an integer constant
*/
public Constant constant(int[][][] data) {
@@ -1596,7 +1629,7 @@ public Constant constant(IntNdArray data) {
* Creates a rank-1 constant of {@code long} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return a long constant
*/
public Constant constant(long[] data) {
@@ -1617,7 +1650,7 @@ public Constant constant(FloatNdArray data) {
* Creates a rank-5 constant of {@code int} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return an integer constant
*/
public Constant constant(int[][][][][] data) {
@@ -1628,7 +1661,7 @@ public Constant constant(int[][][][][] data) {
* Creates a rank-5 constant of {@code double} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return a double constant
*/
public Constant constant(double[][][][][] data) {
@@ -1639,7 +1672,7 @@ public Constant constant(double[][][][][] data) {
* Creates a rank-5 constant of {@code boolean} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return a boolean constant
*/
public Constant constant(boolean[][][][][] data) {
@@ -1650,7 +1683,7 @@ public Constant constant(boolean[][][][][] data) {
* Creates a rank-6 constant of {@code int} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return an integer constant
*/
public Constant constant(int[][][][][][] data) {
@@ -1671,7 +1704,7 @@ public Constant constant(DoubleNdArray data) {
* Creates a rank-6 constant of {@code double} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return a double constant
*/
public Constant constant(double[][][][][][] data) {
@@ -1682,7 +1715,7 @@ public Constant constant(double[][][][][][] data) {
* Creates a rank-6 constant of {@code long} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return a long constant
*/
public Constant constant(long[][][][][][] data) {
@@ -1693,7 +1726,7 @@ public Constant constant(long[][][][][][] data) {
* Creates a rank-2 constant of {@code int} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return an integer constant
*/
public Constant constant(int[][] data) {
@@ -1704,7 +1737,7 @@ public Constant constant(int[][] data) {
* Creates a rank-1 constant of {@code boolean} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return a boolean constant
*/
public Constant constant(boolean[] data) {
@@ -1725,7 +1758,7 @@ public Constant constant(float data) {
* Creates a rank-4 constant of {@code byte} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return a byte constant
*/
public Constant constant(byte[][][][] data) {
@@ -1736,7 +1769,7 @@ public Constant constant(byte[][][][] data) {
* Creates a rank-4 constant of {@code float} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return a float constant
*/
public Constant constant(float[][][][] data) {
@@ -1757,7 +1790,7 @@ public Constant constant(ByteNdArray data) {
* Creates a rank-6 constant of {@code byte} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return a byte constant
*/
public Constant constant(byte[][][][][][] data) {
@@ -1768,7 +1801,7 @@ public Constant constant(byte[][][][][][] data) {
* Creates a rank-4 constant of {@code long} elements.
*
* @param data An array containing the values to put into the new constant. The dimensions of the
- * new constant will match those of the array.
+ * new constant will match those of the array.
* @return a long constant
*/
public Constant