diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 8f862e329..02601764c 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -53,7 +53,7 @@ jobs:
}
- name: Upload packages artifacts
- uses: actions/upload-artifact@v1.0.0
+ uses: actions/upload-artifact@v4.0.0
with:
name: "drop-ci-packages"
path: './packages'
diff --git a/README.md b/README.md
index 36ec1660c..75cad0aa7 100644
--- a/README.md
+++ b/README.md
@@ -15,19 +15,13 @@
English | [中文](docs/README-CN.md)
-**=========================================================**
-
-### [Voting: Naming Convention Approach of v1.0.0](https://github.com/SciSharp/TensorFlow.NET/issues/1074)
-
-Dear all,
-
-We would like to urge you to participate in our upcoming vote regarding the naming convention for TensorFlow.NET version 1.0.0 in [#1074](https://github.com/SciSharp/TensorFlow.NET/issues/1074). Your participation in the vote is essential to help us decide on the best approach for improving the naming convention used in previous versions.
-
-Thank you,
-
-TensorFlow.NET Authors
-
-**=========================================================**
+> [!IMPORTANT]
+> We're happy that our work on tensorflow.net has attracted many users. However, at this time, none of the main maintainers of this repo is available for new features and bug fixes. We won't refuse PRs and will help to review them.
+>
+> If you would like to become a contributor or maintainer of tensorflow.net, we'd be glad to help you get started.
+>
+> We are sorry about this, and we will resume maintaining this project once one of us has the bandwidth for it.
+>
*master branch and v0.100.x is corresponding to tensorflow v2.10, v0.6x branch is from tensorflow v2.6, v0.15-tensorflow1.15 is from tensorflow1.15. Please add `https://www.myget.org/F/scisharp/api/v3/index.json` to nuget source to use nightly release.*
@@ -75,9 +69,12 @@ PM> Install-Package TensorFlow.Keras
The second part is the computing support part. Only one of the following packages is needed, depending on your device and system.
```
-### CPU version for Windows, Linux and Mac
+### CPU version for Windows and Linux
PM> Install-Package SciSharp.TensorFlow.Redist
+### CPU version for MacOS
+PM> Install-Package SciSharp.TensorFlow.Redist-OSX
+
### GPU version for Windows (CUDA and cuDNN are required)
PM> Install-Package SciSharp.TensorFlow.Redist-Windows-GPU
diff --git a/TensorFlow.NET.sln b/TensorFlow.NET.sln
index 87729e27d..e0c273568 100644
--- a/TensorFlow.NET.sln
+++ b/TensorFlow.NET.sln
@@ -39,6 +39,10 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Benchmark", "too
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Console", "tools\TensorFlowNET.Console\Tensorflow.Console.csproj", "{1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}"
EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlow.Kernel.UnitTest", "test\TensorFlow.Kernel.UnitTest\TensorFlow.Kernel.UnitTest.csproj", "{654A027D-1364-4729-880B-144DFE1FF5BB}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Tensorflow.UnitTest", "test\Tensorflow.UnitTest\Tensorflow.UnitTest.csproj", "{A73DF5A6-866E-4AED-9017-AA2EE86368C4}"
+EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
@@ -322,6 +326,42 @@ Global
{1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Release|x64.Build.0 = Release|x64
{1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Release|x86.ActiveCfg = Release|Any CPU
{1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Release|x86.Build.0 = Release|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Debug|x64.ActiveCfg = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Debug|x64.Build.0 = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Debug|x86.Build.0 = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.GPU|Any CPU.ActiveCfg = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.GPU|Any CPU.Build.0 = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.GPU|x64.ActiveCfg = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.GPU|x64.Build.0 = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.GPU|x86.ActiveCfg = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.GPU|x86.Build.0 = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Release|Any CPU.Build.0 = Release|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Release|x64.ActiveCfg = Release|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Release|x64.Build.0 = Release|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Release|x86.ActiveCfg = Release|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Release|x86.Build.0 = Release|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Debug|x64.ActiveCfg = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Debug|x64.Build.0 = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Debug|x86.Build.0 = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.GPU|Any CPU.ActiveCfg = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.GPU|Any CPU.Build.0 = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.GPU|x64.ActiveCfg = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.GPU|x64.Build.0 = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.GPU|x86.ActiveCfg = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.GPU|x86.Build.0 = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Release|Any CPU.Build.0 = Release|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Release|x64.ActiveCfg = Release|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Release|x64.Build.0 = Release|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Release|x86.ActiveCfg = Release|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Release|x86.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
@@ -342,6 +382,8 @@ Global
{D24FCAA5-548C-4251-B226-A1B6535D0845} = {E1A5D2B7-10AF-4876-85C0-7714EF274214}
{C23563DB-FE21-48E7-A411-87A109E4A899} = {E1A5D2B7-10AF-4876-85C0-7714EF274214}
{1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0} = {E1A5D2B7-10AF-4876-85C0-7714EF274214}
+ {654A027D-1364-4729-880B-144DFE1FF5BB} = {1B0918B9-65AD-4F34-A287-AF4597B27DBD}
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4} = {1B0918B9-65AD-4F34-A287-AF4597B27DBD}
EndGlobalSection
GlobalSection(ExtensibilityGlobals) = postSolution
SolutionGuid = {2DEAD3CC-486B-4918-A607-50B0DE7B114A}
diff --git a/data/img001.bmp b/data/img001.bmp
new file mode 100644
index 000000000..d149d76f1
Binary files /dev/null and b/data/img001.bmp differ
diff --git a/src/TensorFlowNET.Core/APIs/c_api.customize.cs b/src/TensorFlowNET.Core/APIs/c_api.customize.cs
index 510e52eb7..bee4897ee 100644
--- a/src/TensorFlowNET.Core/APIs/c_api.customize.cs
+++ b/src/TensorFlowNET.Core/APIs/c_api.customize.cs
@@ -8,10 +8,10 @@ namespace Tensorflow
public partial class c_api
{
[DllImport(TensorFlowLibName)]
- public static extern void TFC_SetAttr(SafeGraphHandle graph, IntPtr op, string attr_name, SafeBufferHandle attr_value_proto, SafeStatusHandle status);
+ public static extern void TF_SetAttr(SafeGraphHandle graph, IntPtr op, string attr_name, SafeBufferHandle attr_value_proto, SafeStatusHandle status);
[DllImport(TensorFlowLibName)]
- public static extern SafeBufferHandle TFC_GetHandleShapeAndType(SafeGraphHandle c_graph, TF_Output output);
+ public static extern SafeBufferHandle TF_GetHandleShapeAndType(SafeGraphHandle c_graph, TF_Output output);
[DllImport(TensorFlowLibName)]
- public static extern void TFC_SetHandleShapeAndType(SafeGraphHandle c_graph, TF_Output output, byte[] data, long proto_len, SafeStatusHandle status);
+ public static extern void TF_SetHandleShapeAndType(SafeGraphHandle c_graph, TF_Output output, byte[] data, long proto_len, SafeStatusHandle status);
}
}
diff --git a/src/TensorFlowNET.Core/APIs/tf.array.cs b/src/TensorFlowNET.Core/APIs/tf.array.cs
index 4d9c3da58..b529cd319 100644
--- a/src/TensorFlowNET.Core/APIs/tf.array.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.array.cs
@@ -140,6 +140,16 @@ public Tensor identity(Tensor input, string name = null)
public Tensor gather(Tensor @params, Tensor indices, string name = null, int axis = 0)
=> array_ops.gather(@params, indices, name: name, axis: ops.convert_to_tensor(axis));
+ ///
+ /// Gather slices from `params` into a Tensor with shape specified by `indices`.
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor gather_nd(Tensor @params, Tensor indices, string name = null)
+ => gen_array_ops.gather_nd(@params, indices, name: name);
+
///
/// Return the elements, either from `x` or `y`, depending on the `condition`.
///
diff --git a/src/TensorFlowNET.Core/APIs/tf.image.cs b/src/TensorFlowNET.Core/APIs/tf.image.cs
index ac9cbc60d..41ef52967 100644
--- a/src/TensorFlowNET.Core/APIs/tf.image.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.image.cs
@@ -339,6 +339,13 @@ public Tensor decode_image(Tensor contents, int channels = 0, TF_DataType dtype
=> image_ops_impl.decode_image(contents, channels: channels, dtype: dtype,
name: name, expand_animations: expand_animations);
+ public Tensor encode_png(Tensor contents, string name = null)
+ => image_ops_impl.encode_png(contents, name: name);
+
+ public Tensor encode_jpeg(Tensor contents, string name = null)
+ => image_ops_impl.encode_jpeg(contents, name: name);
+
+
///
/// Convenience function to check if the 'contents' encodes a JPEG image.
///
diff --git a/src/TensorFlowNET.Core/APIs/tf.io.cs b/src/TensorFlowNET.Core/APIs/tf.io.cs
index be1e86e6c..ea1e44b28 100644
--- a/src/TensorFlowNET.Core/APIs/tf.io.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.io.cs
@@ -16,6 +16,7 @@ limitations under the License.
using System.Collections.Generic;
using Tensorflow.IO;
+using Tensorflow.Operations;
namespace Tensorflow
{
@@ -46,6 +47,12 @@ public Operation save_v2(Tensor prefix, string[] tensor_names,
public Tensor[] restore_v2(Tensor prefix, string[] tensor_names,
string[] shape_and_slices, TF_DataType[] dtypes, string name = null)
=> ops.restore_v2(prefix, tensor_names, shape_and_slices, dtypes, name: name);
+
+ public Operation write_file(string filename, Tensor contents, string name = null)
+ => write_file(Tensorflow.ops.convert_to_tensor(filename, TF_DataType.TF_STRING), contents, name);
+
+ public Operation write_file(Tensor filename, Tensor contents, string name = null)
+ => gen_ops.write_file(filename, contents, name);
}
public GFile gfile = new GFile();
diff --git a/src/TensorFlowNET.Core/APIs/tf.nn.cs b/src/TensorFlowNET.Core/APIs/tf.nn.cs
index 397c68c7c..112c48628 100644
--- a/src/TensorFlowNET.Core/APIs/tf.nn.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.nn.cs
@@ -101,6 +101,8 @@ public Tensor embedding_lookup(Tensor @params,
name: name);
public IActivation relu() => new relu();
+
+
public IActivation swish() => new swish();
public IActivation tanh() => new tanh();
@@ -111,6 +113,9 @@ public Tensor tanh(Tensor x, string name = null)
public Tensor relu(Tensor features, string name = null)
=> gen_nn_ops.relu(features, name);
+ public Tensor relu6(Tensor features, string name = null)
+ => gen_nn_ops.relu6(features, name);
+
public Tensor[] fused_batch_norm(Tensor x,
Tensor scale,
Tensor offset,
diff --git a/src/TensorFlowNET.Core/Eager/EagerRunner.RecordGradient.cs b/src/TensorFlowNET.Core/Eager/EagerRunner.RecordGradient.cs
index 59d5fd030..2bdd65f5b 100644
--- a/src/TensorFlowNET.Core/Eager/EagerRunner.RecordGradient.cs
+++ b/src/TensorFlowNET.Core/Eager/EagerRunner.RecordGradient.cs
@@ -80,6 +80,11 @@ BackwardFunction GetGradientFunction(string op_name,
Tensor[] op_outputs)
=> (out_grads, unneeded_gradients) =>
{
+ if(!ops.gradientFunctions.ContainsKey(op_name))
+ {
+ throw new Exception($"gradientFunctions does not contain op_name: {op_name}");
+ }
+
if (ops.gradientFunctions[op_name] == null)
return new Tensor[op_inputs.Length];
diff --git a/src/TensorFlowNET.Core/Gradients/array_grad.cs b/src/TensorFlowNET.Core/Gradients/array_grad.cs
index 4b7027992..a4da60eed 100644
--- a/src/TensorFlowNET.Core/Gradients/array_grad.cs
+++ b/src/TensorFlowNET.Core/Gradients/array_grad.cs
@@ -381,5 +381,48 @@ public static Tensor[] _ReverseV2Grad(Operation op, Tensor[] grads)
var axis = op.inputs[1];
return new Tensor[] { array_ops.reverse(grad, axis), null };
}
+
+ [RegisterGradient("Tile")]
+ public static Tensor[] _TileGrad(Operation op, Tensor[] grads)
+ {
+ var grad = grads[0];
+ var input_shape = array_ops.shape(op.inputs[0], out_type: op.inputs[1].dtype);
+ var split_shape = array_ops.reshape(array_ops.transpose(array_ops.stack(new Tensor[] { op.inputs[1], input_shape })), new Shape(-1));
+ var axes = math_ops.range(0, array_ops.size(split_shape), 2);
+
+ //# Sum reduces grad along the first dimension for IndexedSlices
+ //if isinstance(grad, indexed_slices_lib.IndexedSlices):
+ //input_shape_0 = math_ops.cast(input_shape[0], grad.indices.dtype)
+ //grad = math_ops.unsorted_segment_sum(
+ // grad.values, math_ops.mod(grad.indices, input_shape_0), input_shape_0)
+ //split_shape = array_ops.concat([[1], split_shape[1:]], axis = 0)
+
+ var input_grad = math_ops.reduce_sum(array_ops.reshape(grad, split_shape), axes);
+ if (!tf.Context.executing_eagerly())
+ {
+ input_grad.set_shape(op.inputs[0].GetShape());
+ }
+ return new Tensor[] { input_grad, null };
+ }
+
+ [RegisterGradient("GatherNd")]
+ public static Tensor[] _GatherNdGrad(Operation op, Tensor[] grads)
+ {
+ var @ref = op.inputs[0];
+ var indices = op.inputs[1];
+ var grad = grads[0];
+ var ref_shape = array_ops.shape(@ref, out_type: indices.dtype);
+ Tensor ref_grad = null;
+ if (indices.shape.ndim == 2 && indices.shape.dims[indices.shape.Length - 1] == 1)
+ {
+ ref_grad = (Tensor)new IndexedSlices(grad, array_ops.squeeze(indices, axis: -1), ref_shape);
+ }
+ else
+ {
+ ref_grad = gen_array_ops.scatter_nd(indices, grad, ref_shape);
+ }
+ return new Tensor[] { ref_grad, null };
+ }
+
}
}
diff --git a/src/TensorFlowNET.Core/Gradients/nn_grad.cs b/src/TensorFlowNET.Core/Gradients/nn_grad.cs
index a43a91b9a..87646a9ea 100644
--- a/src/TensorFlowNET.Core/Gradients/nn_grad.cs
+++ b/src/TensorFlowNET.Core/Gradients/nn_grad.cs
@@ -229,6 +229,37 @@ public static Tensor[] _Conv2DGrad(Operation op, Tensor[] grads)
};
}
+ ///
+ /// Gradient function for Conv2D.
+ ///
+ ///
+ ///
+ ///
+ [RegisterGradient("DepthwiseConv2dNative")]
+ public static Tensor[] _DepthwiseConv2DGrad(Operation op, Tensor[] grads)
+ {
+ var dilations = op.get_attr_list("dilations");
+ var strides = op.get_attr_list("strides");
+ var padding = op.get_attr("padding");
+ var explicit_paddings = op.get_attr_list("explicit_paddings");
+ var data_format = op.get_attr("data_format");
+ var shape = gen_array_ops.shape_n(new Tensor[] { op.inputs[0], op.inputs[1] });
+
+ return new Tensor[]
+ {
+ gen_nn_ops.depthwise_conv2d_native_backprop_input(
+ shape[0], op.inputs[1], grads[0],
+ strides, padding, explicit_paddings,
+ dilations: dilations,
+ data_format: data_format),
+ gen_nn_ops.depthwise_conv2d_native_backprop_filter(op.inputs[0], shape[1], grads[0],
+ strides, padding,
+ dilations: dilations,
+ explicit_paddings: explicit_paddings,
+ data_format: data_format)
+ };
+ }
+
[RegisterGradient("FusedBatchNorm")]
public static Tensor[] _FusedBatchNormGrad(Operation op, Tensor[] grads)
=> _BaseFusedBatchNormGrad(op, 0, grads);
diff --git a/src/TensorFlowNET.Core/Keras/Activations/Activations.cs b/src/TensorFlowNET.Core/Keras/Activations/Activations.cs
index f0d59ed62..37264104a 100644
--- a/src/TensorFlowNET.Core/Keras/Activations/Activations.cs
+++ b/src/TensorFlowNET.Core/Keras/Activations/Activations.cs
@@ -32,6 +32,7 @@ public interface IActivationsApi
Activation Linear { get; }
Activation Relu { get; }
+ Activation Relu6 { get; }
Activation Sigmoid { get; }
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/ExponentialArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/ExponentialArgs.cs
new file mode 100644
index 000000000..ef024971d
--- /dev/null
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/ExponentialArgs.cs
@@ -0,0 +1,10 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Keras.ArgsDefinition
+{
+ public class ExponentialArgs : LayerArgs
+ {
+ }
+}
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/HardSigmoidArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/HardSigmoidArgs.cs
new file mode 100644
index 000000000..788e0f36d
--- /dev/null
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/HardSigmoidArgs.cs
@@ -0,0 +1,10 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Keras.ArgsDefinition
+{
+ public class HardSigmoidArgs : LayerArgs
+ {
+ }
+}
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/SELUArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/SELUArgs.cs
new file mode 100644
index 000000000..eb0e18446
--- /dev/null
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/SELUArgs.cs
@@ -0,0 +1,11 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Keras.ArgsDefinition
+{
+ public class SELUArgs : LayerArgs
+ {
+
+ }
+}
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/SoftplusArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/SoftplusArgs.cs
new file mode 100644
index 000000000..7b4f20795
--- /dev/null
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/SoftplusArgs.cs
@@ -0,0 +1,10 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Keras.ArgsDefinition
+{
+ public class SoftplusArgs : LayerArgs
+ {
+ }
+}
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/SoftsignArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/SoftsignArgs.cs
new file mode 100644
index 000000000..4e23d261d
--- /dev/null
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/SoftsignArgs.cs
@@ -0,0 +1,10 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Keras.ArgsDefinition
+{
+ public class SoftsignArgs : LayerArgs
+ {
+ }
+}
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/SwishArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/SwishArgs.cs
new file mode 100644
index 000000000..3dea06a23
--- /dev/null
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/SwishArgs.cs
@@ -0,0 +1,10 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Keras.ArgsDefinition
+{
+ public class SwishArgs : LayerArgs
+ {
+ }
+}
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/TanhArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/TanhArgs.cs
new file mode 100644
index 000000000..5df41b71b
--- /dev/null
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Activation/TanhArgs.cs
@@ -0,0 +1,10 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Keras.ArgsDefinition
+{
+ public class TanhArgs : LayerArgs
+ {
+ }
+}
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Convolution/Conv2DTransposeArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Convolution/Conv2DTransposeArgs.cs
new file mode 100644
index 000000000..3daba9465
--- /dev/null
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Convolution/Conv2DTransposeArgs.cs
@@ -0,0 +1,10 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Keras.ArgsDefinition
+{
+ public class Conv2DTransposeArgs : Conv2DArgs
+ {
+ }
+}
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/DataAdapterArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/DataAdapterArgs.cs
index 78882e82d..ba0332836 100644
--- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/DataAdapterArgs.cs
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/DataAdapterArgs.cs
@@ -1,5 +1,6 @@
using Tensorflow.Keras.Engine;
using Tensorflow.Keras.Saving;
+using Tensorflow.NumPy;
namespace Tensorflow.Keras.ArgsDefinition
{
@@ -16,5 +17,7 @@ public class DataAdapterArgs: IKerasConfig
public int Worker { get; set; }
public bool UseMultiprocessing { get; set; }
public IModel Model { get; set; }
+ public Dictionary ClassWeight = null;
+ public NDArray SampleWeight = null;
}
}
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/DataHandlerArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/DataHandlerArgs.cs
index 82530e950..72d0bb811 100644
--- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/DataHandlerArgs.cs
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/DataHandlerArgs.cs
@@ -1,5 +1,6 @@
using Tensorflow.Keras.Engine;
using Tensorflow.Keras.Saving;
+using Tensorflow.NumPy;
namespace Tensorflow.Keras.ArgsDefinition
{
@@ -18,5 +19,7 @@ public class DataHandlerArgs: IKerasConfig
public bool UseMultiprocessing { get; set; } = false;
public IModel Model { get; set; }
public IVariableV1 StepsPerExecution { get; set; }
+ public Dictionary ClassWeight = null;
+ public NDArray SampleWeight = null;
}
}
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Merging/AddArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Merging/AddArgs.cs
new file mode 100644
index 000000000..016d58203
--- /dev/null
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Merging/AddArgs.cs
@@ -0,0 +1,10 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Keras.ArgsDefinition
+{
+ public class AddArgs : MergeArgs
+ {
+ }
+}
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Merging/ConcatenateArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Merging/ConcatenateArgs.cs
new file mode 100644
index 000000000..4a81d139d
--- /dev/null
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Merging/ConcatenateArgs.cs
@@ -0,0 +1,10 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Keras.ArgsDefinition
+{
+ public class ConcatenateArgs : MergeArgs
+ {
+ }
+}
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Merging/MergeArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Merging/MergeArgs.cs
index 0140b3dd0..9bcf1908e 100644
--- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Merging/MergeArgs.cs
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Merging/MergeArgs.cs
@@ -1,13 +1,15 @@
-using System;
+using Newtonsoft.Json;
+using System;
using System.Collections.Generic;
using System.Text;
namespace Tensorflow.Keras.ArgsDefinition
{
// TODO: complete the implementation
- public class MergeArgs : LayerArgs
+ public class MergeArgs : AutoSerializeLayerArgs
{
public Tensors Inputs { get; set; }
+ [JsonProperty("axis")]
public int Axis { get; set; }
}
}
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Merging/SubtractArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Merging/SubtractArgs.cs
new file mode 100644
index 000000000..1e3621cb6
--- /dev/null
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Merging/SubtractArgs.cs
@@ -0,0 +1,10 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Keras.ArgsDefinition
+{
+ public class SubtractArgs : MergeArgs
+ {
+ }
+}
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/GlobalAveragePooling1DArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/GlobalAveragePooling1DArgs.cs
new file mode 100644
index 000000000..e73aff766
--- /dev/null
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/GlobalAveragePooling1DArgs.cs
@@ -0,0 +1,10 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Keras.ArgsDefinition
+{
+ public class GlobalAveragePooling1DArgs : Pooling1DArgs
+ {
+ }
+}
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/GlobalAveragePooling2DArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/GlobalAveragePooling2DArgs.cs
new file mode 100644
index 000000000..d143cf471
--- /dev/null
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/GlobalAveragePooling2DArgs.cs
@@ -0,0 +1,10 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Keras.ArgsDefinition
+{
+ public class GlobalAveragePooling2DArgs : Pooling2DArgs
+ {
+ }
+}
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/GlobalMaxPooling1DArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/GlobalMaxPooling1DArgs.cs
new file mode 100644
index 000000000..e03227feb
--- /dev/null
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/GlobalMaxPooling1DArgs.cs
@@ -0,0 +1,10 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Keras.ArgsDefinition
+{
+ public class GlobalMaxPooling1DArgs : Pooling1DArgs
+ {
+ }
+}
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/GlobalMaxPooling2DArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/GlobalMaxPooling2DArgs.cs
new file mode 100644
index 000000000..a95cac836
--- /dev/null
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/GlobalMaxPooling2DArgs.cs
@@ -0,0 +1,10 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Keras.ArgsDefinition
+{
+ public class GlobalMaxPooling2DArgs : Pooling2DArgs
+ {
+ }
+}
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/MaxPooling1DArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/MaxPooling1DArgs.cs
new file mode 100644
index 000000000..4cfff2c15
--- /dev/null
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Pooling/MaxPooling1DArgs.cs
@@ -0,0 +1,10 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Keras.ArgsDefinition
+{
+ public class MaxPooling1DArgs : Pooling1DArgs
+ {
+ }
+}
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/GRUArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/GRUArgs.cs
new file mode 100644
index 000000000..cdc3097e9
--- /dev/null
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/GRUArgs.cs
@@ -0,0 +1,29 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Keras.ArgsDefinition
+{
+ public class GRUArgs : AutoSerializeLayerArgs
+ {
+ public int Units { get; set; }
+ public Activation Activation { get; set; }
+ public Activation RecurrentActivation { get; set; }
+ public bool UseBias { get; set; } = true;
+ public float Dropout { get; set; } = .0f;
+ public float RecurrentDropout { get; set; } = .0f;
+ public IInitializer KernelInitializer { get; set; }
+ public IInitializer RecurrentInitializer { get; set; }
+ public IInitializer BiasInitializer { get; set; }
+ public bool ReturnSequences { get;set; }
+ public bool ReturnState { get;set; }
+ public bool GoBackwards { get;set; }
+ public bool Stateful { get;set; }
+ public bool Unroll { get;set; }
+ public bool TimeMajor { get;set; }
+ public bool ResetAfter { get;set; }
+ public int Implementation { get; set; } = 2;
+
+ }
+
+}
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/GRUOptionalArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/GRUOptionalArgs.cs
new file mode 100644
index 000000000..1d215576f
--- /dev/null
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/GRUOptionalArgs.cs
@@ -0,0 +1,11 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Keras.ArgsDefinition
+{
+ public class GRUOptionalArgs : RnnOptionalArgs
+ {
+ public string Identifier => "GRU";
+ }
+}
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMOptionalArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMOptionalArgs.cs
new file mode 100644
index 000000000..2829927c3
--- /dev/null
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMOptionalArgs.cs
@@ -0,0 +1,11 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Keras.ArgsDefinition.Rnn
+{
+ public class LSTMOptionalArgs : RnnOptionalArgs
+ {
+ public string Identifier => "LSTM";
+ }
+}
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/SimpleRNNOptionalArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/SimpleRNNOptionalArgs.cs
new file mode 100644
index 000000000..a8b8caf06
--- /dev/null
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/SimpleRNNOptionalArgs.cs
@@ -0,0 +1,11 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Keras.ArgsDefinition.Rnn
+{
+ public class SimpleRNNOptionalArgs : RnnOptionalArgs
+ {
+ public string Identifier => "SimpleRNN";
+ }
+}
diff --git a/src/TensorFlowNET.Core/Keras/Engine/IModel.cs b/src/TensorFlowNET.Core/Keras/Engine/IModel.cs
index 19f3df9ba..889c76d91 100644
--- a/src/TensorFlowNET.Core/Keras/Engine/IModel.cs
+++ b/src/TensorFlowNET.Core/Keras/Engine/IModel.cs
@@ -3,6 +3,7 @@
using Tensorflow.Keras.Metrics;
using Tensorflow.Keras.Saving;
using Tensorflow.NumPy;
+using Tensorflow.Util;
namespace Tensorflow.Keras.Engine;
@@ -22,8 +23,11 @@ ICallback fit(NDArray x, NDArray y,
int verbose = 1,
List callbacks = null,
float validation_split = 0f,
- (NDArray val_x, NDArray val_y)? validation_data = null,
+ ValidationDataPack validation_data = null,
+ int validation_step = 10,
bool shuffle = true,
+ Dictionary class_weight = null,
+ NDArray sample_weight = null,
int initial_epoch = 0,
int max_queue_size = 10,
int workers = 1,
@@ -35,8 +39,24 @@ ICallback fit(IEnumerable x, NDArray y,
int verbose = 1,
List callbacks = null,
float validation_split = 0f,
- (IEnumerable val_x, NDArray val_y)? validation_data = null,
+ ValidationDataPack validation_data = null,
bool shuffle = true,
+ Dictionary class_weight = null,
+ NDArray sample_weight = null,
+ int initial_epoch = 0,
+ int max_queue_size = 10,
+ int workers = 1,
+ bool use_multiprocessing = false);
+
+ public ICallback fit(IDatasetV2 dataset,
+ int batch_size = -1,
+ int epochs = 1,
+ int verbose = 1,
+ List callbacks = null,
+ IDatasetV2 validation_data = null,
+ int validation_step = 10, // number of steps between validation runs
+ bool shuffle = true,
+ Dictionary class_weight = null,
int initial_epoch = 0,
int max_queue_size = 10,
int workers = 1,
@@ -63,6 +83,8 @@ void load_weights(string filepath,
Dictionary evaluate(NDArray x, NDArray y,
int batch_size = -1,
int verbose = 1,
+ NDArray sample_weight = null,
+
int steps = -1,
int max_queue_size = 10,
int workers = 1,
@@ -78,6 +100,14 @@ Tensors predict(Tensors x,
int workers = 1,
bool use_multiprocessing = false);
+ public Tensors predict(IDatasetV2 dataset,
+ int batch_size = -1,
+ int verbose = 0,
+ int steps = -1,
+ int max_queue_size = 10,
+ int workers = 1,
+ bool use_multiprocessing = false);
+
void summary(int line_length = -1, float[] positions = null);
IKerasConfig get_config();
diff --git a/src/TensorFlowNET.Core/Keras/IOptimizerApi.cs b/src/TensorFlowNET.Core/Keras/IOptimizerApi.cs
index 19e3a7b8c..6c15fd469 100644
--- a/src/TensorFlowNET.Core/Keras/IOptimizerApi.cs
+++ b/src/TensorFlowNET.Core/Keras/IOptimizerApi.cs
@@ -63,6 +63,6 @@ IOptimizer RMSprop(float learning_rate = 0.001f,
bool centered = false,
string name = "RMSprop");
- IOptimizer SGD(float learning_rate, float momentum);
+ IOptimizer SGD(float learning_rate = 0.01f, float momentum = 0f);
}
}
diff --git a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs
index b8aff5fb6..57273eb08 100644
--- a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs
+++ b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs
@@ -55,6 +55,12 @@ public ILayer Conv1D(int filters,
string kernel_initializer = "glorot_uniform",
string bias_initializer = "zeros");
+ public ILayer Conv2D(int filters,
+ Shape kernel_size = null,
+ Shape strides = null,
+ string padding = "valid"
+ );
+
public ILayer Conv2D(int filters,
Shape kernel_size = null,
Shape strides = null,
@@ -95,6 +101,19 @@ public ILayer Conv2D(int filters,
bool use_bias = true,
string kernel_initializer = "glorot_uniform",
string bias_initializer = "zeros");
+ public ILayer DepthwiseConv2D(Shape kernel_size = null,
+ Shape strides = null,
+ string padding = "valid",
+ string data_format = null,
+ Shape dilation_rate = null,
+ int groups = 1,
+ int depth_multiplier = 1,
+ string activation = null,
+ bool use_bias = false,
+ string kernel_initializer = "glorot_uniform",
+ string bias_initializer = "zeros",
+ string depthwise_initializer = "glorot_uniform"
+ );
public ILayer Dense(int units);
public ILayer Dense(int units,
@@ -161,6 +180,9 @@ public ILayer LayerNormalization(Axis? axis,
public ILayer Normalization(Shape? input_shape = null, int? axis = -1, float? mean = null, float? variance = null, bool invert = false);
public ILayer LeakyReLU(float alpha = 0.3f);
+ public ILayer ReLU6();
+
+
public IRnnCell LSTMCell(int uints,
string activation = "tanh",
string recurrent_activation = "sigmoid",
@@ -259,6 +281,25 @@ public IRnnCell GRUCell(
float recurrent_dropout = 0f,
bool reset_after = true);
+ public ILayer GRU(
+ int units,
+ string activation = "tanh",
+ string recurrent_activation = "sigmoid",
+ bool use_bias = true,
+ string kernel_initializer = "glorot_uniform",
+ string recurrent_initializer = "orthogonal",
+ string bias_initializer = "zeros",
+ float dropout = 0f,
+ float recurrent_dropout = 0f,
+ bool return_sequences = false,
+ bool return_state = false,
+ bool go_backwards = false,
+ bool stateful = false,
+ bool unroll = false,
+ bool time_major = false,
+ bool reset_after = true
+ );
+
///
/// Bidirectional wrapper for RNNs.
///
diff --git a/src/TensorFlowNET.Core/Keras/Regularizers/IRegularizer.cs b/src/TensorFlowNET.Core/Keras/Regularizers/IRegularizer.cs
index f4045c7b2..06dbb7c8c 100644
--- a/src/TensorFlowNET.Core/Keras/Regularizers/IRegularizer.cs
+++ b/src/TensorFlowNET.Core/Keras/Regularizers/IRegularizer.cs
@@ -1,7 +1,25 @@
-namespace Tensorflow.Keras
+using Newtonsoft.Json;
+using System.Collections.Generic;
+using Tensorflow.Keras.Saving.Common;
+
+namespace Tensorflow.Keras
{
- public interface IRegularizer
- {
- Tensor Apply(RegularizerArgs args);
- }
+ [JsonConverter(typeof(CustomizedRegularizerJsonConverter))]
+ public interface IRegularizer
+ {
+ [JsonProperty("class_name")]
+ string ClassName { get; }
+ [JsonProperty("config")]
+ IDictionary Config { get; }
+ Tensor Apply(RegularizerArgs args);
+ }
+
+ public interface IRegularizerApi
+ {
+ IRegularizer GetRegularizerFromName(string name);
+ IRegularizer L1 { get; }
+ IRegularizer L2 { get; }
+ IRegularizer L1L2 { get; }
+ }
+
}
diff --git a/src/TensorFlowNET.Core/Keras/Saving/Json/CustomizedRegularizerJsonConverter.cs b/src/TensorFlowNET.Core/Keras/Saving/Json/CustomizedRegularizerJsonConverter.cs
new file mode 100644
index 000000000..4b1790aca
--- /dev/null
+++ b/src/TensorFlowNET.Core/Keras/Saving/Json/CustomizedRegularizerJsonConverter.cs
@@ -0,0 +1,57 @@
+using Newtonsoft.Json.Linq;
+using Newtonsoft.Json;
+using System;
+using System.Collections.Generic;
+using System.Text;
+using Tensorflow.Operations.Regularizers;
+
+namespace Tensorflow.Keras.Saving.Common
+{
+ class RegularizerInfo
+ {
+ public string class_name { get; set; }
+ public JObject config { get; set; }
+ }
+
+ public class CustomizedRegularizerJsonConverter : JsonConverter
+ {
+ public override bool CanConvert(Type objectType)
+ {
+ return objectType == typeof(IRegularizer);
+ }
+
+ public override bool CanRead => true;
+
+ public override bool CanWrite => true;
+
+ public override void WriteJson(JsonWriter writer, object? value, JsonSerializer serializer)
+ {
+ var regularizer = value as IRegularizer;
+ if (regularizer is null)
+ {
+ JToken.FromObject(null).WriteTo(writer);
+ return;
+ }
+ JToken.FromObject(new RegularizerInfo()
+ {
+ class_name = regularizer.ClassName,
+ config = JObject.FromObject(regularizer.Config)
+ }, serializer).WriteTo(writer);
+ }
+
+ public override object? ReadJson(JsonReader reader, Type objectType, object? existingValue, JsonSerializer serializer)
+ {
+ var info = serializer.Deserialize(reader);
+ if (info is null)
+ {
+ return null;
+ }
+ return info.class_name switch
+ {
+ "L1L2" => new L1L2 (info.config["l1"].ToObject(), info.config["l2"].ToObject()),
+ "L1" => new L1(info.config["l1"].ToObject()),
+ "L2" => new L2(info.config["l2"].ToObject()),
+ };
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.Creation.cs b/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.Creation.cs
index f29879b0f..c0f9e695d 100644
--- a/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.Creation.cs
+++ b/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.Creation.cs
@@ -4,6 +4,8 @@
using System.Linq;
using System.Text;
using Tensorflow.Util;
+using Razorvine.Pickle;
+using Tensorflow.NumPy.Pickle;
using static Tensorflow.Binding;
namespace Tensorflow.NumPy
@@ -97,6 +99,14 @@ Array ReadValueMatrix(BinaryReader reader, Array matrix, int bytes, Type type, i
return matrix;
}
+ Array ReadObjectMatrix(BinaryReader reader, Array matrix, int[] shape)
+ {
+ Stream deflateStream = reader.BaseStream;
+ BufferedStream bufferedStream = new BufferedStream(deflateStream);
+ var unpickler = new Unpickler();
+ return (MultiArrayPickleWarpper)unpickler.load(bufferedStream);
+ }
+
public (NDArray, NDArray) meshgrid(T[] array, bool copy = true, bool sparse = false)
{
var tensors = array_ops.meshgrid(array, copy: copy, sparse: sparse);
diff --git a/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.load.cs b/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.load.cs
index 05f53d5e7..199e5ced3 100644
--- a/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.load.cs
+++ b/src/TensorFlowNET.Core/NumPy/Implementation/NumPyImpl.load.cs
@@ -27,8 +27,14 @@ public Array LoadMatrix(Stream stream)
Array matrix = Array.CreateInstance(type, shape);
//if (type == typeof(String))
- //return ReadStringMatrix(reader, matrix, bytes, type, shape);
- return ReadValueMatrix(reader, matrix, bytes, type, shape);
+ //return ReadStringMatrix(reader, matrix, bytes, type, shape);
+
+ if (type == typeof(Object))
+ return ReadObjectMatrix(reader, matrix, shape);
+ else
+ {
+ return ReadValueMatrix(reader, matrix, bytes, type, shape);
+ }
}
}
@@ -37,7 +43,7 @@ public T Load(Stream stream)
ICloneable, IList, ICollection, IEnumerable, IStructuralComparable, IStructuralEquatable
{
// if (typeof(T).IsArray && (typeof(T).GetElementType().IsArray || typeof(T).GetElementType() == typeof(string)))
- // return LoadJagged(stream) as T;
+ // return LoadJagged(stream) as T;
return LoadMatrix(stream) as T;
}
@@ -93,7 +99,7 @@ bool ParseReader(BinaryReader reader, out int bytes, out Type t, out int[] shape
Type GetType(string dtype, out int bytes, out bool? isLittleEndian)
{
isLittleEndian = IsLittleEndian(dtype);
- bytes = Int32.Parse(dtype.Substring(2));
+ bytes = dtype.Length > 2 ? Int32.Parse(dtype.Substring(2)) : 0;
string typeCode = dtype.Substring(1);
@@ -121,6 +127,8 @@ Type GetType(string dtype, out int bytes, out bool? isLittleEndian)
return typeof(Double);
if (typeCode.StartsWith("S"))
return typeof(String);
+ if (typeCode.StartsWith("O"))
+ return typeof(Object);
throw new NotSupportedException();
}
diff --git a/src/TensorFlowNET.Core/NumPy/Implementation/RandomizedImpl.cs b/src/TensorFlowNET.Core/NumPy/Implementation/RandomizedImpl.cs
index 064c7362f..a707e8aae 100644
--- a/src/TensorFlowNET.Core/NumPy/Implementation/RandomizedImpl.cs
+++ b/src/TensorFlowNET.Core/NumPy/Implementation/RandomizedImpl.cs
@@ -14,9 +14,9 @@ public class RandomizedImpl
public NDArray permutation(NDArray x) => new NDArray(random_ops.random_shuffle(x));
[AutoNumPy]
- public void shuffle(NDArray x)
+ public void shuffle(NDArray x, int? seed = null)
{
- var y = random_ops.random_shuffle(x);
+ var y = random_ops.random_shuffle(x, seed);
Marshal.Copy(y.BufferToArray(), 0, x.TensorDataPointer, (int)x.bytesize);
}
diff --git a/src/TensorFlowNET.Core/NumPy/NDArrayConverter.cs b/src/TensorFlowNET.Core/NumPy/NDArrayConverter.cs
index c8c2d45fa..4c64eba74 100644
--- a/src/TensorFlowNET.Core/NumPy/NDArrayConverter.cs
+++ b/src/TensorFlowNET.Core/NumPy/NDArrayConverter.cs
@@ -10,6 +10,7 @@ public class NDArrayConverter
public unsafe static T Scalar(NDArray nd) where T : unmanaged
=> nd.dtype switch
{
+ TF_DataType.TF_BOOL => Scalar(*(bool*)nd.data),
TF_DataType.TF_UINT8 => Scalar(*(byte*)nd.data),
TF_DataType.TF_FLOAT => Scalar(*(float*)nd.data),
TF_DataType.TF_INT32 => Scalar(*(int*)nd.data),
diff --git a/src/TensorFlowNET.Core/NumPy/Numpy.Manipulation.cs b/src/TensorFlowNET.Core/NumPy/Numpy.Manipulation.cs
index 940856056..5e2574170 100644
--- a/src/TensorFlowNET.Core/NumPy/Numpy.Manipulation.cs
+++ b/src/TensorFlowNET.Core/NumPy/Numpy.Manipulation.cs
@@ -30,6 +30,15 @@ public static NDArray concatenate((NDArray, NDArray) tuple, int axis = 0)
[AutoNumPy]
public static NDArray stack(params NDArray[] arrays) => new NDArray(array_ops.stack(arrays));
+ [AutoNumPy]
+ public static NDArray stack(NDArray[] arrays, int axis = 0) => new NDArray(array_ops.stack(arrays, axis));
+
+ [AutoNumPy]
+ public static NDArray stack((NDArray, NDArray) tuple, int axis = 0) => new NDArray(array_ops.stack(new[] { tuple.Item1, tuple.Item2 }, axis));
+
+ [AutoNumPy]
+ public static NDArray stack((NDArray, NDArray, NDArray) tuple, int axis = 0) => new NDArray(array_ops.stack(new[] { tuple.Item1, tuple.Item2, tuple.Item3 }, axis));
+
[AutoNumPy]
public static NDArray moveaxis(NDArray array, Axis source, Axis destination) => new NDArray(array_ops.moveaxis(array, source, destination));
}
diff --git a/src/TensorFlowNET.Core/NumPy/Numpy.Math.cs b/src/TensorFlowNET.Core/NumPy/Numpy.Math.cs
index 5bc97952b..2559638b3 100644
--- a/src/TensorFlowNET.Core/NumPy/Numpy.Math.cs
+++ b/src/TensorFlowNET.Core/NumPy/Numpy.Math.cs
@@ -85,5 +85,11 @@ public static NDArray dot(NDArray x1, NDArray x2, NDArray? axes = null, string?
[AutoNumPy]
public static NDArray add(NDArray x, NDArray y) => new NDArray(math_ops.add(x, y));
+
+ [AutoNumPy]
+ public static NDArray greater(NDArray x, NDArray y) => new NDArray(tf.greater(x, y));
+
+ [AutoNumPy]
+ public static NDArray less(NDArray x, NDArray y) => new NDArray(tf.less(x, y));
}
}
diff --git a/src/TensorFlowNET.Core/NumPy/Pickle/DTypePickleWarpper.cs b/src/TensorFlowNET.Core/NumPy/Pickle/DTypePickleWarpper.cs
new file mode 100644
index 000000000..5dff6c16b
--- /dev/null
+++ b/src/TensorFlowNET.Core/NumPy/Pickle/DTypePickleWarpper.cs
@@ -0,0 +1,20 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.NumPy.Pickle
+{
+ public class DTypePickleWarpper
+ {
+ TF_DataType dtype { get; set; }
+ public DTypePickleWarpper(TF_DataType dtype)
+ {
+ this.dtype = dtype;
+ }
+ public void __setstate__(object[] args) { }
+ public static implicit operator TF_DataType(DTypePickleWarpper dTypeWarpper)
+ {
+ return dTypeWarpper.dtype;
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/NumPy/Pickle/DtypeConstructor.cs b/src/TensorFlowNET.Core/NumPy/Pickle/DtypeConstructor.cs
new file mode 100644
index 000000000..160c7d4e9
--- /dev/null
+++ b/src/TensorFlowNET.Core/NumPy/Pickle/DtypeConstructor.cs
@@ -0,0 +1,52 @@
+using System;
+using System.Collections.Generic;
+using System.Diagnostics.CodeAnalysis;
+using System.Text;
+using Razorvine.Pickle;
+
+namespace Tensorflow.NumPy.Pickle
+{
+ ///
+ ///
+ ///
+ [SuppressMessage("ReSharper", "InconsistentNaming")]
+ [SuppressMessage("ReSharper", "MemberCanBePrivate.Global")]
+ [SuppressMessage("ReSharper", "MemberCanBeMadeStatic.Global")]
+ class DtypeConstructor : IObjectConstructor
+ {
+ public object construct(object[] args)
+ {
+ var typeCode = (string)args[0];
+ TF_DataType dtype;
+ if (typeCode == "b1")
+ dtype = np.@bool;
+ else if (typeCode == "i1")
+ dtype = np.@byte;
+ else if (typeCode == "i2")
+ dtype = np.int16;
+ else if (typeCode == "i4")
+ dtype = np.int32;
+ else if (typeCode == "i8")
+ dtype = np.int64;
+ else if (typeCode == "u1")
+ dtype = np.ubyte;
+ else if (typeCode == "u2")
+ dtype = np.uint16;
+ else if (typeCode == "u4")
+ dtype = np.uint32;
+ else if (typeCode == "u8")
+ dtype = np.uint64;
+ else if (typeCode == "f4")
+ dtype = np.float32;
+ else if (typeCode == "f8")
+ dtype = np.float64;
+ else if (typeCode.StartsWith("S"))
+ dtype = np.@string;
+ else if (typeCode.StartsWith("O"))
+ dtype = np.@object;
+ else
+ throw new NotSupportedException();
+ return new DTypePickleWarpper(dtype);
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/NumPy/Pickle/MultiArrayConstructor.cs b/src/TensorFlowNET.Core/NumPy/Pickle/MultiArrayConstructor.cs
new file mode 100644
index 000000000..885f368c4
--- /dev/null
+++ b/src/TensorFlowNET.Core/NumPy/Pickle/MultiArrayConstructor.cs
@@ -0,0 +1,53 @@
+using System;
+using System.Collections.Generic;
+using System.Diagnostics.CodeAnalysis;
+using System.Text;
+using Razorvine.Pickle;
+using Razorvine.Pickle.Objects;
+
+namespace Tensorflow.NumPy.Pickle
+{
+ ///
+ /// Creates multiarrays of objects. Returns a primitive type multiarray such as int[][] if
+ /// the objects are ints, etc.
+ ///
+ [SuppressMessage("ReSharper", "InconsistentNaming")]
+ [SuppressMessage("ReSharper", "MemberCanBePrivate.Global")]
+ [SuppressMessage("ReSharper", "MemberCanBeMadeStatic.Global")]
+ public class MultiArrayConstructor : IObjectConstructor
+ {
+ public object construct(object[] args)
+ {
+ if (args.Length != 3)
+ throw new InvalidArgumentError($"Invalid number of arguments in MultiArrayConstructor._reconstruct. Expected three arguments. Given {args.Length} arguments.");
+
+ var types = (ClassDictConstructor)args[0];
+ if (types.module != "numpy" || types.name != "ndarray")
+ throw new RuntimeError("_reconstruct: First argument must be a sub-type of ndarray");
+
+ var arg1 = (object[])args[1];
+ var dims = new int[arg1.Length];
+ for (var i = 0; i < arg1.Length; i++)
+ {
+ dims[i] = (int)arg1[i];
+ }
+ var shape = new Shape(dims);
+
+ TF_DataType dtype;
+ string identifier;
+ if (args[2].GetType() == typeof(string))
+ identifier = (string)args[2];
+ else
+ identifier = Encoding.UTF8.GetString((byte[])args[2]);
+ switch (identifier)
+ {
+ case "u": dtype = np.uint32; break;
+ case "c": dtype = np.complex_; break;
+ case "f": dtype = np.float32; break;
+ case "b": dtype = np.@bool; break;
+ default: throw new NotImplementedException($"Unsupported data type: {args[2]}");
+ }
+ return new MultiArrayPickleWarpper(shape, dtype);
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/NumPy/Pickle/MultiArrayPickleWarpper.cs b/src/TensorFlowNET.Core/NumPy/Pickle/MultiArrayPickleWarpper.cs
new file mode 100644
index 000000000..af8d1ecc2
--- /dev/null
+++ b/src/TensorFlowNET.Core/NumPy/Pickle/MultiArrayPickleWarpper.cs
@@ -0,0 +1,119 @@
+using Newtonsoft.Json.Linq;
+using Serilog.Debugging;
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.NumPy.Pickle
+{
+ public class MultiArrayPickleWarpper
+ {
+ public Shape reconstructedShape { get; set; }
+ public TF_DataType reconstructedDType { get; set; }
+ public NDArray reconstructedNDArray { get; set; }
+ public Array reconstructedMultiArray { get; set; }
+ public MultiArrayPickleWarpper(Shape shape, TF_DataType dtype)
+ {
+ reconstructedShape = shape;
+ reconstructedDType = dtype;
+ }
+ public void __setstate__(object[] args)
+ {
+ if (args.Length != 5)
+ throw new InvalidArgumentError($"Invalid number of arguments in NDArray.__setstate__. Expected five arguments. Given {args.Length} arguments.");
+
+ var version = (int)args[0]; // version
+
+ var arg1 = (object[])args[1];
+ var dims = new int[arg1.Length];
+ for (var i = 0; i < arg1.Length; i++)
+ {
+ dims[i] = (int)arg1[i];
+ }
+ var _ShapeLike = new Shape(dims); // shape
+
+ TF_DataType _DType_co = (DTypePickleWarpper)args[2]; // DType
+
+ var F_continuous = (bool)args[3]; // F-continuous
+ if (F_continuous)
+ throw new InvalidArgumentError("Fortran Continuous memory layout is not supported. Please use C-continuous layout or check the data format.");
+
+ var data = args[4]; // Data
+ /*
+ * If we ever need another pickle format, increment the version
+ * number. But we should still be able to handle the old versions.
+ */
+ if (version < 0 || version > 4)
+ throw new ValueError($"can't handle version {version} of numpy.dtype pickle");
+
+ // TODO: Implement the missing details and checks from the official Numpy C code here.
+ // https://github.com/numpy/numpy/blob/2f0bd6e86a77e4401d0384d9a75edf9470c5deb6/numpy/core/src/multiarray/descriptor.c#L2761
+
+ if (data.GetType() == typeof(ArrayList))
+ {
+ Reconstruct((ArrayList)data);
+ }
+ else
+ throw new NotImplementedException("");
+ }
+ private void Reconstruct(ArrayList arrayList)
+ {
+ int ndim = 1;
+ var subArrayList = arrayList;
+ while (subArrayList.Count > 0 && subArrayList[0] != null && subArrayList[0].GetType() == typeof(ArrayList))
+ {
+ subArrayList = (ArrayList)subArrayList[0];
+ ndim += 1;
+ }
+ var type = subArrayList[0].GetType();
+ if (type == typeof(int))
+ {
+ if (ndim == 1)
+ {
+ int[] list = (int[])arrayList.ToArray(typeof(int));
+ Shape shape = new Shape(new int[] { arrayList.Count });
+ reconstructedMultiArray = list;
+ reconstructedNDArray = new NDArray(list, shape);
+ }
+ if (ndim == 2)
+ {
+ int secondDim = 0;
+ foreach (ArrayList subArray in arrayList)
+ {
+ secondDim = subArray.Count > secondDim ? subArray.Count : secondDim;
+ }
+ int[,] list = new int[arrayList.Count, secondDim];
+ for (int i = 0; i < arrayList.Count; i++)
+ {
+ var subArray = (ArrayList?)arrayList[i];
+ if (subArray == null)
+ throw new NullReferenceException("");
+ for (int j = 0; j < subArray.Count; j++)
+ {
+ var element = subArray[j];
+ if (element == null)
+ throw new NoNullAllowedException("the element of ArrayList cannot be null.");
+ list[i, j] = (int)element;
+ }
+ }
+ Shape shape = new Shape(new int[] { arrayList.Count, secondDim });
+ reconstructedMultiArray = list;
+ reconstructedNDArray = new NDArray(list, shape);
+ }
+ if (ndim > 2)
+ throw new NotImplementedException("can't handle ArrayList with more than two dimensions.");
+ }
+ else
+ throw new NotImplementedException("");
+ }
+ public static implicit operator Array(MultiArrayPickleWarpper arrayWarpper)
+ {
+ return arrayWarpper.reconstructedMultiArray;
+ }
+ public static implicit operator NDArray(MultiArrayPickleWarpper arrayWarpper)
+ {
+ return arrayWarpper.reconstructedNDArray;
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/Numpy/Numpy.cs b/src/TensorFlowNET.Core/Numpy/Numpy.cs
index 72d2e981c..fee2d63fc 100644
--- a/src/TensorFlowNET.Core/Numpy/Numpy.cs
+++ b/src/TensorFlowNET.Core/Numpy/Numpy.cs
@@ -43,7 +43,9 @@ public partial class np
public static readonly TF_DataType @decimal = TF_DataType.TF_DOUBLE;
public static readonly TF_DataType complex_ = TF_DataType.TF_COMPLEX;
public static readonly TF_DataType complex64 = TF_DataType.TF_COMPLEX64;
- public static readonly TF_DataType complex128 = TF_DataType.TF_COMPLEX128;
+ public static readonly TF_DataType complex128 = TF_DataType.TF_COMPLEX128;
+ public static readonly TF_DataType @string = TF_DataType.TF_STRING;
+ public static readonly TF_DataType @object = TF_DataType.TF_VARIANT;
#endregion
public static double nan => double.NaN;
diff --git a/src/TensorFlowNET.Core/Operations/Operation.cs b/src/TensorFlowNET.Core/Operations/Operation.cs
index e59c381cb..2105c53fa 100644
--- a/src/TensorFlowNET.Core/Operations/Operation.cs
+++ b/src/TensorFlowNET.Core/Operations/Operation.cs
@@ -437,7 +437,7 @@ internal void _set_attr(string attr_name, AttrValue attr_value)
internal void _set_attr_with_buf(string attr_name, Buffer attr_buf)
{
Status status = new();
- c_api.TFC_SetAttr(graph, _handle, attr_name, attr_buf, status);
+ c_api.TF_SetAttr(graph, _handle, attr_name, attr_buf, status);
status.Check(true);
}
}
diff --git a/src/TensorFlowNET.Core/Operations/Regularizers/L1.cs b/src/TensorFlowNET.Core/Operations/Regularizers/L1.cs
new file mode 100644
index 000000000..9e0619454
--- /dev/null
+++ b/src/TensorFlowNET.Core/Operations/Regularizers/L1.cs
@@ -0,0 +1,33 @@
+using System;
+
+using Tensorflow.Keras;
+
+namespace Tensorflow.Operations.Regularizers
+{
+ public class L1 : IRegularizer
+ {
+ float _l1;
+ private readonly Dictionary _config;
+
+ public string ClassName => "L1";
+ public virtual IDictionary Config => _config;
+
+ public L1(float l1 = 0.01f)
+ {
+ // l1 = 0.01 if l1 is None else l1
+ // validate_float_arg(l1, name = "l1")
+ // self.l1 = ops.convert_to_tensor(l1)
+ this._l1 = l1;
+
+ _config = new();
+ _config["l1"] = _l1;
+ }
+
+
+ public Tensor Apply(RegularizerArgs args)
+ {
+ //return self.l1 * ops.sum(ops.absolute(x))
+ return _l1 * math_ops.reduce_sum(math_ops.abs(args.X));
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/Operations/Regularizers/L1L2.cs b/src/TensorFlowNET.Core/Operations/Regularizers/L1L2.cs
new file mode 100644
index 000000000..e3af00eb5
--- /dev/null
+++ b/src/TensorFlowNET.Core/Operations/Regularizers/L1L2.cs
@@ -0,0 +1,48 @@
+using System;
+
+using Tensorflow.Keras;
+
+namespace Tensorflow.Operations.Regularizers
+{
+ public class L1L2 : IRegularizer
+ {
+ float _l1;
+ float _l2;
+ private readonly Dictionary _config;
+
+ public string ClassName => "L1L2";
+ public virtual IDictionary Config => _config;
+
+ public L1L2(float l1 = 0.0f, float l2 = 0.0f)
+ {
+ //l1 = 0.0 if l1 is None else l1
+ //l2 = 0.0 if l2 is None else l2
+ // validate_float_arg(l1, name = "l1")
+ // validate_float_arg(l2, name = "l2")
+
+ // self.l1 = l1
+ // self.l2 = l2
+ this._l1 = l1;
+ this._l2 = l2;
+
+ _config = new();
+ _config["l1"] = l1;
+ _config["l2"] = l2;
+ }
+
+ public Tensor Apply(RegularizerArgs args)
+ {
+ //regularization = ops.convert_to_tensor(0.0, dtype = x.dtype)
+ //if self.l1:
+ // regularization += self.l1 * ops.sum(ops.absolute(x))
+ //if self.l2:
+ // regularization += self.l2 * ops.sum(ops.square(x))
+ //return regularization
+
+ Tensor regularization = tf.constant(0.0, args.X.dtype);
+ regularization += _l1 * math_ops.reduce_sum(math_ops.abs(args.X));
+ regularization += _l2 * math_ops.reduce_sum(math_ops.square(args.X));
+ return regularization;
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/Operations/Regularizers/L2.cs b/src/TensorFlowNET.Core/Operations/Regularizers/L2.cs
new file mode 100644
index 000000000..6c0e950a9
--- /dev/null
+++ b/src/TensorFlowNET.Core/Operations/Regularizers/L2.cs
@@ -0,0 +1,33 @@
+using System;
+
+using Tensorflow.Keras;
+
+namespace Tensorflow.Operations.Regularizers
+{
+ public class L2 : IRegularizer
+ {
+ float _l2;
+ private readonly Dictionary _config;
+
+ public string ClassName => "L2";
+ public virtual IDictionary Config => _config;
+
+ public L2(float l2 = 0.01f)
+ {
+ // l2 = 0.01 if l2 is None else l2
+ // validate_float_arg(l2, name = "l2")
+ // self.l2 = l2
+ this._l2 = l2;
+
+ _config = new();
+ _config["l2"] = _l2;
+ }
+
+
+ public Tensor Apply(RegularizerArgs args)
+ {
+ //return self.l2 * ops.sum(ops.square(x))
+ return _l2 * math_ops.reduce_sum(math_ops.square(args.X));
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/Operations/array_ops.cs b/src/TensorFlowNET.Core/Operations/array_ops.cs
index f80dcd2c4..548a885ed 100644
--- a/src/TensorFlowNET.Core/Operations/array_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/array_ops.cs
@@ -166,6 +166,11 @@ public static Tensor boolean_mask(T1 tensor, T2 mask, string name = "boo
throw new ValueError("mask cannot be scalar.");
var leading_size = gen_math_ops.prod(shape(tensor_tensor)[$"{axis}:{axis + ndims_mask}"], ops.convert_to_tensor(new[] { 0 }));
+ if (leading_size.rank == 0)
+ {
+ leading_size = expand_dims(leading_size, 0);
+ }
+
var shape1 = concat(new[]
{
shape(tensor_tensor)[$":{axis}"],
@@ -185,7 +190,7 @@ public static Tensor boolean_mask(T1 tensor, T2 mask, string name = "boo
private static Tensor _apply_mask_1d(Tensor reshaped_tensor, Tensor mask, int axis = 0)
{
- var indices = squeeze(where(mask), axis: new[] { 1 });
+ var indices = squeeze(where_v2(mask), axis: new[] { 1 });
return gather(reshaped_tensor, indices, axis: ops.convert_to_tensor(axis));
}
@@ -829,7 +834,7 @@ public static Tensor strided_slice_grad(Tensor shape, Tensor begin, Tensor end,
/// A `Tensor`. Has the same type as `input`.
/// Contains the same data as `input`, but has one or more dimensions of
/// size 1 removed.
- public static Tensor squeeze(Tensor input, int[] axis = null, string name = null)
+ public static Tensor squeeze(Tensor input, Axis axis = null, string name = null)
=> gen_array_ops.squeeze(input, axis, name);
public static Tensor identity(Tensor input, string name = null)
@@ -990,7 +995,7 @@ public static Tensor gather(ResourceVariable @params, Tensor indices, string nam
return @params.sparse_read(indices, name);
}
- public static Tensor transpose(T1 a, Axis perm, string name = "transpose", bool conjugate = false)
+ public static Tensor transpose(T1 a, Axis perm = null, string name = "transpose", bool conjugate = false)
{
return tf_with(ops.name_scope(name, "transpose", new { a }), scope =>
{
@@ -1139,5 +1144,18 @@ public static Tensor placeholder(TF_DataType dtype, Shape shape = null, string n
var _op = tf.OpDefLib._apply_op_helper("Placeholder", name: name, args: new { dtype, shape });
return _op.output;
}
+
+ public static int get_positive_axis(int axis, int ndims=-100, string axis_name="axis", string ndims_name= "ndims")
+ {
+ if(ndims != -100)
+ {
+ if (axis >= 0 && axis < ndims) return axis;
+ else if (-ndims <= axis && axis < 0) return axis + ndims;
+ else throw new ValueError($"{axis_name}={axis} out of bounds:expected {-ndims}<={axis_name}<{ndims}");
+
+ } else if(axis < 0) throw new ValueError($"{axis_name}={axis} may only be negative if {ndims_name} is statically known.");
+ return axis;
+ }
+
}
}
diff --git a/src/TensorFlowNET.Core/Operations/handle_data_util.cs b/src/TensorFlowNET.Core/Operations/handle_data_util.cs
index a01efc520..363d3144e 100644
--- a/src/TensorFlowNET.Core/Operations/handle_data_util.cs
+++ b/src/TensorFlowNET.Core/Operations/handle_data_util.cs
@@ -51,7 +51,7 @@ public static void set_handle_data(Tensor target_t, HandleData handle_data)
}
Status status = new();
var proto = handle_data.ToByteArray();
- c_api.TFC_SetHandleShapeAndType(target_t.graph.c_graph, target_t._as_tf_output(), proto, proto.Length, status);
+ c_api.TF_SetHandleShapeAndType(target_t.graph.c_graph, target_t._as_tf_output(), proto, proto.Length, status);
status.Check(true);
}
diff --git a/src/TensorFlowNET.Core/Operations/image_ops_impl.cs b/src/TensorFlowNET.Core/Operations/image_ops_impl.cs
index 318b8b142..f1aff28ee 100644
--- a/src/TensorFlowNET.Core/Operations/image_ops_impl.cs
+++ b/src/TensorFlowNET.Core/Operations/image_ops_impl.cs
@@ -102,7 +102,10 @@ internal static Operation[] _CheckAtLeast3DImage(Tensor image, bool require_stat
{
throw new ValueError("\'image\' must be fully defined.");
}
- var dims = image_shape["-3:"];
+ var dims = new Shape(new[] {
+ image_shape.dims[image_shape.dims.Length - 3],
+ image_shape.dims[image_shape.dims.Length - 2],
+ image_shape.dims[image_shape.dims.Length - 1]});
foreach (var dim in dims.dims)
{
if (dim == 0)
@@ -112,16 +115,18 @@ internal static Operation[] _CheckAtLeast3DImage(Tensor image, bool require_stat
}
var image_shape_last_three_elements = new Shape(new[] {
- image_shape.dims[image_shape.dims.Length - 1],
+ image_shape.dims[image_shape.dims.Length - 3],
image_shape.dims[image_shape.dims.Length - 2],
- image_shape.dims[image_shape.dims.Length - 3]});
+ image_shape.dims[image_shape.dims.Length - 1]});
if (!image_shape_last_three_elements.IsFullyDefined)
{
Tensor image_shape_ = array_ops.shape(image);
- var image_shape_return = tf.constant(new[] {
- image_shape_.dims[image_shape.dims.Length - 1],
- image_shape_.dims[image_shape.dims.Length - 2],
- image_shape_.dims[image_shape.dims.Length - 3]});
+ var image_shape_return = tf.slice(image_shape_, new[] { Math.Max(image_shape.dims.Length - 3, 0) }, new[] { 3 });
+
+ //var image_shape_return = tf.constant(new[] {
+ // image_shape_.dims[image_shape_.dims.Length - 3],
+ // image_shape_.dims[image_shape_.dims.Length - 2],
+ // image_shape_.dims[image_shape_.dims.Length - 1]});
return new Operation[] {
check_ops.assert_positive(
@@ -209,10 +214,10 @@ internal static Tensor _random_flip(Tensor image, int flip_index, int seed, stri
}
public static Tensor flip_left_right(Tensor image)
- => _flip(image, 0, "flip_left_right");
+ => _flip(image, 1, "flip_left_right");
public static Tensor flip_up_down(Tensor image)
- => _flip(image, 1, "flip_up_down");
+ => _flip(image, 0, "flip_up_down");
internal static Tensor _flip(Tensor image, int flip_index, string scope_name)
{
@@ -223,11 +228,11 @@ internal static Tensor _flip(Tensor image, int flip_index, string scope_name)
Shape shape = image.shape;
if (shape.ndim == 3 || shape.ndim == Unknown)
{
- return fix_image_flip_shape(image, gen_array_ops.reverse(image, ops.convert_to_tensor(new int[] { flip_index })));
+ return fix_image_flip_shape(image, gen_array_ops.reverse_v2(image, ops.convert_to_tensor(new int[] { flip_index })));
}
else if (shape.ndim == 4)
{
- return gen_array_ops.reverse_v2(image, ops.convert_to_tensor(new[] { (flip_index + 1) % 2 }));
+ return gen_array_ops.reverse_v2(image, ops.convert_to_tensor(new[] { flip_index + 1 }));
}
else
{
@@ -2047,6 +2052,22 @@ internal static (Tensor, Tensor) non_max_suppression_padded_v1(Tensor boxes, Ten
});
}
+ public static Tensor encode_jpeg(Tensor contents, string name = null)
+ {
+ return tf_with(ops.name_scope(name, "encode_jpeg"), scope =>
+ {
+ return gen_ops.encode_jpeg(contents, name:name);
+ });
+ }
+
+ public static Tensor encode_png(Tensor contents, string name = null)
+ {
+ return tf_with(ops.name_scope(name, "encode_png"), scope =>
+ {
+ return gen_ops.encode_png(contents, name: name);
+ });
+ }
+
public static Tensor is_jpeg(Tensor contents, string name = null)
{
return tf_with(ops.name_scope(name, "is_jpeg"), scope =>
diff --git a/src/TensorFlowNET.Core/Tensorflow.Binding.csproj b/src/TensorFlowNET.Core/Tensorflow.Binding.csproj
index ca5aa47a9..42c0399da 100644
--- a/src/TensorFlowNET.Core/Tensorflow.Binding.csproj
+++ b/src/TensorFlowNET.Core/Tensorflow.Binding.csproj
@@ -4,14 +4,14 @@
netstandard2.0;net6.0
Tensorflow.Binding
Tensorflow
- 2.11.0
- 0.110.2
+ 2.15.0
+ 0.150.0
10.0
enable
Haiping Chen, Eli Belash, Yaohui Liu, Meinrad Recheis
SciSharp STACK
False
- Apache 2.0, Haiping Chen $([System.DateTime]::UtcNow.ToString(yyyy))
+ Apache 2.0, Haiping Chen since 2018
https://github.com/SciSharp/TensorFlow.NET
git
http://scisharpstack.org
@@ -20,12 +20,16 @@
Google's TensorFlow full binding in .NET Standard.
Building, training and infering deep learning models.
https://tensorflownet.readthedocs.io
- 0.110.1.0
+ 0.150.0.0
+ tf.net 0.150.x and above are based on tensorflow native 2.15.0
+ * Support BERT model.
+
tf.net 0.110.x and above are based on tensorflow native 2.11.0
* Support RNN, LSTM model.
* Support Transformer model.
-
+ * Added IMDB dataset.
+
tf.net 0.100.x and above are based on tensorflow native 2.10.0
* Eager Mode is added finally.
@@ -42,8 +46,9 @@ https://tensorflownet.readthedocs.io
tf.net 0.7x.x aligns with TensorFlow v2.7.x native library.
tf.net 0.10x.x aligns with TensorFlow v2.10.x native library.
tf.net 0.11x.x aligns with TensorFlow v2.11.x native library.
+ tf.net 0.15x.x aligns with TensorFlow v2.15.x native library.
- 0.110.2.0
+ 0.150.0.0
LICENSE
true
packages
@@ -172,10 +177,11 @@ https://tensorflownet.readthedocs.io
-
+
-
-
+
+
+
diff --git a/src/TensorFlowNET.Core/Tensors/Ragged/RaggedTensor.cs b/src/TensorFlowNET.Core/Tensors/Ragged/RaggedTensor.cs
index 4f85e1081..0f09d4128 100644
--- a/src/TensorFlowNET.Core/Tensors/Ragged/RaggedTensor.cs
+++ b/src/TensorFlowNET.Core/Tensors/Ragged/RaggedTensor.cs
@@ -163,5 +163,38 @@ public static implicit operator RaggedTensor(Tensor tensor)
{
return tensor.Tag as RaggedTensor;
}
+ public Tensor nrows(TF_DataType out_type, string name = null)
+ {
+     /* Returns the number of rows in this ragged tensor, cast to `out_type`.
+        The tf_with result must be returned to the caller: the previous code
+        discarded the lambda's value and unconditionally returned null. */
+     return tf_with(ops.name_scope(name, "RaggedNRows"), scope =>
+     {
+         return math_ops.cast(this._row_partition.nrows(), dtype: out_type);
+     });
+ }
+ /* Returns the lengths of rows along `axis` (default -1, the innermost axis).
+    NOTE(review): the Python RaggedTensor.row_lengths defaults axis to 1 —
+    confirm the -1 default here is intentional.
+    NOTE(review): several returns pass a Tensor through the implicit
+    Tensor->RaggedTensor operator (tensor.Tag as RaggedTensor), which yields
+    null for plain tensors — verify callers expect that. */
+ public RaggedTensor row_lengths(int axis=-1, string name=null)
+ {
+     // Fast paths for the two outermost axes. These run BEFORE axis
+     // normalization, so the post-normalization axis==0/1 branches below are
+     // reachable only for negative axis inputs.
+     if (axis == 0) return this._row_partition.nrows();
+     if (axis == 1) return this._row_partition.row_lengths();
+     // NOTE(review): when _values is a plain Tensor this cast goes through the
+     // implicit conversion and may produce null; the final else branch then
+     // uses `values` — confirm _values is a RaggedTensor on that path.
+     var values = (RaggedTensor)this._values;
+     axis = array_ops.get_positive_axis(
+         axis, this.shape.rank, ndims_name: "rank(this)");
+     if (axis == 0) return this.nrows(this._row_partition.GetDataType());
+     else if (axis == 1)
+     {
+         // lengths[i] = row_splits[i + 1] - row_splits[i]
+         var splits = this._row_partition.row_splits;
+         return splits[new Slice(start: 1)] - splits[new Slice(stop: -1)];
+
+     }
+     else if (this._values is RaggedTensor)
+     {
+         // Deeper ragged axis: recurse into the nested values.
+         return values.row_lengths(axis - 1);
+     }
+     else
+     {
+         // Uniform inner dimension: every row has length shape[axis - 1],
+         // broadcast to the shape of the outer dimensions.
+         var shape = array_ops.shape(values, out_type: this._row_partition.GetDataType());
+         return array_ops.ones(shape[new Slice(stop:axis - 1)], this._row_partition.GetDataType()) *
+             shape[axis - 1];
+     }
+ }
}
}
diff --git a/src/TensorFlowNET.Core/Tensors/Ragged/RowPartition.cs b/src/TensorFlowNET.Core/Tensors/Ragged/RowPartition.cs
index 29dc525df..9e242ff38 100644
--- a/src/TensorFlowNET.Core/Tensors/Ragged/RowPartition.cs
+++ b/src/TensorFlowNET.Core/Tensors/Ragged/RowPartition.cs
@@ -14,10 +14,15 @@ You may obtain a copy of the License at
limitations under the License.
******************************************************************************/
+using Serilog.Debugging;
using System;
+using System.Collections.Concurrent;
using System.Collections.Generic;
+//using System.ComponentModel.DataAnnotations;
using System.Text;
+using System.Xml.Linq;
using Tensorflow.Framework;
+using Tensorflow.NumPy;
using static Tensorflow.Binding;
namespace Tensorflow
@@ -99,5 +104,55 @@ public static RowPartition from_row_splits(Tensor row_splits,
return new RowPartition(row_splits);
});
}
+
+ public static RowPartition from_row_lengths(Tensor row_lengths,
+     bool validate=true,
+     TF_DataType dtype = TF_DataType.TF_INT32,
+     TF_DataType dtype_hint= TF_DataType.TF_INT32)
+ {
+     /* Creates a RowPartition from a vector of per-row lengths:
+        row_splits = concat([[0], cumsum(row_lengths)], axis: 0).
+        The leading zero is cast to row_lengths' dtype; the previous code
+        hard-coded an int64 zero, which cannot be concatenated with the
+        int32 lengths produced by the default `dtype` here.
+        NOTE(review): `validate` is currently unused — confirm whether split
+        monotonicity checks should be added when it is true. */
+     row_lengths = _convert_row_partition(
+         row_lengths, "row_lengths", dtype_hint: dtype_hint, dtype: dtype);
+     Tensor row_limits = math_ops.cumsum(row_lengths, tf.constant(-1));
+     Tensor leading_zero = math_ops.cast(tf.convert_to_tensor(np.array(new int[] { 0 })), row_lengths.dtype);
+     Tensor row_splits = array_ops.concat(new Tensor[] { leading_zero, row_limits }, axis: 0);
+     return new RowPartition(row_splits: row_splits, row_lengths: row_lengths);
+ }
+
+ public static Tensor _convert_row_partition(Tensor partition, string name, TF_DataType dtype,
+     TF_DataType dtype_hint= TF_DataType.TF_INT64)
+ {
+     /* Ensures `partition` is a tensor with an integer dtype.
+        Int32 ndarray inputs are materialized as tensors first; any value that
+        is not int32/int64 afterwards is rejected. */
+     if (partition is NDArray && partition.GetDataType() == np.int32)
+     {
+         partition = ops.convert_to_tensor(partition, name: name);
+     }
+     var partition_dtype = partition.GetDataType();
+     bool is_integer = partition_dtype == np.int32 || partition_dtype == np.int64;
+     if (!is_integer)
+     {
+         throw new ValueError($"{name} must have dtype int32 or int64");
+     }
+     return partition;
+ }
+
+ public Tensor nrows()
+ {
+     /* Returns the number of rows created by this `RowPartition` as a scalar
+        tensor: len(row_splits) - 1, preferring the cached _nrows. */
+     if (this._nrows != null) return this._nrows; // cached value wins
+     var nsplits = tensor_shape.dimension_at_index(this._row_splits.shape, 0);
+     // Unknown static dimension -> compute dynamically from the splits tensor.
+     // NOTE(review): dimension_at_index typically encodes "unknown" as a
+     // Dimension with value -1 rather than null — confirm this check fires.
+     if (nsplits == null) return array_ops.shape(this._row_splits, out_type: this.row_splits.dtype)[0] - 1;
+     else return constant_op.constant(nsplits.value - 1, dtype: this.row_splits.dtype);
+ }
+
+ public Tensor row_lengths()
+ {
+     /* Returns the length of each row as a 1-D tensor, preferring the cached
+        _row_lengths and otherwise differencing adjacent row_splits entries:
+        lengths[i] = splits[i + 1] - splits[i]. This matches the axis==1
+        branch of RaggedTensor.row_lengths, which delegates here. The previous
+        code returned scalar row *counts* (that is nrows(), not row lengths). */
+     if (this._row_lengths != null)
+     {
+         return this._row_lengths;
+     }
+     if (this._row_splits != null)
+     {
+         var splits = this._row_splits;
+         return splits[new Slice(start: 1)] - splits[new Slice(stop: -1)];
+     }
+     if (this._nrows != null)
+     {
+         // NOTE(review): with only nrows known the per-row lengths are
+         // unknowable; preserve the original fallback behavior.
+         return tensor_util.constant_value(this._nrows);
+     }
+     return tf.constant(-1); // original sentinel: no partition info available
+ }
}
}
diff --git a/src/TensorFlowNET.Core/Tensors/tensor_util.cs b/src/TensorFlowNET.Core/Tensors/tensor_util.cs
index e65c4850d..6e5024efd 100644
--- a/src/TensorFlowNET.Core/Tensors/tensor_util.cs
+++ b/src/TensorFlowNET.Core/Tensors/tensor_util.cs
@@ -1,4 +1,4 @@
-/*****************************************************************************
+/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
@@ -67,7 +67,7 @@ public static NDArray MakeNdarray(TensorProto tensor)
T[] ExpandArrayToSize(IList src)
{
- if(src.Count == 0)
+ if (src.Count == 0)
{
return new T[0];
}
@@ -77,7 +77,7 @@ T[] ExpandArrayToSize(IList src)
var first_elem = src[0];
var last_elem = src[src.Count - 1];
T[] res = new T[num_elements];
- for(long i = 0; i < num_elements; i++)
+ for (long i = 0; i < num_elements; i++)
{
if (i < pre) res[i] = first_elem;
else if (i >= num_elements - after) res[i] = last_elem;
@@ -121,7 +121,7 @@ T[] ExpandArrayToSize(IList src)
$"https://www.tensorflow.org/api_docs/python/tf/dtypes%20for%20supported%20TF%20dtypes.");
}
- if(values.size == 0)
+ if (values.size == 0)
{
return np.zeros(shape, tensor_dtype);
}
@@ -135,6 +135,47 @@ T[] ExpandArrayToSize(IList src)
TF_DataType.TF_QINT32
};
+ private static Array ConvertArray(Array inputArray, Func