@@ -33,6 +33,14 @@ In comparison to other projects, like for instance [TensorFlowSharp](https://www
3333| tf.net 0.15 | x | x | |
3434| tf.net 0.14 | x | | |
3535
36+ Read the docs & book [The Definitive Guide to Tensorflow.NET](https://tensorflownet.readthedocs.io/en/latest/FrontCover.html).
37+
38+ There are many examples residing at [TensorFlow.NET Examples](https://github.com/SciSharp/TensorFlow.NET-Examples).
39+
40+ For troubleshooting when running the examples or installing, please refer [here](tensorflowlib/README.md).
41+
42+ #### C# Example
43+
3644Install TF.NET and TensorFlow binary through NuGet.
3745```sh
3846### install tensorflow C#/F# binding
@@ -63,6 +71,13 @@ int training_steps = 1000;
6371float learning_rate = 0.01f;
6472int display_step = 100;
6573
74+ // Sample data
75+ train_X = np.array(3.3f, 4.4f, 5.5f, 6.71f, 6.93f, 4.168f, 9.779f, 6.182f, 7.59f, 2.167f,
76+ 7.042f, 10.791f, 5.313f, 7.997f, 5.654f, 9.27f, 3.1f);
77+ train_Y = np.array(1.7f, 2.76f, 2.09f, 3.19f, 1.694f, 1.573f, 3.366f, 2.596f, 2.53f, 1.221f,
78+ 2.827f, 3.465f, 1.65f, 2.904f, 2.42f, 2.94f, 1.3f);
79+ n_samples = train_X.shape[0];
80+
6681// We can set a fixed init value in order to demo
6782var W = tf.Variable(-0.06f, name: "weight");
6883var b = tf.Variable(-0.73f, name: "bias");
@@ -142,11 +157,65 @@ model.fit(x_train[new Slice(0, 1000)], y_train[new Slice(0, 1000)],
142157 validation_split: 0.2f);
143158```
144159
145- Read the docs & book [The Definitive Guide to Tensorflow.NET](https://tensorflownet.readthedocs.io/en/latest/FrontCover.html).
160+ #### F# Example
146161
147- There are many examples residing at [TensorFlow.NET Examples](https://github.com/SciSharp/TensorFlow.NET-Examples).
162+ Linear Regression in `Eager` mode:
163+
164+ ``` fsharp
165+ #r "nuget: TensorFlow.Net"
166+ #r "nuget: TensorFlow.Keras"
167+ #r "nuget: SciSharp.TensorFlow.Redist"
168+ #r "nuget: NumSharp"
169+
170+ open System
171+ open NumSharp
172+ open Tensorflow
173+ open Tensorflow.Keras
174+
175+ let tf = Binding.New<tensorflow>()
176+ tf.enable_eager_execution()
177+
178+ // Parameters
179+ let training_steps = 1000
180+ let learning_rate = 0.01f
181+ let display_step = 100
182+
183+ // Sample data
184+ let train_X =
185+ np.array(3.3f, 4.4f, 5.5f, 6.71f, 6.93f, 4.168f, 9.779f, 6.182f, 7.59f, 2.167f,
186+ 7.042f, 10.791f, 5.313f, 7.997f, 5.654f, 9.27f, 3.1f)
187+ let train_Y =
188+ np.array(1.7f, 2.76f, 2.09f, 3.19f, 1.694f, 1.573f, 3.366f, 2.596f, 2.53f, 1.221f,
189+ 2.827f, 3.465f, 1.65f, 2.904f, 2.42f, 2.94f, 1.3f)
190+ let n_samples = train_X.shape.[0]
191+
192+ // We can set a fixed init value in order to demo
193+ let W = tf.Variable(-0.06f,name = "weight")
194+ let b = tf.Variable(-0.73f, name = "bias")
195+ let optimizer = KerasApi.keras.optimizers.SGD(learning_rate)
196+
197+ // Run training for the given number of steps.
198+ for step = 1 to (training_steps + 1) do
199+ // Run the optimization to update W and b values.
200+ // Wrap computation inside a GradientTape for automatic differentiation.
201+ use g = tf.GradientTape()
202+ // Linear regressoin (Wx + b).
203+ let pred = W * train_X + b
204+ // Mean square error.
205+ let loss = tf.reduce_sum(tf.pow(pred - train_Y,2)) / (2 * n_samples)
206+ // should stop recording
207+ // compute gradients
208+ let gradients = g.gradient(loss,struct (W,b))
209+
210+ // Update W and b following gradients.
211+ optimizer.apply_gradients(Binding.zip(gradients, struct (W,b)))
212+
213+ if (step % display_step) = 0 then
214+ let pred = W * train_X + b
215+ let loss = tf.reduce_sum(tf.pow(pred-train_Y,2)) / (2 * n_samples)
216+ printfn $"step: {step}, loss: {loss.numpy()}, W: {W.numpy()}, b: {b.numpy()}"
217+ ```
148218
149- For troubleshooting when running the examples or installing, please refer [here](tensorflowlib/README.md).
150219
151220### Contribute:
152221
0 commit comments