@@ -40,11 +40,7 @@ private void PrepareData()
             var pred = tf.nn.softmax(tf.matmul(x, W) + b); // Softmax

             // Minimize error using cross entropy
-            var log = tf.log(pred);
-            var mul = y * log;
-            var sum = tf.reduce_sum(mul, reduction_indices: 1);
-            var neg = -sum;
-            var cost = tf.reduce_mean(neg);
+            var cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices: 1));

             // Gradient Descent
             var optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost);
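
For reference, the consolidated cost line above computes the averaged softmax cross-entropy over the batch. With m the batch size, i indexing examples and j indexing classes:

    cost = \frac{1}{m} \sum_{i=1}^{m} \Bigl( -\sum_{j} y_{ij} \, \log(\mathrm{pred}_{ij}) \Bigr)

reduce_sum(..., reduction_indices: 1) is the inner sum over classes; reduce_mean is the outer average over the batch.
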
@@ -68,14 +64,23 @@ private void PrepareData()
                 {
                     var (batch_xs, batch_ys) = mnist.train.next_batch(batch_size);
                     // Run optimization op (backprop) and cost op (to get loss value)
-                    var (_, c) = sess.run(optimizer,
+                    var result = sess.run(new object[] { optimizer, cost },
                         new FeedItem(x, batch_xs),
                         new FeedItem(y, batch_ys));

+                    var c = (float)result[1];
                     // Compute average loss
                     avg_cost += c / total_batch;
                 }
+
+                // Display logs per epoch step
+                if ((epoch + 1) % display_step == 0)
+                    print($"Epoch: {(epoch + 1).ToString("D4")} cost= {avg_cost.ToString("G9")}");
             }
+
+            print("Optimization Finished!");
+
+            // Test model
         });
     }
 }
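
The new // Test model comment is left as a placeholder in this commit. A minimal sketch of how that evaluation could look, following the Python example this port mirrors; tf.argmax, tf.equal, tf.cast, tf.float32 and the mnist.test.images / mnist.test.labels properties are assumptions here, not part of this commit, and should be checked against the TensorFlow.NET API actually in use:

    // Test model: compare predicted class (argmax of the softmax output) with the label
    // (hypothetical sketch, not from this commit)
    var correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1));
    // Accuracy = mean of correct predictions, cast from bool to float
    var accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32));
    // Evaluate on the test set (mnist.test.images / mnist.test.labels are assumed property names)
    var acc = sess.run(accuracy,
        new FeedItem(x, mnist.test.images),
        new FeedItem(y, mnist.test.labels));
    print($"Accuracy: {acc}");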