@@ -18,7 +18,7 @@
 import os
 import tensorflow as tf
 
-tf.logging.set_verbosity(tf.logging.DEBUG)
+tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG)
 
 
 def cnn_model_fn(features, labels, mode):
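The pattern in this hunk recurs throughout the diff: symbols removed from the top-level namespace in TensorFlow 2.x stay reachable under the tf.compat.v1 shim. For reference, tf.compat.v1.logging is itself a thin wrapper over absl logging, so a hypothetical TF2-native alternative (not part of this change) would be:

    from absl import logging

    # absl logging is what tf.compat.v1.logging delegates to in TF 2.x.
    logging.set_verbosity(logging.DEBUG)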
@@ -33,30 +33,30 @@ def cnn_model_fn(features, labels, mode):
     # Padding is added to preserve width and height.
     # Input Tensor Shape: [batch_size, 28, 28, 1]
     # Output Tensor Shape: [batch_size, 28, 28, 32]
-    conv1 = tf.layers.conv2d(
+    conv1 = tf.compat.v1.layers.conv2d(
         inputs=input_layer, filters=32, kernel_size=[5, 5], padding="same", activation=tf.nn.relu
     )
 
     # Pooling Layer #1
     # First max pooling layer with a 2x2 filter and stride of 2
     # Input Tensor Shape: [batch_size, 28, 28, 32]
     # Output Tensor Shape: [batch_size, 14, 14, 32]
-    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
+    pool1 = tf.compat.v1.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
 
     # Convolutional Layer #2
     # Computes 64 features using a 5x5 filter.
     # Padding is added to preserve width and height.
     # Input Tensor Shape: [batch_size, 14, 14, 32]
     # Output Tensor Shape: [batch_size, 14, 14, 64]
-    conv2 = tf.layers.conv2d(
+    conv2 = tf.compat.v1.layers.conv2d(
         inputs=pool1, filters=64, kernel_size=[5, 5], padding="same", activation=tf.nn.relu
     )
 
     # Pooling Layer #2
     # Second max pooling layer with a 2x2 filter and stride of 2
     # Input Tensor Shape: [batch_size, 14, 14, 64]
     # Output Tensor Shape: [batch_size, 7, 7, 64]
-    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
+    pool2 = tf.compat.v1.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
 
     # Flatten tensor into a batch of vectors
     # Input Tensor Shape: [batch_size, 7, 7, 64]
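tf.compat.v1.layers is only a compatibility shim; the supported TF2 spelling of this stack (and of the dense/dropout layers migrated in the next hunk) is tf.keras.layers. A minimal sketch, assuming the same input_layer tensor as above:

    # Hypothetical tf.keras.layers equivalent of conv1/pool1; same filters,
    # kernel size, padding, and activation as the compat.v1.layers calls.
    conv1 = tf.keras.layers.Conv2D(
        filters=32, kernel_size=[5, 5], padding="same", activation=tf.nn.relu
    )(input_layer)
    pool1 = tf.keras.layers.MaxPooling2D(pool_size=[2, 2], strides=2)(conv1)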
@@ -67,17 +67,17 @@ def cnn_model_fn(features, labels, mode):
     # Densely connected layer with 1024 neurons
     # Input Tensor Shape: [batch_size, 7 * 7 * 64]
     # Output Tensor Shape: [batch_size, 1024]
-    dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
+    dense = tf.compat.v1.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
 
     # Add dropout operation; 0.6 probability that element will be kept
-    dropout = tf.layers.dropout(
+    dropout = tf.compat.v1.layers.dropout(
         inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN
     )
 
     # Logits layer
     # Input Tensor Shape: [batch_size, 1024]
     # Output Tensor Shape: [batch_size, 10]
-    logits = tf.layers.dense(inputs=dropout, units=10)
+    logits = tf.compat.v1.layers.dense(inputs=dropout, units=10)
 
     predictions = {
         # Generate predictions (for PREDICT and EVAL mode)
@@ -90,17 +90,17 @@ def cnn_model_fn(features, labels, mode):
         return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
 
     # Calculate Loss (for both TRAIN and EVAL modes)
-    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
+    loss = tf.compat.v1.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
 
     # Configure the Training Op (for TRAIN mode)
     if mode == tf.estimator.ModeKeys.TRAIN:
-        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
-        train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())
+        optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.001)
+        train_op = optimizer.minimize(loss=loss, global_step=tf.compat.v1.train.get_global_step())
         return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
 
     # Add evaluation metrics (for EVAL mode)
     eval_metric_ops = {
-        "accuracy": tf.metrics.accuracy(labels=labels, predictions=predictions["classes"])
+        "accuracy": tf.compat.v1.metrics.accuracy(labels=labels, predictions=predictions["classes"])
     }
     return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
 
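With the default unit weights, tf.compat.v1.losses.sparse_softmax_cross_entropy reduces the per-example cross-entropies to their mean, so a TF2-native spelling of the same loss (a sketch for reference, not part of this change) is:

    # Hypothetical TF2-native form of the loss above: sparse integer labels
    # and raw logits in, scalar mean cross-entropy out.
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
    )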
@@ -134,7 +134,7 @@ def _parse_args():
 
 
 def serving_input_fn():
-    inputs = {"x": tf.placeholder(tf.float32, [None, 784])}
+    inputs = {"x": tf.compat.v1.placeholder(tf.float32, [None, 784])}
     return tf.estimator.export.ServingInputReceiver(inputs, inputs)
 
 
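The tf.compat.v1.placeholder still works under TF2 because the Estimator calls serving_input_fn while building the export graph. Passing the same dict twice reads oddly; spelled out with keyword arguments (a restatement for clarity, not a further change), the receiver is:

    # The first dict is what model_fn receives as `features`; the second is
    # what the exported serving signature accepts. Here both are the same
    # batch of flattened 28x28 images.
    def serving_input_fn():
        x = tf.compat.v1.placeholder(tf.float32, [None, 784])
        return tf.estimator.export.ServingInputReceiver(
            features={"x": x}, receiver_tensors={"x": x}
        )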
@@ -155,15 +155,15 @@ def serving_input_fn():
     # Set up logging for predictions
     # Log the values in the "Softmax" tensor with label "probabilities"
     tensors_to_log = {"probabilities": "softmax_tensor"}
-    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=50)
+    logging_hook = tf.estimator.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=50)
 
     # Train the model
-    train_input_fn = tf.estimator.inputs.numpy_input_fn(
+    train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
         x={"x": train_data}, y=train_labels, batch_size=50, num_epochs=None, shuffle=True
     )
 
     # Evaluate the model and print results
-    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
+    eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
         x={"x": eval_data}, y=eval_labels, num_epochs=1, shuffle=False
     )
 
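numpy_input_fn survives only under the compat shim; the TF2-native replacement is a tf.data pipeline. A minimal sketch of an equivalent train_input_fn (the shuffle buffer size is an illustrative choice, not from this diff):

    # Hypothetical tf.data equivalent of the train numpy_input_fn above:
    # shuffle=True, num_epochs=None (repeat forever), batch_size=50.
    def train_input_fn():
        dataset = tf.data.Dataset.from_tensor_slices(({"x": train_data}, train_labels))
        return dataset.shuffle(buffer_size=10000).repeat().batch(50)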
@@ -172,4 +172,4 @@ def serving_input_fn():
     tf.estimator.train_and_evaluate(mnist_classifier, train_spec, eval_spec)
 
     if args.current_host == args.hosts[0]:
-        mnist_classifier.export_savedmodel("/opt/ml/model", serving_input_fn)
+        mnist_classifier.export_saved_model("/opt/ml/model", serving_input_fn)
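export_saved_model (the TF2 rename of export_savedmodel) writes the model into a timestamped subdirectory under /opt/ml/model. A minimal sketch of loading and invoking the export under TF2; the timestamp directory name below is hypothetical:

    import tensorflow as tf

    # "1681234567" stands in for whatever timestamped directory the export created.
    loaded = tf.saved_model.load("/opt/ml/model/1681234567")
    infer = loaded.signatures["serving_default"]

    # The signature's input name "x" comes from serving_input_fn above.
    outputs = infer(x=tf.zeros([1, 784], tf.float32))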