diff --git a/resnet.py b/resnet.py
index a056c41..7270abe 100644
--- a/resnet.py
+++ b/resnet.py
@@ -101,7 +101,7 @@ def inference_small(x,
     c['fc_units_out'] = num_classes
     c['num_blocks'] = num_blocks
     c['num_classes'] = num_classes
-    inference_small_config(x, c)
+    return inference_small_config(x, c)
 
 def inference_small_config(x, c):
     c['bottleneck'] = False
@@ -151,7 +151,7 @@ def loss(logits, labels):
     regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
 
     loss_ = tf.add_n([cross_entropy_mean] + regularization_losses)
-    tf.scalar_summary('loss', loss_)
+    tf.summary.scalar('loss', loss_)
 
     return loss_
 
@@ -241,7 +241,7 @@ def bn(x, c):
                          initializer=tf.zeros_initializer)
     gamma = _get_variable('gamma',
                           params_shape,
-                          initializer=tf.ones_initializer)
+                          initializer=tf.ones_initializer())
 
     moving_mean = _get_variable('moving_mean',
                                 params_shape,
@@ -249,7 +249,7 @@ def bn(x, c):
                                 trainable=False)
     moving_variance = _get_variable('moving_variance',
                                     params_shape,
-                                    initializer=tf.ones_initializer,
+                                    initializer=tf.ones_initializer(),
                                     trainable=False)
 
     # These ops will only be preformed when training.
@@ -300,7 +300,7 @@ def _get_variable(name,
         regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
     else:
         regularizer = None
-    collections = [tf.GraphKeys.VARIABLES, RESNET_VARIABLES]
+    collections = [tf.GraphKeys.GLOBAL_VARIABLES, RESNET_VARIABLES]
     return tf.get_variable(name,
                            shape=shape,
                            initializer=initializer,
diff --git a/resnet_train.py b/resnet_train.py
index ce6a0bb..8111a7b 100644
--- a/resnet_train.py
+++ b/resnet_train.py
@@ -39,15 +39,15 @@ def train(is_training, logits, images, labels):
     # loss_avg
     ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
     tf.add_to_collection(UPDATE_OPS_COLLECTION, ema.apply([loss_]))
-    tf.scalar_summary('loss_avg', ema.average(loss_))
+    tf.summary.scalar('loss_avg', ema.average(loss_))
 
     # validation stats
     ema = tf.train.ExponentialMovingAverage(0.9, val_step)
     val_op = tf.group(val_step.assign_add(1), ema.apply([top1_error]))
     top1_error_avg = ema.average(top1_error)
-    tf.scalar_summary('val_top1_error_avg', top1_error_avg)
+    tf.summary.scalar('val_top1_error_avg', top1_error_avg)
 
-    tf.scalar_summary('learning_rate', FLAGS.learning_rate)
+    tf.summary.scalar('learning_rate', FLAGS.learning_rate)
 
     opt = tf.train.MomentumOptimizer(FLAGS.learning_rate, MOMENTUM)
     grads = opt.compute_gradients(loss_)
@@ -67,17 +67,17 @@ def train(is_training, logits, images, labels):
     batchnorm_updates_op = tf.group(*batchnorm_updates)
     train_op = tf.group(apply_gradient_op, batchnorm_updates_op)
 
-    saver = tf.train.Saver(tf.all_variables())
+    saver = tf.train.Saver(tf.global_variables())
 
-    summary_op = tf.merge_all_summaries()
+    summary_op = tf.summary.merge_all()
 
-    init = tf.initialize_all_variables()
+    init = tf.global_variables_initializer()
 
     sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
     sess.run(init)
     tf.train.start_queue_runners(sess=sess)
 
-    summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)
+    summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
 
     if FLAGS.resume:
         latest = tf.train.latest_checkpoint(FLAGS.train_dir)
diff --git a/train_cifar.py b/train_cifar.py
index cccf590..80391de 100644
--- a/train_cifar.py
+++ b/train_cifar.py
@@ -192,7 +192,7 @@ def distorted_inputs(data_dir, batch_size):
       distorted_image, lower=0.2, upper=1.8)
 
   # Subtract off the mean and divide by the variance of the pixels.
-  float_image = tf.image.per_image_whitening(distorted_image)
+  float_image = tf.image.per_image_standardization(distorted_image)
 
   # Ensure that the random shuffling has good mixing properties.
   min_fraction_of_examples_in_queue = 0.4
@@ -250,7 +250,7 @@ def inputs(eval_data, data_dir, batch_size):
                                                          width, height)
 
   # Subtract off the mean and divide by the variance of the pixels.
-  float_image = tf.image.per_image_whitening(resized_image)
+  float_image = tf.image.per_image_standardization(resized_image)
 
   # Ensure that the random shuffling has good mixing properties.
   min_fraction_of_examples_in_queue = 0.4
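For reference, here is a minimal, self-contained sketch of the TF 1.x API surface this patch migrates to, assuming TensorFlow 1.x. The toy graph, variable names, and the /tmp/resnet_logs path are illustrative only, not taken from this repo:

# Minimal sketch of the post-migration TF 1.x APIs (assumes TensorFlow 1.x).
# The toy graph and log directory below are illustrative, not from resnet.py.
import tensorflow as tf

x = tf.get_variable('x', shape=[], initializer=tf.ones_initializer())
toy_loss = tf.reduce_mean(tf.square(x - 2.0))

tf.summary.scalar('loss', toy_loss)              # was tf.scalar_summary
summary_op = tf.summary.merge_all()              # was tf.merge_all_summaries
saver = tf.train.Saver(tf.global_variables())    # was tf.all_variables
init = tf.global_variables_initializer()         # was tf.initialize_all_variables

with tf.Session() as sess:
    sess.run(init)
    # was tf.train.SummaryWriter
    writer = tf.summary.FileWriter('/tmp/resnet_logs', sess.graph)
    writer.add_summary(sess.run(summary_op), global_step=0)
    writer.close()
    saver.save(sess, '/tmp/resnet_logs/model.ckpt')

Note that tf.ones_initializer changed from a plain function in TF 0.x to a class in TF 1.x, which is why the bn() hunks above add parentheses to instantiate it before passing it to tf.get_variable.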