
Commit 052e5e8

Converted the models repo to TF 1.0 using the upgrade script
1 parent f21c427 commit 052e5e8

File tree

71 files changed: +314 -314 lines
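Every hunk below follows the same TF 1.0 rename pattern applied by the upgrade script: abbreviated element-wise ops get full names, tf.pack becomes tf.stack, and order-sensitive calls such as tf.concat move to explicit keyword arguments. A minimal sketch of the flavor of these renames (the tensors here are illustrative, not taken from the repo):

```python
import tensorflow as tf  # TF 1.x

a = tf.constant([1.0, 2.0])
b = tf.constant([3.0, 4.0])

diff = tf.subtract(a, b)                    # was tf.sub
prod = tf.multiply(a, b)                    # was tf.mul
stacked = tf.stack([a, b])                  # was tf.pack
joined = tf.concat(axis=0, values=[a, b])   # was tf.concat(0, [a, b])

with tf.Session() as sess:
    print(sess.run([diff, prod, stacked, joined]))
```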


autoencoder/autoencoder_models/Autoencoder.py

Lines changed: 1 addition & 1 deletion

```diff
@@ -18,7 +18,7 @@ def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus, optimize
         self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])
 
         # cost
-        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.sub(self.reconstruction, self.x), 2.0))
+        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
         self.optimizer = optimizer.minimize(self.cost)
 
         init = tf.global_variables_initializer()
```
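The only change is tf.sub → tf.subtract; the cost itself is the usual squared reconstruction error 0.5 · Σ(x̂ − x)². A self-contained sketch of that cost under TF 1.x, with a single weight matrix standing in for the repo's encoder/decoder stack (the shapes and names here are assumptions, not the repo's):

```python
import tensorflow as tf  # TF 1.x

# Hypothetical single-layer stand-in for the Autoencoder's weights dict.
x = tf.placeholder(tf.float32, [None, 784])
w = tf.Variable(tf.truncated_normal([784, 784], stddev=0.01))
b = tf.Variable(tf.zeros([784]))
reconstruction = tf.add(tf.matmul(x, w), b)

# Squared reconstruction error, using the renamed tf.subtract:
cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(reconstruction, x), 2.0))
train_op = tf.train.AdamOptimizer().minimize(cost)
init = tf.global_variables_initializer()
```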

autoencoder/autoencoder_models/DenoisingAutoencoder.py

Lines changed: 2 additions & 2 deletions

```diff
@@ -22,7 +22,7 @@ def __init__(self, n_input, n_hidden, transfer_function = tf.nn.softplus, optimi
         self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])
 
         # cost
-        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.sub(self.reconstruction, self.x), 2.0))
+        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
         self.optimizer = optimizer.minimize(self.cost)
 
         init = tf.global_variables_initializer()
@@ -89,7 +89,7 @@ def __init__(self, n_input, n_hidden, transfer_function = tf.nn.softplus, optimi
         self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])
 
         # cost
-        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.sub(self.reconstruction, self.x), 2.0))
+        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
         self.optimizer = optimizer.minimize(self.cost)
 
         init = tf.global_variables_initializer()
```

autoencoder/autoencoder_models/VariationalAutoencoder.py

Lines changed: 3 additions & 3 deletions

```diff
@@ -17,13 +17,13 @@ def __init__(self, n_input, n_hidden, optimizer = tf.train.AdamOptimizer()):
         self.z_log_sigma_sq = tf.add(tf.matmul(self.x, self.weights['log_sigma_w1']), self.weights['log_sigma_b1'])
 
         # sample from gaussian distribution
-        eps = tf.random_normal(tf.pack([tf.shape(self.x)[0], self.n_hidden]), 0, 1, dtype = tf.float32)
-        self.z = tf.add(self.z_mean, tf.mul(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))
+        eps = tf.random_normal(tf.stack([tf.shape(self.x)[0], self.n_hidden]), 0, 1, dtype = tf.float32)
+        self.z = tf.add(self.z_mean, tf.multiply(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))
 
         self.reconstruction = tf.add(tf.matmul(self.z, self.weights['w2']), self.weights['b2'])
 
         # cost
-        reconstr_loss = 0.5 * tf.reduce_sum(tf.pow(tf.sub(self.reconstruction, self.x), 2.0))
+        reconstr_loss = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
         latent_loss = -0.5 * tf.reduce_sum(1 + self.z_log_sigma_sq
                                            - tf.square(self.z_mean)
                                            - tf.exp(self.z_log_sigma_sq), 1)
```
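Three renames land in this hunk: tf.pack → tf.stack (building the noise shape from the runtime batch size), tf.mul → tf.multiply (the reparameterization z = μ + σ·ε), and tf.sub → tf.subtract (the reconstruction loss). A stripped-down sketch of the sampling step under TF 1.x; the layer sizes and weight names are assumptions:

```python
import tensorflow as tf  # TF 1.x

n_input, n_hidden = 784, 20  # assumed sizes
x = tf.placeholder(tf.float32, [None, n_input])
w_mean = tf.Variable(tf.truncated_normal([n_input, n_hidden], stddev=0.01))
w_log_sigma = tf.Variable(tf.truncated_normal([n_input, n_hidden], stddev=0.01))
z_mean = tf.matmul(x, w_mean)
z_log_sigma_sq = tf.matmul(x, w_log_sigma)

# tf.stack (formerly tf.pack) assembles [batch_size, n_hidden] at run time,
# since the batch dimension is not known statically.
eps = tf.random_normal(tf.stack([tf.shape(x)[0], n_hidden]), 0, 1,
                       dtype=tf.float32)

# Reparameterization trick: z = mean + sigma * eps, via tf.multiply.
z = tf.add(z_mean, tf.multiply(tf.sqrt(tf.exp(z_log_sigma_sq)), eps))
```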

compression/decoder.py, compression/encoder.py, compression/msssim.py

File mode changed: 100755 → 100644 (executable bit removed).

differential_privacy/dp_sgd/dp_mnist/dp_mnist.py

Lines changed: 2 additions & 2 deletions

```diff
@@ -273,7 +273,7 @@ def Train(mnist_train_file, mnist_test_file, network_parameters, num_steps,
                                    images, network_parameters)
 
     cost = tf.nn.softmax_cross_entropy_with_logits(
-        logits, tf.one_hot(labels, 10))
+        logits=logits, labels=tf.one_hot(labels, 10))
 
     # The actual cost is the average across the examples.
     cost = tf.reduce_sum(cost, [0]) / batch_size
@@ -343,7 +343,7 @@ def Train(mnist_train_file, mnist_test_file, network_parameters, num_steps,
 
     # We need to maintain the intialization sequence.
     for v in tf.trainable_variables():
-      sess.run(tf.initialize_variables([v]))
+      sess.run(tf.variables_initializer([v]))
     sess.run(tf.global_variables_initializer())
     sess.run(init_ops)
```
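Two distinct 1.0 breaking changes in this file: tf.nn.softmax_cross_entropy_with_logits stopped accepting positional arguments (the keywords guard against silently swapping logits and labels), and tf.initialize_variables was renamed tf.variables_initializer. A minimal sketch of both, with assumed batch size and class count:

```python
import tensorflow as tf  # TF 1.x

logits = tf.Variable(tf.zeros([32, 10]))        # assumed: batch 32, 10 classes
labels = tf.constant([0] * 32, dtype=tf.int32)

# Keywords are now mandatory; positional (logits, labels) raises an error.
cost = tf.nn.softmax_cross_entropy_with_logits(
    logits=logits, labels=tf.one_hot(labels, 10))

with tf.Session() as sess:
    # Initialize variables one at a time, mirroring the diff's init sequence.
    for v in tf.trainable_variables():
        sess.run(tf.variables_initializer([v]))  # was tf.initialize_variables
    print(sess.run(tf.reduce_mean(cost)))        # log(10) for uniform logits
```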
differential_privacy/dp_sgd/dp_optimizer/utils.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -236,7 +236,7 @@ def BatchClipByL2norm(t, upper_bound, name=None):
236236
with tf.op_scope([t, upper_bound], name, "batch_clip_by_l2norm") as name:
237237
saved_shape = tf.shape(t)
238238
batch_size = tf.slice(saved_shape, [0], [1])
239-
t2 = tf.reshape(t, tf.concat(0, [batch_size, [-1]]))
239+
t2 = tf.reshape(t, tf.concat(axis=0, values=[batch_size, [-1]]))
240240
upper_bound_inv = tf.fill(tf.slice(saved_shape, [0], [1]),
241241
tf.constant(1.0/upper_bound))
242242
# Add a small number to avoid divide by 0
@@ -266,7 +266,7 @@ def SoftThreshold(t, threshold_ratio, name=None):
266266
assert threshold_ratio >= 0
267267
with tf.op_scope([t, threshold_ratio], name, "soft_thresholding") as name:
268268
saved_shape = tf.shape(t)
269-
t2 = tf.reshape(t, tf.concat(0, [tf.slice(saved_shape, [0], [1]), -1]))
269+
t2 = tf.reshape(t, tf.concat(axis=0, values=[tf.slice(saved_shape, [0], [1]), -1]))
270270
t_abs = tf.abs(t2)
271271
t_x = tf.sign(t2) * tf.nn.relu(t_abs -
272272
(tf.reduce_mean(t_abs, [0],
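tf.concat swapped its signature in 1.0 (the axis used to come first, positionally); the upgrade script rewrites call sites to explicit axis=/values= keywords. The idiom being upgraded here flattens everything but the batch dimension using a shape built at graph-run time; a standalone sketch with an assumed rank-3 input:

```python
import numpy as np
import tensorflow as tf  # TF 1.x

t = tf.placeholder(tf.float32, [None, 4, 4])  # assumed rank-3 input
saved_shape = tf.shape(t)
batch_size = tf.slice(saved_shape, [0], [1])

# Build the target shape [batch_size, -1] dynamically; tf.concat now takes
# axis= and values= keywords instead of the old positional (dim, values).
t2 = tf.reshape(t, tf.concat(axis=0, values=[batch_size, [-1]]))

with tf.Session() as sess:
    out = sess.run(t2, feed_dict={t: np.zeros((8, 4, 4), np.float32)})
    print(out.shape)  # (8, 16)
```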

differential_privacy/dp_sgd/per_example_gradients/per_example_gradients.py

Lines changed: 3 additions & 3 deletions

```diff
@@ -189,7 +189,7 @@ def __call__(self, x, z_grads):
     z_grads, = z_grads
     x_expanded = tf.expand_dims(x, 2)
     z_grads_expanded = tf.expand_dims(z_grads, 1)
-    return tf.mul(x_expanded, z_grads_expanded)
+    return tf.multiply(x_expanded, z_grads_expanded)
 
 
 pxg_registry.Register("MatMul", MatMulPXG)
@@ -245,7 +245,7 @@ def _PxConv2DBuilder(self, input_, w, strides, padding):
       num_x = int(conv_x.get_shape()[0])
       assert num_x == 1, num_x
     assert len(conv_px) == batch_size
-    conv = tf.concat(0, conv_px)
+    conv = tf.concat(axis=0, values=conv_px)
     assert int(conv.get_shape()[0]) == batch_size
     return conv, w_px
 
@@ -274,7 +274,7 @@ def __call__(self, w, z_grads):
                                   self.colocate_gradients_with_ops,
                                   gate_gradients=self.gate_gradients)
 
-    return tf.pack(gradients_list)
+    return tf.stack(gradients_list)
 
 
 pxg_registry.Register("Conv2D", Conv2DPXG)
```
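Besides the tf.pack → tf.stack and tf.concat renames, this hunk shows the MatMul per-example gradient trick: expand x to [batch, n_in, 1] and the upstream grads to [batch, 1, n_out], and a broadcast tf.multiply (formerly tf.mul) yields one [n_in, n_out] weight gradient per example. A standalone sketch with assumed sizes:

```python
import numpy as np
import tensorflow as tf  # TF 1.x

batch, n_in, n_out = 8, 5, 3  # assumed sizes
x = tf.placeholder(tf.float32, [batch, n_in])
z_grads = tf.placeholder(tf.float32, [batch, n_out])

# Broadcast outer product: [batch, n_in, 1] * [batch, 1, n_out] -> [batch, n_in, n_out]
x_expanded = tf.expand_dims(x, 2)
z_grads_expanded = tf.expand_dims(z_grads, 1)
per_example_grads = tf.multiply(x_expanded, z_grads_expanded)  # was tf.mul

with tf.Session() as sess:
    grads = sess.run(per_example_grads,
                     feed_dict={x: np.ones((batch, n_in), np.float32),
                                z_grads: np.ones((batch, n_out), np.float32)})
    print(grads.shape)  # (8, 5, 3): one weight-gradient matrix per example
```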
differential_privacy/multiple_teachers/deep_cnn.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -75,7 +75,7 @@ def _variable_with_weight_decay(name, shape, stddev, wd):
7575
var = _variable_on_cpu(name, shape,
7676
tf.truncated_normal_initializer(stddev=stddev))
7777
if wd is not None:
78-
weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
78+
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
7979
tf.add_to_collection('losses', weight_decay)
8080
return var
8181

@@ -398,7 +398,7 @@ def train_op_fun(total_loss, global_step):
398398
decay_steps,
399399
LEARNING_RATE_DECAY_FACTOR,
400400
staircase=True)
401-
tf.scalar_summary('learning_rate', lr)
401+
tf.summary.scalar('learning_rate', lr)
402402

403403
# Generate moving averages of all losses and associated summaries.
404404
loss_averages_op = moving_av(total_loss)
@@ -413,7 +413,7 @@ def train_op_fun(total_loss, global_step):
413413

414414
# Add histograms for trainable variables.
415415
for var in tf.trainable_variables():
416-
tf.histogram_summary(var.op.name, var)
416+
tf.summary.histogram(var.op.name, var)
417417

418418
# Track the moving averages of all trainable variables.
419419
variable_averages = tf.train.ExponentialMovingAverage(
@@ -485,7 +485,7 @@ def train(images, labels, ckpt_path, dropout=False):
485485
train_op = train_op_fun(loss, global_step)
486486

487487
# Create a saver.
488-
saver = tf.train.Saver(tf.all_variables())
488+
saver = tf.train.Saver(tf.global_variables())
489489

490490
print("Graph constructed and saver created")
491491

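Summary ops moved under the tf.summary namespace in 1.0, and tf.all_variables became tf.global_variables. A minimal sketch of the renamed calls in a training graph; the decay constants and the weight variable are placeholders, not the repo's values:

```python
import tensorflow as tf  # TF 1.x

global_step = tf.Variable(0, trainable=False)
lr = tf.train.exponential_decay(0.1, global_step, 1000, 0.9, staircase=True)
tf.summary.scalar('learning_rate', lr)         # was tf.scalar_summary

w = tf.Variable(tf.truncated_normal([10, 10], stddev=0.01), name='w')
for var in tf.trainable_variables():
    tf.summary.histogram(var.op.name, var)     # was tf.histogram_summary

saver = tf.train.Saver(tf.global_variables())  # was tf.all_variables()
merged = tf.summary.merge_all()                # was tf.merge_all_summaries
```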