Fixes for differential_privacy
nealwu committed Apr 12, 2017
1 parent 0b1e767 commit 3f8ea5c
Showing 4 changed files with 38 additions and 38 deletions.
differential_privacy/multiple_teachers/analysis.py (3 additions, 3 deletions)
@@ -216,10 +216,10 @@ def main(unused_argv):
  # If we are reproducing results from paper https://arxiv.org/abs/1610.05755,
  # download the required binaries with label information.
  ##################################################################

  # Binaries for MNIST results
  paper_binaries_mnist = \
    ["https://github.com/npapernot/multiple-teachers-for-privacy/blob/master/mnist_250_teachers_labels.npy?raw=true",
     "https://github.com/npapernot/multiple-teachers-for-privacy/blob/master/mnist_250_teachers_100_indices_used_by_student.npy?raw=true"]
  if FLAGS.counts_file == "mnist_250_teachers_labels.npy" \
      or FLAGS.indices_file == "mnist_250_teachers_100_indices_used_by_student.npy":
@@ -254,7 +254,7 @@ def main(unused_argv):
  total_log_mgf_nm = np.array([0.0 for _ in l_list])
  total_ss_nm = np.array([0.0 for _ in l_list])
  noise_eps = FLAGS.noise_eps

  for i in indices:
    total_log_mgf_nm += np.array(
        [logmgf_from_counts(counts_mat[i], noise_eps, l)
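The hunk above accumulates, query by query, an upper bound on the log moment generating function of the privacy loss at each moment order in l_list; the accountant later converts the accumulated bounds into an (eps, delta) statement. A minimal sketch of that accumulate-then-convert pattern, with logmgf_stub standing in for the real logmgf_from_counts (whose bound depends on the vote counts and the noise scale):

import numpy as np

def logmgf_stub(counts, noise_eps, l):
  # Placeholder for logmgf_from_counts: a real bound depends on how
  # peaked the vote histogram `counts` is and on the noise scale.
  del counts, noise_eps
  return 0.01 * l  # hypothetical per-query log-moment bound

l_list = 1.0 + np.arange(40)           # moment orders
total_log_mgf = np.zeros_like(l_list)
for _ in range(100):                   # one term per answered query
  total_log_mgf += np.array([logmgf_stub(None, 0.1, l) for l in l_list])

# Moments-accountant conversion: for a target delta, each order l gives
# eps <= (log_mgf + log(1/delta)) / l; report the tightest bound.
delta = 1e-5
eps = np.min((total_log_mgf + np.log(1.0 / delta)) / l_list)
print("epsilon upper bound:", eps)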
differential_privacy/multiple_teachers/deep_cnn.py (28 additions, 28 deletions)
@@ -95,9 +95,9 @@ def inference(images, dropout=False):

  # conv1
  with tf.variable_scope('conv1') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=first_conv_shape,
                                         stddev=1e-4,
                                         wd=0.0)
    conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
@@ -108,25 +108,25 @@ def inference(images, dropout=False):


  # pool1
  pool1 = tf.nn.max_pool(conv1,
                         ksize=[1, 3, 3, 1],
                         strides=[1, 2, 2, 1],
                         padding='SAME',
                         name='pool1')

  # norm1
  norm1 = tf.nn.lrn(pool1,
                    4,
                    bias=1.0,
                    alpha=0.001 / 9.0,
                    beta=0.75,
                    name='norm1')

  # conv2
  with tf.variable_scope('conv2') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[5, 5, 64, 128],
                                         stddev=1e-4,
                                         wd=0.0)
    conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [128], tf.constant_initializer(0.1))
@@ -137,28 +137,28 @@ def inference(images, dropout=False):


  # norm2
  norm2 = tf.nn.lrn(conv2,
                    4,
                    bias=1.0,
                    alpha=0.001 / 9.0,
                    beta=0.75,
                    name='norm2')

  # pool2
  pool2 = tf.nn.max_pool(norm2,
                         ksize=[1, 3, 3, 1],
                         strides=[1, 2, 2, 1],
                         padding='SAME',
                         name='pool2')

  # local3
  with tf.variable_scope('local3') as scope:
    # Move everything into depth so we can perform a single matrix multiply.
    reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])
    dim = reshape.get_shape()[1].value
    weights = _variable_with_weight_decay('weights',
                                          shape=[dim, 384],
                                          stddev=0.04,
                                          wd=0.004)
    biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
    local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
@@ -167,9 +167,9 @@ def inference(images, dropout=False):

  # local4
  with tf.variable_scope('local4') as scope:
    weights = _variable_with_weight_decay('weights',
                                          shape=[384, 192],
                                          stddev=0.04,
                                          wd=0.004)
    biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
    local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
@@ -178,11 +178,11 @@ def inference(images, dropout=False):

  # compute logits
  with tf.variable_scope('softmax_linear') as scope:
    weights = _variable_with_weight_decay('weights',
                                          [192, FLAGS.nb_labels],
                                          stddev=1/192.0,
                                          wd=0.0)
    biases = _variable_on_cpu('biases',
                              [FLAGS.nb_labels],
                              tf.constant_initializer(0.0))
    logits = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
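This function leans on two helpers the diff never shows, _variable_on_cpu and _variable_with_weight_decay. They are defined elsewhere in deep_cnn.py; a plausible sketch, patterned on the TensorFlow CIFAR-10 tutorial this model follows (the actual definitions may differ in detail):

def _variable_on_cpu(name, shape, initializer):
  # Create a variable pinned to CPU memory (useful for multi-GPU sharing).
  with tf.device('/cpu:0'):
    var = tf.get_variable(name, shape, initializer=initializer)
  return var

def _variable_with_weight_decay(name, shape, stddev, wd):
  # Truncated-normal init; if wd is nonzero, add an L2 penalty
  # wd * l2_loss(var) to the 'losses' collection picked up by the loss op.
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var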
@@ -386,7 +386,7 @@ def train_op_fun(total_loss, global_step):
"""
# Variables that affect learning rate.
nb_ex_per_train_epoch = int(60000 / FLAGS.nb_teachers)

num_batches_per_epoch = nb_ex_per_train_epoch / FLAGS.batch_size
decay_steps = int(num_batches_per_epoch * FLAGS.epochs_per_decay)

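The hunk stops right after decay_steps is computed; the remainder of train_op_fun is elided by the diff. In this CIFAR-10-style setup, decay_steps typically drives an exponential learning-rate schedule along these lines (the names initial_lr and decay_factor below are illustrative stand-ins, not flags from this file):

initial_lr = 0.1    # stand-in for a learning-rate flag
decay_factor = 0.1  # stand-in for a decay-factor flag

# lr = initial_lr * decay_factor ** (global_step / decay_steps);
# staircase=True drops the rate in discrete, epoch-aligned steps.
lr = tf.train.exponential_decay(initial_lr,
                                global_step,
                                decay_steps,
                                decay_factor,
                                staircase=True)
opt = tf.train.GradientDescentOptimizer(lr)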
differential_privacy/multiple_teachers/input.py (3 additions, 3 deletions)
@@ -47,7 +47,7 @@ def create_dir_if_needed(dest_directory):
def maybe_download(file_urls, directory):
  """
  Download a set of files into a temporary local folder
  :param directory: the directory to download into
  :return: a tuple of filepaths corresponding to the files given as input
  """
  # Create directory if it doesn't exist
@@ -73,7 +73,7 @@ def maybe_download(file_urls, directory):
    result.append(filepath)

    # Test if file already exists
-    if not gfile.Exists(filepath):
+    if not tf.gfile.Exists(filepath):
      def _progress(count, block_size, total_size):
        sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
            float(count * block_size) / float(total_size) * 100.0))
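This is the actual fix in input.py (repeated in extract_svhn below): the bare name gfile only resolves with an explicit platform import, which this module evidently lacks, while tf.gfile is reachable through the top-level TensorFlow 1.x import. For illustration:

import tensorflow as tf

filepath = '/tmp/mnist_250_teachers_labels.npy'  # example path

# Works with just `import tensorflow as tf` (TF 1.x):
print(tf.gfile.Exists(filepath))

# The old spelling needs its own import to resolve:
#   from tensorflow.python.platform import gfile
#   gfile.Exists(filepath)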
@@ -124,7 +124,7 @@ def extract_svhn(local_url):
  :return:
  """

-  with gfile.Open(local_url, mode='r') as file_obj:
+  with tf.gfile.Open(local_url, mode='r') as file_obj:
    # Load MATLAB matrix using scipy IO
    dict = loadmat(file_obj)

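For context on the elided remainder of extract_svhn: SVHN cropped-digit .mat files store images under key 'X' with shape (32, 32, 3, n) and labels under 'y' (with digit 0 encoded as class 10), so a loader has to transpose into (n, 32, 32, 3). A standalone sketch of that convention, not the repository's exact code:

import numpy as np
from scipy.io import loadmat

def load_svhn_mat(path):
  # Load an SVHN cropped-digits .mat file into (images, labels).
  data = loadmat(path)                            # keys include 'X' and 'y'
  images = np.transpose(data['X'], (3, 0, 1, 2))  # -> (n, 32, 32, 3)
  labels = data['y'].reshape(-1)                  # note: digit 0 is labeled 10
  return images, labels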
differential_privacy/multiple_teachers/train_teachers.py (4 additions, 4 deletions)
@@ -64,11 +64,11 @@ def train_teacher(dataset, nb_teachers, teacher_id):
  else:
    print("Check value of dataset flag")
    return False

  # Retrieve subset of data for this teacher
  data, labels = input.partition_dataset(train_data,
                                         train_labels,
                                         nb_teachers,
                                         teacher_id)

  print("Length of training data: " + str(len(labels)))
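input.partition_dataset itself is untouched by this commit; in the PATE setup it slices the training set into nb_teachers disjoint shards and returns shard teacher_id, so no two teachers train on the same example. A minimal sketch of that contract (the real implementation may validate or size shards differently):

import numpy as np

def partition_dataset_sketch(data, labels, nb_teachers, teacher_id):
  # Return the teacher_id-th of nb_teachers equal, disjoint shards.
  assert 0 <= teacher_id < nb_teachers
  shard = len(data) // nb_teachers   # examples per teacher (remainder dropped)
  start = teacher_id * shard
  return data[start:start + shard], labels[start:start + shard]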
