Expose differentially private RMSPropOptimizer.
PiperOrigin-RevId: 311072544
tensorflower-gardener committed May 12, 2020
Parent: 10335f6 · Commit: da9fb28
Showing 2 changed files with 20 additions and 6 deletions.
tensorflow_privacy/privacy/optimizers/dp_optimizer.py (3 additions, 0 deletions)
```diff
@@ -228,12 +228,15 @@ def ledger(self):
 AdagradOptimizer = tf.train.AdagradOptimizer
 AdamOptimizer = tf.train.AdamOptimizer
 GradientDescentOptimizer = tf.train.GradientDescentOptimizer
+RMSPropOptimizer = tf.train.RMSPropOptimizer

 DPAdagradOptimizer = make_optimizer_class(AdagradOptimizer)
 DPAdamOptimizer = make_optimizer_class(AdamOptimizer)
 DPGradientDescentOptimizer = make_optimizer_class(GradientDescentOptimizer)
+DPRMSPropOptimizer = make_optimizer_class(RMSPropOptimizer)

 DPAdagradGaussianOptimizer = make_gaussian_optimizer_class(AdagradOptimizer)
 DPAdamGaussianOptimizer = make_gaussian_optimizer_class(AdamOptimizer)
 DPGradientDescentGaussianOptimizer = make_gaussian_optimizer_class(
     GradientDescentOptimizer)
+DPRMSPropGaussianOptimizer = make_gaussian_optimizer_class(RMSPropOptimizer)
```
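With this change, the RMSProp variants are constructed exactly like the existing DP optimizers. A minimal sketch, mirroring the `(dp_sum_query, num_microbatches, learning_rate)` constructor call used in the tests below; the `gaussian_query` import path is an assumption based on where this package keeps its DP queries:

```python
from tensorflow_privacy.privacy.dp_query import gaussian_query
from tensorflow_privacy.privacy.optimizers import dp_optimizer

# GaussianSumQuery(l2_norm_clip, stddev): clip each microbatch gradient to
# L2 norm 1.0, then add Gaussian noise with stddev 1.1 to the sum.
dp_sum_query = gaussian_query.GaussianSumQuery(1.0, 1.1)

# The DP wrapper takes the query plus the wrapped optimizer's usual kwargs.
opt = dp_optimizer.DPRMSPropOptimizer(
    dp_sum_query, num_microbatches=1, learning_rate=0.1)
```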
tensorflow_privacy/privacy/optimizers/dp_optimizer_test.py (17 additions, 6 deletions)
```diff
@@ -48,7 +48,13 @@ def _loss(self, val0, val1):
       ('DPAdagrad 4', dp_optimizer.DPAdagradOptimizer, 4, [-2.5, -2.5]),
       ('DPAdam 1', dp_optimizer.DPAdamOptimizer, 1, [-2.5, -2.5]),
       ('DPAdam 2', dp_optimizer.DPAdamOptimizer, 2, [-2.5, -2.5]),
-      ('DPAdam 4', dp_optimizer.DPAdamOptimizer, 4, [-2.5, -2.5]))
+      ('DPAdam 4', dp_optimizer.DPAdamOptimizer, 4, [-2.5, -2.5]),
+      ('DPRMSPropOptimizer 1', dp_optimizer.DPRMSPropOptimizer, 1,
+       [-2.5, -2.5]),
+      ('DPRMSPropOptimizer 2', dp_optimizer.DPRMSPropOptimizer, 2,
+       [-2.5, -2.5]),
+      ('DPRMSPropOptimizer 4', dp_optimizer.DPRMSPropOptimizer, 4, [-2.5, -2.5])
+  )
   def testBaseline(self, cls, num_microbatches, expected_answer):
     with self.cached_session() as sess:
       var0 = tf.Variable([1.0, 2.0])
```
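The `[-2.5, -2.5]` expectation is the same for every optimizer and microbatch count because `testBaseline` evidently uses an effectively infinite clipping norm and zero noise (the `GaussianSumQuery(1.0e9, 0.0)` pattern visible at the bottom of this file), so the DP gradient reduces to the plain averaged gradient. An illustrative NumPy sketch of the accounting the wrapper performs (clip per microbatch, sum, add noise, average); this is a conceptual model, not the library's implementation:

```python
import numpy as np

def dp_average_gradient(per_microbatch_grads, l2_norm_clip, stddev, rng):
  """Clip each microbatch gradient to l2_norm_clip, sum, noise, average."""
  clipped = []
  for g in per_microbatch_grads:
    norm = np.linalg.norm(g)
    scale = min(1.0, l2_norm_clip / (norm + 1e-12))  # only ever scales down
    clipped.append(g * scale)
  noisy_sum = np.sum(clipped, axis=0) + rng.normal(
      0.0, stddev, size=clipped[0].shape)
  return noisy_sum / len(per_microbatch_grads)

rng = np.random.default_rng(0)
grads = [np.array([-2.5, -2.5])] * 4
# With a huge clipping norm and zero noise (the testBaseline setup), the
# DP average equals the plain average.
print(dp_average_gradient(grads, l2_norm_clip=1.0e9, stddev=0.0, rng=rng))
# -> [-2.5 -2.5]
```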
```diff
@@ -76,7 +82,8 @@ def testBaseline(self, cls, num_microbatches, expected_answer):
   @parameterized.named_parameters(
       ('DPGradientDescent', dp_optimizer.DPGradientDescentOptimizer),
       ('DPAdagrad', dp_optimizer.DPAdagradOptimizer),
-      ('DPAdam', dp_optimizer.DPAdamOptimizer))
+      ('DPAdam', dp_optimizer.DPAdamOptimizer),
+      ('DPRMSPropOptimizer', dp_optimizer.DPRMSPropOptimizer))
   def testClippingNorm(self, cls):
     with self.cached_session() as sess:
       var0 = tf.Variable([0.0, 0.0])
```
Expand All @@ -99,7 +106,8 @@ def testClippingNorm(self, cls):
@parameterized.named_parameters(
('DPGradientDescent', dp_optimizer.DPGradientDescentOptimizer),
('DPAdagrad', dp_optimizer.DPAdagradOptimizer),
('DPAdam', dp_optimizer.DPAdamOptimizer))
('DPAdam', dp_optimizer.DPAdamOptimizer),
('DPRMSPropOptimizer', dp_optimizer.DPRMSPropOptimizer))
def testNoiseMultiplier(self, cls):
with self.cached_session() as sess:
var0 = tf.Variable([0.0])
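For `testNoiseMultiplier`, the relevant relationship is the standard DP-SGD convention, which the Gaussian wrapper classes in this module appear to follow: the noise added to the clipped gradient sum has stddev `l2_norm_clip * noise_multiplier`, and averaging over microbatches divides it again. The numbers below are hypothetical, purely to show the arithmetic:

```python
l2_norm_clip = 4.0       # C: per-microbatch clipping bound
noise_multiplier = 2.0   # z: ratio of noise stddev to clipping bound
num_microbatches = 1

stddev_of_sum = l2_norm_clip * noise_multiplier   # noise on the clipped sum: 8.0
stddev_of_avg = stddev_of_sum / num_microbatches  # noise seen per update: 8.0
print(stddev_of_sum, stddev_of_avg)
```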
```diff
@@ -182,7 +190,8 @@ def linear_model_fn(features, labels, mode):
   @parameterized.named_parameters(
       ('DPGradientDescent', dp_optimizer.DPGradientDescentOptimizer),
       ('DPAdagrad', dp_optimizer.DPAdagradOptimizer),
-      ('DPAdam', dp_optimizer.DPAdamOptimizer))
+      ('DPAdam', dp_optimizer.DPAdamOptimizer),
+      ('DPRMSPropOptimizer', dp_optimizer.DPRMSPropOptimizer))
   def testUnrollMicrobatches(self, cls):
     with self.cached_session() as sess:
       var0 = tf.Variable([1.0, 2.0])
```
```diff
@@ -213,7 +222,8 @@ def testUnrollMicrobatches(self, cls):
   @parameterized.named_parameters(
       ('DPGradientDescent', dp_optimizer.DPGradientDescentGaussianOptimizer),
       ('DPAdagrad', dp_optimizer.DPAdagradGaussianOptimizer),
-      ('DPAdam', dp_optimizer.DPAdamGaussianOptimizer))
+      ('DPAdam', dp_optimizer.DPAdamGaussianOptimizer),
+      ('DPRMSPropOptimizer', dp_optimizer.DPRMSPropGaussianOptimizer))
   def testDPGaussianOptimizerClass(self, cls):
     with self.cached_session() as sess:
       var0 = tf.Variable([0.0])
```
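The `*GaussianOptimizer` classes are the convenience form: they build the `GaussianSumQuery` internally from a clipping norm and noise multiplier instead of taking a query object. A sketch of constructing the new RMSProp variant; the keyword names are assumed to mirror those of the existing Gaussian optimizers in this module:

```python
from tensorflow_privacy.privacy.optimizers import dp_optimizer

# Internally this corresponds to a GaussianSumQuery with
# stddev = l2_norm_clip * noise_multiplier.
opt = dp_optimizer.DPRMSPropGaussianOptimizer(
    l2_norm_clip=1.0,
    noise_multiplier=1.1,
    num_microbatches=1,
    learning_rate=0.1)
```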
```diff
@@ -241,7 +251,8 @@ def testDPGaussianOptimizerClass(self, cls):
   @parameterized.named_parameters(
       ('DPGradientDescent', dp_optimizer.DPGradientDescentOptimizer),
       ('DPAdagrad', dp_optimizer.DPAdagradOptimizer),
-      ('DPAdam', dp_optimizer.DPAdamOptimizer))
+      ('DPAdam', dp_optimizer.DPAdamOptimizer),
+      ('DPRMSPropOptimizer', dp_optimizer.DPRMSPropOptimizer))
   def testAssertOnNoCallOfComputeGradients(self, cls):
     dp_sum_query = gaussian_query.GaussianSumQuery(1.0e9, 0.0)
     opt = cls(dp_sum_query, num_microbatches=1, learning_rate=1.0)
```
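As its name suggests, `testAssertOnNoCallOfComputeGradients` checks that `apply_gradients` refuses to run unless `compute_gradients` ran first; otherwise the DP clipping and noising would be silently skipped. A TF1-style sketch of the correct call order, with a hypothetical vector loss (the wrapper needs one loss entry per example to form microbatches):

```python
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

from tensorflow_privacy.privacy.dp_query import gaussian_query
from tensorflow_privacy.privacy.optimizers import dp_optimizer

dp_sum_query = gaussian_query.GaussianSumQuery(1.0e9, 0.0)
opt = dp_optimizer.DPRMSPropOptimizer(
    dp_sum_query, num_microbatches=1, learning_rate=1.0)

var0 = tf.Variable([1.0, 2.0])
data0 = tf.Variable([[3.0, 4.0]])
vector_loss = tf.squared_difference(data0, var0)  # one row per example

grads_and_vars = opt.compute_gradients(vector_loss, var_list=[var0])
train_op = opt.apply_gradients(grads_and_vars)  # valid: gradients took the DP path
```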
