more lint
npapernot committed Jul 29, 2019
1 parent 33c3f05 commit f06443d
Showing 1 changed file with 41 additions and 29 deletions.
70 changes: 41 additions & 29 deletions privacy/bolton/optimizers.py
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bolton Optimizer for bolton method"""
"""Bolton Optimizer for bolton method."""

from __future__ import absolute_import
from __future__ import division
@@ -28,34 +28,38 @@
class GammaBetaDecreasingStep(
optimizer_v2.learning_rate_schedule.LearningRateSchedule):
"""Computes LR as minimum of 1/beta and 1/(gamma * step) at each step.
-A required step for privacy guarantees.
+This is a required step for privacy guarantees.
"""

def __init__(self):
self.is_init = False
self.beta = None
self.gamma = None

def __call__(self, step):
"""Computes and returns the learning rate.
Args:
step: the current iteration number
Returns:
decayed learning rate to minimum of 1/beta and 1/(gamma * step) as per
the Bolton privacy requirements.
"""
if not self.is_init:
raise AttributeError('Please initialize the {0} Learning Rate Scheduler.'
'This is performed automatically by using the '
'{1} as a context manager, '
'as desired'.format(self.__class__.__name__,
Bolton.__class__.__name__
)
)
dtype = self.beta.dtype
one = tf.constant(1, dtype)
return tf.math.minimum(tf.math.reduce_min(one/self.beta),
one/(self.gamma*math_ops.cast(step, dtype))
)
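# A minimal usage sketch of the schedule above (the numeric values are
# invented for illustration; initialize() is the setter called by the Bolton
# optimizer further below):
#
#   lr_fn = GammaBetaDecreasingStep()
#   lr_fn.initialize(tf.constant(2.0), tf.constant(0.5))  # beta=2, gamma=0.5
#   lr_fn(tf.constant(1))   # 0.5, capped at 1/beta
#   lr_fn(tf.constant(10))  # 0.2 = 1/(gamma * step)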

def get_config(self):
"""Return config to setup the learning rate scheduler."""
@@ -107,22 +111,24 @@ class Bolton(optimizer_v2.OptimizerV2):
Bolt-on Differential Privacy for Scalable Stochastic Gradient
Descent-based Analytics by Xi Wu et al.
"""

def __init__(self, # pylint: disable=super-init-not-called
optimizer,
loss,
dtype=tf.float32,
):
"""Constructor.
Args:
optimizer: Optimizer_v2 or subclass to be used as the optimizer
(wrapped).
loss: StrongConvexLoss function that the model is being compiled with.
dtype: dtype for the optimizer's internal tensors (defaults to tf.float32).
"""

if not isinstance(loss, StrongConvexMixin):
raise ValueError("loss function must be a Strongly Convex and therefore "
"extend the StrongConvexMixin.")
raise ValueError('loss function must be a Strongly Convex and therefore '
'extend the StrongConvexMixin.')
self._private_attributes = ['_internal_optimizer',
'dtype',
'noise_distribution',
@@ -134,7 +140,7 @@ def __init__(self, # pylint: disable=super-init-not-called
'layers',
'batch_size',
'_is_init'
]
self._internal_optimizer = optimizer
self.learning_rate = GammaBetaDecreasingStep() # use the Bolton Learning
# rate scheduler, as required for privacy guarantees. This will still need
@@ -154,6 +160,9 @@ def project_weights_to_r(self, force=False):
Args:
force: True to normalize regardless of previous weight values.
False to check if weights > R-ball and only normalize then.
Raises:
Exception: If called outside of the optimizer's context.
"""
if not self._is_init:
raise Exception('This method must be called from within the optimizer\'s '
@@ -171,14 +180,17 @@ def project_weights_to_r(self, force=False):
)
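# Note on the projection above (most of this hunk is collapsed): keeping the
# weights inside the R-ball amounts to rescaling a kernel k to k * (R / ||k||)
# whenever its norm exceeds the radius R supplied by the loss, which is the
# precondition the Bolt-on analysis relies on.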

def get_noise(self, input_dim, output_dim):
"""Sample noise to be added to weights for privacy guarantee
"""Sample noise to be added to weights for privacy guarantee.
Args:
input_dim: the input dimensionality for the weights
output_dim the output dimensionality for the weights
Args:
input_dim: the input dimensionality for the weights
output_dim the output dimensionality for the weights
Returns:
Noise in shape of layer's weights to be added to the weights.
Returns:
Noise in shape of layer's weights to be added to the weights.
Raises:
Exception: If called outside of the optimizer's context.
"""
if not self._is_init:
raise Exception('This method must be called from within the optimizer\'s '
@@ -206,7 +218,7 @@ def get_noise(self, input_dim, output_dim):
beta=1 / beta,
seed=1,
dtype=self.dtype
)
return unit_vector * gamma
raise NotImplementedError('Noise distribution: {0} is not '
'a valid distribution'.format(distribution))
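# Sketch of what the sampling above yields (assuming the 'laplace' branch): a
# uniformly random direction (unit_vector) is scaled by a magnitude drawn from
# a Gamma distribution, giving the high-dimensional Laplace-style noise the
# bolt-on guarantee requires.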
@@ -236,7 +248,7 @@ def __getattr__(self, name):
"".format(self.__class__.__name__,
self._internal_optimizer.__class__.__name__,
name
)
)

def __setattr__(self, key, value):
@@ -304,7 +316,7 @@ def __call__(self,
class_weights,
n_samples,
batch_size
):
"""Accepts required values for bolton method from context entry point.
Stores them on the optimizer for use throughout fitting.
@@ -328,7 +340,7 @@ def __call__(self,
self.noise_distribution = noise_distribution
self.learning_rate.initialize(self.loss.beta(class_weights),
self.loss.gamma()
)
self.epsilon = tf.constant(epsilon, dtype=self.dtype)
self.class_weights = tf.constant(class_weights, dtype=self.dtype)
self.n_samples = tf.constant(n_samples, dtype=self.dtype)
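# Rough sketch of the context entry (the leading parameters are cut from this
# hunk and partly assumed): fitting enters the optimizer's context so these
# values are attached before training and the noise/projection steps run on
# exit, e.g.
#
#   with bolton(..., class_weights, n_samples, batch_size):
#       ...  # training loop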
@@ -354,7 +366,7 @@ def __exit__(self, *args):
output_dim = layer.units
noise = self.get_noise(input_dim,
output_dim,
)
layer.kernel = tf.math.add(layer.kernel, noise)
self.noise_distribution = None
self.learning_rate.de_initialize()
