Skip to content

Commit

Permalink
Further fixes and improvements.
Browse files Browse the repository at this point in the history
  • Loading branch information
fchollet committed Feb 22, 2017
1 parent 9e25cb7 commit db22fdf
Show file tree
Hide file tree
Showing 3 changed files with 32 additions and 2 deletions.
27 changes: 26 additions & 1 deletion keras/engine/training.py
Original file line number Diff line number Diff line change
Expand Up @@ -666,6 +666,12 @@ def compile(self, optimizer, loss, metrics=None, loss_weights=None,
To specify different metrics for different outputs of a
multi-output model, you could also pass a dictionary,
such as `metrics={'output_a': 'accuracy'}`.
loss_weights: Optional list or dictionary specifying scalar
coefficients (Python floats) to weight the loss contributions
of different model outputs.
If a list, it is expected to have a 1:1 mapping
to the model's outputs. If a dict, it is expected to map
output names (strings) to scalar coefficients.
sample_weight_mode: if you need to do timestep-wise
sample weighting (2D weights), set this to `"temporal"`.
`None` defaults to sample-wise weights (1D).
Expand Down Expand Up @@ -1269,6 +1275,10 @@ class indices (integers) to
# Returns
A `History` instance. Its `history` attribute contains
all information collected during training.
# Raises
ValueError: In case of mismatch between the provided input data
and what the model expects.
"""
# validate user data
x, y, sample_weights = self._standardize_user_data(
Expand Down Expand Up @@ -1375,6 +1385,9 @@ def evaluate(self, x, y, batch_size=32, verbose=1, sample_weight=None):
you can also pass a dictionary
mapping output names to Numpy arrays.
batch_size: integer. Number of samples per gradient update.
verbose: verbosity mode, 0 or 1.
sample_weight: Array of weights to weight the contribution
of different samples to the loss and metrics.
# Returns
Scalar test loss (if the model has a single output and no metrics)
Expand Down Expand Up @@ -1411,7 +1424,13 @@ def predict(self, x, batch_size=32, verbose=0):
verbose: verbosity mode, 0 or 1.
# Returns
A Numpy array of predictions.
Numpy array(s) of predictions.
# Raises
ValueError: In case of mismatch between the provided
input data and the model's expectations,
or in case a stateful model receives a number of samples
that is not a multiple of the batch size.
"""
# validate user data
x = _standardize_input_data(x, self.input_names,
Expand Down Expand Up @@ -1531,6 +1550,12 @@ def test_on_batch(self, x, y, sample_weight=None):

def predict_on_batch(self, x):
"""Returns predictions for a single batch of samples.
# Arguments
x: Input samples, as a Numpy array.
# Returns
Numpy array(s) of predictions.
"""
x = _standardize_input_data(x, self.input_names,
self.internal_input_shapes)
Expand Down
2 changes: 1 addition & 1 deletion keras/layers/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -688,7 +688,7 @@ def get_config(self):
def from_config(cls, config, custom_objects=None):
globs = globals()
if custom_objects:
globs = dict(globs.items() + custom_objects.items())
globs = dict(list(globs.items()) + list(custom_objects.items()))
function_type = config.pop('function_type')
if function_type == 'function':
# Simple lookup in custom objects
Expand Down
5 changes: 5 additions & 0 deletions keras/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -187,6 +187,11 @@ def convert_custom_objects(obj):
# Arguments
obj: object, dict, or list.
# Returns
The same structure, where occurrences
of a custom object name have been replaced
with the custom object.
"""
if isinstance(obj, list):
deserialized = []
Expand Down

0 comments on commit db22fdf

Please sign in to comment.