Commit
Merge pull request tensorflow#4700 from jhseu/branch_134847453
Branch 134847453
jhseu authored Oct 1, 2016
2 parents e5a71cb + e509b1c commit aaeb50c
Showing 108 changed files with 4,454 additions and 1,561 deletions.
5 changes: 1 addition & 4 deletions tensorflow/BUILD
@@ -54,10 +54,7 @@ config_setting(
 
 package_group(
     name = "internal",
-    packages = [
-        "//learning/vis/...",
-        "//tensorflow/...",
-    ],
+    packages = ["//tensorflow/..."],
 )
 
 sh_binary(
4 changes: 2 additions & 2 deletions tensorflow/cc/saved_model/constants.h
@@ -28,11 +28,11 @@ constexpr char kSavedModelFilenamePbTxt[] = "saved_model.pbtxt";
 constexpr char kSavedModelVariablesDirectory[] = "variables";
 
 // SavedModel variables filename.
-constexpr char kSavedModelVariablesFilename[] = "saved_model_variables";
+constexpr char kSavedModelVariablesFilename[] = "variables";
 
 // SavedModel sharded variables filename.
 constexpr char kSavedModelVariablesShardedFilename[] =
-    "saved_model_variables-\?\?\?\?\?-of-\?\?\?\?\?";
+    "variables-\?\?\?\?\?-of-\?\?\?\?\?";
 
 // Commonly used tags.
 constexpr char kSavedModelTagServe[] = "serve";
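For reference, the renamed constants compose into the checkpoint prefix that the updated testdata below records as /tmp/saved_model/half_plus_two/variables/variables. A minimal Python sketch of that path layout (the helper function here is hypothetical, not part of the SavedModel API):

    import os

    # Mirrors the C++ constants above: the variables checkpoint now lives under
    # "<export_dir>/variables/variables*" instead of "<export_dir>/saved_model_variables*".
    SAVED_MODEL_VARIABLES_DIRECTORY = "variables"
    SAVED_MODEL_VARIABLES_FILENAME = "variables"

    def variables_checkpoint_prefix(export_dir):
      """Returns the checkpoint prefix used for a SavedModel's variables."""
      return os.path.join(export_dir,
                          SAVED_MODEL_VARIABLES_DIRECTORY,
                          SAVED_MODEL_VARIABLES_FILENAME)

    print(variables_checkpoint_prefix("/tmp/saved_model/half_plus_two"))
    # -> /tmp/saved_model/half_plus_two/variables/variables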
Binary file modified tensorflow/cc/saved_model/testdata/half_plus_two/saved_model.pb
@@ -1,2 +1,2 @@
-model_checkpoint_path: "/tmp/saved_model/half_plus_two/saved_model_variables"
-all_model_checkpoint_paths: "/tmp/saved_model/half_plus_two/saved_model_variables"
+model_checkpoint_path: "/tmp/saved_model/half_plus_two/variables/variables"
+all_model_checkpoint_paths: "/tmp/saved_model/half_plus_two/variables/variables"

This file was deleted.

@@ -0,0 +1,2 @@
+model_checkpoint_path: "/tmp/saved_model/half_plus_two_pbtxt/variables/variables-?????-of-00001"
+all_model_checkpoint_paths: "/tmp/saved_model/half_plus_two_pbtxt/variables/variables-?????-of-00001"
@@ -1,2 +1,2 @@
-model_checkpoint_path: "/tmp/saved_model/half_plus_two/variables/saved_model_variables-?????-of-00001"
-all_model_checkpoint_paths: "/tmp/saved_model/half_plus_two/variables/saved_model_variables-?????-of-00001"
+model_checkpoint_path: "/tmp/saved_model/half_plus_two/variables/variables-?????-of-00001"
+all_model_checkpoint_paths: "/tmp/saved_model/half_plus_two/variables/variables-?????-of-00001"
1 change: 1 addition & 0 deletions tensorflow/contrib/BUILD
@@ -38,6 +38,7 @@ py_library(
         "//tensorflow/contrib/tensor_forest/hybrid:ops_lib",
         "//tensorflow/contrib/tensorboard",
         "//tensorflow/contrib/testing:testing_py",
+        "//tensorflow/contrib/tfprof",
         "//tensorflow/contrib/training:training_py",
         "//tensorflow/contrib/util:util_py",
     ],
1 change: 1 addition & 0 deletions tensorflow/contrib/__init__.py
@@ -41,5 +41,6 @@
 from tensorflow.contrib import tensor_forest
 from tensorflow.contrib import tensorboard
 from tensorflow.contrib import testing
+from tensorflow.contrib import tfprof
 from tensorflow.contrib import training
 from tensorflow.contrib import util
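With this change, tfprof is exported from tf.contrib like the other contrib packages. A minimal sketch of the import it enables (only the import itself is taken from the diff; the submodule's contents are not shown here):

    import tensorflow as tf
    from tensorflow.contrib import tfprof  # newly importable after this change

    # Both names refer to the same module once tensorflow.contrib re-exports it.
    assert tf.contrib.tfprof is tfprof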
12 changes: 8 additions & 4 deletions tensorflow/contrib/learn/python/learn/estimators/classifier.py
@@ -62,7 +62,7 @@ class Classifier(estimator.Estimator):
   PROBABILITY_OUTPUT = 'probabilities'
 
   def __init__(self, model_fn, n_classes, model_dir=None, config=None,
-               params=None):
+               params=None, feature_engineering_fn=None):
     """Constructor for Classifier.
 
     Args:
@@ -73,16 +73,20 @@ def __init__(self, model_fn, n_classes, model_dir=None, config=None,
         continue training a previously saved model.
       config: Configuration object (optional)
       params: `dict` of hyper parameters that will be passed into `model_fn`.
+      feature_engineering_fn: Feature engineering function. Takes features and
+                              targets which are the output of `input_fn` and
+                              returns features and targets which will be fed
+                              into the model.
     """
     self._n_classes = n_classes
     self._logits_fn = model_fn
     if params:
       model_fn = self._classifier_model_with_params
     else:
       model_fn = self._classifier_model
-    super(Classifier, self).__init__(model_fn=model_fn,
-                                     model_dir=model_dir, config=config,
-                                     params=params)
+    super(Classifier, self).__init__(
+        model_fn=model_fn, model_dir=model_dir, config=config, params=params,
+        feature_engineering_fn=feature_engineering_fn)
 
   def evaluate(self,
                x=None,
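To illustrate the new constructor argument, a feature_engineering_fn maps the (features, targets) pair produced by input_fn to the pair actually fed to the model. A minimal sketch (the feature names and the commented-out model_fn are hypothetical):

    import tensorflow as tf

    def input_fn():
      features = {'x': tf.constant([[1.], [2.], [3.]])}
      targets = tf.constant([[0], [1], [1]])
      return features, targets

    def feature_engineering_fn(features, targets):
      # Derive an extra feature; pass the targets through unchanged.
      features['x_squared'] = tf.square(features['x'])
      return features, targets

    # classifier = tf.contrib.learn.Classifier(
    #     model_fn=my_model_fn, n_classes=2,
    #     feature_engineering_fn=feature_engineering_fn)
    # classifier.fit(input_fn=input_fn, steps=100)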
40 changes: 32 additions & 8 deletions tensorflow/contrib/learn/python/learn/estimators/dnn.py
@@ -348,7 +348,8 @@ def __init__(self,
                dropout=None,
                gradient_clip_norm=None,
                enable_centered_bias=None,
-               config=None):
+               config=None,
+               feature_engineering_fn=None):
     """Initializes a DNNClassifier instance.
 
     Args:
@@ -379,6 +380,10 @@ def __init__(self,
         bias variable for each class. Rest of the model structure learns the
         residual after centered bias.
       config: `RunConfig` object to configure the runtime settings.
+      feature_engineering_fn: Feature engineering function. Takes features and
+                              targets which are the output of `input_fn` and
+                              returns features and targets which will be fed
+                              into the model.
 
     Returns:
       A `DNNClassifier` estimator.
@@ -415,7 +420,8 @@ def __init__(self,
             "gradient_clip_norm": gradient_clip_norm,
             "enable_centered_bias": enable_centered_bias,
             "num_ps_replicas": num_ps_replicas,
-        })
+        },
+        feature_engineering_fn=feature_engineering_fn)
 
   def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
           monitors=None, max_steps=None):
@@ -561,8 +567,10 @@ def model_dir(self):
     return self._model_dir
 
   @property
-  @deprecated("2016-10-13", "This method inspects the private state of the "
-              "object, and should not be used")
+  @deprecated("2016-10-30",
+              "This method will be removed after the deprecation date. "
+              "To inspect variables, use get_variable_names() and "
+              "get_variable_value().")
   def weights_(self):
     hiddenlayer_weights = [checkpoints.load_variable(
         self._model_dir, name=("dnn/hiddenlayer_%d/weights" % i))
@@ -572,8 +580,10 @@ def weights_(self):
     return hiddenlayer_weights + logits_weights
 
   @property
-  @deprecated("2016-10-13", "This method inspects the private state of the "
-              "object, and should not be used")
+  @deprecated("2016-10-30",
+              "This method will be removed after the deprecation date. "
+              "To inspect variables, use get_variable_names() and "
+              "get_variable_value().")
   def bias_(self):
     hiddenlayer_bias = [checkpoints.load_variable(
         self._model_dir, name=("dnn/hiddenlayer_%d/biases" % i))
@@ -655,7 +665,8 @@ def __init__(self,
                dropout=None,
                gradient_clip_norm=None,
                enable_centered_bias=None,
-               config=None):
+               config=None,
+               feature_engineering_fn=None):
     """Initializes a `DNNRegressor` instance.
 
     Args:
@@ -684,6 +695,10 @@ def __init__(self,
         bias variable for each class. Rest of the model structure learns the
         residual after centered bias.
      config: `RunConfig` object to configure the runtime settings.
+      feature_engineering_fn: Feature engineering function. Takes features and
+                              targets which are the output of `input_fn` and
+                              returns features and targets which will be fed
+                              into the model.
 
     Returns:
       A `DNNRegressor` estimator.
@@ -701,7 +716,8 @@ def __init__(self,
         dnn_dropout=dropout,
         gradient_clip_norm=gradient_clip_norm,
         enable_centered_bias=enable_centered_bias,
-        config=config)
+        config=config,
+        feature_engineering_fn=feature_engineering_fn)
     self.feature_columns = feature_columns
     self.optimizer = optimizer
     self.activation_fn = activation_fn
@@ -710,9 +726,17 @@ def __init__(self,
     self._feature_columns_inferred = False
 
   @property
+  @deprecated("2016-10-30",
+              "This method will be removed after the deprecation date. "
+              "To inspect variables, use get_variable_names() and "
+              "get_variable_value().")
   def weights_(self):
     return self.dnn_weights_
 
   @property
+  @deprecated("2016-10-30",
+              "This method will be removed after the deprecation date. "
+              "To inspect variables, use get_variable_names() and "
+              "get_variable_value().")
   def bias_(self):
     return self.dnn_bias_
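As the updated deprecation messages suggest, model weights should now be read through the public variable-inspection methods rather than the weights_ and bias_ properties. A minimal sketch, assuming a DNNClassifier that has already been trained (the variable name follows the "dnn/hiddenlayer_%d/weights" pattern used above):

    import tensorflow as tf

    classifier = tf.contrib.learn.DNNClassifier(
        feature_columns=[tf.contrib.layers.real_valued_column('x')],
        hidden_units=[3, 3])
    # classifier.fit(input_fn=..., steps=...)  # train first so a checkpoint exists

    # Preferred replacement for the deprecated weights_ / bias_ properties:
    # for name in classifier.get_variable_names():
    #   print(name)
    # weights = classifier.get_variable_value('dnn/hiddenlayer_0/weights')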
tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py
@@ -79,7 +79,8 @@ def __init__(self, # _joint_linear_weights pylint: disable=invalid-name
                dnn_dropout=None,
                gradient_clip_norm=None,
                enable_centered_bias=True,
-               config=None):
+               config=None,
+               feature_engineering_fn=None):
     """Initializes a _DNNLinearCombinedBaseEstimator instance.
 
     Args:
@@ -114,6 +115,10 @@ def __init__(self, # _joint_linear_weights pylint: disable=invalid-name
         bias variable for each class. Rest of the model structure learns the
         residual after centered bias.
       config: RunConfig object to configure the runtime settings.
+      feature_engineering_fn: Feature engineering function. Takes features and
+                              targets which are the output of `input_fn` and
+                              returns features and targets which will be fed
+                              into the model.
 
     Raises:
       ValueError: If both linear_feature_columns and dnn_features_columns are
@@ -147,24 +152,43 @@ def __init__(self, # _joint_linear_weights pylint: disable=invalid-name
     self._centered_bias_weight_collection = "centered_bias"
     self._enable_centered_bias = enable_centered_bias
     self._target_column = target_column
+    self._feature_engineering_fn = (
+        feature_engineering_fn or
+        (lambda features, targets: (features, targets)))
 
   @property
+  @deprecated("2016-10-30",
+              "This method will be removed after the deprecation date. "
+              "To inspect variables, use get_variable_names() and "
+              "get_variable_value().")
   def linear_weights_(self):
     """Returns weights per feature of the linear part."""
     return self._linear_model.get_weights(model_dir=self._model_dir)
 
   @property
+  @deprecated("2016-10-30",
+              "This method will be removed after the deprecation date. "
+              "To inspect variables, use get_variable_names() and "
+              "get_variable_value().")
   def linear_bias_(self):
     """Returns bias of the linear part."""
     return (self._linear_model.get_bias(model_dir=self._model_dir) +
             self.get_variable_value("centered_bias_weight"))
 
   @property
+  @deprecated("2016-10-30",
+              "This method will be removed after the deprecation date. "
+              "To inspect variables, use get_variable_names() and "
+              "get_variable_value().")
   def dnn_weights_(self):
     """Returns weights of deep neural network part."""
     return self._dnn_model.get_weights(model_dir=self._model_dir)
 
   @property
+  @deprecated("2016-10-30",
+              "This method will be removed after the deprecation date. "
+              "To inspect variables, use get_variable_names() and "
+              "get_variable_value().")
   def dnn_bias_(self):
     """Returns bias of deep neural network part."""
     return (self._dnn_model.get_bias(model_dir=self._model_dir) +
@@ -185,6 +209,7 @@ def _get_train_ops(self, features, targets):
     assert global_step
 
     features = self._get_feature_dict(features)
+    features, targets = self._feature_engineering_fn(features, targets)
     logits = self._logits(features, is_training=True)
     if self._enable_centered_bias:
       centered_bias_step = [self._centered_bias_step(targets, features)]
@@ -209,12 +234,14 @@ def _get_eval_ops(self, features, targets, metrics=None):
     """See base class."""
     features = self._get_feature_dict(features)
+    features, targets = self._feature_engineering_fn(features, targets)
     logits = self._logits(features)
     return self._target_column.get_eval_ops(features, logits, targets, metrics)
 
   def _get_predict_ops(self, features):
     """See base class."""
     features = self._get_feature_dict(features)
+    features, _ = self._feature_engineering_fn(features, None)
     logits = self._logits(features)
     return self._target_column.logits_to_predictions(logits, proba=True)
 
@@ -366,7 +393,8 @@ def __init__(self, # _joint_linear_weights pylint: disable=invalid-name
                dnn_dropout=None,
                gradient_clip_norm=None,
                enable_centered_bias=None,
-               config=None):
+               config=None,
+               feature_engineering_fn=None):
     """Constructs a DNNLinearCombinedClassifier instance.
 
     Args:
@@ -403,6 +431,10 @@ def __init__(self, # _joint_linear_weights pylint: disable=invalid-name
         bias variable for each class. Rest of the model structure learns the
         residual after centered bias.
       config: RunConfig object to configure the runtime settings.
+      feature_engineering_fn: Feature engineering function. Takes features and
+                              targets which are the output of `input_fn` and
+                              returns features and targets which will be fed
+                              into the model.
 
     Raises:
       ValueError: If `n_classes` < 2.
@@ -433,7 +465,8 @@ def __init__(self, # _joint_linear_weights pylint: disable=invalid-name
         gradient_clip_norm=gradient_clip_norm,
         enable_centered_bias=enable_centered_bias,
         target_column=target_column,
-        config=config)
+        config=config,
+        feature_engineering_fn=feature_engineering_fn)
 
   @deprecated_arg_values(
       estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
@@ -559,7 +592,8 @@ def __init__(self, # _joint_linear_weights pylint: disable=invalid-name
                gradient_clip_norm=None,
                enable_centered_bias=None,
                target_dimension=1,
-               config=None):
+               config=None,
+               feature_engineering_fn=None):
     """Initializes a DNNLinearCombinedRegressor instance.
 
     Args:
@@ -596,6 +630,10 @@ def __init__(self, # _joint_linear_weights pylint: disable=invalid-name
         residual after centered bias.
       target_dimension: TODO(zakaria): dimension of the target for multilabels.
       config: RunConfig object to configure the runtime settings.
+      feature_engineering_fn: Feature engineering function. Takes features and
+                              targets which are the output of `input_fn` and
+                              returns features and targets which will be fed
+                              into the model.
 
     Raises:
       ValueError: If both linear_feature_columns and dnn_features_columns are
@@ -620,4 +658,5 @@ def __init__(self, # _joint_linear_weights pylint: disable=invalid-name
         gradient_clip_norm=gradient_clip_norm,
         enable_centered_bias=enable_centered_bias,
         target_column=target_column,
-        config=config)
+        config=config,
+        feature_engineering_fn=feature_engineering_fn)
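One detail worth noting from _get_predict_ops above: at prediction time the estimator calls the function with targets=None, and when no function is supplied the constructor falls back to an identity function. A user-supplied feature_engineering_fn should therefore tolerate a missing target; a minimal sketch of a defensively written function (feature names are illustrative):

    import tensorflow as tf

    def feature_engineering_fn(features, targets):
      # Feature transformations always apply.
      features['x_log'] = tf.log(features['x'] + 1.)
      # targets is None in the predict path, so only transform it when present.
      if targets is not None:
        targets = targets * 10.
      return features, targets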
tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_test.py
@@ -1003,5 +1003,45 @@ def _input_fn(num_epochs=None):
     self.assertLess(scores['loss'], 0.2)
 
 
+class FeatureEngineeringFunctionTest(tf.test.TestCase):
+  """Tests feature_engineering_fn."""
+
+  def testNoneFeatureEngineeringFn(self):
+    def input_fn():
+      # Create 4 rows of (y = x)
+      target = tf.constant([[100.], [3.], [2.], [2.]])
+      features = {'x': tf.constant([[100.], [3.], [2.], [2.]])}
+      return features, target
+
+    def feature_engineering_fn(features, targets):
+      _, _ = features, targets
+      target = tf.constant([[1000.], [30.], [20.], [20.]])
+      features = {'x': tf.constant([[1000.], [30.], [20.], [20.]])}
+      return features, target
+
+    estimator_with_fe_fn = tf.contrib.learn.DNNLinearCombinedRegressor(
+        linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
+        dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
+        dnn_hidden_units=[3, 3],
+        config=tf.contrib.learn.RunConfig(tf_random_seed=1),
+        feature_engineering_fn=feature_engineering_fn)
+    estimator_with_fe_fn.fit(input_fn=input_fn, steps=100)
+
+    estimator_without_fe_fn = tf.contrib.learn.DNNLinearCombinedRegressor(
+        linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
+        dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
+        dnn_hidden_units=[3, 3],
+        config=tf.contrib.learn.RunConfig(tf_random_seed=1))
+    estimator_without_fe_fn.fit(input_fn=input_fn, steps=100)
+
+    # predictions = y
+    prediction_with_fe_fn = next(
+        estimator_with_fe_fn.predict(input_fn=input_fn, as_iterable=True))
+    self.assertAlmostEqual(1000., prediction_with_fe_fn, delta=1.0)
+    prediction_without_fe_fn = next(
+        estimator_without_fe_fn.predict(input_fn=input_fn, as_iterable=True))
+    self.assertAlmostEqual(100., prediction_without_fe_fn, delta=1.0)
+
+
 if __name__ == '__main__':
   tf.test.main()