
Got AttributeError: 'NoneType' object has no attribute 'logits' error while applying tutorial #162

georgech3 opened this issue Mar 6, 2021
Hello everyone,

I'm trying to apply AdaNet in my local Jupyter notebook, following the tutorial here: https://github.com/tensorflow/adanet/blob/master/adanet/experimental/adanet_modelflow_tutorial.ipynb

System information

  • Device: Desktop PC
  • Environment: Windows 10 Professional
  • Running Platform: Anaconda
  • Python version: 3.7
  • TensorFlow version: 2.2 (installed via conda)
  • CUDA/cuDNN version: 10.1, V10.1.243
  • GPU: 1080Ti

Problem Description
I got the error: AttributeError: 'NoneType' object has no attribute 'logits'
I'm wondering where my code deviates from the tutorial such that the model can't be created (full code below, with my own guess at the cause noted right after it).

Additional Information
Full Code

FEATURES_KEY = "x"


def input_fn(partition, training, batch_size):
    """Generate an input function for the Estimator."""
    def _input_fn():

        if partition == "train":
            dataset = tf.data.Dataset.from_tensor_slices(({
                FEATURES_KEY:
                tf.log1p(X_train)
            }, tf.log1p(y_train)))
        else:
            dataset = tf.data.Dataset.from_tensor_slices(({
                FEATURES_KEY:
                tf.log1p(X_test)
            }, tf.log1p(y_test)))

        # We call repeat after shuffling, rather than before, to prevent separate
        # epochs from blending together.
        if training:
            dataset = dataset.shuffle(10 * batch_size,
                                      seed=RANDOM_SEED)

        dataset = dataset.batch(batch_size)
        iterator = dataset.make_one_shot_iterator()
        features, labels = iterator.get_next()

        return features, labels

    return _input_fn


_NUM_LAYERS_KEY = "num_layers"

class _SimpleDNNBuilder(adanet.subnetwork.Builder):
    def __init__(self, optimizer, layer_size, num_layers, learn_mixture_weights,
               seed):
        self._optimizer = optimizer
        self._layer_size = layer_size
        self._num_layers = num_layers
        self._learn_mixture_weights = learn_mixture_weights
        self._seed = seed

    def build_subnetwork(self,
                       features,
                       logits_dimension,
                       training,
                       iteration_step,
                       summary,
                       previous_ensemble=None):
        input_layer = tf.to_float(features[FEATURES_KEY])
        kernel_initializer = tf.glorot_uniform_initializer(seed=self._seed)
        last_layer = input_layer
        for _ in range(self._num_layers):
            last_layer = tf.layers.dense(last_layer,
                                         units=self._layer_size,
                                         activation=tf.nn.relu,
                                         kernel_initializer=kernel_initializer)
            logits = tf.layers.dense(
                last_layer,
                units=logits_dimension,
                kernel_initializer=kernel_initializer)
            
            shared = {_NUM_LAYERS_KEY: self._num_layers}   
            
            return adanet.Subnetwork(last_layer=last_layer,
                                     logits=logits,
                                     complexity=self._measure_complexity(),
                                     shared=shared)

    def _measure_complexity(self):
        return tf.sqrt(tf.to_float(self._num_layers))

    def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
                                iteration_step, summary, previous_ensemble):
        return self._optimizer.minimize(loss=loss, var_list=var_list)

    @property
    def name(self):

        if self._num_layers == 0:
            # A DNN with no hidden layers is a linear model.
            return "linear"
        
        return "{}_layer_dnn".format(self._num_layers)


class SimpleDNNGenerator(adanet.subnetwork.Generator):
    def __init__(self,
               optimizer,
               layer_size=64,
               learn_mixture_weights=False,
               seed=None):
        self._seed = seed
        self._dnn_builder_fn = functools.partial(
            _SimpleDNNBuilder,
            optimizer=optimizer,
            layer_size=layer_size,
            learn_mixture_weights=learn_mixture_weights)

    def generate_candidates(self, previous_ensemble, iteration_number,
                            previous_ensemble_reports, all_reports):
        num_layers = 0
        seed = self._seed
        if previous_ensemble:
            num_layers = previous_ensemble.subnetworks[-1].shared[_NUM_LAYERS_KEY]
        if seed is not None:
            seed += iteration_number
            
        return [
            self._dnn_builder_fn(num_layers=num_layers, seed=seed),
            self._dnn_builder_fn(num_layers=num_layers + 1, seed=seed),
        ]

#@title AdaNet parameters
LEARNING_RATE = 0.001  #@param {type:"number"}
TRAIN_STEPS = 60000  #@param {type:"integer"}
BATCH_SIZE = 32  #@param {type:"integer"}

LEARN_MIXTURE_WEIGHTS = False  #@param {type:"boolean"}
ADANET_LAMBDA = 0  #@param {type:"number"}
ADANET_ITERATIONS = 3  #@param {type:"integer"}


def train_and_evaluate(experiment_name, learn_mixture_weights=LEARN_MIXTURE_WEIGHTS,
                       adanet_lambda=ADANET_LAMBDA):
    model_dir = os.path.join(LOG_DIR, experiment_name)
    ensembler_optimizer = None
    if learn_mixture_weights:
        ensembler_optimizer = tf.train.RMSPropOptimizer(
            learning_rate=LEARNING_RATE)

    estimator = adanet.Estimator(
        # Since we are predicting housing prices, we'll use a regression
        # head that optimizes for MSE.
        head=tf.estimator.RegressionHead(),

        # Define the generator, which defines our search space of subnetworks
        # to train as candidates to add to the final AdaNet model.
        subnetwork_generator=SimpleDNNGenerator(
            optimizer=tf.train.RMSPropOptimizer(learning_rate=LEARNING_RATE),
            learn_mixture_weights=learn_mixture_weights,
            seed=RANDOM_SEED),

        # The number of train steps per iteration.
        max_iteration_steps=TRAIN_STEPS // ADANET_ITERATIONS,

        # The evaluator will evaluate the model on the full training set to
        # compute the overall AdaNet loss (train loss + complexity
        # regularization) to select the best candidate to include in the
        # final AdaNet model.
        evaluator=adanet.Evaluator(
            input_fn=input_fn("train", training=False, batch_size=BATCH_SIZE)),

        ensemblers=[
            adanet.ensemble.ComplexityRegularizedEnsembler(
                optimizer=ensembler_optimizer,
                # Lambda is the strength of complexity regularization. A larger
                # value will penalize more complex subnetworks.
                adanet_lambda=adanet_lambda),
        ],      

        # Configuration for Estimators.
        config=tf.estimator.RunConfig(
                save_summary_steps=5000,
                save_checkpoints_steps=5000,
                tf_random_seed=RANDOM_SEED,
                model_dir=model_dir)
    )

    # Train and evaluate using the tf.estimator tooling.
    train_spec = tf.estimator.TrainSpec(
        input_fn=input_fn("train", training=True, batch_size=BATCH_SIZE),
        max_steps=TRAIN_STEPS)

    eval_spec = tf.estimator.EvalSpec(
        input_fn=input_fn("test", training=False, batch_size=BATCH_SIZE),
        steps=None,
        start_delay_secs=1,
        throttle_secs=30,
    )
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
    
    return estimator.evaluate(
        input_fn("test", training=False, batch_size=BATCH_SIZE), steps=None)

def ensemble_architecture(result):
    architecture = result["architecture/adanet/ensembles"]
    # The architecture is a serialized Summary proto for TensorBoard.
    summary_proto = tf.summary.Summary.FromString(architecture)
    return summary_proto.value[0].tensor.string_val[0]
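
A guess at the cause (not verified, just from re-reading my code against the tutorial): in build_subnetwork above, the logits layer, the shared dict, and the return statement are all indented inside the for loop. For a candidate with num_layers == 0 — the "linear" candidate that SimpleDNNGenerator proposes on the first iteration — the loop body never executes, so the method falls through and returns None instead of an adanet.Subnetwork. In the tutorial those lines sit outside the loop, roughly:

    def build_subnetwork(self,
                         features,
                         logits_dimension,
                         training,
                         iteration_step,
                         summary,
                         previous_ensemble=None):
        input_layer = tf.to_float(features[FEATURES_KEY])
        kernel_initializer = tf.glorot_uniform_initializer(seed=self._seed)
        last_layer = input_layer
        for _ in range(self._num_layers):
            last_layer = tf.layers.dense(last_layer,
                                         units=self._layer_size,
                                         activation=tf.nn.relu,
                                         kernel_initializer=kernel_initializer)
        # Logits, shared state, and the return statement belong *outside* the
        # loop, so the zero-layer (linear) candidate still returns a Subnetwork.
        logits = tf.layers.dense(last_layer,
                                 units=logits_dimension,
                                 kernel_initializer=kernel_initializer)
        shared = {_NUM_LAYERS_KEY: self._num_layers}
        return adanet.Subnetwork(last_layer=last_layer,
                                 logits=logits,
                                 complexity=self._measure_complexity(),
                                 shared=shared)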

Full Log

INFO:tensorflow:Using config: {'_model_dir': '.\\results\\20210305\\uniform_average_ensemble_baseline', '_tf_random_seed': 42, '_save_summary_steps': 5000, '_save_checkpoints_steps': 5000, '_save_checkpoints_secs': None, '_session_config': allow_soft_placement: true
graph_options {
  rewrite_options {
    meta_optimizer_iterations: ONE
  }
}
, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_experimental_max_worker_delay_secs': None, '_session_creation_timeout_secs': 7200, '_service': None, '_cluster_spec': ClusterSpec({}), '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}
INFO:tensorflow:Not using Distribute Coordinator.
INFO:tensorflow:Running training and evaluation locally (non-distributed).
INFO:tensorflow:Start train and evaluate loop. The evaluate will happen after every checkpoint. Checkpoint frequency is determined based on RunConfig arguments: save_checkpoints_steps 5000 or save_checkpoints_secs None.
INFO:tensorflow:Using config: {'_model_dir': '.\\results\\20210305\\uniform_average_ensemble_baseline', '_tf_random_seed': 42, '_save_summary_steps': 5000, '_save_checkpoints_steps': 5000, '_save_checkpoints_secs': None, '_session_config': allow_soft_placement: true
graph_options {
  rewrite_options {
    meta_optimizer_iterations: ONE
  }
}
, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_experimental_max_worker_delay_secs': None, '_session_creation_timeout_secs': 7200, '_service': None, '_cluster_spec': ClusterSpec({}), '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}
WARNING:tensorflow:Estimator's model_fn (<function Estimator._create_model_fn.<locals>._adanet_model_fn at 0x000002D952D3EE58>) includes params argument, but params are not passed to Estimator.
INFO:tensorflow:Calling model_fn.
---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-108-bdd9b905e372> in <module>
----> 1 results = train_and_evaluate("uniform_average_ensemble_baseline")
      2 print("Loss:", results["average_loss"])
      3 print("Architecture:", ensemble_architecture(results))

<ipython-input-107-6138e4940fa0> in train_and_evaluate(experiment_name, learn_mixture_weights, adanet_lambda)
     59 
     60     eval_spec = tf.estimator.EvalSpec(input_fn=input_fn("test", training=False, batch_size=BATCH_SIZE),steps=None,)
---> 61     tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
     62 
     63     return estimator.evaluate(input_fn("test", training=False, batch_size=BATCH_SIZE),steps=None)

~\anaconda3\envs\tf-gpu\lib\site-packages\tensorflow_estimator\python\estimator\training.py in train_and_evaluate(estimator, train_spec, eval_spec)
    470         '(with task id 0).  Given task id {}'.format(config.task_id))
    471 
--> 472   return executor.run()
    473 
    474 

~\anaconda3\envs\tf-gpu\lib\site-packages\tensorflow_estimator\python\estimator\training.py in run(self)
    611       tf.compat.v1.logging.info(
    612           'Running training and evaluation locally (non-distributed).')
--> 613       return self.run_local()
    614 
    615     # Distributed case.

~\anaconda3\envs\tf-gpu\lib\site-packages\tensorflow_estimator\python\estimator\training.py in run_local(self)
    712         max_steps=self._train_spec.max_steps,
    713         hooks=train_hooks,
--> 714         saving_listeners=saving_listeners)
    715 
    716     eval_result = listener_for_eval.eval_result or _EvalResult(

~\anaconda3\envs\tf-gpu\lib\site-packages\adanet\core\estimator.py in train(self, input_fn, hooks, steps, max_steps, saving_listeners)
    898             hooks=hooks,
    899             max_steps=max_steps,
--> 900             saving_listeners=saving_listeners)
    901         # In TensorFlow v2.0.0.rc1 and below, saving listeners are attached to
    902         # the first CheckpointSaverHook each time train is called. Instead, we

~\anaconda3\envs\tf-gpu\lib\site-packages\tensorflow_estimator\python\estimator\estimator.py in train(self, input_fn, hooks, steps, max_steps, saving_listeners)
    347 
    348       saving_listeners = _check_listeners_type(saving_listeners)
--> 349       loss = self._train_model(input_fn, hooks, saving_listeners)
    350       logging.info('Loss for final step: %s.', loss)
    351       return self

~\anaconda3\envs\tf-gpu\lib\site-packages\tensorflow_estimator\python\estimator\estimator.py in _train_model(self, input_fn, hooks, saving_listeners)
   1180       return self._train_model_distributed(input_fn, hooks, saving_listeners)
   1181     else:
-> 1182       return self._train_model_default(input_fn, hooks, saving_listeners)
   1183 
   1184   def _train_model_default(self, input_fn, hooks, saving_listeners):

~\anaconda3\envs\tf-gpu\lib\site-packages\tensorflow_estimator\python\estimator\estimator.py in _train_model_default(self, input_fn, hooks, saving_listeners)
   1209       worker_hooks.extend(input_hooks)
   1210       estimator_spec = self._call_model_fn(features, labels, ModeKeys.TRAIN,
-> 1211                                            self.config)
   1212       global_step_tensor = tf.compat.v1.train.get_global_step(g)
   1213       return self._train_with_estimator_spec(estimator_spec, worker_hooks,

~\anaconda3\envs\tf-gpu\lib\site-packages\tensorflow_estimator\python\estimator\estimator.py in _call_model_fn(self, features, labels, mode, config)
   1168 
   1169     logging.info('Calling model_fn.')
-> 1170     model_fn_results = self._model_fn(features=features, **kwargs)
   1171     logging.info('Done calling model_fn.')
   1172 

~\anaconda3\envs\tf-gpu\lib\site-packages\adanet\core\estimator.py in _adanet_model_fn(***failed resolving arguments***)
   2198           checkpoint_path=path,
   2199           hooks=hooks,
-> 2200           best_ensemble_index_override=best_ensemble_index)
   2201 
   2202       # Variable which allows us to read the current iteration from a

~\anaconda3\envs\tf-gpu\lib\site-packages\adanet\core\estimator.py in _create_iteration(self, features, labels, mode, config, is_growing_phase, checkpoint_path, hooks, best_ensemble_index_override)
   2126           previous_ensemble_summary=previous_ensemble_summary,
   2127           best_ensemble_index_override=best_ensemble_index_override,
-> 2128           previous_iteration=previous_iteration)
   2129     return current_iteration, previous_iteration_vars
   2130 

~\anaconda3\envs\tf-gpu\lib\site-packages\adanet\core\iteration.py in build_iteration(self, base_global_step, iteration_number, ensemble_candidates, subnetwork_builders, features, mode, config, labels, previous_ensemble_summary, rebuilding, rebuilding_ensembler_name, best_ensemble_index_override, previous_iteration)
    650               labels=labels,
    651               previous_ensemble=previous_ensemble,
--> 652               config=config)
    653           subnetwork_specs.append(subnetwork_spec)
    654           # Workers that don't build ensembles need a dummy candidate in order

~\anaconda3\envs\tf-gpu\lib\site-packages\adanet\core\ensemble_builder.py in build_subnetwork_spec(self, name, subnetwork_builder, summary, features, mode, labels, previous_ensemble, config)
    755 
    756       estimator_spec = _create_estimator_spec(self._head, features, labels,
--> 757                                               mode, subnetwork.logits,
    758                                               self._use_tpu)
    759 

AttributeError: 'NoneType' object has no attribute 'logits'
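
Reading the last frame above: adanet's ensemble_builder passes subnetwork.logits into _create_estimator_spec, so the subnetwork object itself must be None at that point, i.e. build_subnetwork returned None for one of the candidates. The failure mode itself is just:

    subnetwork = None   # what a build_subnetwork that falls off the end effectively returns
    subnetwork.logits   # AttributeError: 'NoneType' object has no attribute 'logits'

which is consistent with the guess I noted after the full code above.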