Skip to content

Commit

Permalink
drop old models
Browse files Browse the repository at this point in the history
  • Loading branch information
GreenWizard2015 committed Apr 6, 2024
1 parent 0d2d03e commit 06c25bf
Show file tree
Hide file tree
Showing 4 changed files with 23 additions and 185 deletions.
3 changes: 1 addition & 2 deletions NN/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,9 +49,8 @@ def _nerf_from_config(config):
trainingLoss=_makeTrainingLoss(config.get('training loss', None)),
residual=config.get('residual', False),
extraLatents=config.get('extra latents', None),
format=config['format']
)
# If format is not specified, use BGR, because old models were trained to predict BGR
nerfParams['format'] = config.get('format', 'bgr')

return lambda encoder, renderer: CNerf2D(
encoder=encoder,
Expand Down
151 changes: 0 additions & 151 deletions NN/encoding/CCoordsEncodingLayer.py

This file was deleted.

18 changes: 0 additions & 18 deletions NN/encoding/__init__.py
Original file line number Diff line number Diff line change
@@ -1,22 +1,8 @@
import tensorflow as tf
from .CCoordsEncodingLayer import CCoordsEncodingLayerV1
from .CCoordsEncodingLayerV2 import CCoordsEncodingLayerV2 as CCoordsEncodingLayer
from .CCoordsGridLayer import CCoordsGridLayer
from .CFixedSinCosEncoding import CFixedSinCosEncoding

# Old and incorrect implementation of the encoding layer
# Legacy (known-incorrect) coordinate encoding layer, kept only so that
# old checkpoints trained against the V1 encoder can still be loaded.
class CFlatCoordsEncodingLayer_OLD(tf.keras.layers.Layer):
  def __init__(self, N=32, **kwargs):
    """Wrap the V1 positional encoder with N frequency components."""
    super().__init__(**kwargs)
    self._enc = CCoordsEncodingLayerV1(N)
    return

  def call(self, x):
    """Encode a flat batch of scalar coordinates.

    Expects `x` of shape (B,); returns the V1 encoding of each scalar,
    with the singleton coordinate axis squeezed back out.
    """
    batchSize = tf.shape(x)[0]
    # guard: input must be rank-1, i.e. exactly one scalar per batch element
    tf.assert_equal(tf.shape(x)[:-1], (batchSize, ))
    coords = tf.cast(x, tf.float32)[..., None]
    encoded = self._enc(coords)
    return encoded[:, 0]

# Correct implementation of the encoding layer
class CFlatCoordsEncodingLayer(tf.keras.layers.Layer):
def __init__(self, encoder, **kwargs):
Expand All @@ -38,12 +24,8 @@ def encoding_from_config(config):
if isinstance(config, dict):
name = config['name']
params = { k: v for k, v in config.items() if k != 'name' }
if 'learned' == name: return CFlatCoordsEncodingLayer_OLD(**params)
if 'fixed' == name: return CFixedSinCosEncoding(**params)

if 'learned v2' == name: return CFlatCoordsEncodingLayer(
encoder=CCoordsEncodingLayerV1(**params)
)
if 'learned v3' == name: return CFlatCoordsEncodingLayer(
encoder=CCoordsEncodingLayer(**params)
)
Expand Down
36 changes: 22 additions & 14 deletions train.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,28 @@
from Utils.utils import setupGPU, load_config, setGPUMemoryLimit
from Utils.utils import setupGPU, load_config, setGPUMemoryLimit, upgrade_configs_structure
setupGPU() # call it on startup to prevent OOM errors on my machine

import argparse, os, shutil, json
import tensorflow as tf
from NN import model_from_config, model_to_architecture
from Utils import dataset_from_config

def validateLayersNames(model):
  """Assert that every trainable variable of `model` has a unique name.

  Duplicate variable names usually mean layers were constructed with
  clashing names, which silently breaks checkpoint save/restore.
  Prints each offending name before failing.

  Args:
    model: an object exposing `trainable_variables`, each item having a
      `.name` attribute (e.g. a Keras model).

  Raises:
    AssertionError: if at least one name occurs more than once.
  """
  duplicates = []
  seen = set()
  for variable in model.trainable_variables:
    # every repeated occurrence (2nd, 3rd, ...) is reported once
    if variable.name in seen:
      duplicates.append(variable.name)
    seen.add(variable.name)
  for name in duplicates:
    print(f"Layer name '{name}' is not unique")
  assert not duplicates, "Model contains not unique layers names"
  return

def main(args):
folder = os.path.dirname(__file__)
config = load_config(args.config, folder=folder)

assert "experiment" in config, "Config must contain 'experiment' key"
# store args as part of config
config['experiment']['command line arguments'] = vars(args)
Expand Down Expand Up @@ -37,17 +51,7 @@ def main(args):
# Create model
model = model_from_config(config["model"], compile=True)
model.summary(expand_nested=True)
# check that the model contains only uniquely named layers
not_unique_layers = []
layers_names = set()
for layer in model.trainable_variables:
if layer.name in layers_names:
not_unique_layers.append(layer.name)
layers_names.add(layer.name)
continue
for layer in not_unique_layers:
print(f"Layer name '{layer}' is not unique")
assert not not_unique_layers, "Model contains not unique layers names"
validateLayersNames(model)
# save to config model architecture and number of parameters
config['architecture'] = model_to_architecture(model)

Expand Down Expand Up @@ -82,10 +86,13 @@ def main(args):
),
tf.keras.callbacks.TerminateOnNaN(),
]

if args.wandb: # init wandb
if args.wandb:
import wandb

wandb.init(project=args.wandb, entity=args.wandb_entity, config=config)
# assign run name if specified
if args.wandb_name: wandb.run.name = args.wandb_name
# track model metrics only
callbacks.append(wandb.keras.WandbCallback(
save_model=False, # save model to wandb manually
Expand Down Expand Up @@ -130,6 +137,7 @@ def main(args):

parser.add_argument('--wandb', type=str, help='Wandb project name (optional)')
parser.add_argument('--wandb-entity', type=str, help='Wandb entity name (optional)')
parser.add_argument('--wandb-name', type=str, help='Wandb run name (optional)')

args = parser.parse_args()
if args.gpu_memory_mb: setGPUMemoryLimit(args.gpu_memory_mb)
Expand Down

0 comments on commit 06c25bf

Please sign in to comment.