Small style fixes
fchollet committed Jul 26, 2015
1 parent 275f416 commit b64217c
Showing 21 changed files with 106 additions and 104 deletions.
20 changes: 10 additions & 10 deletions examples/cifar10_cnn.py
@@ -82,16 +82,16 @@
 
 # this will do preprocessing and realtime data augmentation
 datagen = ImageDataGenerator(
-    featurewise_center=True, # set input mean to 0 over the dataset
-    samplewise_center=False, # set each sample mean to 0
-    featurewise_std_normalization=True, # divide inputs by std of the dataset
-    samplewise_std_normalization=False, # divide each input by its std
-    zca_whitening=False, # apply ZCA whitening
-    rotation_range=20, # randomly rotate images in the range (degrees, 0 to 180)
-    width_shift_range=0.2, # randomly shift images horizontally (fraction of total width)
-    height_shift_range=0.2, # randomly shift images vertically (fraction of total height)
-    horizontal_flip=True, # randomly flip images
-    vertical_flip=False) # randomly flip images
+    featurewise_center=True,  # set input mean to 0 over the dataset
+    samplewise_center=False,  # set each sample mean to 0
+    featurewise_std_normalization=True,  # divide inputs by std of the dataset
+    samplewise_std_normalization=False,  # divide each input by its std
+    zca_whitening=False,  # apply ZCA whitening
+    rotation_range=20,  # randomly rotate images in the range (degrees, 0 to 180)
+    width_shift_range=0.2,  # randomly shift images horizontally (fraction of total width)
+    height_shift_range=0.2,  # randomly shift images vertically (fraction of total height)
+    horizontal_flip=True,  # randomly flip images
+    vertical_flip=False)  # randomly flip images
 
 # compute quantities required for featurewise normalization
 # (std, mean, and principal components if ZCA whitening is applied)
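Note: the trailing comment lines refer to `datagen.fit`. A minimal hedged sketch of how this generator is consumed in the era's API (`flow` yields one pass over the data; the arrays here are illustrative stand-ins, not the real CIFAR-10 data):

    import numpy as np
    from keras.preprocessing.image import ImageDataGenerator

    # illustrative stand-ins for the real arrays (channels-first 32x32 images)
    X_train = np.random.rand(128, 3, 32, 32).astype('float32')
    Y_train = np.random.randint(0, 2, (128, 10)).astype('float32')

    datagen = ImageDataGenerator(featurewise_center=True,
                                 featurewise_std_normalization=True)
    datagen.fit(X_train)  # computes the dataset mean/std used by the featurewise options
    for X_batch, Y_batch in datagen.flow(X_train, Y_train, batch_size=32):
        pass  # each batch arrives normalized (and augmented) on the fly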
6 changes: 3 additions & 3 deletions examples/imdb_lstm.py
@@ -1,7 +1,7 @@
 from __future__ import absolute_import
 from __future__ import print_function
 import numpy as np
-np.random.seed(1337) # for reproducibility
+np.random.seed(1337)  # for reproducibility
 
 from keras.preprocessing import sequence
 from keras.optimizers import SGD, RMSprop, Adagrad
@@ -32,7 +32,7 @@
 '''
 
 max_features = 20000
-maxlen = 100 # cut texts after this number of words (among top max_features most common words)
+maxlen = 100  # cut texts after this number of words (among top max_features most common words)
 batch_size = 32
 
 print("Loading data...")
@@ -49,7 +49,7 @@
 print('Build model...')
 model = Sequential()
 model.add(Embedding(max_features, 128))
-model.add(LSTM(128, 128)) # try using a GRU instead, for fun
+model.add(LSTM(128, 128))  # try using a GRU instead, for fun
 model.add(Dropout(0.5))
 model.add(Dense(128, 1))
 model.add(Activation('sigmoid'))
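Note: the inline comment suggests swapping the LSTM for a GRU. A minimal hedged sketch under the 0.x-era API shown in this hunk (recurrent layers take explicit input and output dims; `max_features` as defined in the script):

    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation
    from keras.layers.embeddings import Embedding
    from keras.layers.recurrent import GRU

    max_features = 20000  # as in the script above

    model = Sequential()
    model.add(Embedding(max_features, 128))
    model.add(GRU(128, 128))  # drop-in replacement for LSTM(128, 128)
    model.add(Dropout(0.5))
    model.add(Dense(128, 1))
    model.add(Activation('sigmoid'))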
4 changes: 2 additions & 2 deletions examples/kaggle_otto_nn.py
@@ -3,7 +3,7 @@
 
 import numpy as np
 import pandas as pd
-np.random.seed(1337) # for reproducibility
+np.random.seed(1337)  # for reproducibility
 
 from keras.models import Sequential
 from keras.layers.core import Dense, Dropout, Activation
@@ -41,7 +41,7 @@ def load_data(path, train=True):
     df = pd.read_csv(path)
     X = df.values.copy()
     if train:
-        np.random.shuffle(X) # https://youtu.be/uyUXoap67N8
+        np.random.shuffle(X)  # https://youtu.be/uyUXoap67N8
         X, labels = X[:, 1:-1].astype(np.float32), X[:, -1]
        return X, labels
    else:
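Note: several of these scripts open with `np.random.seed(1337)`. The point, as a quick self-contained illustration: fixing the seed makes downstream calls such as the `np.random.shuffle` above deterministic across runs:

    import numpy as np

    np.random.seed(1337)
    a = np.arange(5)
    np.random.shuffle(a)
    print(a)  # identical output on every run, because the seed is fixed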
2 changes: 1 addition & 1 deletion examples/mnist_cnn.py
@@ -1,7 +1,7 @@
 from __future__ import absolute_import
 from __future__ import print_function
 import numpy as np
-np.random.seed(1337) # for reproducibility
+np.random.seed(1337)  # for reproducibility
 
 from keras.datasets import mnist
 from keras.models import Sequential
2 changes: 1 addition & 1 deletion examples/mnist_irnn.py
@@ -1,7 +1,7 @@
 from __future__ import absolute_import
 from __future__ import print_function
 import numpy as np
-np.random.seed(1337) # for reproducibility
+np.random.seed(1337)  # for reproducibility
 
 from keras.datasets import mnist
 from keras.models import Sequential
2 changes: 1 addition & 1 deletion examples/mnist_mlp.py
@@ -1,7 +1,7 @@
 from __future__ import absolute_import
 from __future__ import print_function
 import numpy as np
-np.random.seed(1337) # for reproducibility
+np.random.seed(1337)  # for reproducibility
 
 from keras.datasets import mnist
 from keras.models import Sequential
2 changes: 1 addition & 1 deletion examples/reuters_mlp.py
@@ -1,7 +1,7 @@
 from __future__ import absolute_import
 from __future__ import print_function
 import numpy as np
-np.random.seed(1337) # for reproducibility
+np.random.seed(1337)  # for reproducibility
 
 from keras.datasets import reuters
 from keras.models import Sequential
40 changes: 20 additions & 20 deletions examples/skipgram_word_embeddings.py
@@ -43,10 +43,10 @@
 from six.moves import range
 from six.moves import zip
 
-max_features = 50000 # vocabulary size: top 50,000 most common words in data
-skip_top = 100 # ignore top 100 most common words
+max_features = 50000  # vocabulary size: top 50,000 most common words in data
+skip_top = 100  # ignore top 100 most common words
 nb_epoch = 1
-dim_proj = 256 # embedding space dimension
+dim_proj = 256  # embedding space dimension
 
 save = True
 load_model = False
@@ -196,23 +196,23 @@ def closest_to_word(w, nb_closest=10):
 '''
 
 words = [
-    "article", # post, story, hn, read, comments
-    "3", # 6, 4, 5, 2
-    "two", # three, few, several, each
-    "great", # love, nice, working, looking
-    "data", # information, memory, database
-    "money", # company, pay, customers, spend
-    "years", # ago, year, months, hours, week, days
-    "android", # ios, release, os, mobile, beta
-    "javascript", # js, css, compiler, library, jquery, ruby
-    "look", # looks, looking
-    "business", # industry, professional, customers
-    "company", # companies, startup, founders, startups
-    "after", # before, once, until
-    "own", # personal, our, having
-    "us", # united, country, american, tech, diversity, usa, china, sv
-    "using", # javascript, js, tools (lol)
-    "here", # hn, post, comments
+    "article",  # post, story, hn, read, comments
+    "3",  # 6, 4, 5, 2
+    "two",  # three, few, several, each
+    "great",  # love, nice, working, looking
+    "data",  # information, memory, database
+    "money",  # company, pay, customers, spend
+    "years",  # ago, year, months, hours, week, days
+    "android",  # ios, release, os, mobile, beta
+    "javascript",  # js, css, compiler, library, jquery, ruby
+    "look",  # looks, looking
+    "business",  # industry, professional, customers
+    "company",  # companies, startup, founders, startups
+    "after",  # before, once, until
+    "own",  # personal, our, having
+    "us",  # united, country, american, tech, diversity, usa, china, sv
+    "using",  # javascript, js, tools (lol)
+    "here",  # hn, post, comments
 ]
 
 for w in words:
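Note: the commented word list records nearest neighbors returned by `closest_to_word` (named in the hunk header). A hedged sketch of what such a lookup typically computes — cosine similarity against the learned embedding matrix; `weights`, `word_index`, and `reverse_word_index` are assumed names for illustration, not necessarily the script's variables:

    import numpy as np

    def closest_to_word(w, nb_closest=10):
        # assumed globals: weights (vocab_size x dim_proj embedding matrix),
        # word_index (word -> row), reverse_word_index (row -> word)
        v = weights[word_index[w]]                       # embedding of the query word
        norms = np.linalg.norm(weights, axis=1) * np.linalg.norm(v)
        sims = weights.dot(v) / (norms + 1e-8)           # cosine similarity to every word
        best = np.argsort(sims)[::-1][1:nb_closest + 1]  # skip the query word itself
        return [(reverse_word_index[i], float(sims[i])) for i in best]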
6 changes: 3 additions & 3 deletions keras/callbacks.py
@@ -106,8 +106,8 @@ def on_train_begin(self, logs={}):
     def on_epoch_begin(self, epoch, logs={}):
         if self.verbose:
             print('Epoch %d' % epoch)
-        self.progbar = Progbar(target=self.params['nb_sample'], \
-            verbose=self.verbose)
+        self.progbar = Progbar(target=self.params['nb_sample'],
+                               verbose=self.verbose)
         self.seen = 0
         self.totals = {}
 
@@ -193,7 +193,7 @@ def on_epoch_end(self, epoch, logs={}):
             if current < self.best:
                 if self.verbose > 0:
                     print("Epoch %05d: %s improved from %0.5f to %0.5f, saving model to %s"
-                        % (epoch, self.monitor, self.best, current, self.filepath))
+                          % (epoch, self.monitor, self.best, current, self.filepath))
                 self.best = current
                 self.model.save_weights(self.filepath, overwrite=True)
             else:
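Note: the second hunk sits inside the checkpoint callback's `on_epoch_end`, which saves weights whenever the monitored quantity improves. A hedged usage sketch (the filepath and the `model`/`X_train`/`Y_train` names are placeholders):

    from keras.callbacks import ModelCheckpoint

    checkpointer = ModelCheckpoint(filepath='/tmp/weights.hdf5', verbose=1,
                                   save_best_only=True)
    model.fit(X_train, Y_train, validation_split=0.1, callbacks=[checkpointer])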
5 changes: 3 additions & 2 deletions keras/datasets/cifar10.py
@@ -4,6 +4,7 @@
 import numpy as np
 import os
 
+
 def load_data():
     dirname = "cifar-10-batches-py"
     origin = "http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
@@ -20,11 +21,11 @@ def load_data():
         data, labels = load_batch(fpath)
         X_train[(i-1)*10000:i*10000, :, :, :] = data
         y_train[(i-1)*10000:i*10000] = labels
 
     fpath = os.path.join(path, 'test_batch')
     X_test, y_test = load_batch(fpath)
 
     y_train = np.reshape(y_train, (len(y_train), 1))
     y_test = np.reshape(y_test, (len(y_test), 1))
 
-    return (X_train, y_train), (X_test, y_test)
\ No newline at end of file
+    return (X_train, y_train), (X_test, y_test)
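A hedged usage sketch of the loader above (the shapes follow from the code: five 10,000-image training batches, channels-first images, labels reshaped to a column):

    from keras.datasets import cifar10

    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    print(X_train.shape, y_train.shape)  # expected: (50000, 3, 32, 32) (50000, 1)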
3 changes: 2 additions & 1 deletion keras/datasets/cifar100.py
@@ -4,6 +4,7 @@
 import numpy as np
 import os
 
+
 def load_data(label_mode='fine'):
     if label_mode not in ['fine', 'coarse']:
         raise Exception('label_mode must be one of "fine" "coarse".')
@@ -24,4 +25,4 @@ def load_data(label_mode='fine'):
     y_train = np.reshape(y_train, (len(y_train), 1))
     y_test = np.reshape(y_test, (len(y_test), 1))
 
-    return (X_train, y_train), (X_test, y_test)
\ No newline at end of file
+    return (X_train, y_train), (X_test, y_test)
6 changes: 3 additions & 3 deletions keras/datasets/imdb.py
@@ -6,8 +6,9 @@
 from six.moves import zip
 import numpy as np
 
-def load_data(path="imdb.pkl", nb_words=None, skip_top=0, maxlen=None, test_split=0.2, seed=113,
-        start_char=1, oov_char=2, index_from=3):
+
+def load_data(path="imdb.pkl", nb_words=None, skip_top=0, maxlen=None, test_split=0.2, seed=113,
+              start_char=1, oov_char=2, index_from=3):
 
     path = get_file(path, origin="https://s3.amazonaws.com/text-datasets/imdb.pkl")
 
@@ -63,4 +64,3 @@ def load_data(path="imdb.pkl", nb_words=None, skip_top=0, maxlen=None, test_split=0.2, seed=113,
     y_test = labels[int(len(X)*(1-test_split)):]
 
     return (X_train, y_train), (X_test, y_test)
-
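A hedged sketch of how this loader pairs with the imdb_lstm.py example above (vocabulary capped via nb_words, then sequences padded to the maxlen used there):

    from keras.datasets import imdb
    from keras.preprocessing import sequence

    (X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=20000, test_split=0.2)
    X_train = sequence.pad_sequences(X_train, maxlen=100)  # truncate/pad to 100 words
    X_test = sequence.pad_sequences(X_test, maxlen=100)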
3 changes: 2 additions & 1 deletion keras/datasets/mnist.py
@@ -4,6 +4,7 @@
 import six.moves.cPickle
 import sys
 
+
 def load_data(path="mnist.pkl.gz"):
     path = get_file(path, origin="https://s3.amazonaws.com/img-datasets/mnist.pkl.gz")
 
@@ -19,4 +20,4 @@ def load_data(path="mnist.pkl.gz"):
 
     f.close()
 
-    return data # (X_train, y_train), (X_test, y_test)
\ No newline at end of file
+    return data  # (X_train, y_train), (X_test, y_test)
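Per the trailing comment, `data` unpacks into two tuples; a minimal hedged usage sketch:

    from keras.datasets import mnist

    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    print(X_train.shape, y_train.shape)  # e.g. (60000, 28, 28) (60000,)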
11 changes: 5 additions & 6 deletions keras/datasets/reuters.py
@@ -9,6 +9,7 @@
 from six.moves import zip
 import numpy as np
 
+
 def make_reuters_dataset(path=os.path.join('datasets', 'temp', 'reuters21578'), min_samples_per_topic=15):
     import re
     from ..preprocessing.text import Tokenizer
@@ -24,8 +25,7 @@ def make_reuters_dataset(path=os.path.join('datasets', 'temp', 'reuters21578'), min_samples_per_topic=15):
         while tag in s:
             s = s[s.find(tag)+len(tag):]
             topics = s[:s.find('</')]
-
-            if topics and not '</D><D>' in topics:
+            if topics and '</D><D>' not in topics:
                 topic = topics.replace('<D>', '').replace('</D>', '')
                 wire_topics.append(topic)
                 topic_counts[topic] = topic_counts.get(topic, 0) + 1
@@ -39,7 +39,7 @@ def make_reuters_dataset(path=os.path.join('datasets', 'temp', 'reuters21578'), min_samples_per_topic=15):
 
     # only keep most common topics
     items = list(topic_counts.items())
-    items.sort(key = lambda x: x[1])
+    items.sort(key=lambda x: x[1])
     kept_topics = set()
     for x in items:
         print(x[0] + ': ' + str(x[1]))
@@ -75,16 +75,15 @@ def make_reuters_dataset(path=os.path.join('datasets', 'temp', 'reuters21578'), min_samples_per_topic=15):
     reverse_word_index = dict([(v, k) for k, v in tokenizer.word_index.items()])
     print(' '.join(reverse_word_index[i] for i in X[10]))
 
-    dataset = (X, labels) 
+    dataset = (X, labels)
     print('-')
     print('Saving...')
     six.moves.cPickle.dump(dataset, open(os.path.join('datasets', 'data', 'reuters.pkl'), 'w'))
     six.moves.cPickle.dump(tokenizer.word_index, open(os.path.join('datasets', 'data', 'reuters_word_index.pkl'), 'w'))
 
 
-
 def load_data(path="reuters.pkl", nb_words=None, skip_top=0, maxlen=None, test_split=0.2, seed=113,
-        start_char=1, oov_char=2, index_from=3):
+              start_char=1, oov_char=2, index_from=3):
 
     path = get_file(path, origin="https://s3.amazonaws.com/text-datasets/reuters.pkl")
     f = open(path, 'rb')
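A hedged note on the loader's less obvious parameters, with a usage sketch (defaults as in the signature above; the comments state the conventional meaning of these arguments):

    from keras.datasets import reuters

    # start_char=1 marks the beginning of each sequence, oov_char=2 stands in for
    # words filtered out by nb_words/skip_top, and real word indices start at
    # index_from=3.
    (X_train, y_train), (X_test, y_test) = reuters.load_data(nb_words=10000)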
18 changes: 9 additions & 9 deletions keras/layers/containers.py
@@ -102,15 +102,15 @@ class Graph(Layer):
         - set_weights
     '''
     def __init__(self):
-        self.namespace = set() # strings
-        self.nodes = {} # layer-like
-        self.inputs = {} # layer-like
-        self.input_order = [] # strings
-        self.outputs = {} # layer-like
-        self.output_order = [] # strings
-        self.input_config = [] # dicts
-        self.output_config = [] # dicts
-        self.node_config = [] # dicts
+        self.namespace = set()  # strings
+        self.nodes = {}  # layer-like
+        self.inputs = {}  # layer-like
+        self.input_order = []  # strings
+        self.outputs = {}  # layer-like
+        self.output_order = []  # strings
+        self.input_config = []  # dicts
+        self.output_config = []  # dicts
+        self.node_config = []  # dicts
 
         self.params = []
         self.regularizers = []
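For orientation, a hedged sketch of the era's Graph API that populates these attributes (signatures taken from the 0.x documentation of the time and may differ slightly at this exact commit; `keras.models.Graph` wraps this container):

    from keras.models import Graph
    from keras.layers.core import Dense

    graph = Graph()
    graph.add_input(name='input', ndim=2)            # fills inputs / input_order / input_config
    graph.add_node(Dense(32, 16), name='dense1', input='input')  # fills nodes / node_config
    graph.add_output(name='output', input='dense1')  # fills outputs / output_order / output_config
    graph.compile('rmsprop', {'output': 'mse'})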
(Diffs for the remaining 6 of the 21 changed files were not loaded on this page.)
