'''Deep Dreaming in Keras.
Run the script with:
```
python deep_dream.py path_to_your_base_image.jpg prefix_for_results
```
e.g.:
```
python deep_dream.py img/mypic.jpg results/dream
```
It is preferable to run this script on GPU, for speed.
If running on CPU, prefer the TensorFlow backend (much faster).
Example results: http://i.imgur.com/FX6ROg9.jpg
'''
from __future__ import print_function

import argparse
import time

import numpy as np
from scipy.misc import imsave
from scipy.optimize import fmin_l_bfgs_b

from keras import backend as K
from keras.applications import vgg16
from keras.layers import Input
from keras.preprocessing.image import load_img, img_to_array

parser = argparse.ArgumentParser(description='Deep Dreams with Keras.')
parser.add_argument('base_image_path', metavar='base', type=str,
                    help='Path to the image to transform.')
parser.add_argument('result_prefix', metavar='res_prefix', type=str,
                    help='Prefix for the saved results.')
args = parser.parse_args()

base_image_path = args.base_image_path
result_prefix = args.result_prefix
# dimensions of the generated picture.
img_height = 600
img_width = 600
# some settings we found interesting
saved_settings = {
    'bad_trip': {'features': {'block4_conv1': 0.05,
                              'block4_conv2': 0.01,
                              'block4_conv3': 0.01},
                 'continuity': 0.1,
                 'dream_l2': 0.8,
                 'jitter': 5},
    'dreamy': {'features': {'block5_conv1': 0.05,
                            'block5_conv2': 0.02},
               'continuity': 0.1,
               'dream_l2': 0.02,
               'jitter': 0},
}
# the settings we will use in this experiment
settings = saved_settings['dreamy']
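# For example, to try the more aggressive preset instead, you could use:
# settings = saved_settings['bad_trip']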


def preprocess_image(image_path):
    # util function to open, resize and format pictures
    # into appropriate tensors
    img = load_img(image_path, target_size=(img_height, img_width))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    # vgg16.preprocess_input converts RGB to BGR and subtracts the
    # ImageNet mean pixel (inverted later in deprocess_image)
    img = vgg16.preprocess_input(img)
    return img


def deprocess_image(x):
    # util function to convert a tensor into a valid image
    if K.image_data_format() == 'channels_first':
        x = x.reshape((3, img_height, img_width))
        x = x.transpose((1, 2, 0))
    else:
        x = x.reshape((img_height, img_width, 3))
    # Remove zero-center by mean pixel
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    # 'BGR'->'RGB'
    x = x[:, :, ::-1]
    x = np.clip(x, 0, 255).astype('uint8')
    return x


if K.image_data_format() == 'channels_first':
    img_size = (3, img_height, img_width)
else:
    img_size = (img_height, img_width, 3)

# this will contain our generated image
dream = Input(batch_shape=(1,) + img_size)

# build the VGG16 network with our placeholder
# the model will be loaded with pre-trained ImageNet weights
model = vgg16.VGG16(input_tensor=dream,
                    weights='imagenet', include_top=False)
print('Model loaded.')
# get the symbolic outputs of each "key" layer (we gave them unique names).
layer_dict = dict([(layer.name, layer) for layer in model.layers])
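
# As an illustration, any VGG16 convolutional layer name (from
# 'block1_conv1' through 'block5_conv3') can be used as a key in
# settings['features'] above.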


def continuity_loss(x):
    # continuity loss util function
    assert K.ndim(x) == 4
    if K.image_data_format() == 'channels_first':
        a = K.square(x[:, :, :img_height - 1, :img_width - 1] -
                     x[:, :, 1:, :img_width - 1])
        b = K.square(x[:, :, :img_height - 1, :img_width - 1] -
                     x[:, :, :img_height - 1, 1:])
    else:
        a = K.square(x[:, :img_height - 1, :img_width - 1, :] -
                     x[:, 1:, :img_width - 1, :])
        b = K.square(x[:, :img_height - 1, :img_width - 1, :] -
                     x[:, :img_height - 1, 1:, :])
    return K.sum(K.pow(a + b, 1.25))
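
# Note: continuity_loss is a total-variation-style penalty. It sums the
# squared differences between neighboring pixels (horizontal and vertical),
# raised to the power 1.25, so minimizing it encourages locally smooth images.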

# define the loss
loss = K.variable(0.)
for layer_name in settings['features']:
    # add the L2 norm of the features of a layer to the loss
    assert layer_name in layer_dict.keys(), \
        'Layer ' + layer_name + ' not found in model.'
    coeff = settings['features'][layer_name]
    x = layer_dict[layer_name].output
    shape = layer_dict[layer_name].output_shape
    # we avoid border artifacts by only involving non-border pixels in the loss
    if K.image_data_format() == 'channels_first':
        loss -= coeff * K.sum(K.square(x[:, :, 2: shape[2] - 2, 2: shape[3] - 2])) / np.prod(shape[1:])
    else:
        loss -= coeff * K.sum(K.square(x[:, 2: shape[1] - 2, 2: shape[2] - 2, :])) / np.prod(shape[1:])

# add continuity loss (gives image local coherence, can result in an artful blur)
loss += settings['continuity'] * continuity_loss(dream) / np.prod(img_size)
# add image L2 norm to loss (prevents pixels from taking very high values, makes image darker)
loss += settings['dream_l2'] * K.sum(K.square(dream)) / np.prod(img_size)
# feel free to further modify the loss as you see fit, to achieve new effects...
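# Note that the feature terms enter the loss with a negative sign, so
# minimizing the total loss maximizes the activations of the chosen
# layers; that is what produces the dream patterns.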

# compute the gradients of the loss with respect to the dream image
grads = K.gradients(loss, dream)

outputs = [loss]
if isinstance(grads, (list, tuple)):
    outputs += grads
else:
    outputs.append(grads)
f_outputs = K.function([dream], outputs)
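
# f_outputs evaluates the loss and its gradients in a single backend call,
# e.g. outs = f_outputs([x]) with x of shape (1,) + img_size returns
# [loss_value, gradient_array].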


def eval_loss_and_grads(x):
    x = x.reshape((1,) + img_size)
    outs = f_outputs([x])
    loss_value = outs[0]
    # flatten the gradients and cast to float64, the dtype expected by
    # scipy's L-BFGS routine
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values


class Evaluator(object):
    """Loss and gradients evaluator.

    This Evaluator class makes it possible
    to compute loss and gradients in one pass
    while retrieving them via two separate functions,
    "loss" and "grads". This is done because scipy.optimize
    requires separate functions for loss and gradients,
    but computing them separately would be inefficient.
    """

    def __init__(self):
        self.loss_value = None
        self.grad_values = None

    def loss(self, x):
        assert self.loss_value is None
        loss_value, grad_values = eval_loss_and_grads(x)
        self.loss_value = loss_value
        self.grad_values = grad_values
        return self.loss_value

    def grads(self, x):
        assert self.loss_value is not None
        grad_values = np.copy(self.grad_values)
        self.loss_value = None
        self.grad_values = None
        return grad_values
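

# Usage sketch: fmin_l_bfgs_b below first calls evaluator.loss(x), which
# computes and caches both quantities, then evaluator.grads(x), which
# returns the cached gradients and clears the cache for the next step.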
evaluator = Evaluator()
# Run scipy-based optimization (L-BFGS) over the pixels of the generated image
# so as to minimize the loss
x = preprocess_image(base_image_path)

for i in range(5):
    print('Start of iteration', i)
    start_time = time.time()

    # Add a random jitter to the initial image.
    # This will be reverted at decoding time
    random_jitter = (settings['jitter'] * 2) * (np.random.random(img_size) - 0.5)
    x += random_jitter

    # Run L-BFGS for at most 7 function evaluations
    x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(),
                                     fprime=evaluator.grads, maxfun=7)
    print('Current loss value:', min_val)

    # Decode the dream and save it
    x = x.reshape(img_size)
    x -= random_jitter
    img = deprocess_image(np.copy(x))
    fname = result_prefix + '_at_iteration_%d.png' % i
    imsave(fname, img)
    end_time = time.time()
    print('Image saved as', fname)
    print('Iteration %d completed in %ds' % (i, end_time - start_time))
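
# After the loop finishes, one PNG has been written per iteration, e.g.
# results/dream_at_iteration_0.png through results/dream_at_iteration_4.png
# when run with the example command from the docstring.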