forked from thoppe/DeepMDMA
-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy path render_activations.py
More file actions
128 lines (97 loc) · 3.3 KB
/
render_activations.py
File metadata and controls
128 lines (97 loc) · 3.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
"""Render activations from the Inception model
Usage:
render_activations.py [<channel> <k>] [options]
Options:
channel Specify a channel (default, all channels)
k Specify a color in the channel (default, all valid)
--n_training=<n> Number of training steps [default: 1024]
-o --output_image_size=<n> Square size of image [default: 600]
--model_size=<n>   Size of the CPPN model, don't change? [default: 200]
-h --help Show this screen.
"""
import numpy as np
import os
from tqdm import tqdm
from scipy.misc import imsave
import tensorflow as tf
from tensorflow.contrib import slim
from lucid.modelzoo import vision_models
from lucid.misc.io import show, save, load
from lucid.optvis import objectives
from lucid.optvis import render
from lucid.misc.tfutil import create_session
from lucid.optvis.param import cppn
def render_set(n, channel):
    """Train (or reload) a CPPN visualization for unit `n` of layer `channel`
    and write the rendered image to `save_image_dest`.

    Parameters
    ----------
    n : int
        Index of the unit within the channel.
    channel : str
        Name of the Inception layer, e.g. "mixed4a_3x3_pre_relu".

    Reads module-level globals: model, optimizer, size_n, training_steps,
    image_size, save_model_dest, save_image_dest.
    """
    print("Starting", channel, n)
    obj = objectives.channel(channel, n)

    # Add this to "sharpen" the image... too much and it gets crazy
    # obj += 0.001*objectives.total_variation()

    sess = create_session()
    try:
        # Placeholder so the same graph can be trained at size_n but
        # rendered later at the (usually larger) output image_size.
        t_size = tf.placeholder_with_default(size_n, [])
        f_model = os.path.join(save_model_dest, channel + f"_{n}.npy")

        T = render.make_vis_T(
            model, obj,
            param_f=lambda: cppn(t_size),
            transforms=[],
            optimizer=optimizer,
        )
        tf.global_variables_initializer().run()
        train_vars = sess.graph.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES)

        if not os.path.exists(f_model):
            # Train from scratch; we only need the side effect of vis_op,
            # the loss value was never used.
            for _ in tqdm(range(training_steps)):
                sess.run(T("vis_op"))
            # Save trained variables so future runs can skip training
            params = np.array(sess.run(train_vars), object)
            save(params, f_model)
        else:
            params = load(f_model)

        # Render the final image at full output resolution by feeding the
        # saved parameters back into the trainable variables.
        feed_dict = dict(zip(train_vars, params))
        feed_dict[t_size] = image_size
        images = T("input").eval(feed_dict)
        img = images[0]
    finally:
        # Always release the TF session, even when training/rendering
        # raises — the caller catches exceptions and keeps looping, so a
        # leaked session per failure would accumulate.
        sess.close()

    f_image = os.path.join(save_image_dest, channel + f"_{n}.jpg")
    imsave(f_image, img)
    print(f"Saved to {f_image}")
# ---- Script entry: parse CLI args, configure globals, render all targets ----
from docopt import docopt

dargs = docopt(__doc__)
print(f"Start {dargs}")

print("Loading model")
model = vision_models.InceptionV1()
model.load_graphdef()

# Hyperparameters taken from the command line (see module docstring)
size_n = int(dargs["--model_size"])
training_steps = int(float(dargs["--n_training"]))  # float() allows "1e3"-style values
image_size = int(dargs["--output_image_size"])
optimizer = tf.train.AdamOptimizer(0.005)
transforms = []

save_image_dest = "results/images"
save_model_dest = "results/models"

# Portable, shell-free directory creation (replaces os.system("mkdir -p ..."),
# which silently fails on platforms without a POSIX shell)
os.makedirs(save_model_dest, exist_ok=True)
os.makedirs(save_image_dest, exist_ok=True)

# Default: sweep a fixed set of Inception layers; otherwise just the one given
if not dargs["<channel>"]:
    CHANNELS = [
        "mixed4a_3x3_pre_relu",
        "mixed4b_3x3_pre_relu",
        "mixed4c_3x3_pre_relu",
        "mixed4d_3x3_pre_relu",
        "mixed4e_3x3_pre_relu",
    ]
else:
    CHANNELS = [dargs["<channel>"], ]

# Default: try every unit index below 1024; otherwise just the one given
if not dargs["<k>"]:
    COLORSET = range(2 ** 10)
else:
    COLORSET = [int(dargs["<k>"]), ]

for channel in CHANNELS:
    for n in COLORSET:
        f_image = os.path.join(save_image_dest, channel + f"_{n}.jpg")
        if os.path.exists(f_image):
            continue  # already rendered on a previous run; skip
        print("Starting", f_image)
        try:
            render_set(n, channel)
        except Exception as EX:
            # A unit index past the channel width raises; report and move on
            # to the next channel (break leaves only the inner loop).
            print("EXCEPTION", channel, EX)
            break