
Commit cf51300: update readme
1 parent: 7433735

File tree: 3 files changed, +43 -41 lines


README.md
Lines changed: 3 additions & 3 deletions

@@ -14,22 +14,22 @@ TensorFlow Implementation of ["Photo-Realistic Single Image Super-Resolution Usi
 </a>
 
 
-#### Results
+### Results
 
 <a href="http://tensorlayer.readthedocs.io">
 <div align="center">
 	<img src="img/SRGAN_Result2.png" width="80%" height="50%"/>
 </div>
 </a>
 
-#### Prepare Data and Pre-trained VGG
+### Prepare Data and Pre-trained VGG
 - In this experiment, we used images from [DIV2K - bicubic downscaling x4 competition](http://www.vision.ee.ethz.ch/ntire17/), you can also use your own data by setting your image folder in `config.py`
 
 - Download VGG model as [tutorial_vgg16.py](https://github.com/zsdonghao/tensorlayer/blob/master/example/tutorial_vgg16.py) show.
 
 
 
-#### Run
+### Run
 - Set your image folder in `config.py`.
 
 ```python
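Note: the README's "set your image folder in `config.py`" step amounts to editing the path fields at the top of that file. A minimal sketch of what this can look like, assuming DIV2K-style folders; the `hr_img_path`/`lr_img_path` field names here are illustrative, so check the actual `config.py` in your checkout:

```python
# Hypothetical excerpt of config.py; the path field names are assumptions.
from easydict import EasyDict as edict

config = edict()
config.TRAIN = edict()

# Point these folders at your own dataset to train on custom images.
config.TRAIN.hr_img_path = 'data2017/DIV2K_train_HR/'             # high-resolution targets
config.TRAIN.lr_img_path = 'data2017/DIV2K_train_LR_bicubic/X4/'  # bicubic x4 inputs
```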

config.py
Lines changed: 1 addition & 1 deletion

@@ -15,7 +15,7 @@
 # config.TRAIN.decay_every_init = int(config.TRAIN.n_epoch_init / 2)
 
 ## adversarial learning (SRGAN)
-config.TRAIN.n_epoch = 1000
+config.TRAIN.n_epoch = 2000
 config.TRAIN.lr_decay = 0.1
 config.TRAIN.decay_every = int(config.TRAIN.n_epoch / 2)
 
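The reason `decay_every` is derived from `n_epoch` rather than hard-coded: doubling the epoch count from 1000 to 2000 automatically moves the single 10x learning-rate drop to the new halfway point (epoch 1000). A minimal sketch of the resulting stepwise schedule; `lr_init` and the loop are illustrative, not part of this commit:

```python
n_epoch = 2000               # value after this commit (previously 1000)
lr_init = 1e-4               # illustrative initial learning rate
lr_decay = 0.1
decay_every = n_epoch // 2   # = 1000, so the rate drops once, at the halfway point

for epoch in (0, 999, 1000, 1999):
    # multiply by lr_decay once per completed decay_every interval
    lr = lr_init * (lr_decay ** (epoch // decay_every))
    print(epoch, lr)         # 0 and 999 keep 1e-4; 1000 and 1999 drop to ~1e-5
```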

model.py
Lines changed: 39 additions & 37 deletions

@@ -157,59 +157,61 @@ def SRGAN_d(input_images, is_train=True, reuse=False):
     b_init = None # tf.constant_initializer(value=0.0)
     gamma_init=tf.random_normal_initializer(1., 0.02)
     df_dim = 64
-    with tf.variable_scope("SRGAN_d2", reuse=reuse):
+    lrelu = lambda x: tl.act.lrelu(x, 0.2)
+    with tf.variable_scope("SRGAN_d", reuse=reuse):
         tl.layers.set_name_reuse(reuse)
-        net_in = InputLayer(input_images, name='d_input/images')
-        net_h0 = Conv2d(net_in, df_dim, (4, 4), (2, 2), act=lambda x: tl.act.lrelu(x, 0.2),
-                padding='SAME', W_init=w_init, name='d_h0/conv2d')
+        net_in = InputLayer(input_images, name='input/images')
+        net_h0 = Conv2d(net_in, df_dim, (4, 4), (2, 2), act=lrelu,
+                padding='SAME', W_init=w_init, name='h0/c')
 
         net_h1 = Conv2d(net_h0, df_dim*2, (4, 4), (2, 2), act=None,
-                padding='SAME', W_init=w_init, b_init=b_init, name='d_h1/conv2d')
-        net_h1 = BatchNormLayer(net_h1, act=lambda x: tl.act.lrelu(x, 0.2),
-                is_train=is_train, gamma_init=gamma_init, name='d_h1/batchnorm')
+                padding='SAME', W_init=w_init, b_init=b_init, name='h1/c')
+        net_h1 = BatchNormLayer(net_h1, act=lrelu, is_train=is_train,
+                gamma_init=gamma_init, name='h1/bn')
         net_h2 = Conv2d(net_h1, df_dim*4, (4, 4), (2, 2), act=None,
-                padding='SAME', W_init=w_init, b_init=b_init, name='d_h2/conv2d')
-        net_h2 = BatchNormLayer(net_h2, act=lambda x: tl.act.lrelu(x, 0.2),
-                is_train=is_train, gamma_init=gamma_init, name='d_h2/batchnorm')
+                padding='SAME', W_init=w_init, b_init=b_init, name='h2/c')
+        net_h2 = BatchNormLayer(net_h2, act=lrelu, is_train=is_train,
+                gamma_init=gamma_init, name='h2/bn')
         net_h3 = Conv2d(net_h2, df_dim*8, (4, 4), (2, 2), act=None,
-                padding='SAME', W_init=w_init, b_init=b_init, name='d_h3/conv2d')
-        net_h3 = BatchNormLayer(net_h3, act=lambda x: tl.act.lrelu(x, 0.2),
-                is_train=is_train, gamma_init=gamma_init, name='d_h3/batchnorm')
+                padding='SAME', W_init=w_init, b_init=b_init, name='h3/c')
+        net_h3 = BatchNormLayer(net_h3, act=lrelu, is_train=is_train,
+                gamma_init=gamma_init, name='h3/bn')
         net_h4 = Conv2d(net_h3, df_dim*16, (4, 4), (2, 2), act=None,
-                padding='SAME', W_init=w_init, b_init=b_init, name='d_h4/conv2d')
-        net_h4 = BatchNormLayer(net_h4, act=lambda x: tl.act.lrelu(x, 0.2),
-                is_train=is_train, gamma_init=gamma_init, name='d_h4/batchnorm')
+                padding='SAME', W_init=w_init, b_init=b_init, name='h4/c')
+        net_h4 = BatchNormLayer(net_h4, act=lrelu, is_train=is_train,
+                gamma_init=gamma_init, name='h4/bn')
         net_h5 = Conv2d(net_h4, df_dim*32, (4, 4), (2, 2), act=None,
-                padding='SAME', W_init=w_init, b_init=b_init, name='d_h5/conv2d')
-        net_h5 = BatchNormLayer(net_h5, act=lambda x: tl.act.lrelu(x, 0.2),
-                is_train=is_train, gamma_init=gamma_init, name='d_h5/batchnorm')
+                padding='SAME', W_init=w_init, b_init=b_init, name='h5/c')
+        net_h5 = BatchNormLayer(net_h5, act=lrelu, is_train=is_train,
+                gamma_init=gamma_init, name='h5/bn')
         net_h6 = Conv2d(net_h5, df_dim*16, (1, 1), (1, 1), act=None,
-                padding='SAME', W_init=w_init, b_init=b_init, name='d_h6/conv2d')
-        net_h6 = BatchNormLayer(net_h6, act=lambda x: tl.act.lrelu(x, 0.2),
-                is_train=is_train, gamma_init=gamma_init, name='d_h6/batchnorm')
+                padding='SAME', W_init=w_init, b_init=b_init, name='h6/c')
+        net_h6 = BatchNormLayer(net_h6, act=lrelu, is_train=is_train,
+                gamma_init=gamma_init, name='h6/bn')
         net_h7 = Conv2d(net_h6, df_dim*8, (1, 1), (1, 1), act=None,
-                padding='SAME', W_init=w_init, b_init=b_init, name='d_h7/conv2d')
-        net_h7 = BatchNormLayer(net_h7, #act=lambda x: tl.act.lrelu(x, 0.2),
-                is_train=is_train, gamma_init=gamma_init, name='d_h7/batchnorm')
+                padding='SAME', W_init=w_init, b_init=b_init, name='h7/c')
+        net_h7 = BatchNormLayer(net_h7, is_train=is_train,
+                gamma_init=gamma_init, name='h7/bn')
 
         net = Conv2d(net_h7, df_dim*2, (1, 1), (1, 1), act=None,
-                padding='SAME', W_init=w_init, b_init=b_init, name='d_h8_res/conv2d')
-        net = BatchNormLayer(net, act=lambda x: tl.act.lrelu(x, 0.2),
-                is_train=is_train, gamma_init=gamma_init, name='d_h8_res/batchnorm')
+                padding='SAME', W_init=w_init, b_init=b_init, name='res/c')
+        net = BatchNormLayer(net, act=lrelu, is_train=is_train,
+                gamma_init=gamma_init, name='res/bn')
         net = Conv2d(net, df_dim*2, (3, 3), (1, 1), act=None,
-                padding='SAME', W_init=w_init, b_init=b_init, name='d_h8_res/conv2d2')
-        net = BatchNormLayer(net, act=lambda x: tl.act.lrelu(x, 0.2),
-                is_train=is_train, gamma_init=gamma_init, name='d_h8_res/batchnorm2')
+                padding='SAME', W_init=w_init, b_init=b_init, name='res/c2')
+        net = BatchNormLayer(net, act=lrelu, is_train=is_train,
+                gamma_init=gamma_init, name='res/bn2')
         net = Conv2d(net, df_dim*8, (3, 3), (1, 1), act=None,
-                padding='SAME', W_init=w_init, b_init=b_init, name='d_h8_res/conv2d3')
-        net = BatchNormLayer(net, #act=lambda x: tl.act.lrelu(x, 0.2),
-                is_train=is_train, gamma_init=gamma_init, name='d_h8_res/batchnorm3')
-        net_h8 = ElementwiseLayer(layer=[net_h7, net], combine_fn=tf.add, name='d_h8/add')
+                padding='SAME', W_init=w_init, b_init=b_init, name='res/c3')
+        net = BatchNormLayer(net, is_train=is_train,
+                gamma_init=gamma_init, name='res/bn3')
+        net_h8 = ElementwiseLayer(layer=[net_h7, net],
+                combine_fn=tf.add, name='res/add')
         net_h8.outputs = tl.act.lrelu(net_h8.outputs, 0.2)
 
-        net_ho = FlattenLayer(net_h8, name='d_ho/flatten')
+        net_ho = FlattenLayer(net_h8, name='ho/flatten')
         net_ho = DenseLayer(net_ho, n_units=1, act=tf.identity,
-                W_init = w_init, name='d_ho/dense')
+                W_init = w_init, name='ho/dense')
         logits = net_ho.outputs
         net_ho.outputs = tf.nn.sigmoid(net_ho.outputs)
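For context on the `reuse` flag and the scope rename from "SRGAN_d2" to "SRGAN_d" (which keeps variable names consistent with the function name and any saved checkpoints): a GAN discriminator is typically built twice with shared weights, once on real images and once on generated ones. A hedged usage sketch, assuming the function ends with `return net_ho, logits` as the visible tail suggests; the placeholder names and shapes below are illustrative, not from this commit:

```python
import tensorflow as tf

# Illustrative placeholders; names and shapes are assumptions.
t_target_image = tf.placeholder('float32', [None, 384, 384, 3], name='t_target_image')
t_fake_image = tf.placeholder('float32', [None, 384, 384, 3], name='t_fake_image')

# First call creates the discriminator variables under the "SRGAN_d" scope.
net_d, logits_real = SRGAN_d(t_target_image, is_train=True, reuse=False)

# Second call shares those variables: reuse=True is passed both to
# tf.variable_scope and to tl.layers.set_name_reuse inside the function.
_, logits_fake = SRGAN_d(t_fake_image, is_train=True, reuse=True)
```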
