3 | 3 |
4 | 4 | import tensorflow as tf |
5 | 5 |
6 | | -def identity(x, name=None): |
7 | | - """The identity activation function, Shortcut is ``linear``. |
| 6 | + |
| 7 | +def identity(x): |
| 8 | + """The identity activation function. |
| 9 | + Shortcut is ``linear``. |
8 | 10 |
9 | 11 | Parameters |
10 | 12 | ---------- |
11 | | - x : a tensor input |
12 | | - input(s) |
| 13 | + x : Tensor |
| 14 | + input. |
13 | 15 |
14 | 16 | Returns |
15 | | - -------- |
16 | | - A `Tensor` with the same type as `x`. |
| 17 | + ------- |
| 18 | + Tensor |
| 19 | +        A ``Tensor`` with the same type as ``x``. |
| 20 | +
17 | 21 | """ |
18 | 22 | return x |
19 | 23 |
20 | | -# Shortcut |
21 | | -linear = identity |
22 | 24 |
23 | | -def ramp(x=None, v_min=0, v_max=1, name=None): |
| 25 | +def ramp(x, v_min=0, v_max=1, name=None): |
24 | 26 | """The ramp activation function. |
25 | 27 |
26 | 28 | Parameters |
27 | 29 | ---------- |
28 | | - x : a tensor input |
29 | | - input(s) |
| 30 | + x : Tensor |
| 31 | + input. |
30 | 32 | v_min : float |
31 | | - if input(s) smaller than v_min, change inputs to v_min |
| 33 | + cap input to v_min as a lower bound. |
32 | 34 | v_max : float |
33 | | - if input(s) greater than v_max, change inputs to v_max |
34 | | - name : a string or None |
35 | | - An optional name to attach to this activation function. |
| 35 | +        cap input to v_max as an upper bound. |
| 36 | + name : str |
| 37 | + The function name (optional). |
36 | 38 |
37 | 39 | Returns |
38 | | - -------- |
39 | | - A `Tensor` with the same type as `x`. |
| 40 | + ------- |
| 41 | + Tensor |
| 42 | +        A ``Tensor`` with the same type as ``x``. |
| 43 | +
40 | 44 | """ |
41 | 45 | return tf.clip_by_value(x, clip_value_min=v_min, clip_value_max=v_max, name=name) |
42 | 46 |
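For context, a minimal usage sketch of ``ramp`` as defined above (illustrative, not part of the patch; it assumes TensorFlow 1.x session-style execution and that the function is reachable through the ``tl.act`` alias used in the docstring examples):

>>> import tensorflow as tf
>>> import tensorlayer as tl
>>> x = tf.constant([-2.0, 0.5, 3.0])
>>> y = tl.act.ramp(x, v_min=0.0, v_max=1.0)  # tf.clip_by_value caps values into [0, 1]
>>> with tf.Session() as sess:
...     print(sess.run(y))  # [0.  0.5 1. ]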
43 | | -def leaky_relu(x=None, alpha=0.1, name="lrelu"): |
| 47 | + |
| 48 | +def leaky_relu(x, alpha=0.1, name="lrelu"): |
44 | 49 |     """The LeakyReLU. Shortcut is ``lrelu``. |
45 | 50 |
46 | | - Modified version of ReLU, introducing a nonzero gradient for negative |
47 | | - input. |
| 51 | + Modified version of ReLU, introducing a nonzero gradient for negative input. |
48 | 52 |
49 | 53 | Parameters |
50 | 54 | ---------- |
51 | | - x : A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`, |
52 | | - `int16`, or `int8`. |
53 | | - alpha : `float`. slope. |
54 | | - name : a string or None |
55 | | - An optional name to attach to this activation function. |
| 55 | + x : Tensor |
| 56 | +        Supported input types: ``float``, ``double``, ``int32``, ``int64``, ``uint8``, |
| 57 | + ``int16``, or ``int8``. |
| 58 | + alpha : float |
| 59 | + Slope. |
| 60 | + name : str |
| 61 | + The function name (optional). |
56 | 62 |
57 | 63 | Examples |
58 | | - --------- |
59 | | - >>> network = tl.layers.DenseLayer(network, n_units=100, name = 'dense_lrelu', |
60 | | - ... act= lambda x : tl.act.lrelu(x, 0.2)) |
| 64 | + -------- |
| 65 | + >>> net = tl.layers.DenseLayer(net, 100, act=lambda x : tl.act.lrelu(x, 0.2), name='dense') |
| 66 | +
| 67 | + Returns |
| 68 | + ------- |
| 69 | + Tensor |
| 70 | +        A ``Tensor`` with the same type as ``x``. |
61 | 71 |
62 | 72 | References |
63 | 73 | ------------ |
64 | | - - `Rectifier Nonlinearities Improve Neural Network Acoustic Models, Maas et al. (2013) <http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf>`_ |
| 74 | + - `Rectifier Nonlinearities Improve Neural Network Acoustic Models, Maas et al. (2013) <http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf>`__ |
| 75 | +
65 | 76 | """ |
66 | 77 | # with tf.name_scope(name) as scope: |
67 | | - # x = tf.nn.relu(x) |
68 | | - # m_x = tf.nn.relu(-x) |
69 | | - # x -= alpha * m_x |
| 78 | + # x = tf.nn.relu(x) |
| 79 | + # m_x = tf.nn.relu(-x) |
| 80 | + # x -= alpha * m_x |
70 | 81 | x = tf.maximum(x, alpha * x, name=name) |
71 | 82 | return x |
72 | 83 |
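A quick numeric sketch of ``leaky_relu`` with a non-default slope (illustrative, not part of the patch; same ``tl.act`` and TF 1.x session assumptions as above):

>>> import tensorflow as tf
>>> import tensorlayer as tl
>>> x = tf.constant([-1.0, 0.0, 2.0])
>>> y = tl.act.leaky_relu(x, alpha=0.2)  # computed as tf.maximum(x, 0.2 * x)
>>> with tf.Session() as sess:
...     print(sess.run(y))  # [-0.2  0.   2. ]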
73 | | -#Shortcut |
74 | | -lrelu = leaky_relu |
75 | | - |
76 | 84 |
77 | 85 | def swish(x, name='swish'): |
78 | | - """The Swish function, see `Swish: a Self-Gated Activation Function <https://arxiv.org/abs/1710.05941>`_. |
| 86 | + """The Swish function. |
| 87 | + See `Swish: a Self-Gated Activation Function <https://arxiv.org/abs/1710.05941>`__. |
79 | 88 |
80 | 89 | Parameters |
81 | 90 | ---------- |
82 | | - x : a tensor input |
83 | | - input(s) |
| 91 | + x : Tensor |
| 92 | + input. |
| 93 | +    name : str |
| 94 | +        The function name (optional). |
84 | 95 |
85 | 96 | Returns |
86 | | - -------- |
87 | | - A `Tensor` with the same type as `x`. |
| 97 | + ------- |
| 98 | + Tensor |
| 99 | +        A ``Tensor`` with the same type as ``x``. |
| 100 | +
88 | 101 | """ |
89 | | - with tf.name_scope(name) as scope: |
90 | | - x = tf.nn.sigmoid(x) * x |
| 102 | + with tf.name_scope(name): |
| 103 | + x = tf.nn.sigmoid(x) * x |
91 | 104 | return x |
92 | 105 |
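A brief sketch of ``swish`` (illustrative, not part of the patch): the function computes x * sigmoid(x), so negative inputs are damped rather than zeroed.

>>> import tensorflow as tf
>>> import tensorlayer as tl
>>> x = tf.constant([-1.0, 0.0, 2.0])
>>> y = tl.act.swish(x)  # sigmoid(x) * x
>>> with tf.Session() as sess:
...     print(sess.run(y))  # approximately [-0.27  0.    1.76]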
93 | | -def pixel_wise_softmax(output, name='pixel_wise_softmax'): |
| 106 | + |
| 107 | +def pixel_wise_softmax(x, name='pixel_wise_softmax'): |
94 | 108 |     """Return the softmax outputs of images; every pixel has multiple labels, and each pixel's label probabilities sum to 1. |
95 | 109 |     Usually used for image segmentation. |
96 | 110 |
97 | 111 | Parameters |
98 | | - ------------ |
99 | | - output : tensor |
100 | | - - For 2d image, 4D tensor [batch_size, height, weight, channel], channel >= 2. |
101 | | - - For 3d image, 5D tensor [batch_size, depth, height, weight, channel], channel >= 2. |
| 112 | + ---------- |
| 113 | + x : Tensor |
| 114 | + input. |
| 115 | +        - For a 2D image, 4D tensor (batch_size, height, width, channel), where channel >= 2. |
| 116 | +        - For a 3D image, 5D tensor (batch_size, depth, height, width, channel), where channel >= 2. |
| 117 | + name : str |
| 118 | +        The function name (optional). |
| 119 | +
| 120 | + Returns |
| 121 | + ------- |
| 122 | + Tensor |
| 123 | +        A ``Tensor`` with the same type as ``x``. |
102 | 124 |
103 | 125 | Examples |
104 | | - --------- |
| 126 | + -------- |
105 | 127 | >>> outputs = pixel_wise_softmax(network.outputs) |
106 | 128 | >>> dice_loss = 1 - dice_coe(outputs, y_, epsilon=1e-5) |
107 | 129 |
108 | 130 | References |
109 | | - ----------- |
110 | | - - `tf.reverse <https://www.tensorflow.org/versions/master/api_docs/python/array_ops.html#reverse>`_ |
| 131 | + ---------- |
| 132 | + - `tf.reverse <https://www.tensorflow.org/versions/master/api_docs/python/array_ops.html#reverse>`__ |
| 133 | +
111 | 134 | """ |
112 | | - with tf.name_scope(name) as scope: |
113 | | - return tf.nn.softmax(output) |
114 | | - ## old implementation |
115 | | - # exp_map = tf.exp(output) |
116 | | - # if output.get_shape().ndims == 4: # 2d image |
117 | | - # evidence = tf.add(exp_map, tf.reverse(exp_map, [False, False, False, True])) |
118 | | - # elif output.get_shape().ndims == 5: # 3d image |
119 | | - # evidence = tf.add(exp_map, tf.reverse(exp_map, [False, False, False, False, True])) |
120 | | - # else: |
121 | | - # raise Exception("output parameters should be 2d or 3d image, not %s" % str(output._shape)) |
122 | | - # return tf.div(exp_map, evidence) |
| 135 | + with tf.name_scope(name): |
| 136 | + return tf.nn.softmax(x) |
| 137 | + |
| 138 | + |
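A minimal sketch of the updated ``pixel_wise_softmax`` (illustrative, not part of the patch): since it now wraps ``tf.nn.softmax``, normalisation runs over the last (channel) axis, so each pixel's class probabilities sum to 1.

>>> import tensorflow as tf
>>> import tensorlayer as tl
>>> logits = tf.random_normal([1, 32, 32, 2])      # (batch_size, height, width, channel)
>>> probs = tl.act.pixel_wise_softmax(logits)      # softmax over the channel axis
>>> per_pixel_sum = tf.reduce_sum(probs, axis=-1)  # ~1.0 for every pixel
>>> with tf.Session() as sess:
...     print(sess.run(per_pixel_sum).mean())      # ~1.0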
| 139 | +# Alias |
| 140 | +linear = identity |
| 141 | +lrelu = leaky_relu |