Computer Vision News - February 2018
# V4 in TF-SLIM: To illustrate the efficiency of TF-Slim, let's look at the
# code of inception block 35 in the snippet below. See how short the
# definition of an entire deep complex network like GoogLeNet Inception v4
# becomes in TF-Slim.
# (Computer Vision News -- Tool We Tried for You: TensorFlow-Slim)


def vgg16(inputs):
    """Build the VGG-16 network with TF-Slim layers.

    Args:
        inputs: input image tensor (presumably NHWC, as expected by
            slim.conv2d -- confirm against the caller).

    Returns:
        The 1000-way logits tensor from layer 'fc8' (no final activation).
    """
    # arg_scope applies ReLU + L2 weight decay to every conv/fc layer below,
    # so the per-layer calls stay short.
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        weights_regularizer=slim.l2_regularizer(0.0005)):
        # slim.repeat stacks N identical conv layers under one scope.
        net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
        net = slim.max_pool2d(net, [2, 2], scope='pool1')
        net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
        net = slim.max_pool2d(net, [2, 2], scope='pool2')
        net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
        net = slim.max_pool2d(net, [2, 2], scope='pool3')
        net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
        net = slim.max_pool2d(net, [2, 2], scope='pool4')
        net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
        net = slim.max_pool2d(net, [2, 2], scope='pool5')
        net = slim.fully_connected(net, 4096, scope='fc6')
        net = slim.dropout(net, 0.5, scope='dropout6')
        net = slim.fully_connected(net, 4096, scope='fc7')
        net = slim.dropout(net, 0.5, scope='dropout7')
        # Final classifier layer: raw logits, hence activation_fn=None.
        net = slim.fully_connected(net, 1000, activation_fn=None, scope='fc8')
    return net


def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 35x35 resnet block.

    Args:
        net: input feature tensor; channel count is read from axis 3, so
            NHWC layout is assumed.
        scale: residual scaling factor applied before the skip addition.
        activation_fn: activation applied after the residual add; pass a
            falsy value to skip it.
        scope: optional variable scope name (defaults to 'Block35').
        reuse: whether to reuse variables in the scope.

    Returns:
        The block's output tensor (same channel count as the input).
    """
    with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
        # NOTE: the published listing had a broken identifier here
        # ('tf.variable_ scope'); fixed to tf.variable_scope.
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3,
                                        scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
            tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
            tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3,
                                        scope='Conv2d_0b_3x3')
            tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3,
                                        scope='Conv2d_0c_3x3')
        # Concatenate the three branches along the channel axis, then project
        # back to the input's channel count so the residual add is valid.
        mixed = tf.concat(axis=3,
                          values=[tower_conv, tower_conv1_1, tower_conv2_2])
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        scaled_up = up * scale
        if activation_fn == tf.nn.relu6:
            # Use clip_by_value to simulate bandpass activation.
            scaled_up = tf.clip_by_value(scaled_up, -6.0, 6.0)
        net += scaled_up
        if activation_fn:
            net = activation_fn(net)
    return net
Made with FlippingBook
RkJQdWJsaXNoZXIy NTc3NzU=