The network architecture here is consistent with the one illustrated in the paper. By the way, I forgot to mention that the Keras version used here is 1.2.2; once I finish reading through the source I will port it to 2.0.6 myself and upload it to GitHub. Don't just copy and paste this code: there are explanatory comments mixed into it, so it may not run as-is.

# define the input shape
input_shape = (300, 300, 3)
# define the number of classes (20 VOC classes + background)
num_classes = 21
# the whole network is kept in a dictionary, which makes later operations easier
net = {}

# Block 1
input_tensor = Input(shape=input_shape)
# image size as (width, height)
img_size = (input_shape[1], input_shape[0])
net['input'] = input_tensor
net['conv1_1'] = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='conv1_1')(net['input'])
net['conv1_2'] = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='conv1_2')(net['conv1_1'])
net['pool1'] = MaxPooling2D((2, 2), strides=(2, 2), border_mode='same', name='pool1')(net['conv1_2'])

# Block 2
net['conv2_1'] = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='conv2_1')(net['pool1'])
net['conv2_2'] = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='conv2_2')(net['conv2_1'])
net['pool2'] = MaxPooling2D((2, 2), strides=(2, 2), border_mode='same', name='pool2')(net['conv2_2'])

# Block 3
net['conv3_1'] = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='conv3_1')(net['pool2'])
net['conv3_2'] = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='conv3_2')(net['conv3_1'])
net['conv3_3'] = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='conv3_3')(net['conv3_2'])
net['pool3'] = MaxPooling2D((2, 2), strides=(2, 2), border_mode='same', name='pool3')(net['conv3_3'])

# Block 4
net['conv4_1'] = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='conv4_1')(net['pool3'])
net['conv4_2'] = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='conv4_2')(net['conv4_1'])
# conv4_3 is the first layer that will be used for prediction
net['conv4_3'] = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='conv4_3')(net['conv4_2'])
net['pool4'] = MaxPooling2D((2, 2), strides=(2, 2), border_mode='same', name='pool4')(net['conv4_3'])

# Block 5
net['conv5_1'] = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='conv5_1')(net['pool4'])
net['conv5_2'] = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='conv5_2')(net['conv5_1'])
net['conv5_3'] = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='conv5_3')(net['conv5_2'])
net['pool5'] = MaxPooling2D((3, 3), strides=(1, 1), border_mode='same', name='pool5')(net['conv5_3'])

# FC6: this was the fully connected fc6 of the original VGG16; here it is replaced by an atrous (dilated) convolution
net['fc6'] = AtrousConvolution2D(1024, 3, 3, atrous_rate=(6, 6), activation='relu', border_mode='same', name='fc6')(net['pool5'])
# FC7: the second layer to be used for prediction
net['fc7'] = Convolution2D(1024, 1, 1, activation='relu', border_mode='same', name='fc7')(net['fc6'])
# x = Dropout(0.5, name='drop7')(x)
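A short note on the pool5/fc6 change above: SSD changes pool5 from a 2x2 stride-2 pooling to 3x3 stride-1, so the feature map stays at 19x19 instead of being halved again, and fc6 becomes a dilated convolution with rate 6 so that its receptive field is still roughly as large as the original fully connected fc6 (the "a trous" trick). The arithmetic is easy to check; the helper below is only my own sanity-check sketch, not part of the original ssd.py, and the function name is mine.

# quick sanity check of the dilated-convolution arithmetic (not in the original code)
def effective_kernel(kernel, rate):
    # a k x k kernel with dilation rate r covers k + (k - 1) * (r - 1) input pixels
    return kernel + (kernel - 1) * (rate - 1)

# fc6: 3x3 kernel, dilation 6 -> covers a 13x13 window of the 19x19 pool5 map
print(effective_kernel(3, 6))  # 13
# with border_mode='same' and stride 1 the spatial size is unchanged: still 19x19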
# Block 6
net['conv6_1'] = Convolution2D(256, 1, 1, activation='relu', border_mode='same', name='conv6_1')(net['fc7'])
# conv6_2 is the third layer to be used for prediction
net['conv6_2'] = Convolution2D(512, 3, 3, subsample=(2, 2), activation='relu', border_mode='same', name='conv6_2')(net['conv6_1'])

# Block 7
net['conv7_1'] = Convolution2D(128, 1, 1, activation='relu', border_mode='same', name='conv7_1')(net['conv6_2'])
net['conv7_2'] = ZeroPadding2D()(net['conv7_1'])
# conv7_2 is the fourth layer to be used for prediction
net['conv7_2'] = Convolution2D(256, 3, 3, subsample=(2, 2), activation='relu', border_mode='valid', name='conv7_2')(net['conv7_2'])

# Block 8
net['conv8_1'] = Convolution2D(128, 1, 1, activation='relu', border_mode='same', name='conv8_1')(net['conv7_2'])
# conv8_2 is the fifth layer to be used for prediction
net['conv8_2'] = Convolution2D(256, 3, 3, subsample=(2, 2), activation='relu', border_mode='same', name='conv8_2')(net['conv8_1'])

# Last Pool: pool6 is the last layer to be used for prediction
net['pool6'] = GlobalAveragePooling2D(name='pool6')(net['conv8_2'])

# Prediction from conv4_3
# net['conv4_3']._shape = (?, 38, 38, 512)
# This layer normalizes the conv4_3 feature map; it has a learnable scale parameter (initialized to 20),
# and its output has exactly the same shape as its input.
net['conv4_3_norm'] = Normalize(20, name='conv4_3_norm')(net['conv4_3'])
num_priors = 3
# * 4 because each box is described by 4 numbers; this branch only predicts the box coordinates
x = Convolution2D(num_priors * 4, 3, 3, border_mode='same', name='conv4_3_norm_mbox_loc')(net['conv4_3_norm'])
net['conv4_3_norm_mbox_loc'] = x
flatten = Flatten(name='conv4_3_norm_mbox_loc_flat')
net['conv4_3_norm_mbox_loc_flat'] = flatten(net['conv4_3_norm_mbox_loc'])
# the box coordinates are done; now predict the class confidences
name = 'conv4_3_norm_mbox_conf'
if num_classes != 21:
    name += '_{}'.format(num_classes)
x = Convolution2D(num_priors * num_classes, 3, 3, border_mode='same', name=name)(net['conv4_3_norm'])
net['conv4_3_norm_mbox_conf'] = x
flatten = Flatten(name='conv4_3_norm_mbox_conf_flat')
net['conv4_3_norm_mbox_conf_flat'] = flatten(net['conv4_3_norm_mbox_conf'])
# This generates the default boxes described in the paper directly from the conv4_3 feature map.
# Yes, the default boxes really are produced straight from the feature map;
# the parameters passed here (min_size, aspect ratios, variances) need to be chosen carefully.
priorbox = PriorBox(img_size, 30.0, aspect_ratios=[2], variances=[0.1, 0.1, 0.2, 0.2], name='conv4_3_norm_mbox_priorbox')
net['conv4_3_norm_mbox_priorbox'] = priorbox(net['conv4_3_norm'])
# That completes the first prediction layer; the remaining layers are handled in much the same way.
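Before moving on, it is worth checking the shapes this conv4_3 branch produces. The numbers below are my own back-of-the-envelope check, assuming (as the comment above notes) that conv4_3 is 38x38x512, and that the PriorBox layer from ssd_layers.py emits 8 values per box, i.e. 4 coordinates plus the 4 variances; nothing like this is printed in the original code.

# rough shape check for the conv4_3 prediction branch (my own sketch, not in the original ssd.py)
fmap_h, fmap_w, n_priors, n_classes = 38, 38, 3, 21
loc_flat = fmap_h * fmap_w * n_priors * 4            # 17328 values per image
conf_flat = fmap_h * fmap_w * n_priors * n_classes   # 90972 values per image
n_boxes = fmap_h * fmap_w * n_priors                  # 4332 default boxes from conv4_3
print(loc_flat, conf_flat, n_boxes)                   # 17328 90972 4332
# conv4_3_norm_mbox_priorbox should then have shape (batch, 4332, 8): 4 box coords + 4 variances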
# Prediction from fc7
num_priors = 6
net['fc7_mbox_loc'] = Convolution2D(num_priors * 4, 3, 3, border_mode='same', name='fc7_mbox_loc')(net['fc7'])
flatten = Flatten(name='fc7_mbox_loc_flat')
net['fc7_mbox_loc_flat'] = flatten(net['fc7_mbox_loc'])
name = 'fc7_mbox_conf'
if num_classes != 21:
    name += '_{}'.format(num_classes)
net['fc7_mbox_conf'] = Convolution2D(num_priors * num_classes, 3, 3, border_mode='same', name=name)(net['fc7'])
flatten = Flatten(name='fc7_mbox_conf_flat')
net['fc7_mbox_conf_flat'] = flatten(net['fc7_mbox_conf'])
priorbox = PriorBox(img_size, 60.0, max_size=114.0, aspect_ratios=[2, 3], variances=[0.1, 0.1, 0.2, 0.2], name='fc7_mbox_priorbox')
net['fc7_mbox_priorbox'] = priorbox(net['fc7'])

# Prediction from conv6_2
num_priors = 6
x = Convolution2D(num_priors * 4, 3, 3, border_mode='same', name='conv6_2_mbox_loc')(net['conv6_2'])
net['conv6_2_mbox_loc'] = x
flatten = Flatten(name='conv6_2_mbox_loc_flat')
net['conv6_2_mbox_loc_flat'] = flatten(net['conv6_2_mbox_loc'])
name = 'conv6_2_mbox_conf'
if num_classes != 21:
    name += '_{}'.format(num_classes)
x = Convolution2D(num_priors * num_classes, 3, 3, border_mode='same', name=name)(net['conv6_2'])
net['conv6_2_mbox_conf'] = x
flatten = Flatten(name='conv6_2_mbox_conf_flat')
net['conv6_2_mbox_conf_flat'] = flatten(net['conv6_2_mbox_conf'])
priorbox = PriorBox(img_size, 114.0, max_size=168.0, aspect_ratios=[2, 3], variances=[0.1, 0.1, 0.2, 0.2], name='conv6_2_mbox_priorbox')
net['conv6_2_mbox_priorbox'] = priorbox(net['conv6_2'])

# Prediction from conv7_2
num_priors = 6
x = Convolution2D(num_priors * 4, 3, 3, border_mode='same', name='conv7_2_mbox_loc')(net['conv7_2'])
net['conv7_2_mbox_loc'] = x
flatten = Flatten(name='conv7_2_mbox_loc_flat')
net['conv7_2_mbox_loc_flat'] = flatten(net['conv7_2_mbox_loc'])
name = 'conv7_2_mbox_conf'
if num_classes != 21:
    name += '_{}'.format(num_classes)
x = Convolution2D(num_priors * num_classes, 3, 3, border_mode='same', name=name)(net['conv7_2'])
net['conv7_2_mbox_conf'] = x
flatten = Flatten(name='conv7_2_mbox_conf_flat')
net['conv7_2_mbox_conf_flat'] = flatten(net['conv7_2_mbox_conf'])
priorbox = PriorBox(img_size, 168.0, max_size=222.0, aspect_ratios=[2, 3], variances=[0.1, 0.1, 0.2, 0.2], name='conv7_2_mbox_priorbox')
net['conv7_2_mbox_priorbox'] = priorbox(net['conv7_2'])

# Prediction from conv8_2
num_priors = 6
x = Convolution2D(num_priors * 4, 3, 3, border_mode='same', name='conv8_2_mbox_loc')(net['conv8_2'])
net['conv8_2_mbox_loc'] = x
flatten = Flatten(name='conv8_2_mbox_loc_flat')
net['conv8_2_mbox_loc_flat'] = flatten(net['conv8_2_mbox_loc'])
name = 'conv8_2_mbox_conf'
if num_classes != 21:
    name += '_{}'.format(num_classes)
x = Convolution2D(num_priors * num_classes, 3, 3, border_mode='same', name=name)(net['conv8_2'])
net['conv8_2_mbox_conf'] = x
flatten = Flatten(name='conv8_2_mbox_conf_flat')
net['conv8_2_mbox_conf_flat'] = flatten(net['conv8_2_mbox_conf'])
priorbox = PriorBox(img_size, 222.0, max_size=276.0, aspect_ratios=[2, 3], variances=[0.1, 0.1, 0.2, 0.2], name='conv8_2_mbox_priorbox')
net['conv8_2_mbox_priorbox'] = priorbox(net['conv8_2'])

# Prediction from pool6
num_priors = 6
x = Dense(num_priors * 4, name='pool6_mbox_loc_flat')(net['pool6'])
net['pool6_mbox_loc_flat'] = x
name = 'pool6_mbox_conf_flat'
if num_classes != 21:
    name += '_{}'.format(num_classes)
x = Dense(num_priors * num_classes, name=name)(net['pool6'])
net['pool6_mbox_conf_flat'] = x
priorbox = PriorBox(img_size, 276.0, max_size=330.0, aspect_ratios=[2, 3], variances=[0.1, 0.1, 0.2, 0.2], name='pool6_mbox_priorbox')
# pool6 is a 256-dimensional vector, which is not the shape PriorBox expects,
# so it has to be reshaped into a 1x1 feature map first
if K.image_dim_ordering() == 'tf':
    target_shape = (1, 1, 256)
else:
    target_shape = (256, 1, 1)
net['pool6_reshaped'] = Reshape(target_shape, name='pool6_reshaped')(net['pool6'])
net['pool6_mbox_priorbox'] = priorbox(net['pool6_reshaped'])

# At this point everything has been generated; the next step is to use it for training or prediction.
# Gather all predictions
net['mbox_loc'] = merge([net['conv4_3_norm_mbox_loc_flat'],
                         net['fc7_mbox_loc_flat'],
                         net['conv6_2_mbox_loc_flat'],
                         net['conv7_2_mbox_loc_flat'],
                         net['conv8_2_mbox_loc_flat'],
                         net['pool6_mbox_loc_flat']],
                        mode='concat', concat_axis=1, name='mbox_loc')
net['mbox_conf'] = merge([net['conv4_3_norm_mbox_conf_flat'],
                          net['fc7_mbox_conf_flat'],
                          net['conv6_2_mbox_conf_flat'],
                          net['conv7_2_mbox_conf_flat'],
                          net['conv8_2_mbox_conf_flat'],
                          net['pool6_mbox_conf_flat']],
                         mode='concat', concat_axis=1, name='mbox_conf')
net['mbox_priorbox'] = merge([net['conv4_3_norm_mbox_priorbox'],
                              net['fc7_mbox_priorbox'],
                              net['conv6_2_mbox_priorbox'],
                              net['conv7_2_mbox_priorbox'],
                              net['conv8_2_mbox_priorbox'],
                              net['pool6_mbox_priorbox']],
                             mode='concat', concat_axis=1, name='mbox_priorbox')
if hasattr(net['mbox_loc'], '_keras_shape'):
    num_boxes = net['mbox_loc']._keras_shape[-1] // 4
elif hasattr(net['mbox_loc'], 'int_shape'):
    num_boxes = K.int_shape(net['mbox_loc'])[-1] // 4
net['mbox_loc'] = Reshape((num_boxes, 4), name='mbox_loc_final')(net['mbox_loc'])
net['mbox_conf'] = Reshape((num_boxes, num_classes), name='mbox_conf_logits')(net['mbox_conf'])
net['mbox_conf'] = Activation('softmax', name='mbox_conf_final')(net['mbox_conf'])
net['predictions'] = merge([net['mbox_loc'], net['mbox_conf'], net['mbox_priorbox']],
                           mode='concat', concat_axis=2, name='predictions')
model = Model(net['input'], net['predictions'])
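To close the loop, here is a small hand-written tally of how many default boxes the six prediction layers produce and what shape net['predictions'] should therefore have. The feature-map sizes (38, 19, 10, 5, 3, 1) and the 8 values per prior box are my reading of the code above rather than something the original post prints, so treat the numbers as a sketch.

# my own tally of the default boxes from the six prediction layers (not in the original code)
layers = [
    ('conv4_3', 38, 3),   # 38x38 map, 3 priors per cell
    ('fc7',     19, 6),
    ('conv6_2', 10, 6),
    ('conv7_2',  5, 6),
    ('conv8_2',  3, 6),
    ('pool6',    1, 6),
]
total_boxes = sum(size * size * priors for _, size, priors in layers)
print(total_boxes)  # 7308
# so the final output should be (batch, 7308, 4 + 21 + 8):
# 4 loc offsets + 21 class scores + 8 prior-box values per default box,
# which can be confirmed with model.summary() or K.int_shape(net['predictions'])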