写在前面
Keras是搭建深度神经网络很好用的工具,集成度高,做深度学习的原型非常方便,可选择使用Theano或Tensorflow作为后端,非常适合学习和研究深度学习。最近会抽时间写一系列Keras实现各种网络的文章,首先本篇给出使用Keras实现AlexNet的代码。
技术要点
一、使用ReLU代替Sigmoid作为CNN激活函数,解决了Sigmoid在网络较深时出现的梯度弥散问题。
二、最后的全连接层采用Dropout策略,避免过拟合。
三、使用重叠的最大池化,最大池化避免平均池化的模糊化问题,重叠提升了特征的丰富性。
四、提出了LRN层,对局部神经元的活动创建竞争机制,使得其中响应比较大的值变得相对更大,并抑制其他反馈较小的神经元,增强了模型的泛化能力。
五、使用CUDA利用GPU加速深度卷积网络的训练。
六、数据增强,随机地从大图像中截取224*224大小的区域作为输入,并通过翻转等操作增加数据量,可减轻过拟合,提升泛化能力。预测时提取图片四个角加中间五个位置并进行左右翻转一共十幅图片,进行预测求平均值。
网络结构
代码
#-*- coding: UTF-8 -*-
"""
Author: lanbing510
Environment: Keras2.0.5,Python2.7
Model: AlexNet
"""
from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D
from keras.layers import Flatten, Dense, Dropout
from keras.layers import Input
from keras.models import Model
from keras import regularizers
from keras.utils import plot_model
from KerasLayers.Custom_layers import LRN2D
# Global Constants
NB_CLASS=1000  # number of output classes (ImageNet-1k)
LEARNING_RATE=0.01  # SGD learning rate (defined but unused in this script)
MOMENTUM=0.9  # SGD momentum (defined but unused in this script)
ALPHA=0.0001  # LRN scale parameter, per the AlexNet paper
BETA=0.75  # LRN exponent parameter, per the AlexNet paper
GAMMA=0.1  # learning-rate decay factor (defined but unused in this script)
DROPOUT=0.5  # dropout rate for the fully-connected layers
WEIGHT_DECAY=0.0005  # L2 regularization strength for conv weights/biases
LRN2D_NORM=True  # default: apply LRN after each conv in conv2D_lrn2d
DATA_FORMAT='channels_last' # Theano:'channels_first' Tensorflow:'channels_last'
def conv2D_lrn2d(x,filters,kernel_size,strides=(1,1),padding='same',data_format=DATA_FORMAT,dilation_rate=(1,1),activation='relu',use_bias=True,kernel_initializer='glorot_uniform',bias_initializer='zeros',kernel_regularizer=None,bias_regularizer=None,activity_regularizer=None,kernel_constraint=None,bias_constraint=None,lrn2d_norm=LRN2D_NORM,weight_decay=WEIGHT_DECAY):
    """Apply a Conv2D layer, optionally followed by local response normalization.

    Args mirror keras.layers.Conv2D; additionally:
        lrn2d_norm: if True, append an LRN2D layer after the convolution.
        weight_decay: if truthy, use L2(weight_decay) for any kernel/bias
            regularizer the caller did not supply explicitly.

    Returns:
        The output tensor of the (conv [+ LRN]) stack.
    """
    # BUGFIX: the original unconditionally overwrote the caller-supplied
    # kernel_regularizer/bias_regularizer; only fill in defaults now.
    if weight_decay:
        if kernel_regularizer is None:
            kernel_regularizer=regularizers.l2(weight_decay)
        if bias_regularizer is None:
            bias_regularizer=regularizers.l2(weight_decay)
    x=Conv2D(filters=filters,kernel_size=kernel_size,strides=strides,padding=padding,data_format=data_format,dilation_rate=dilation_rate,activation=activation,use_bias=use_bias,kernel_initializer=kernel_initializer,bias_initializer=bias_initializer,kernel_regularizer=kernel_regularizer,bias_regularizer=bias_regularizer,activity_regularizer=activity_regularizer,kernel_constraint=kernel_constraint,bias_constraint=bias_constraint)(x)
    if lrn2d_norm:
        # Local response normalization, as in the original AlexNet paper.
        x=LRN2D(alpha=ALPHA,beta=BETA)(x)
    return x
def create_model():
    """Build the AlexNet graph (conv stack + 3 FC layers, softmax output).

    Returns:
        (x, img_input, CONCAT_AXIS, INP_SHAPE, DATA_FORMAT) where x is the
        softmax output tensor and img_input is the Input layer tensor.

    Raises:
        Exception: if DATA_FORMAT is neither 'channels_first' nor
            'channels_last'.
    """
    if DATA_FORMAT=='channels_first':
        INP_SHAPE=(3,227,227)
        img_input=Input(shape=INP_SHAPE)
        CONCAT_AXIS=1
    elif DATA_FORMAT=='channels_last':
        INP_SHAPE=(227,227,3)
        img_input=Input(shape=INP_SHAPE)
        CONCAT_AXIS=3
    else:
        # BUGFIX: the original referenced the undefined name DIM_ORDERING
        # here, which would raise NameError instead of this message.
        raise Exception('Invalid Dim Ordering: '+str(DATA_FORMAT))
    # Convolution Net Layer 1: 96 11x11 filters, stride 4, then 3x3/2 max pool
    x=conv2D_lrn2d(img_input,96,(11,11),4,padding='valid')
    x=MaxPooling2D(pool_size=(3,3),strides=2,padding='valid',data_format=DATA_FORMAT)(x)
    # Convolution Net Layer 2: 256 5x5 filters, then 3x3/2 max pool
    x=conv2D_lrn2d(x,256,(5,5),1,padding='same')
    x=MaxPooling2D(pool_size=(3,3),strides=2,padding='valid',data_format=DATA_FORMAT)(x)
    # Convolution Net Layer 3~5: three 3x3 conv layers without LRN
    x=conv2D_lrn2d(x,384,(3,3),1,padding='same',lrn2d_norm=False)
    x=conv2D_lrn2d(x,384,(3,3),1,padding='same',lrn2d_norm=False)
    x=conv2D_lrn2d(x,256,(3,3),1,padding='same',lrn2d_norm=False)
    x=MaxPooling2D(pool_size=(3,3),strides=2,padding='valid',data_format=DATA_FORMAT)(x)
    # Layer 6: flatten + FC-4096 with dropout
    x=Flatten()(x)
    x=Dense(4096,activation='relu')(x)
    x=Dropout(DROPOUT)(x)
    # Layer 7: FC-4096 with dropout
    x=Dense(4096,activation='relu')(x)
    x=Dropout(DROPOUT)(x)
    # Layer 8: softmax classifier.
    # BUGFIX: 'output_dim' is the Keras 1 keyword; Keras 2 uses 'units'.
    x=Dense(units=NB_CLASS,activation='softmax')(x)
    return x,img_input,CONCAT_AXIS,INP_SHAPE,DATA_FORMAT
def check_print():
    """Build the AlexNet model, print its summary, plot it, and compile it."""
    # Create the Model
    x,img_input,CONCAT_AXIS,INP_SHAPE,DATA_FORMAT=create_model()
    # Create a Keras Model.
    # BUGFIX: Keras 2 uses the 'inputs'/'outputs' keywords; 'input'/'output'
    # are the deprecated Keras 1 names.
    model=Model(inputs=img_input,outputs=[x])
    model.summary()
    # Save a PNG of the Model Build
    plot_model(model,to_file='AlexNet.png')
    model.compile(optimizer='rmsprop',loss='categorical_crossentropy')
    # print() with parentheses is valid in both Python 2 and 3; the original
    # Python-2-only print statement breaks under Python 3.
    print('Model Compiled')


if __name__=='__main__':
    check_print()
参考文献
[1] Krizhevsky, Alex, Ilya Sutskever, and Geoffrey E. Hinton. "Imagenet classification with deep convolutional neural networks." Advances in neural information processing systems. 2012.
[2] Caffe中的AlexNet模型