Articles tagged "Keras"

Designing simple LSTM and GRU models

Below is a simple LSTM/GRU model; the embedding_matrix it uses is built in the previous article.

import os

from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in scikit-learn 0.20
from keras.models import Model
from keras.layers import Dense, Embedding, Input, concatenate, Flatten, SpatialDropout1D
from keras.layers import LSTM, Bidirectional, GlobalMaxPool1D, Dropout, CuDNNLSTM, GRU, CuDNNGRU, GlobalAveragePooling1D, GlobalMaxPooling1D
from keras.preprocessing import text, sequence
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, TensorBoard, LearningRateScheduler, Callback
from keras.optimizers import Adam, Adadelta, SGD, RMSprop, Nadam
from keras import backend as K
def get_model():
    # maxlen, nb_words, embed_size and embedding_matrix come from the
    # tokenization/embedding step in the previous article
    inp = Input(shape=(maxlen,))
    x = Embedding(nb_words, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    x = SpatialDropout1D(0.2)(x)
    x = Bidirectional(CuDNNLSTM(256, return_sequences=True))(x)
    x = Dropout(0.2)(x)
    x = Bidirectional(CuDNNGRU(128, return_sequences=True))(x)
    x = Dropout(0.2)(x)
    avg_pool = GlobalAveragePooling1D()(x)
    max_pool = GlobalMaxPooling1D()(x)
    x = concatenate([avg_pool, max_pool])
    x = Dense(64, activation="relu")(x)
    x = Dense(6, activation="sigmoid")(x)
    model = Model(inputs=inp, outputs=x)
    opt = Adam(lr=1e-3)
    model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
    return model
INPUT = './'
batch_size = 32
epochs = 10
model = get_model()

# split features and labels in one call so the rows stay aligned
X_train, X_val, y_train, y_val = train_test_split(train_sequence, y, random_state=17, train_size=0.90)

# per-batch decay rate that anneals the learning rate from lr_init towards lr_fin
# (the 1./ guards against integer division under Python 2)
exp_decay = lambda init, fin, steps: (init/fin)**(1./(steps-1)) - 1
steps = int(len(train_df)/batch_size) * epochs
lr_init, lr_fin = 0.001, 0.0005
lr_decay = exp_decay(lr_init, lr_fin, steps)
K.set_value(model.optimizer.lr, lr_init)
K.set_value(model.optimizer.decay, lr_decay)
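
A quick sanity check of the decay formula (a minimal sketch; it assumes the optimizer shrinks the learning rate by a factor of 1/(1 + decay) once per update, so lr_init * (1 + decay)**-(steps - 1) should land on lr_fin):

steps_demo = 100                                   # hypothetical number of updates
d = (0.001 / 0.0005) ** (1. / (steps_demo - 1)) - 1
print(0.001 * (1 + d) ** -(steps_demo - 1))        # prints ~0.0005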

num = 0
if not os.path.isdir(INPUT+"models/"):
    os.mkdir(INPUT+"models/")
if not os.path.isdir(INPUT+"models/"+str(num)):
    os.mkdir(INPUT+"models/"+str(num))
file_path_best=INPUT+"models/"+str(num)+"/"+"weights_best"+str(num)+".hdf5"
checkpoint_best = ModelCheckpoint(file_path_best, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
early = EarlyStopping(monitor="val_loss", mode="min", patience=3)

callbacks_list = [checkpoint_best, early]
model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(X_val,y_val), callbacks=callbacks_list)

if os.path.isfile(file_path_best):
    print ('load ',file_path_best)
    model.load_weights(file_path_best)

y_test = model.predict([test_sequence], batch_size=256, verbose=1)


[Repost] Image classification with the Keras pretrained model ResNet50

Reposted from https://blog.csdn.net/u010632850/article/details/77926679

Keras provides several models pretrained on ImageNet: Xception, VGG16, VGG19, ResNet50 and InceptionV3. These models take an include_top parameter that says whether the fully connected layers at the top of the network are included: with them, the model can classify images into the 1000 ImageNet classes; without them, the pretrained weights can be reused for custom tasks. The automatic download at run time can fail; in that case download the weights manually and put them in "~/.keras/models/" (with WinPython, in "settings/.keras/models/").
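
For example (a minimal sketch; 'elephant.jpg' stands in for any local image file), with include_top=True the model predicts the 1000 ImageNet classes directly:

import numpy as np
from keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions
from keras.preprocessing import image

full_model = ResNet50(weights='imagenet', include_top=True)   # complete network with the classifier on top
img = image.load_img('elephant.jpg', target_size=(224, 224))  # placeholder file name
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
print(decode_predictions(full_model.predict(x), top=3)[0])    # top-3 ImageNet labels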

Correction: the original post misstated K.learning_phase(), the flag that indicates whether the network is currently in training or test mode. The function's documentation says:

"The learning phase flag is a bool tensor (0 = test, 1 = train)", so 0 is test mode and 1 is training mode; some network structures behave differently between the two.
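
A minimal demonstration of the flag (a sketch under the versions listed below; Dropout is one of the layers whose behavior depends on the phase):

import numpy as np
from keras.models import Model
from keras.layers import Input, Dropout
from keras import backend as K

inp = Input(shape=(4,))
out = Dropout(0.5)(inp)                  # Dropout only fires in training mode
m = Model(inputs=inp, outputs=out)
f = K.function([m.input, K.learning_phase()], [m.output])
x = np.ones((1, 4), dtype='float32')
print(f([x, 0])[0])   # 0 = test mode: the input passes through unchanged
print(f([x, 1])[0])   # 1 = train mode: about half the units are zeroed, the rest scaled up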

Here the pretrained ResNet50 model is used to classify the Caltech101 dataset. I only have a CPU, so it runs slowly, but as long as the training set is fixed the slow part only has to run once. The Chinese documentation for this pretrained model is at http://keras-cn.readthedocs.io/en/latest/other/application/#resnet50

Versions I used:

1. Ubuntu 16.04.3
2. Python 2.7
3. Keras 2.0.8
4. TensorFlow 1.3.0
5. NumPy 1.13.1
6. python-opencv 2.4.9.1+dfsg-1.5ubuntu1
7. h5py 2.7.0

How the image data is extracted from the folders:

The functions:

def eachFile(filepath):                 # collect the file names in a directory into a list
    pathDir =  os.listdir(filepath)  
    out = []  
    for allDir in pathDir:  
        child = allDir.decode('gbk')    # .decode('gbk') avoids garbled Chinese file names
        out.append(child)  
    return out  
  
def get_data(data_name,train_left=0.0,train_right=0.7,train_all=0.7,resize=True,data_format=None,t=''):   # load image data from the folders
    file_name = os.path.join(pic_dir_out,data_name+t+'_'+str(train_left)+'_'+str(train_right)+'_'+str(Width)+"X"+str(Height)+".h5")
    print file_name
    if os.path.exists(file_name):           # check whether the data was saved to a file earlier
        f = h5py.File(file_name,'r')  
        if t=='train':  
            X_train = f['X_train'][:]  
            y_train = f['y_train'][:]  
            f.close()  
            return (X_train, y_train)  
        elif t=='test':  
            X_test = f['X_test'][:]  
            y_test = f['y_test'][:]  
            f.close()  
            return (X_test, y_test)    
        else:  
            return   
    data_format = conv_utils.normalize_data_format(data_format)  
    pic_dir_set = eachFile(pic_dir_data)  
    X_train = []  
    y_train = []  
    X_test = []  
    y_test = []  
    label = 0  
    for pic_dir in pic_dir_set:  
        print pic_dir_data+pic_dir  
        if not os.path.isdir(os.path.join(pic_dir_data,pic_dir)):  
            continue      
        pic_set = eachFile(os.path.join(pic_dir_data,pic_dir))  
        pic_index = 0  
        train_count = int(len(pic_set)*train_all)  
        train_l = int(len(pic_set)*train_left)  
        train_r = int(len(pic_set)*train_right)  
        for pic_name in pic_set:  
            if not os.path.isfile(os.path.join(pic_dir_data,pic_dir,pic_name)):  
                continue          
            img = cv2.imread(os.path.join(pic_dir_data,pic_dir,pic_name))  
            if img is None:  
                continue  
            if (resize):  
                img = cv2.resize(img,(Width,Height))     
                img = img.reshape(-1,Width,Height,3)  
            if (pic_index < train_count):  
                if t=='train':  
                    if (pic_index >= train_l and pic_index < train_r):  
                        X_train.append(img)  
                        y_train.append(label)    
            else:  
                if t=='test':  
                    X_test.append(img)  
                    y_test.append(label)  
            pic_index += 1  
        if len(pic_set) != 0:
            label += 1  
      
    f = h5py.File(file_name,'w')   
    if t=='train':  
        X_train = np.concatenate(X_train,axis=0)       
        y_train = np.array(y_train)        
        f.create_dataset('X_train', data = X_train)  
        f.create_dataset('y_train', data = y_train)  
        f.close()  
        return (X_train, y_train)  
    elif t=='test':  
        X_test = np.concatenate(X_test,axis=0)   
        y_test = np.array(y_test)  
        f.create_dataset('X_test', data = X_test)  
        f.create_dataset('y_test', data = y_test)  
        f.close()  
        return (X_test, y_test)     
    else:  
        return

调用:

global Width, Height, pic_dir_out, pic_dir_data  
Width = 224  
Height = 224  
num_classes = 102               # 102 for Caltech101, 10 for cifar10
pic_dir_out = '/home/ccuux3/pic_cnn/pic_out/'    
pic_dir_data = '/home/ccuux3/pic_cnn/pic_dataset/Caltech101/'  
sub_dir = '224_resnet50/'  
if not os.path.isdir(os.path.join(pic_dir_out,sub_dir)):  
    os.mkdir(os.path.join(pic_dir_out,sub_dir))  
pic_dir_mine = os.path.join(pic_dir_out,sub_dir)  
(X_train, y_train) = get_data("Caltech101_color_data_",0.0,0.7,data_format='channels_last',t='train')  
y_train = np_utils.to_categorical(y_train, num_classes)

Load the pretrained ResNet50 model without the top fully connected layers, push the training images through the network, and save the resulting features to a file so they can be loaded directly later (I process the images in chunks because I don't have enough memory):

input_tensor = Input(shape=(224, 224, 3))  
base_model = ResNet50(input_tensor=input_tensor,include_top=False,weights='imagenet')  
#base_model = ResNet50(input_tensor=input_tensor,include_top=False,weights=None)  
get_resnet50_output = K.function([base_model.layers[0].input, K.learning_phase()],  
                          [base_model.layers[-1].output])  
  
file_name = os.path.join(pic_dir_mine,'resnet50_train_output'+'.h5')  
if os.path.exists(file_name):  
    f = h5py.File(file_name,'r')  
    resnet50_train_output = f['resnet50_train_output'][:]  
    f.close()  
else:  
    resnet50_train_output = []  
    delta = 10  
    for i in range(0,len(X_train),delta):  
        print i  
        one_resnet50_train_output = get_resnet50_output([X_train[i:i+delta], 0])[0]  
        resnet50_train_output.append(one_resnet50_train_output)  
    resnet50_train_output = np.concatenate(resnet50_train_output,axis=0)   
    f = h5py.File(file_name,'w')            
    f.create_dataset('resnet50_train_output', data = resnet50_train_output)  
    f.close()

Use the features produced by the ResNet50 network for image classification (with a 224x224 input and include_top=False, this Keras version's ResNet50 ends at the final average pooling layer, so each image becomes a 1x1x2048 feature tensor):

input_tensor = Input(shape=(1, 1, 2048))  
x = Flatten()(input_tensor)  
x = Dense(1024, activation='relu')(x)  
predictions = Dense(num_classes, activation='softmax')(x)     
model = Model(inputs=input_tensor, outputs=predictions)  
model.compile(optimizer=Adam(), loss='categorical_crossentropy',metrics=['accuracy'])

Train on the image dataset:

print('\nTraining ------------')    # load weights from a file, train, then save to a new file
cm = 0                              # bump this counter to continue training across runs
cm_str = '' if cm==0 else str(cm)  
cm2_str = '' if (cm+1)==0 else str(cm+1)   
if cm >= 1:  
    model.load_weights(os.path.join(pic_dir_mine,'cnn_model_Caltech101_resnet50_'+cm_str+'.h5'))  
model.fit(resnet50_train_output, y_train, epochs=10, batch_size=128,)   
model.save_weights(os.path.join(pic_dir_mine,'cnn_model_Caltech101_resnet50_'+cm2_str+'.h5'))

Evaluate on the test dataset:

(X_test, y_test) = get_data("Caltech101_color_data_",0.0,0.7,data_format='channels_last',t='test')      
y_test = np_utils.to_categorical(y_test, num_classes)  
      
file_name = os.path.join(pic_dir_mine,'resnet50_test_output'+'.h5')  
if os.path.exists(file_name):  
    f = h5py.File(file_name,'r')  
    resnet50_test_output = f['resnet50_test_output'][:]  
    f.close()  
else:  
    resnet50_test_output = []  
    delta = 10  
    for i in range(0,len(X_test),delta):  
        print i  
        one_resnet50_test_output = get_resnet50_output([X_test[i:i+delta], 0])[0]  
        resnet50_test_output.append(one_resnet50_test_output)  
    resnet50_test_output = np.concatenate(resnet50_test_output,axis=0)  
    f = h5py.File(file_name,'w')            
    f.create_dataset('resnet50_test_output', data = resnet50_test_output)  
    f.close()  
print('\nTesting ------------')     # evaluate the test set
class_name_list = get_name_list(pic_dir_data)    # for per-class top-N accuracy
pred = model.predict(resnet50_test_output, batch_size=32)

Output the per-class top-5 accuracy on the test set:

N = 5  
pred_list = []  
for row in pred:  
    pred_list.append(row.argsort()[-N:][::-1])  # indices of the N largest values
pred_array = np.array(pred_list)  
test_arg = np.argmax(y_test,axis=1)  
class_count = [0 for _ in xrange(num_classes)]  
class_acc = [0 for _ in xrange(num_classes)]  
for i in xrange(len(test_arg)):  
    class_count[test_arg[i]] += 1  
    if test_arg[i] in pred_array[i]:  
        class_acc[test_arg[i]] += 1  
print('top-'+str(N)+' all acc:',str(sum(class_acc))+'/'+str(len(test_arg)),sum(class_acc)/float(len(test_arg)))  
for i in xrange(num_classes):  
    print (i, class_name_list[i], 'acc: '+str(class_acc[i])+'/'+str(class_count[i]))
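
The row.argsort()[-N:][::-1] idiom in isolation (a minimal sketch):

import numpy as np

row = np.array([0.1, 0.7, 0.05, 0.15])
print(row.argsort()[-2:][::-1])   # [1 3]: indices of the two largest values, largest first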

The complete code:

# -*- coding: utf-8 -*-  
import cv2  
import numpy as np  
import h5py  
import os  
  
from keras.utils import np_utils, conv_utils  
from keras.models import Model  
from keras.layers import Flatten, Dense, Input   
from keras.optimizers import Adam  
from keras.applications.resnet50 import ResNet50  
from keras import backend as K  
  
def get_name_list(filepath):                # get the name of each class
    pathDir =  os.listdir(filepath)  
    out = []  
    for allDir in pathDir:  
        if os.path.isdir(os.path.join(filepath,allDir)):  
            child = allDir.decode('gbk')    # .decode('gbk') avoids garbled Chinese file names
            out.append(child)  
    return out  
      
def eachFile(filepath):                 # collect the file names in a directory into a list
    pathDir =  os.listdir(filepath)  
    out = []  
    for allDir in pathDir:  
        child = allDir.decode('gbk')    # .decode('gbk') avoids garbled Chinese file names
        out.append(child)  
    return out  
  
def get_data(data_name,train_left=0.0,train_right=0.7,train_all=0.7,resize=True,data_format=None,t=''):   # load image data from the folders
    file_name = os.path.join(pic_dir_out,data_name+t+'_'+str(train_left)+'_'+str(train_right)+'_'+str(Width)+"X"+str(Height)+".h5")
    print file_name
    if os.path.exists(file_name):           # check whether the data was saved to a file earlier
        f = h5py.File(file_name,'r')  
        if t=='train':  
            X_train = f['X_train'][:]  
            y_train = f['y_train'][:]  
            f.close()  
            return (X_train, y_train)  
        elif t=='test':  
            X_test = f['X_test'][:]  
            y_test = f['y_test'][:]  
            f.close()  
            return (X_test, y_test)    
        else:  
            return   
    data_format = conv_utils.normalize_data_format(data_format)  
    pic_dir_set = eachFile(pic_dir_data)  
    X_train = []  
    y_train = []  
    X_test = []  
    y_test = []  
    label = 0  
    for pic_dir in pic_dir_set:  
        print pic_dir_data+pic_dir  
        if not os.path.isdir(os.path.join(pic_dir_data,pic_dir)):  
            continue      
        pic_set = eachFile(os.path.join(pic_dir_data,pic_dir))  
        pic_index = 0  
        train_count = int(len(pic_set)*train_all)  
        train_l = int(len(pic_set)*train_left)  
        train_r = int(len(pic_set)*train_right)  
        for pic_name in pic_set:  
            if not os.path.isfile(os.path.join(pic_dir_data,pic_dir,pic_name)):  
                continue          
            img = cv2.imread(os.path.join(pic_dir_data,pic_dir,pic_name))  
            if img is None:  
                continue  
            if (resize):  
                img = cv2.resize(img,(Width,Height))     
                img = img.reshape(-1,Width,Height,3)  
            if (pic_index < train_count):  
                if t=='train':  
                    if (pic_index >= train_l and pic_index < train_r):  
                        X_train.append(img)  
                        y_train.append(label)    
            else:  
                if t=='test':  
                    X_test.append(img)  
                    y_test.append(label)  
            pic_index += 1  
        if len(pic_set) != 0:
            label += 1  
      
    f = h5py.File(file_name,'w')   
    if t=='train':  
        X_train = np.concatenate(X_train,axis=0)       
        y_train = np.array(y_train)        
        f.create_dataset('X_train', data = X_train)  
        f.create_dataset('y_train', data = y_train)  
        f.close()  
        return (X_train, y_train)  
    elif t=='test':  
        X_test = np.concatenate(X_test,axis=0)   
        y_test = np.array(y_test)  
        f.create_dataset('X_test', data = X_test)  
        f.create_dataset('y_test', data = y_test)  
        f.close()  
        return (X_test, y_test)     
    else:  
        return  
  
def main():  
    global Width, Height, pic_dir_out, pic_dir_data  
    Width = 224  
    Height = 224  
    num_classes = 102               # 102 for Caltech101, 10 for cifar10
    pic_dir_out = '/home/ccuux3/pic_cnn/pic_out/'    
    pic_dir_data = '/home/ccuux3/pic_cnn/pic_dataset/Caltech101/'  
    sub_dir = '224_resnet50/'  
    if not os.path.isdir(os.path.join(pic_dir_out,sub_dir)):  
        os.mkdir(os.path.join(pic_dir_out,sub_dir))  
    pic_dir_mine = os.path.join(pic_dir_out,sub_dir)  
    (X_train, y_train) = get_data("Caltech101_color_data_",0.0,0.7,data_format='channels_last',t='train')  
    y_train = np_utils.to_categorical(y_train, num_classes)  
  
    input_tensor = Input(shape=(224, 224, 3))  
    base_model = ResNet50(input_tensor=input_tensor,include_top=False,weights='imagenet')  
    #base_model = ResNet50(input_tensor=input_tensor,include_top=False,weights=None)  
    get_resnet50_output = K.function([base_model.layers[0].input, K.learning_phase()],  
                              [base_model.layers[-1].output])  
  
    file_name = os.path.join(pic_dir_mine,'resnet50_train_output'+'.h5')  
    if os.path.exists(file_name):  
        f = h5py.File(file_name,'r')  
        resnet50_train_output = f['resnet50_train_output'][:]  
        f.close()  
    else:  
        resnet50_train_output = []  
        delta = 10  
        for i in range(0,len(X_train),delta):  
            print i  
            one_resnet50_train_output = get_resnet50_output([X_train[i:i+delta], 0])[0]  
            resnet50_train_output.append(one_resnet50_train_output)  
        resnet50_train_output = np.concatenate(resnet50_train_output,axis=0)   
        f = h5py.File(file_name,'w')            
        f.create_dataset('resnet50_train_output', data = resnet50_train_output)  
        f.close()  
  
    input_tensor = Input(shape=(1, 1, 2048))  
    x = Flatten()(input_tensor)  
    x = Dense(1024, activation='relu')(x)  
    predictions = Dense(num_classes, activation='softmax')(x)     
    model = Model(inputs=input_tensor, outputs=predictions)  
    model.compile(optimizer=Adam(), loss='categorical_crossentropy',metrics=['accuracy'])  
      
    print('\nTraining ------------')    # load weights from a file, train, then save to a new file
    cm = 0                              # bump this counter to continue training across runs
    cm_str = '' if cm==0 else str(cm)  
    cm2_str = '' if (cm+1)==0 else str(cm+1)   
    if cm >= 1:  
        model.load_weights(os.path.join(pic_dir_mine,'cnn_model_Caltech101_resnet50_'+cm_str+'.h5'))  
    model.fit(resnet50_train_output, y_train, epochs=10, batch_size=128,)   
    model.save_weights(os.path.join(pic_dir_mine,'cnn_model_Caltech101_resnet50_'+cm2_str+'.h5'))  
      
    (X_test, y_test) = get_data("Caltech101_color_data_",0.0,0.7,data_format='channels_last',t='test')      
    y_test = np_utils.to_categorical(y_test, num_classes)  
          
    file_name = os.path.join(pic_dir_mine,'resnet50_test_output'+'.h5')  
    if os.path.exists(file_name):  
        f = h5py.File(file_name,'r')  
        resnet50_test_output = f['resnet50_test_output'][:]  
        f.close()  
    else:  
        resnet50_test_output = []  
        delta = 10  
        for i in range(0,len(X_test),delta):  
            print i  
            one_resnet50_test_output = get_resnet50_output([X_test[i:i+delta], 0])[0]  
            resnet50_test_output.append(one_resnet50_test_output)  
        resnet50_test_output = np.concatenate(resnet50_test_output,axis=0)  
        f = h5py.File(file_name,'w')            
        f.create_dataset('resnet50_test_output', data = resnet50_test_output)  
        f.close()  
    print('\nTesting ------------')     # evaluate the test set
    class_name_list = get_name_list(pic_dir_data)    # for per-class top-N accuracy
    pred = model.predict(resnet50_test_output, batch_size=32)  
    f = h5py.File(os.path.join(pic_dir_mine,'pred_'+cm2_str+'.h5'),'w')            
    f.create_dataset('pred', data = pred)  
    f.close()  
      
    N = 1  
    pred_list = []  
    for row in pred:  
        pred_list.append(row.argsort()[-N:][::-1])  # indices of the N largest values
    pred_array = np.array(pred_list)  
    test_arg = np.argmax(y_test,axis=1)  
    class_count = [0 for _ in xrange(num_classes)]  
    class_acc = [0 for _ in xrange(num_classes)]  
    for i in xrange(len(test_arg)):  
        class_count[test_arg[i]] += 1  
        if test_arg[i] in pred_array[i]:  
            class_acc[test_arg[i]] += 1  
    print('top-'+str(N)+' all acc:',str(sum(class_acc))+'/'+str(len(test_arg)),sum(class_acc)/float(len(test_arg)))  
    for i in xrange(num_classes):  
        print (i, class_name_list[i], 'acc: '+str(class_acc[i])+'/'+str(class_count[i]))  
      
    print('----------------------------------------------------')  
    N = 5  
    pred_list = []  
    for row in pred:  
        pred_list.append(row.argsort()[-N:][::-1])  # indices of the N largest values
    pred_array = np.array(pred_list)  
    test_arg = np.argmax(y_test,axis=1)  
    class_count = [0 for _ in xrange(num_classes)]  
    class_acc = [0 for _ in xrange(num_classes)]  
    for i in xrange(len(test_arg)):  
        class_count[test_arg[i]] += 1  
        if test_arg[i] in pred_array[i]:  
            class_acc[test_arg[i]] += 1  
    print('top-'+str(N)+' all acc:',str(sum(class_acc))+'/'+str(len(test_arg)),sum(class_acc)/float(len(test_arg)))  
    for i in xrange(num_classes):  
        print (i, class_name_list[i], 'acc: '+str(class_acc[i])+'/'+str(class_count[i]))  
        
if __name__ == '__main__':  
    main()

Run output:

Using TensorFlow backend.
/home/ccuux3/pic_cnn/pic_out/Caltech101_color_data_train_0.0_0.7_224X224.h5

Training ------------
Epoch 1/10
6353/6353 [==============================] - 5s - loss: 1.1269 - acc: 0.7494      
Epoch 2/10
6353/6353 [==============================] - 4s - loss: 0.1603 - acc: 0.9536     
Epoch 3/10
6353/6353 [==============================] - 4s - loss: 0.0580 - acc: 0.9855     
Epoch 4/10
6353/6353 [==============================] - 4s - loss: 0.0312 - acc: 0.9931     
Epoch 5/10
6353/6353 [==============================] - 4s - loss: 0.0182 - acc: 0.9956     
Epoch 6/10
6353/6353 [==============================] - 4s - loss: 0.0111 - acc: 0.9976     
Epoch 7/10
6353/6353 [==============================] - 4s - loss: 0.0090 - acc: 0.9981     
Epoch 8/10
6353/6353 [==============================] - 4s - loss: 0.0082 - acc: 0.9987     
Epoch 9/10
6353/6353 [==============================] - 4s - loss: 0.0069 - acc: 0.9994     
Epoch 10/10
6353/6353 [==============================] - 4s - loss: 0.0087 - acc: 0.9987     
/home/ccuux3/pic_cnn/pic_out/Caltech101_color_data_test_0.0_0.7_224X224.h5

Testing ------------
('top-1 all acc:', '2597/2792', 0.9301575931232091)
(0, u'62.mayfly', 'acc: 10/12')
(1, u'66.Motorbikes', 'acc: 240/240')
(2, u'68.octopus', 'acc: 7/11')
(3, u'94.umbrella', 'acc: 21/23')
(4, u'90.strawberry', 'acc: 10/11')
(5, u'86.stapler', 'acc: 13/14')
(6, u'83.sea_horse', 'acc: 15/18')
(7, u'72.pigeon', 'acc: 13/14')
(8, u'89.stop_sign', 'acc: 19/20')
(9, u'4.BACKGROUND_Google', 'acc: 125/141')
(10, u'22.cougar_face', 'acc: 18/21')
(11, u'81.scissors', 'acc: 9/12')
(12, u'100.wrench', 'acc: 8/12')
(13, u'57.Leopards', 'acc: 60/60')
(14, u'46.hawksbill', 'acc: 29/30')
(15, u'30.dolphin', 'acc: 19/20')
(16, u'9.bonsai', 'acc: 39/39')
(17, u'35.euphonium', 'acc: 18/20')
(18, u'44.gramophone', 'acc: 16/16')
(19, u'74.platypus', 'acc: 7/11')
(20, u'14.camera', 'acc: 15/15')
(21, u'55.lamp', 'acc: 15/19')
(22, u'38.Faces_easy', 'acc: 129/131')
(23, u'54.ketch', 'acc: 28/35')
(24, u'33.elephant', 'acc: 18/20')
(25, u'3.ant', 'acc: 8/13')
(26, u'49.helicopter', 'acc: 26/27')
(27, u'36.ewer', 'acc: 26/26')
(28, u'78.rooster', 'acc: 14/15')
(29, u'70.pagoda', 'acc: 15/15')
(30, u'58.llama', 'acc: 20/24')
(31, u'5.barrel', 'acc: 15/15')
(32, u'101.yin_yang', 'acc: 18/18')
(33, u'18.cellphone', 'acc: 18/18')
(34, u'59.lobster', 'acc: 7/13')
(35, u'17.ceiling_fan', 'acc: 14/15')
(36, u'16.car_side', 'acc: 37/37')
(37, u'50.ibis', 'acc: 24/24')
(38, u'76.revolver', 'acc: 23/25')
(39, u'84.snoopy', 'acc: 7/11')
(40, u'87.starfish', 'acc: 26/26')
(41, u'12.buddha', 'acc: 24/26')
(42, u'52.joshua_tree', 'acc: 20/20')
(43, u'43.gerenuk', 'acc: 10/11')
(44, u'65.minaret', 'acc: 23/23')
(45, u'91.sunflower', 'acc: 26/26')
(46, u'56.laptop', 'acc: 24/25')
(47, u'77.rhino', 'acc: 17/18')
(48, u'1.airplanes', 'acc: 239/240')
(49, u'88.stegosaurus', 'acc: 16/18')
(50, u'23.crab', 'acc: 17/22')
(51, u'8.binocular', 'acc: 8/10')
(52, u'31.dragonfly', 'acc: 18/21')
(53, u'6.bass', 'acc: 15/17')
(54, u'95.watch', 'acc: 72/72')
(55, u'0.accordion', 'acc: 17/17')
(56, u'98.wild_cat', 'acc: 9/11')
(57, u'67.nautilus', 'acc: 16/17')
(58, u'40.flamingo', 'acc: 20/21')
(59, u'92.tick', 'acc: 12/15')
(60, u'47.headphone', 'acc: 12/13')
(61, u'24.crayfish', 'acc: 15/21')
(62, u'97.wheelchair', 'acc: 17/18')
(63, u'27.cup', 'acc: 15/18')
(64, u'25.crocodile', 'acc: 14/15')
(65, u'2.anchor', 'acc: 7/13')
(66, u'19.chair', 'acc: 17/19')
(67, u'39.ferry', 'acc: 21/21')
(68, u'60.lotus', 'acc: 16/20')
(69, u'13.butterfly', 'acc: 26/28')
(70, u'34.emu', 'acc: 14/16')
(71, u'64.metronome', 'acc: 10/10')
(72, u'82.scorpion', 'acc: 24/26')
(73, u'7.beaver', 'acc: 12/14')
(74, u'48.hedgehog', 'acc: 16/17')
(75, u'37.Faces', 'acc: 131/131')
(76, u'45.grand_piano', 'acc: 30/30')
(77, u'79.saxophone', 'acc: 11/12')
(78, u'26.crocodile_head', 'acc: 9/16')
(79, u'80.schooner', 'acc: 15/19')
(80, u'93.trilobite', 'acc: 26/26')
(81, u'28.dalmatian', 'acc: 21/21')
(82, u'10.brain', 'acc: 28/30')
(83, u'61.mandolin', 'acc: 10/13')
(84, u'11.brontosaurus', 'acc: 11/13')
(85, u'63.menorah', 'acc: 25/27')
(86, u'85.soccer_ball', 'acc: 20/20')
(87, u'51.inline_skate', 'acc: 9/10')
(88, u'71.panda', 'acc: 11/12')
(89, u'53.kangaroo', 'acc: 24/26')
(90, u'99.windsor_chair', 'acc: 16/17')
(91, u'42.garfield', 'acc: 11/11')
(92, u'29.dollar_bill', 'acc: 16/16')
(93, u'20.chandelier', 'acc: 30/33')
(94, u'96.water_lilly', 'acc: 6/12')
(95, u'41.flamingo_head', 'acc: 13/14')
(96, u'73.pizza', 'acc: 13/16')
(97, u'21.cougar_body', 'acc: 15/15')
(98, u'75.pyramid', 'acc: 16/18')
(99, u'69.okapi', 'acc: 12/12')
(100, u'15.cannon', 'acc: 11/13')
(101, u'32.electric_guitar', 'acc: 19/23')
----------------------------------------------------
('top-5 all acc:', '2759/2792', 0.9881805157593123)
(0, u'62.mayfly', 'acc: 12/12')
(1, u'66.Motorbikes', 'acc: 240/240')
(2, u'68.octopus', 'acc: 11/11')
(3, u'94.umbrella', 'acc: 23/23')
(4, u'90.strawberry', 'acc: 11/11')
(5, u'86.stapler', 'acc: 14/14')
(6, u'83.sea_horse', 'acc: 16/18')
(7, u'72.pigeon', 'acc: 14/14')
(8, u'89.stop_sign', 'acc: 20/20')
(9, u'4.BACKGROUND_Google', 'acc: 141/141')
(10, u'22.cougar_face', 'acc: 19/21')
(11, u'81.scissors', 'acc: 11/12')
(12, u'100.wrench', 'acc: 10/12')
(13, u'57.Leopards', 'acc: 60/60')
(14, u'46.hawksbill', 'acc: 30/30')
(15, u'30.dolphin', 'acc: 20/20')
(16, u'9.bonsai', 'acc: 39/39')
(17, u'35.euphonium', 'acc: 20/20')
(18, u'44.gramophone', 'acc: 16/16')
(19, u'74.platypus', 'acc: 9/11')
(20, u'14.camera', 'acc: 15/15')
(21, u'55.lamp', 'acc: 18/19')
(22, u'38.Faces_easy', 'acc: 131/131')
(23, u'54.ketch', 'acc: 34/35')
(24, u'33.elephant', 'acc: 20/20')
(25, u'3.ant', 'acc: 10/13')
(26, u'49.helicopter', 'acc: 27/27')
(27, u'36.ewer', 'acc: 26/26')
(28, u'78.rooster', 'acc: 15/15')
(29, u'70.pagoda', 'acc: 15/15')
(30, u'58.llama', 'acc: 24/24')
(31, u'5.barrel', 'acc: 15/15')
(32, u'101.yin_yang', 'acc: 18/18')
(33, u'18.cellphone', 'acc: 18/18')
(34, u'59.lobster', 'acc: 13/13')
(35, u'17.ceiling_fan', 'acc: 14/15')
(36, u'16.car_side', 'acc: 37/37')
(37, u'50.ibis', 'acc: 24/24')
(38, u'76.revolver', 'acc: 25/25')
(39, u'84.snoopy', 'acc: 10/11')
(40, u'87.starfish', 'acc: 26/26')
(41, u'12.buddha', 'acc: 25/26')
(42, u'52.joshua_tree', 'acc: 20/20')
(43, u'43.gerenuk', 'acc: 11/11')
(44, u'65.minaret', 'acc: 23/23')
(45, u'91.sunflower', 'acc: 26/26')
(46, u'56.laptop', 'acc: 25/25')
(47, u'77.rhino', 'acc: 18/18')
(48, u'1.airplanes', 'acc: 240/240')
(49, u'88.stegosaurus', 'acc: 18/18')
(50, u'23.crab', 'acc: 22/22')
(51, u'8.binocular', 'acc: 10/10')
(52, u'31.dragonfly', 'acc: 20/21')
(53, u'6.bass', 'acc: 16/17')
(54, u'95.watch', 'acc: 72/72')
(55, u'0.accordion', 'acc: 17/17')
(56, u'98.wild_cat', 'acc: 11/11')
(57, u'67.nautilus', 'acc: 17/17')
(58, u'40.flamingo', 'acc: 21/21')
(59, u'92.tick', 'acc: 13/15')
(60, u'47.headphone', 'acc: 12/13')
(61, u'24.crayfish', 'acc: 21/21')
(62, u'97.wheelchair', 'acc: 18/18')
(63, u'27.cup', 'acc: 16/18')
(64, u'25.crocodile', 'acc: 15/15')
(65, u'2.anchor', 'acc: 12/13')
(66, u'19.chair', 'acc: 19/19')
(67, u'39.ferry', 'acc: 21/21')
(68, u'60.lotus', 'acc: 19/20')
(69, u'13.butterfly', 'acc: 27/28')
(70, u'34.emu', 'acc: 16/16')
(71, u'64.metronome', 'acc: 10/10')
(72, u'82.scorpion', 'acc: 26/26')
(73, u'7.beaver', 'acc: 14/14')
(74, u'48.hedgehog', 'acc: 17/17')
(75, u'37.Faces', 'acc: 131/131')
(76, u'45.grand_piano', 'acc: 30/30')
(77, u'79.saxophone', 'acc: 12/12')
(78, u'26.crocodile_head', 'acc: 14/16')
(79, u'80.schooner', 'acc: 19/19')
(80, u'93.trilobite', 'acc: 26/26')
(81, u'28.dalmatian', 'acc: 21/21')
(82, u'10.brain', 'acc: 30/30')
(83, u'61.mandolin', 'acc: 13/13')
(84, u'11.brontosaurus', 'acc: 13/13')
(85, u'63.menorah', 'acc: 25/27')
(86, u'85.soccer_ball', 'acc: 20/20')
(87, u'51.inline_skate', 'acc: 10/10')
(88, u'71.panda', 'acc: 12/12')
(89, u'53.kangaroo', 'acc: 26/26')
(90, u'99.windsor_chair', 'acc: 17/17')
(91, u'42.garfield', 'acc: 11/11')
(92, u'29.dollar_bill', 'acc: 16/16')
(93, u'20.chandelier', 'acc: 32/33')
(94, u'96.water_lilly', 'acc: 12/12')
(95, u'41.flamingo_head', 'acc: 14/14')
(96, u'73.pizza', 'acc: 16/16')
(97, u'21.cougar_body', 'acc: 15/15')
(98, u'75.pyramid', 'acc: 18/18')
(99, u'69.okapi', 'acc: 12/12')
(100, u'15.cannon', 'acc: 12/13')
(101, u'32.electric_guitar', 'acc: 23/23')


[Repost] Image classification with Keras

Reposted from https://blog.csdn.net/u010632850/article/details/77102821

The Keras deep learning framework is a good way to get a sense of what deep learning can do. Below is an introduction to basic image classification with Keras; comments and discussion are welcome.

Reference: https://morvanzhou.github.io/tutorials/machine-learning/keras/2-3-CNN/

Versions I used: Python 2.7, numpy 1.13.1, Theano 0.9.0, Keras 2.0.6, h5py 2.5.0, opencv 2.4.13, on Windows 7.

To do image classification you first need a dataset, and the downloaded images have to be converted into numpy arrays that Keras can use. You need X_train, X_test, y_train and y_test: X_train and X_test are 4-D arrays whose first dimension is the number of images and whose remaining three dimensions hold the image data, while y_train and y_test are 1-D lists giving the class of each corresponding image (for instance, 6353 32x32 grayscale training images give X_train.shape == (6353, 32, 32, 1) and y_train.shape == (6353,)).

Downloadable image datasets usually come in one of two layouts. In the first, the dataset is a set of folders, each named after a class and containing that class's images; here you split the training and test sets yourself. In the second, the dataset has a train folder and a test folder, each containing per-class folders of images, so the train/test split is fixed. Sometimes a dataset ships with files mapping image names to annotations, but for plain image classification the numpy arrays can be extracted without them.

Here a simple network classifies the Caltech101 dataset (for now the background class is not removed); with small changes the same code also classifies cifar10. For higher accuracy you would need a stronger network.

The extraction code is below (get_data and get_2data handle the two dataset layouts described above):

def eachFile(filepath):                 # collect the file names in a directory into a list
    pathDir =  os.listdir(filepath)  
    out = []  
    for allDir in pathDir:  
        child = allDir.decode('gbk')    # .decode('gbk') avoids garbled Chinese file names
        out.append(child)  
    return out  
  
def get_data(data_name,train_percentage=0.7,resize=True,data_format=None):   # load image data from the folders
    file_name = os.path.join(pic_dir_out,data_name+str(Width)+"X"+str(Height)+".pkl")
    if os.path.exists(file_name):           # check whether the data was saved to a file earlier
        (X_train, y_train), (X_test, y_test) = cPickle.load(open(file_name,"rb"))  
        return (X_train, y_train), (X_test, y_test)    
    data_format = conv_utils.normalize_data_format(data_format)  
    pic_dir_set = eachFile(pic_dir_data)    
    X_train = []  
    y_train = []  
    X_test = []  
    y_test = []  
    label = 0  
    for pic_dir in pic_dir_set:  
        print pic_dir_data+pic_dir  
        if not os.path.isdir(os.path.join(pic_dir_data,pic_dir)):  
            continue      
        pic_set = eachFile(os.path.join(pic_dir_data,pic_dir))  
        pic_index = 0  
        train_count = int(len(pic_set)*train_percentage)  
        for pic_name in pic_set:  
            if not os.path.isfile(os.path.join(pic_dir_data,pic_dir,pic_name)):  
                continue  
            img = cv2.imread(os.path.join(pic_dir_data,pic_dir,pic_name))  
            if img is None:  
                continue  
            img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)   
            if (resize):  
                img = cv2.resize(img,(Width,Height))  
            if (data_format == 'channels_last'):  
                img = img.reshape(-1,Width,Height,1)  
            elif (data_format == 'channels_first'):  
                img = img.reshape(-1,1,Width,Height)  
            if (pic_index < train_count):  
                X_train.append(img)  
                y_train.append(label)            
            else:  
                X_test.append(img)  
                y_test.append(label)  
            pic_index += 1  
        if len(pic_set) != 0:
            label += 1  
    X_train = np.concatenate(X_train,axis=0)          
    X_test = np.concatenate(X_test,axis=0)      
    y_train = np.array(y_train)  
    y_test = np.array(y_test)  
    cPickle.dump([(X_train, y_train), (X_test, y_test)],open(file_name,"wb"))   
    return (X_train, y_train), (X_test, y_test)     
  
def get_2data(data_name,resize=True,data_format=None):   # used when the data is already split into train and test folders
    file_name = os.path.join(pic_dir_out,data_name+str(Width)+"X"+str(Height)+".pkl")
    if os.path.exists(file_name):           # check whether the data was saved to a file earlier
        (X_train, y_train), (X_test, y_test) = cPickle.load(open(file_name,"rb"))  
        return (X_train, y_train), (X_test, y_test)     
    data_format = conv_utils.normalize_data_format(data_format)  
    all_dir_set = eachFile(pic_dir_data)  
    X_train = []  
    y_train = []  
    X_test = []  
    y_test = []  
  
    for all_dir in all_dir_set:  
        if not os.path.isdir(os.path.join(pic_dir_data,all_dir)):  
            continue  
        label = 0  
        pic_dir_set = eachFile(os.path.join(pic_dir_data,all_dir))  
        for pic_dir in pic_dir_set:  
            print pic_dir_data+pic_dir  
            if not os.path.isdir(os.path.join(pic_dir_data,all_dir,pic_dir)):  
                continue      
            pic_set = eachFile(os.path.join(pic_dir_data,all_dir,pic_dir))  
            for pic_name in pic_set:  
                if not os.path.isfile(os.path.join(pic_dir_data,all_dir,pic_dir,pic_name)):  
                    continue  
                img = cv2.imread(os.path.join(pic_dir_data,all_dir,pic_dir,pic_name))  
                if img is None:  
                    continue  
                img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)   
                if resize:  
                    img = cv2.resize(img,(Width,Height))  
                if (data_format == 'channels_last'):  
                    img = img.reshape(-1,Width,Height,1)  
                elif (data_format == 'channels_first'):  
                    img = img.reshape(-1,1,Width,Height)  
                if ('train' in all_dir):  
                    X_train.append(img)  
                    y_train.append(label)            
                elif ('test' in all_dir):  
                    X_test.append(img)  
                    y_test.append(label)  
            if len(pic_set) != 0:
                label += 1  
    X_train = np.concatenate(X_train,axis=0)          
    X_test = np.concatenate(X_test,axis=0)      
    y_train = np.array(y_train)  
    y_test = np.array(y_test)  
    cPickle.dump([(X_train, y_train), (X_test, y_test)],open(file_name,"wb"))   
    return (X_train, y_train), (X_test, y_test)

Some of the parameter values:

Width = 32  
Height = 32  
num_classes = 102          
pic_dir_out = 'E:/pic_cnn/pic_out/'    
pic_dir_data = 'E:/pic_cnn/pic_dataset/Caltech101/'

Walking all these folders to rebuild the numpy arrays on every run is fairly slow; by saving the extracted arrays to a file, later runs can load them and start much faster.

Next the data needs preprocessing. First scale the pixel values into the range 0 to 1; skipping this hurts accuracy. np_utils.to_categorical works as follows: with 10 image classes, y_train and y_test are lists of digits 0 through 9, and each label has to be converted to a one-hot vector. For example the digit 5, meaning the 6th class, becomes [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], with a 1 in the 6th position. The reason is that the later prediction for each image is also a 10-element list, e.g. [0, 0, 0, 0.1, 0, 0.8, 0, 0.1, 0, 0]; if its largest value sits in the same position as the true label's 1, the prediction is correct.
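
A quick check of that conversion (a minimal sketch using the same np_utils helper):

from keras.utils import np_utils

print(np_utils.to_categorical([5], 10))   # [[ 0.  0.  0.  0.  0.  1.  0.  0.  0.  0.]]

The preprocessing itself: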

X_train = X_train/255.              # preprocess: scale pixels to [0, 1]
X_test = X_test/255.  
print X_train.shape  
print X_test.shape  
y_train = np_utils.to_categorical(y_train, num_classes)  
y_test = np_utils.to_categorical(y_test, num_classes)

After that you can build some simple CNN structures with Keras.

The code for the CNN structure:

model = Sequential()                # build the CNN
model.add(Convolution2D(  
    input_shape=(Width, Height, 1),  
    #input_shape=(1, Width, Height),  
    filters=8,  
    kernel_size=3,  
    strides=1,  
    padding='same',       
    data_format='channels_last',  
))  
model.add(Activation('relu'))  
model.add(MaxPooling2D(  
    pool_size=2,  
    strides=2,  
    data_format='channels_last',  
))  
model.add(Convolution2D(16, 3, strides=1, padding='same', data_format='channels_last'))  
model.add(Activation('relu'))  
model.add(MaxPooling2D(2, 2, data_format='channels_last'))  
model.add(Dropout(0.5))  
model.add(Flatten())  
model.add(Dense(256, activation='relu'))  
model.add(Dropout(0.5))  
  
model.add(Dense(num_classes, activation='softmax'))  
  
model.compile(optimizer=Adam(),  
              loss='categorical_crossentropy',  
              metrics=['accuracy'])

A Convolution2D layer (convolution layer) sweeps convolution kernels over the input image, producing new feature maps. The parameter filters is the number of kernels, i.e. how many new maps are produced; kernel_size is the kernel's size; strides is how many pixels the kernel moves per step; padding controls whether a border of zeros is added around the image so the output map keeps the input's size ('same' adds the border, the default 'valid' does not); data_format says whether the channel dimension comes before or after the two spatial dimensions. The first layer of the network must also declare input_shape, the size of the input images. (A shape walk-through follows the Dense layer notes below.)

An Activation layer (activation function) applies a function to the values passed in from the previous layer; the function needs a derivative, and common choices are relu and softmax. relu outputs 0 when x is less than 0 and x itself when x is greater than 0.
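
In numpy terms (a minimal sketch):

import numpy as np

x = np.array([-2., -1., 0., 1., 2.])
print(np.maximum(x, 0.))   # [ 0.  0.  0.  1.  2.]: negative inputs clamp to 0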

A MaxPooling2D layer (pooling layer) scans the feature maps from the convolution with a small window, keeping only the maximum value inside each window, which produces new, smaller maps. pool_size is the window size and strides is how far the window moves each step; with both set to 2, the height and width of the maps are halved.

A Flatten layer unrolls the feature maps into a 1-D list.

A Dense layer (fully connected layer) changes the number of parameters; its units argument is the number of neurons in the layer, which sets the dimension of the output.
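
The promised shape walk-through (a minimal sketch with hypothetical 32x32 grayscale inputs, matching the model above):

from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D, Flatten, Dense

m = Sequential()
m.add(Convolution2D(8, 3, strides=1, padding='same', input_shape=(32, 32, 1), data_format='channels_last'))
print(m.output_shape)   # (None, 32, 32, 8): 'same' padding keeps 32x32, 8 feature maps
m.add(MaxPooling2D(pool_size=2, strides=2, data_format='channels_last'))
print(m.output_shape)   # (None, 16, 16, 8): pooling halves height and width
m.add(Flatten())
print(m.output_shape)   # (None, 2048): 16*16*8 values unrolled into one vector
m.add(Dense(256))
print(m.output_shape)   # (None, 256): one output per neuron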

A Dropout layer randomly severs a given fraction of the neuron connections between its two neighboring layers while the parameters are being trained, which reduces overfitting.

The softmax activation converts the outputs into floats between 0 and 1 that sum to 1 within each list, so they can be read as the probability of each class.
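
Numerically (a minimal sketch):

import numpy as np

z = np.array([1., 2., 3.])
p = np.exp(z) / np.exp(z).sum()
print(p, p.sum())   # approx. [ 0.09  0.245  0.665], sums to 1.0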

Once the structure is designed, the compile function defines how the parameters will be optimized.

optimizer selects which optimizer performs the gradient-descent updates, loss selects how the loss value is computed, and metrics selects how performance is measured when evaluating on test data.

Then the model can be trained on the training data. The training process:

print('\nTraining ------------')    # load weights from a file, train, then save to a new file
cm = 0  
cm_str = '' if cm==0 else str(cm)  
cm2_str = '' if (cm+1)==0 else str(cm+1)    
if cm >= 1:  
    model.load_weights(os.path.join(pic_dir_out,'cnn_model_Caltech101_'+cm_str+'.h5'))  
    #model.load_weights(os.path.join(pic_dir_out,'cnn_model_Cifar10_'+cm_str+'.h5'))      
model.fit(X_train, y_train, epochs=10, batch_size=128,)   # the actual training run
model.save_weights(os.path.join(pic_dir_out,'cnn_model_Caltech101_'+cm2_str+'.h5'))

The epochs parameter is the total number of training passes, and batch_size is how many samples each gradient update uses. The small extra bookkeeping here saves the network's parameters to a file after each training run; incrementing cm and re-running then loads the previously trained parameters before training continues. While running this code I found that its memory consumption keeps growing, so epochs cannot be set very large, and I had to run the script several times to reach convergence; I don't yet know a way to reduce the memory usage.

Finally, run predictions on the test data and evaluate the results: the model's final loss and accuracy, the top-N accuracy, and the accuracy of each class.

print('\nTesting ------------')     # evaluate the test set, also reporting the compiled metrics
loss, accuracy = model.evaluate(X_test, y_test)  
print('\n')  
print('test loss: ', loss)  
print('test accuracy: ', accuracy)  
  
class_name_list = get_name_list(pic_dir_data)   # list of the class names
pred = model.predict(X_test, batch_size=128)    # predictions used for per-class top-N accuracy
N = 5  
pred_list = []  
for row in pred:  
    pred_list.append(row.argsort()[-N:][::-1])  # indices of the N largest values
pred_array = np.array(pred_list)  
test_arg = np.argmax(y_test,axis=1)  
class_count = [0 for _ in xrange(num_classes)]  
class_acc = [0 for _ in xrange(num_classes)]  
for i in xrange(len(test_arg)):  
    class_count[test_arg[i]] += 1  
    if test_arg[i] in pred_array[i]:  
        class_acc[test_arg[i]] += 1  
print('top-'+str(N)+' all acc:',str(sum(class_acc))+'/'+str(len(test_arg)),sum(class_acc)/float(len(test_arg)))  
for i in xrange(num_classes):  
    print (i, class_name_list[i], 'acc: '+str(class_acc[i])+'/'+str(class_count[i]))

The complete code:

import cv2  
import numpy as np  
  
from keras.utils import np_utils, conv_utils  
from keras.models import Sequential  
from keras.layers import Convolution2D, MaxPooling2D, Flatten, Dropout, Dense, Activation  
from keras.optimizers import Adam  
  
import os  
import cPickle  
  
def get_name_list(filepath):                # get the name of each class
    pathDir =  os.listdir(filepath)  
    out = []  
    for allDir in pathDir:  
        if os.path.isdir(os.path.join(filepath,allDir)):  
            child = allDir.decode('gbk')    # .decode('gbk') avoids garbled Chinese file names
            out.append(child)  
    return out  
      
def eachFile(filepath):                 # collect the file names in a directory into a list
    pathDir =  os.listdir(filepath)  
    out = []  
    for allDir in pathDir:  
        child = allDir.decode('gbk')    # .decode('gbk') avoids garbled Chinese file names
        out.append(child)  
    return out  
  
def get_data(data_name,train_percentage=0.7,resize=True,data_format=None):   # load image data from the folders
    file_name = os.path.join(pic_dir_out,data_name+str(Width)+"X"+str(Height)+".pkl")
    if os.path.exists(file_name):           # check whether the data was saved to a file earlier
        (X_train, y_train), (X_test, y_test) = cPickle.load(open(file_name,"rb"))  
        return (X_train, y_train), (X_test, y_test)    
    data_format = conv_utils.normalize_data_format(data_format)  
    pic_dir_set = eachFile(pic_dir_data)    
    X_train = []  
    y_train = []  
    X_test = []  
    y_test = []  
    label = 0  
    for pic_dir in pic_dir_set:  
        print pic_dir_data+pic_dir  
        if not os.path.isdir(os.path.join(pic_dir_data,pic_dir)):  
            continue      
        pic_set = eachFile(os.path.join(pic_dir_data,pic_dir))  
        pic_index = 0  
        train_count = int(len(pic_set)*train_percentage)  
        for pic_name in pic_set:  
            if not os.path.isfile(os.path.join(pic_dir_data,pic_dir,pic_name)):  
                continue  
            img = cv2.imread(os.path.join(pic_dir_data,pic_dir,pic_name))  
            if img is None:  
                continue  
            img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)   
            if (resize):  
                img = cv2.resize(img,(Width,Height))  
            if (data_format == 'channels_last'):  
                img = img.reshape(-1,Width,Height,1)  
            elif (data_format == 'channels_first'):  
                img = img.reshape(-1,1,Width,Height)  
            if (pic_index < train_count):  
                X_train.append(img)  
                y_train.append(label)            
            else:  
                X_test.append(img)  
                y_test.append(label)  
            pic_index += 1  
        if len(pic_set) != 0:
            label += 1  
    X_train = np.concatenate(X_train,axis=0)          
    X_test = np.concatenate(X_test,axis=0)      
    y_train = np.array(y_train)  
    y_test = np.array(y_test)  
    cPickle.dump([(X_train, y_train), (X_test, y_test)],open(file_name,"wb"))   
    return (X_train, y_train), (X_test, y_test)     
  
def get_2data(data_name,resize=True,data_format=None):   # used when the data is already split into train and test folders
    file_name = os.path.join(pic_dir_out,data_name+str(Width)+"X"+str(Height)+".pkl")
    if os.path.exists(file_name):           # check whether the data was saved to a file earlier
        (X_train, y_train), (X_test, y_test) = cPickle.load(open(file_name,"rb"))  
        return (X_train, y_train), (X_test, y_test)     
    data_format = conv_utils.normalize_data_format(data_format)  
    all_dir_set = eachFile(pic_dir_data)  
    X_train = []  
    y_train = []  
    X_test = []  
    y_test = []  
  
    for all_dir in all_dir_set:  
        if not os.path.isdir(os.path.join(pic_dir_data,all_dir)):  
            continue  
        label = 0  
        pic_dir_set = eachFile(os.path.join(pic_dir_data,all_dir))  
        for pic_dir in pic_dir_set:  
            print pic_dir_data+pic_dir  
            if not os.path.isdir(os.path.join(pic_dir_data,all_dir,pic_dir)):  
                continue      
            pic_set = eachFile(os.path.join(pic_dir_data,all_dir,pic_dir))  
            for pic_name in pic_set:  
                if not os.path.isfile(os.path.join(pic_dir_data,all_dir,pic_dir,pic_name)):  
                    continue  
                img = cv2.imread(os.path.join(pic_dir_data,all_dir,pic_dir,pic_name))  
                if img is None:  
                    continue  
                img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)   
                if resize:  
                    img = cv2.resize(img,(Width,Height))  
                if (data_format == 'channels_last'):  
                    img = img.reshape(-1,Width,Height,1)  
                elif (data_format == 'channels_first'):  
                    img = img.reshape(-1,1,Width,Height)  
                if ('train' in all_dir):  
                    X_train.append(img)  
                    y_train.append(label)            
                elif ('test' in all_dir):  
                    X_test.append(img)  
                    y_test.append(label)  
            if len(pic_set) != 0:
                label += 1  
    X_train = np.concatenate(X_train,axis=0)          
    X_test = np.concatenate(X_test,axis=0)      
    y_train = np.array(y_train)  
    y_test = np.array(y_test)  
    cPickle.dump([(X_train, y_train), (X_test, y_test)],open(file_name,"wb"))   
    return (X_train, y_train), (X_test, y_test)     
  
def main():  
    global Width, Height, pic_dir_out, pic_dir_data  
    Width = 32  
    Height = 32  
    num_classes = 102                   # 102 for Caltech101, 10 for cifar10
    pic_dir_out = 'E:/pic_cnn/pic_out/'    
    pic_dir_data = 'E:/pic_cnn/pic_dataset/Caltech101/'    
    (X_train, y_train), (X_test, y_test) = get_data("Caltech101_gray_data_",0.7,data_format='channels_last')  
    #pic_dir_data = 'E:/pic_cnn/pic_dataset/cifar10/'  
    #(X_train, y_train), (X_test, y_test) = get_2data("Cifar10_gray_data_",resize=False,data_format='channels_last')  
      
    X_train = X_train/255.              # preprocess: scale pixels to [0, 1]
    X_test = X_test/255.  
    print X_train.shape  
    print X_test.shape  
    y_train = np_utils.to_categorical(y_train, num_classes)  
    y_test = np_utils.to_categorical(y_test, num_classes)  
      
    model = Sequential()                # build the CNN
    model.add(Convolution2D(  
        input_shape=(Width, Height, 1),  
        #input_shape=(1, Width, Height),  
        filters=8,  
        kernel_size=3,  
        strides=1,  
        padding='same',       
        data_format='channels_last',  
    ))  
    model.add(Activation('relu'))  
    model.add(MaxPooling2D(  
        pool_size=2,  
        strides=2,  
        data_format='channels_last',  
    ))  
    model.add(Convolution2D(16, 3, strides=1, padding='same', data_format='channels_last'))  
    model.add(Activation('relu'))  
    model.add(MaxPooling2D(2, 2, data_format='channels_last'))  
    model.add(Dropout(0.5))  
    model.add(Flatten())  
    model.add(Dense(256, activation='relu'))  
    model.add(Dropout(0.5))  
      
    model.add(Dense(num_classes, activation='softmax'))  
      
    model.compile(optimizer=Adam(),  
                  loss='categorical_crossentropy',  
                  metrics=['accuracy'])  
          
    print('\nTraining ------------')    # load weights from a file, train, then save to a new file
    cm = 0                              # bump this counter to continue training across runs
    cm_str = '' if cm==0 else str(cm)  
    cm2_str = '' if (cm+1)==0 else str(cm+1)    
    if cm >= 1:  
        model.load_weights(os.path.join(pic_dir_out,'cnn_model_Caltech101_'+cm_str+'.h5'))  
        #model.load_weights(os.path.join(pic_dir_out,'cnn_model_Cifar10_'+cm_str+'.h5'))      
    model.fit(X_train, y_train, epochs=10, batch_size=128,)   # the actual training run
    model.save_weights(os.path.join(pic_dir_out,'cnn_model_Caltech101_'+cm2_str+'.h5'))  
       
    print('\nTesting ------------')     # evaluate the test set, also reporting the compiled metrics
    loss, accuracy = model.evaluate(X_test, y_test)  
    print('\n')  
    print('test loss: ', loss)  
    print('test accuracy: ', accuracy)  
      
    class_name_list = get_name_list(pic_dir_data)    # for per-class top-N accuracy
    #class_name_list = get_name_list(os.path.join(pic_dir_data,'train'))  
    pred = model.predict(X_test, batch_size=128)  
    N = 5  
    pred_list = []  
    for row in pred:  
        pred_list.append(row.argsort()[-N:][::-1])  # indices of the N largest values
    pred_array = np.array(pred_list)  
    test_arg = np.argmax(y_test,axis=1)  
    class_count = [0 for _ in xrange(num_classes)]  
    class_acc = [0 for _ in xrange(num_classes)]  
    for i in xrange(len(test_arg)):  
        class_count[test_arg[i]] += 1  
        if test_arg[i] in pred_array[i]:  
            class_acc[test_arg[i]] += 1  
    print('top-'+str(N)+' all acc:',str(sum(class_acc))+'/'+str(len(test_arg)),sum(class_acc)/float(len(test_arg)))  
    for i in xrange(num_classes):  
        print (i, class_name_list[i], 'acc: '+str(class_acc[i])+'/'+str(class_count[i]))  
      
if __name__ == '__main__':  
    main()

The run output:

(6353, 32, 32, 1)  
(2792, 32, 32, 1)  
  
Training ------------  
Epoch 1/10  
6353/6353 [==============================] - 8s - loss: 4.2459 - acc: 0.1152       
Epoch 2/10  
6353/6353 [==============================] - 8s - loss: 3.8954 - acc: 0.1942       
Epoch 3/10  
6353/6353 [==============================] - 8s - loss: 3.6121 - acc: 0.2500       
Epoch 4/10  
6353/6353 [==============================] - 8s - loss: 3.3974 - acc: 0.2811       
Epoch 5/10  
6353/6353 [==============================] - 8s - loss: 3.2033 - acc: 0.3101       
Epoch 6/10  
6353/6353 [==============================] - 9s - loss: 3.0413 - acc: 0.3343       
Epoch 7/10  
6353/6353 [==============================] - 9s - loss: 2.9090 - acc: 0.3559       
Epoch 8/10  
6353/6353 [==============================] - 9s - loss: 2.7931 - acc: 0.3760       
Epoch 9/10  
6353/6353 [==============================] - 9s - loss: 2.7039 - acc: 0.3897       
Epoch 10/10  
6353/6353 [==============================] - 9s - loss: 2.6152 - acc: 0.4003       
  
Testing ------------  
2720/2792 [============================>.] - ETA: 0s  
  
('test loss: ', 2.5188725370177227)  
('test accuracy: ', 0.42836676217765041)  
('top-5 all acc:', '1754/2792', 0.6282234957020058)  
(0, u'0.accordion', 'acc: 15/17')  
(1, u'1.airplanes', 'acc: 238/240')  
(2, u'10.brain', 'acc: 7/30')  
(3, u'100.wrench', 'acc: 5/12')  
(4, u'101.yin_yang', 'acc: 15/18')  
(5, u'11.brontosaurus', 'acc: 7/13')  
(6, u'12.buddha', 'acc: 9/26')  
(7, u'13.butterfly', 'acc: 6/28')  
(8, u'14.camera', 'acc: 5/15')  
(9, u'15.cannon', 'acc: 0/13')  
(10, u'16.car_side', 'acc: 37/37')  
(11, u'17.ceiling_fan', 'acc: 1/15')  
(12, u'18.cellphone', 'acc: 16/18')  
(13, u'19.chair', 'acc: 4/19')  
(14, u'2.anchor', 'acc: 2/13')  
(15, u'20.chandelier', 'acc: 27/33')  
(16, u'21.cougar_body', 'acc: 0/15')  
(17, u'22.cougar_face', 'acc: 8/21')  
(18, u'23.crab', 'acc: 4/22')  
(19, u'24.crayfish', 'acc: 3/21')  
(20, u'25.crocodile', 'acc: 0/15')  
(21, u'26.crocodile_head', 'acc: 1/16')  
(22, u'27.cup', 'acc: 3/18')  
(23, u'28.dalmatian', 'acc: 14/21')  
(24, u'29.dollar_bill', 'acc: 14/16')  
(25, u'3.ant', 'acc: 0/13')  
(26, u'30.dolphin', 'acc: 5/20')  
(27, u'31.dragonfly', 'acc: 12/21')  
(28, u'32.electric_guitar', 'acc: 15/23')  
(29, u'33.elephant', 'acc: 14/20')  
(30, u'34.emu', 'acc: 0/16')  
(31, u'35.euphonium', 'acc: 8/20')  
(32, u'36.ewer', 'acc: 7/26')  
(33, u'37.Faces', 'acc: 127/131')  
(34, u'38.Faces_easy', 'acc: 127/131')  
(35, u'39.ferry', 'acc: 10/21')  
(36, u'4.BACKGROUND_Google', 'acc: 133/141')  
(37, u'40.flamingo', 'acc: 9/21')  
(38, u'41.flamingo_head', 'acc: 0/14')  
(39, u'42.garfield', 'acc: 6/11')  
(40, u'43.gerenuk', 'acc: 0/11')  
(41, u'44.gramophone', 'acc: 4/16')  
(42, u'45.grand_piano', 'acc: 24/30')  
(43, u'46.hawksbill', 'acc: 17/30')  
(44, u'47.headphone', 'acc: 3/13')  
(45, u'48.hedgehog', 'acc: 4/17')  
(46, u'49.helicopter', 'acc: 17/27')  
(47, u'5.barrel', 'acc: 4/15')  
(48, u'50.ibis', 'acc: 10/24')  
(49, u'51.inline_skate', 'acc: 5/10')  
(50, u'52.joshua_tree', 'acc: 11/20')  
(51, u'53.kangaroo', 'acc: 15/26')  
(52, u'54.ketch', 'acc: 26/35')  
(53, u'55.lamp', 'acc: 8/19')  
(54, u'56.laptop', 'acc: 12/25')  
(55, u'57.Leopards', 'acc: 58/60')  
(56, u'58.llama', 'acc: 9/24')  
(57, u'59.lobster', 'acc: 0/13')  
(58, u'6.bass', 'acc: 1/17')  
(59, u'60.lotus', 'acc: 12/20')  
(60, u'61.mandolin', 'acc: 2/13')  
(61, u'62.mayfly', 'acc: 1/12')  
(62, u'63.menorah', 'acc: 19/27')  
(63, u'64.metronome', 'acc: 6/10')  
(64, u'65.minaret', 'acc: 21/23')  
(65, u'66.Motorbikes', 'acc: 237/240')  
(66, u'67.nautilus', 'acc: 3/17')  
(67, u'68.octopus', 'acc: 0/11')  
(68, u'69.okapi', 'acc: 6/12')  
(69, u'7.beaver', 'acc: 3/14')  
(70, u'70.pagoda', 'acc: 15/15')  
(71, u'71.panda', 'acc: 2/12')  
(72, u'72.pigeon', 'acc: 4/14')  
(73, u'73.pizza', 'acc: 4/16')  
(74, u'74.platypus', 'acc: 1/11')  
(75, u'75.pyramid', 'acc: 8/18')  
(76, u'76.revolver', 'acc: 19/25')  
(77, u'77.rhino', 'acc: 3/18')  
(78, u'78.rooster', 'acc: 11/15')  
(79, u'79.saxophone', 'acc: 0/12')  
(80, u'8.binocular', 'acc: 6/10')  
(81, u'80.schooner', 'acc: 14/19')  
(82, u'81.scissors', 'acc: 4/12')  
(83, u'82.scorpion', 'acc: 2/26')  
(84, u'83.sea_horse', 'acc: 1/18')  
(85, u'84.snoopy', 'acc: 3/11')  
(86, u'85.soccer_ball', 'acc: 10/20')  
(87, u'86.stapler', 'acc: 6/14')  
(88, u'87.starfish', 'acc: 9/26')  
(89, u'88.stegosaurus', 'acc: 4/18')  
(90, u'89.stop_sign', 'acc: 9/20')  
(91, u'9.bonsai', 'acc: 26/39')  
(92, u'90.strawberry', 'acc: 3/11')  
(93, u'91.sunflower', 'acc: 8/26')  
(94, u'92.tick', 'acc: 9/15')  
(95, u'93.trilobite', 'acc: 26/26')  
(96, u'94.umbrella', 'acc: 13/23')  
(97, u'95.watch', 'acc: 62/72')  
(98, u'96.water_lilly', 'acc: 1/12')  
(99, u'97.wheelchair', 'acc: 11/18')  
(100, u'98.wild_cat', 'acc: 0/11')  
(101, u'99.windsor_chair', 'acc: 8/17')