tensorflow搭建cnn人脸识别训练+识别代码(python)


# Recognition script (reconstructed: the pasted listing had its original
# line numbers fused into the code and was not valid Python).
# Restores the trained CNN checkpoint and predicts the student ID for faces
# found either in a still image ("G") or in webcam frames ("V").
# coding:utf-8
from skimage import io, transform
import glob
import os
import tensorflow as tf
import numpy as np
import time
import cv2
import dlib
import sys

detector = dlib.get_frontal_face_detector()  # dlib frontal-face detector

# Class index -> student ID. Tuple order must match the label order used when
# the checkpoint was trained (folder enumeration order in read_img).
ID = (1511277, 1511278, 1511279, 1511282, 1511283, 1511286, 1511287, 1511289,
      1511290, 1511291, 1511292, 1511295, 1511296, 1511298, 1511300, 1511301,
      1511302, 1511303, 1511304, 1511305, 1511306, 1511307, 1511308, 1511310,
      1511311, 1511312, 1511313, 1511315, 1511317, 1511318, 1511319, 1511323,
      1511325, 1511328, 1511329, 1511330, 1511332, 1511333, 1511334, 1511337,
      1511339, 1511340, 1511341, 1511342, 1511343, 1511344, 1511345, 1511347,
      1511349, 1511350, 1511351, 1511352, 1511353, 1511355, 1511357, 1511358,
      1511360, 1511361, 1511363, 1511365)

# Every image fed to the network is resized to w x h with c channels.
w = 128
h = 128
c = 3

x = tf.placeholder(tf.float32, shape=[None, w, h, c], name='x')
y_ = tf.placeholder(tf.int32, shape=[None, ], name='y_')


def CNNlayer():
    """Build the 4-conv-block CNN over placeholder ``x``; return (N, 60) logits.

    Must define exactly the same graph as the training script so the saved
    checkpoint variables can be restored.
    """
    # conv block 1: 128 -> 64
    conv1 = tf.layers.conv2d(
        inputs=x,
        filters=32,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)

    # conv block 2: 64 -> 32
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)

    # conv block 3: 32 -> 16
    conv3 = tf.layers.conv2d(
        inputs=pool2,
        filters=128,
        kernel_size=[3, 3],
        padding="same",
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
    pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], strides=2)

    # conv block 4: 16 -> 8
    conv4 = tf.layers.conv2d(
        inputs=pool3,
        filters=128,
        kernel_size=[3, 3],
        padding="same",
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
    pool4 = tf.layers.max_pooling2d(inputs=conv4, pool_size=[2, 2], strides=2)

    # 8*8*128 matches four 2x poolings of a 128x128 input.
    re1 = tf.reshape(pool4, [-1, 8 * 8 * 128])

    # fully connected head
    dense1 = tf.layers.dense(inputs=re1,
                             units=1024,
                             activation=tf.nn.relu,
                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                             kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
    dense2 = tf.layers.dense(inputs=dense1,
                             units=512,
                             activation=tf.nn.relu,
                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                             kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
    logits = tf.layers.dense(inputs=dense2,
                             units=60,
                             activation=None,
                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                             kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
    return logits
# --------------------------- end of network ---------------------------


logits = CNNlayer()
predict = tf.argmax(logits, 1)

saver = tf.train.Saver()
sess = tf.Session()
saver.restore(sess, 'ckpt1/faces.ckpt-9')


user = input("图片(G)还是摄像头(V):")
if user == "G":
    path = input("图片路径名是:")
    img = cv2.imread(path)
    dets = detector(img, 1)
    print("Number of faces detected: {}".format(len(dets)))
    for index, face in enumerate(dets):
        print('face {}; left {}; top {}; right {}; bottom {}'.format(index, face.left(), face.top(), face.right(), face.bottom()))
        left = face.left()
        top = face.top()
        right = face.right()
        bottom = face.bottom()
        cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0), 3)
        io.imsave('temp.png', img)
        img1 = io.imread('temp.png')
        img1 = transform.resize(img1, (w, h, c))
        cv2.imshow('image', img1)

        # classify the cropped face region
        img1 = img[top:bottom, left:right]
        img1 = transform.resize(img1, (w, h, c))
        res = sess.run(predict, feed_dict={x: [img1]})
        print(ID[res[0]])
    if len(dets) == 0:
        # no face detected: fall back to classifying the whole picture
        img = transform.resize(img, (w, h, c))
        res = sess.run(predict, feed_dict={x: [img]})
        print(ID[res[0]])
    cv2.waitKey(0)
    cv2.destroyAllWindows()
else:
    # webcam loop: press 's' to grab and classify a frame, 'q' to quit
    cap = cv2.VideoCapture(0)

    while True:
        ret, frame = cap.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        cv2.imshow('frame', frame)

        # grab a frame
        if cv2.waitKey(1) & 0xFF == ord('s'):
            cv2.imwrite('image/now.png', frame)

            img = cv2.imread("image/now.png")
            dets = detector(img, 1)
            print("Number of faces detected: {}".format(len(dets)))
            for index, face in enumerate(dets):
                print('face {}; left {}; top {}; right {}; bottom {}'.format(
                    index, face.left(), face.top(), face.right(), face.bottom()))
                left = face.left()
                top = face.top()
                right = face.right()
                bottom = face.bottom()
                img = img[top:bottom, left:right]

            img = transform.resize(img, (w, h, c))
            res = sess.run(predict, feed_dict={x: [img]})
            print(ID[res[0]])

        # quit
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()

下面是训练代码(training code — builds the network, trains it, and saves checkpoints):

# -*- coding: utf-8 -*-
 
from skimage import io,transform
import glob
import os
import tensorflow as tf
import numpy as np
import time
 
# Root directory containing one sub-folder of face images per person.
path='D:/code/python/Anaconda3/envs/faces'
 
# All images are resized to w x h with c color channels (128x128 RGB).
# (The original comment said 100x100, which did not match the code.)
w=128
h=128
c=3
 
 
# read images
def read_img(path):
    """Load every PNG under each class sub-folder of *path*.

    Each immediate sub-directory of *path* is one class; its index in the
    sorted directory listing becomes the integer label. Images are resized
    to (w, h, c).

    Returns:
        (images, labels): a float32 array of shape (N, w, h, c) and an
        int32 label array of shape (N,).
    """
    # Sort the listing so the class -> label mapping is deterministic across
    # runs and machines (os.listdir order is arbitrary); the recognition
    # script's ID tuple relies on a fixed ordering.
    cate=[path+'/'+x for x in sorted(os.listdir(path)) if os.path.isdir(path+'/'+x)]
    imgs=[]
    labels=[]
    for idx,folder in enumerate(cate):
        for im in glob.glob(folder+'/*.png'):
            print('reading the images:%s'%(im))
            img=io.imread(im)
            img=transform.resize(img,(w,h,c))
            imgs.append(img)
            labels.append(idx)
    return np.asarray(imgs,np.float32),np.asarray(labels,np.int32)
 
data,label=read_img(path)


# Shuffle samples and labels with one shared permutation so pairs stay aligned.
num_example=data.shape[0]
arr=np.arange(num_example)
np.random.shuffle(arr)
data=data[arr]
label=label[arr]


# Split 80% train / 20% validation.
ratio=0.8
# np.int was removed in NumPy 1.24; the builtin int is equivalent here.
s=int(num_example*ratio)
x_train=data[:s]
y_train=label[:s]
x_val=data[s:]
y_val=label[s:]
 
#----------------- build the network ----------------------
# Placeholders: x holds image batches (N, 128, 128, 3); y_ holds int class labels.
x=tf.placeholder(tf.float32,shape=[None,w,h,c],name='x')
y_=tf.placeholder(tf.int32,shape=[None,],name='y_')
 
def CNNlayer(n_classes=60):
    """Build the 4-conv-block CNN over the module placeholder ``x``.

    Args:
        n_classes: number of output classes. Defaults to 60, matching the
            original hard-coded head size, so existing ``CNNlayer()`` calls
            are unchanged.

    Returns:
        Logits tensor of shape (batch, n_classes).
    """
    # conv block 1: 128 -> 64
    conv1=tf.layers.conv2d(
          inputs=x,
          filters=32,
          kernel_size=[5, 5],
          padding="same",
          activation=tf.nn.relu,
          kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
    pool1=tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)

    # conv block 2: 64 -> 32
    conv2=tf.layers.conv2d(
          inputs=pool1,
          filters=64,
          kernel_size=[5, 5],
          padding="same",
          activation=tf.nn.relu,
          kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
    pool2=tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)

    # conv block 3: 32 -> 16
    conv3=tf.layers.conv2d(
          inputs=pool2,
          filters=128,
          kernel_size=[3, 3],
          padding="same",
          activation=tf.nn.relu,
          kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
    pool3=tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], strides=2)

    # conv block 4: 16 -> 8
    conv4=tf.layers.conv2d(
          inputs=pool3,
          filters=128,
          kernel_size=[3, 3],
          padding="same",
          activation=tf.nn.relu,
          kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
    pool4=tf.layers.max_pooling2d(inputs=conv4, pool_size=[2, 2], strides=2)

    # Flatten: 8*8*128 matches four 2x poolings of a 128x128 input; a
    # different input size would need a different flatten width.
    re1 = tf.reshape(pool4, [-1, 8 * 8 * 128])

    # fully connected head
    dense1 = tf.layers.dense(inputs=re1, 
                          units=1024, 
                          activation=tf.nn.relu,
                          kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                          kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
    dense2= tf.layers.dense(inputs=dense1, 
                          units=512, 
                          activation=tf.nn.relu,
                          kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                          kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
    logits= tf.layers.dense(inputs=dense2, 
                            units=n_classes,
                            activation=None,
                            kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                            kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
    return logits
#--------------------------- end of network ---------------------------
logits=CNNlayer()
# Loss: sparse softmax cross-entropy (labels are integer class indices).
loss=tf.losses.sparse_softmax_cross_entropy(labels=y_,logits=logits)
# Adam optimizer with a fixed learning rate of 0.001.
train_op=tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
# Batch accuracy: fraction of samples whose argmax prediction matches y_.
correct_prediction = tf.equal(tf.cast(tf.argmax(logits,1),tf.int32), y_)    
acc= tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
 
 
# Helper: iterate over (inputs, targets) in fixed-size batches.
def minibatches(inputs=None, targets=None, batch_size=None, shuffle=False):
    """Yield successive (inputs, targets) batch pairs of size *batch_size*.

    When *shuffle* is True the sample order is randomized first. Trailing
    samples that do not fill a whole batch are dropped.
    """
    assert len(inputs) == len(targets)
    order = None
    if shuffle:
        order = np.arange(len(inputs))
        np.random.shuffle(order)
    total = len(inputs)
    for begin in range(0, total - batch_size + 1, batch_size):
        end = begin + batch_size
        sel = order[begin:end] if order is not None else slice(begin, end)
        yield inputs[sel], targets[sel]
 
 
# Train and validate; n_epoch can be increased for better accuracy.
# Keep at most the 3 most recent checkpoints.
saver=tf.train.Saver(max_to_keep=3)
max_acc=0
# Per-epoch validation accuracy log; closed after the loop below.
f=open('ckpt1/acc.txt','w')



n_epoch=10
batch_size=64
sess=tf.InteractiveSession()  
sess.run(tf.global_variables_initializer())
for epoch in range(n_epoch):
    start_time = time.time()
    
    # training pass: accumulate per-batch loss/accuracy, report epoch means
    train_loss, train_acc, n_batch = 0, 0, 0
    for x_train_a, y_train_a in minibatches(x_train, y_train, batch_size, shuffle=True):
        _,err,ac=sess.run([train_op,loss,acc], feed_dict={x: x_train_a, y_: y_train_a})
        train_loss += err; train_acc += ac; n_batch += 1
    print("   train loss: %f" % (train_loss/ n_batch))
    print("   train acc: %f" % (train_acc/ n_batch))
    
    # validation pass (no train_op, weights unchanged)
    val_loss, val_acc, n_batch = 0, 0, 0
    for x_val_a, y_val_a in minibatches(x_val, y_val, batch_size, shuffle=False):
        err, ac = sess.run([loss,acc], feed_dict={x: x_val_a, y_: y_val_a})
        val_loss += err; val_acc += ac; n_batch += 1
    print("   validation loss: %f" % (val_loss/ n_batch))
    print("   validation acc: %f" % (val_acc/ n_batch))
 
    # NOTE(review): val_acc here is the SUM of per-batch accuracies, not the
    # mean (the prints above divide by n_batch but this does not). The
    # logged value and the max_acc comparison are consistent with each
    # other, but the file records a batch-sum, not an accuracy in [0, 1].
    f.write(str(epoch+1)+', val_acc: '+str(val_acc)+'\n')
    if val_acc>max_acc:
        # best epoch so far: snapshot the weights
        max_acc=val_acc
        saver.save(sess,'ckpt1/faces.ckpt',global_step=epoch+1)
 
f.close()
sess.close()

 


免责声明!

本站转载的文章为个人学习借鉴使用,本站对版权不负任何法律责任。如果侵犯了您的隐私权益,请联系本站邮箱yoyou2525@163.com删除。



 
粤ICP备18138465号  © 2018-2025 CODEPRJ.COM