This project shows how to use deep learning (TensorFlow) to recognize captchas. The examples use 4-character codes, but you can generate captchas of any length and train on them yourself.
[The project's source code is available at the GitHub address given at the end; just clone it. You have to train the model yourself: the model files are large, so they are not uploaded here.]
The captcha images are generated programmatically and used as the training set; we then train the network, save the model, and run it on a held-out test set to measure the trained model's accuracy.
The first step is generating the captcha images (the dataset):
# coding: utf-8
# Captcha generation library
from captcha.image import ImageCaptcha  # pip install captcha
import numpy as np
from PIL import Image
import random
import sys

number = ['0','1','2','3','4','5','6','7','8','9']
# alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
# ALPHABET = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']

def random_captcha_text(char_set=number, captcha_size=4):
    # list of captcha characters
    captcha_text = []
    for i in range(captcha_size):
        # pick a random character
        c = random.choice(char_set)
        # append it to the captcha
        captcha_text.append(c)
    return captcha_text

# generate a captcha image for a random text
def gen_captcha_text_and_image():
    image = ImageCaptcha()
    # get a randomly generated captcha text
    captcha_text = random_captcha_text()
    # turn the character list into a string
    captcha_text = ''.join(captcha_text)
    # render the captcha
    captcha = image.generate(captcha_text)
    # write it to a file named after its text
    image.write(captcha_text, '../image/' + captcha_text + '.jpg')

# fewer than 10000 files may remain, because images with the same text overwrite each other
num = 10000
if __name__ == '__main__':
    for i in range(num):
        gen_captcha_text_and_image()
        sys.stdout.write('\r>> Creating image %d/%d' % (i+1, num))
        sys.stdout.flush()
    sys.stdout.write('\n')
    sys.stdout.flush()
    print("Done generating")
Set the output path and the dataset size to whatever suits you. This script only creates digit captchas; if you want upper- and lower-case letters, add them to the character set yourself, and the captcha length is also configurable (see the sketch below).
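For reference, a minimal sketch of what an extended character set might look like. It simply combines the commented-out number / alphabet / ALPHABET lists from the script above, and the captcha_size of 6 is an arbitrary example. Note that once letters are involved, the labels are no longer single digits, so the label handling in the TFRecord and training scripts further down would also need to map characters to class indices.

import random

number   = ['0','1','2','3','4','5','6','7','8','9']
alphabet = [chr(c) for c in range(ord('a'), ord('z') + 1)]
ALPHABET = [chr(c) for c in range(ord('A'), ord('Z') + 1)]

# 62 possible characters instead of 10
char_set = number + alphabet + ALPHABET

def random_captcha_text(char_set=char_set, captcha_size=6):
    # pick captcha_size characters at random from the enlarged set
    return [random.choice(char_set) for _ in range(captcha_size)]

print(''.join(random_captcha_text()))  # e.g. 'aB3kQ9'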
Next, convert these files: the pile of individual image files is turned into TFRecord files.
# coding: utf-8
import tensorflow as tf
import os
import random
import math
import sys
from PIL import Image
import numpy as np

# number of test images
_NUM_TEST = 500
# random seed
_RANDOM_SEED = 0
# dataset path
DATASET_DIR = "../image/"
# where the tfrecord files are stored
TFRECORD_DIR = "../captcha/"

# check whether the tfrecord files already exist
def _dataset_exists(dataset_dir):
    for split_name in ['train', 'test']:
        output_filename = os.path.join(dataset_dir, split_name + '.tfrecords')
        if not tf.gfile.Exists(output_filename):
            return False
    return True

# collect all captcha image paths
def _get_filenames_and_classes(dataset_dir):
    photo_filenames = []
    for filename in os.listdir(dataset_dir):
        # full path of the file
        path = os.path.join(dataset_dir, filename)
        photo_filenames.append(path)
    return photo_filenames

def int64_feature(values):
    if not isinstance(values, (tuple, list)):
        values = [values]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))

def bytes_feature(values):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))

def image_to_tfexample(image_data, label0, label1, label2, label3):
    # Abstract base class for protocol messages.
    return tf.train.Example(features=tf.train.Features(feature={
        'image': bytes_feature(image_data),
        'label0': int64_feature(label0),
        'label1': int64_feature(label1),
        'label2': int64_feature(label2),
        'label3': int64_feature(label3),
    }))

# convert the data to TFRecord format
def _convert_dataset(split_name, filenames, dataset_dir):
    assert split_name in ['train', 'test']
    with tf.Session() as sess:
        # path + name of the tfrecord file
        output_filename = os.path.join(TFRECORD_DIR, split_name + '.tfrecords')
        with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
            for i, filename in enumerate(filenames):
                try:
                    sys.stdout.write('\r>> Converting image %d/%d' % (i+1, len(filenames)))
                    sys.stdout.flush()
                    # read the image
                    image_data = Image.open(filename)
                    # resize to match the network input
                    image_data = image_data.resize((224, 224))
                    # convert to grayscale
                    image_data = np.array(image_data.convert('L'))
                    # serialize the image to bytes
                    image_data = image_data.tobytes()
                    # the label is the first four characters of the file name
                    labels = filename.split('/')[-1][0:4]
                    num_labels = []
                    for j in range(4):
                        num_labels.append(int(labels[j]))
                    # build the protocol buffer example
                    example = image_to_tfexample(image_data, num_labels[0], num_labels[1], num_labels[2], num_labels[3])
                    tfrecord_writer.write(example.SerializeToString())
                except IOError as e:
                    print('Could not read:', filename)
                    print('Error:', e)
                    print('Skip it\n')
    sys.stdout.write('\n')
    sys.stdout.flush()

# only convert if the tfrecord files do not exist yet
if _dataset_exists(TFRECORD_DIR):
    print('tfrecord files already exist')
else:
    # collect all image paths
    photo_filenames = _get_filenames_and_classes(DATASET_DIR)
    # split the data into training and test sets, shuffled
    random.seed(_RANDOM_SEED)
    random.shuffle(photo_filenames)
    training_filenames = photo_filenames[_NUM_TEST:]
    testing_filenames = photo_filenames[:_NUM_TEST]
    # convert the data
    _convert_dataset('train', training_filenames, DATASET_DIR)
    _convert_dataset('test', testing_filenames, DATASET_DIR)
    print('tfrecord files generated')
The test-set size (_NUM_TEST) can be set as you like.
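If you want a quick sanity check on the conversion, a small sketch like the one below counts how many examples ended up in each .tfrecords file. It uses the TensorFlow 1.x tf.python_io.tf_record_iterator API and assumes the same ../captcha/ directory as TFRECORD_DIR above.

import tensorflow as tf

for split in ['train', 'test']:
    path = '../captcha/' + split + '.tfrecords'
    # iterate over the serialized examples and count them
    count = sum(1 for _ in tf.python_io.tf_record_iterator(path))
    print('%s: %d examples' % (split, count))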
Then train the network on these images:
import os
import tensorflow as tf
from PIL import Image
from lib.nets2 import nets_factory
import numpy as np

os.environ["CUDA_VISIBLE_DEVICES"] = '0'  # make only the first GPU visible
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5  # use at most 50% of the GPU memory
config.gpu_options.allow_growth = True  # allocate GPU memory on demand
sess = tf.Session(config=config)

# number of distinct characters
CHAR_SET_LEN = 10
# image height
IMAGE_HEIGHT = 60
# image width
IMAGE_WIDTH = 160
# batch size
BATCH_SIZE = 28
# path of the tfrecord file
TFRECORD_FILE = "C:\workspace\Python\deep-learning\card\Cimages/train.tfrecords"

# placeholders
x = tf.placeholder(tf.float32, [None, 224, 224])
y0 = tf.placeholder(tf.float32, [None])

# learning rate
lr = tf.Variable(0.001, dtype=tf.float32)

# read data from the tfrecord file
def read_and_decode(filename):
    # build a file-name queue from the file name
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    # returns the file name and the serialized example
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'image': tf.FixedLenFeature([], tf.string),
                                           'label0': tf.FixedLenFeature([], tf.int64),
                                       })
    # decode the image data
    image = tf.decode_raw(features['image'], tf.uint8)
    # tf.train.shuffle_batch needs a fixed shape
    image = tf.reshape(image, [224, 224])
    # image preprocessing: scale to [-1, 1]
    image = tf.cast(image, tf.float32) / 255.0
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    # read the label
    label0 = tf.cast(features['label0'], tf.int32)
    return image, label0

# get image data and labels
image, label0 = read_and_decode(TFRECORD_FILE)

# shuffle_batch randomly shuffles the examples
image_batch, label_batch0 = tf.train.shuffle_batch(
    [image, label0], batch_size=BATCH_SIZE,
    capacity=50000, min_after_dequeue=10000, num_threads=1)

# define the network
train_network_fn = nets_factory.get_network_fn(
    'alexnet_v2',
    num_classes=CHAR_SET_LEN * 1,
    weight_decay=0.0005,
    is_training=True)

with tf.Session() as sess:
    # inputs: a tensor of size [batch_size, height, width, channels]
    X = tf.reshape(x, [BATCH_SIZE, 224, 224, 1])
    # feed the data through the network
    logits, end_points = train_network_fn(X)
    # convert the label to one-hot form
    one_hot_labels0 = tf.one_hot(indices=tf.cast(y0, tf.int32), depth=CHAR_SET_LEN)
    # concatenate the one-hot labels (a single position here)
    label_40 = tf.concat([one_hot_labels0], 1)
    # compute the loss
    loss_40 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=label_40))
    # optimize the loss
    optimizer_40 = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss_40)
    # compute the accuracy
    correct_prediction_40 = tf.equal(tf.argmax(label_40, 1), tf.argmax(logits, 1))
    accuracy_40 = tf.reduce_mean(tf.cast(correct_prediction_40, tf.float32))

    # saver for the model
    saver = tf.train.Saver()
    # initialize variables
    sess.run(tf.global_variables_initializer())

    # create a coordinator to manage the threads
    coord = tf.train.Coordinator()
    # start the QueueRunners; the file-name queue is now filled
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    for i in range(3500):
        # get one batch of data and labels
        b_image, b_label0 = sess.run([image_batch, label_batch0])
        # run one optimization step
        sess.run(optimizer_40, feed_dict={x: b_image, y0: b_label0})

        # report loss and accuracy every 4 iterations
        if i % 4 == 0:
            # lower the learning rate every 400 iterations
            if i % 400 == 0:
                sess.run(tf.assign(lr, lr / 3))
            acc, loss_ = sess.run([accuracy_40, loss_40], feed_dict={x: b_image, y0: b_label0})
            learning_rate = sess.run(lr)
            print("Iter:%d Loss:%.4f Accuracy:%.4f Learning_rate:%.4f" % (i, loss_, acc, learning_rate))

            # acc0,acc1,acc2,acc3,loss_ = sess.run([accuracy0,accuracy1,accuracy2,accuracy3,total_loss],
            #                                      feed_dict={x: b_image, y0: b_label0, y1: b_label1,
            #                                                 y2: b_label2, y3: b_label3})
            # learning_rate = sess.run(lr)
            # print ("Iter:%d Loss:%.3f Accuracy:%.2f,%.2f,%.2f,%.2f Learning_rate:%.4f" % (i,loss_,acc0,acc1,acc2,acc3,learning_rate))

            # save the model at iteration 3300 and stop
            if i == 3300:
                saver.save(sess, "C:\workspace\Python\deep-learning\card\Cmodels/crack_captcha1.model", global_step=i)
                break

    # ask the other threads to stop
    coord.request_stop()
    # this call returns only after all other threads have stopped
    coord.join(threads)
This trains on the images and saves the model. The number of training iterations and the learning-rate schedule should be tuned to the size of your own dataset; one possible alternative schedule is sketched below.
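As an alternative to manually dividing lr by 3 every 400 iterations, TensorFlow 1.x also provides tf.train.exponential_decay, which ties the learning rate to a global_step that the optimizer increments. The sketch below is illustrative only; the decay_steps and decay_rate values are assumptions, not taken from the original project.

import tensorflow as tf

global_step = tf.Variable(0, trainable=False)
lr = tf.train.exponential_decay(0.001,            # initial learning rate
                                global_step,
                                decay_steps=400,   # decay every 400 steps
                                decay_rate=1.0/3,  # multiply by 1/3 each time
                                staircase=True)
# pass global_step so each optimizer step advances the schedule
# (loss_40 is the loss defined in the training script above)
# optimizer_40 = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss_40, global_step=global_step)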
Finally, test the trained model's accuracy on the test set:
# coding: utf-8
import os
import tensorflow as tf
from PIL import Image
from nets import nets_factory
import numpy as np
import matplotlib.pyplot as plt

# number of distinct characters
CHAR_SET_LEN = 10
# image height
IMAGE_HEIGHT = 60
# image width
IMAGE_WIDTH = 160
# batch size
BATCH_SIZE = 1
# path of the tfrecord file
TFRECORD_FILE = "D:/Tensorflow/captcha/test.tfrecords"

# placeholder
x = tf.placeholder(tf.float32, [None, 224, 224])

# read data from the tfrecord file
def read_and_decode(filename):
    # build a file-name queue from the file name
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    # returns the file name and the serialized example
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'image': tf.FixedLenFeature([], tf.string),
                                           'label0': tf.FixedLenFeature([], tf.int64),
                                           'label1': tf.FixedLenFeature([], tf.int64),
                                           'label2': tf.FixedLenFeature([], tf.int64),
                                           'label3': tf.FixedLenFeature([], tf.int64),
                                       })
    # decode the image data
    image = tf.decode_raw(features['image'], tf.uint8)
    # keep an unprocessed grayscale copy for display
    image_raw = tf.reshape(image, [224, 224])
    # tf.train.shuffle_batch needs a fixed shape
    image = tf.reshape(image, [224, 224])
    # image preprocessing: scale to [-1, 1]
    image = tf.cast(image, tf.float32) / 255.0
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    # read the labels
    label0 = tf.cast(features['label0'], tf.int32)
    label1 = tf.cast(features['label1'], tf.int32)
    label2 = tf.cast(features['label2'], tf.int32)
    label3 = tf.cast(features['label3'], tf.int32)
    return image, image_raw, label0, label1, label2, label3

# get image data and labels
image, image_raw, label0, label1, label2, label3 = read_and_decode(TFRECORD_FILE)

# shuffle_batch randomly shuffles the examples
image_batch, image_raw_batch, label_batch0, label_batch1, label_batch2, label_batch3 = tf.train.shuffle_batch(
    [image, image_raw, label0, label1, label2, label3], batch_size=BATCH_SIZE,
    capacity=50000, min_after_dequeue=10000, num_threads=1)

# define the network
train_network_fn = nets_factory.get_network_fn(
    'alexnet_v2',
    num_classes=CHAR_SET_LEN,
    weight_decay=0.0005,
    is_training=False)

with tf.Session() as sess:
    # inputs: a tensor of size [batch_size, height, width, channels]
    X = tf.reshape(x, [BATCH_SIZE, 224, 224, 1])
    # feed the data through the network
    logits0, logits1, logits2, logits3, end_points = train_network_fn(X)

    # predictions for the four positions
    predict0 = tf.reshape(logits0, [-1, CHAR_SET_LEN])
    predict0 = tf.argmax(predict0, 1)
    predict1 = tf.reshape(logits1, [-1, CHAR_SET_LEN])
    predict1 = tf.argmax(predict1, 1)
    predict2 = tf.reshape(logits2, [-1, CHAR_SET_LEN])
    predict2 = tf.argmax(predict2, 1)
    predict3 = tf.reshape(logits3, [-1, CHAR_SET_LEN])
    predict3 = tf.argmax(predict3, 1)

    # initialize variables
    sess.run(tf.global_variables_initializer())
    # restore the trained model
    saver = tf.train.Saver()
    saver.restore(sess, './captcha/models/crack_captcha.model-6000')

    # create a coordinator to manage the threads
    coord = tf.train.Coordinator()
    # start the QueueRunners; the file-name queue is now filled
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    for i in range(10):
        # get one batch of data and labels
        b_image, b_image_raw, b_label0, b_label1, b_label2, b_label3 = sess.run(
            [image_batch, image_raw_batch, label_batch0, label_batch1, label_batch2, label_batch3])
        # show the image
        img = Image.fromarray(b_image_raw[0], 'L')
        plt.imshow(img)
        plt.axis('off')
        plt.show()
        # print the true labels
        print('label:', b_label0, b_label1, b_label2, b_label3)
        # run the prediction
        label0, label1, label2, label3 = sess.run([predict0, predict1, predict2, predict3],
                                                  feed_dict={x: b_image})
        # print the predictions
        print('predict:', label0, label1, label2, label3)

    # ask the other threads to stop
    coord.request_stop()
    # this call returns only after all other threads have stopped
    coord.join(threads)
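The loop above only prints labels and predictions for ten images. To turn that into an accuracy figure for the test split, one possible (hedged) extension is sketched below: it is meant to run in place of the for-loop inside the same tf.Session block, reusing the image_batch, label_batchN, predictN and x variables already defined there, and assumes the 500-image test split (_NUM_TEST) produced earlier. Because shuffle_batch samples with shuffling, this gives an estimate rather than an exact single pass over the 500 images.

correct = 0
total = 500  # size of the test split (_NUM_TEST)
for i in range(total):
    # fetch one test image with its four ground-truth digits
    b_image, b_l0, b_l1, b_l2, b_l3 = sess.run(
        [image_batch, label_batch0, label_batch1, label_batch2, label_batch3])
    # predict the four digits
    p0, p1, p2, p3 = sess.run([predict0, predict1, predict2, predict3],
                              feed_dict={x: b_image})
    # the captcha counts as correct only if all four positions match
    if (p0[0], p1[0], p2[0], p3[0]) == (b_l0[0], b_l1[0], b_l2[0], b_l3[0]):
        correct += 1
print('captcha accuracy: %.2f%%' % (100.0 * correct / total))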
The source code can be downloaded from GitHub: https://github.com/H-Designer/Tensorflow-Verification_Code