視頻學習來源
https://www.bilibili.com/video/av40787141?from=search&seid=17003307842787199553
筆記
RNN用於圖像識別並不是很好
模型保存(結構和參數)
1 需要安裝h5py
pip install h5py
2在代碼最后一行
model.save('model.h5')
即可在當前目錄保存HDF5文件
模型載入
1開頭導入包
from keras.models import load_model
2導入模型
model=load_model('model.h5')
模型載入后可接着訓練
model.fit(x_train,y_train,batch_size=64,epochs=2)
只保存參數
model.save_weights('weights.h5')
model.load_weights('weights.h5')
只保存網絡結構
from keras.models import model_from_json
json_string=model.to_json()
model=model_from_json(json_string)
import numpy as np
from keras.datasets import mnist              # downloads the MNIST dataset on first use
from keras.utils import np_utils
from keras.models import Sequential           # sequential (layer-stack) model
from keras.layers import Dense
from keras.layers.recurrent import SimpleRNN  # Keras ships three RNN layers: SimpleRNN, LSTM, GRU
from keras.optimizers import Adam
# Features per time step: each MNIST image row has 28 pixels
input_size = 28
# Sequence length: each image has 28 rows fed to the RNN one per step
time_steps = 28
# Number of units in the recurrent hidden layer
cell_size = 50

# Load the data.
# x_train: (60000, 28, 28), y_train: (60000,)
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print('x_shape:', x_train.shape)
print('y_shape:', y_train.shape)

# The images are already shaped (samples, time_steps, input_size);
# flat (60000, 784) data would first need reshaping to (60000, 28, 28).
# Divide by 255 to normalise pixel values into [0, 1].
x_train = x_train / 255.0
x_test = x_test / 255.0

# One-hot encode the labels into 10 classes
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# Build the model: a single SimpleRNN hidden layer plus a softmax output layer
model = Sequential()
model.add(SimpleRNN(
    units=cell_size,                       # output dimension of the recurrent layer
    input_shape=(time_steps, input_size),  # (sequence length, features per step)
))
model.add(Dense(10, activation='softmax'))

# Adam optimiser with a learning rate of 1e-4
adam = Adam(lr=1e-4)

# Cross-entropy loss converges faster than MSE for classification;
# also report accuracy during training.
model.compile(
    optimizer=adam,
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)

# Train: 60000 images, mini-batches of 64, 10 epochs
# (one epoch = one full pass over the training set)
model.fit(x_train, y_train, batch_size=64, epochs=10)

# Evaluate on both the test and the training set
loss, accuracy = model.evaluate(x_test, y_test)
print('\ntest loss', loss)
print('\ntest accuracy', accuracy)
loss, accuracy = model.evaluate(x_train, y_train)
print('\ntrain loss', loss)
print('\ntrain accuracy', accuracy)
x_shape: (60000, 28, 28) y_shape: (60000,) Epoch 1/10 60000/60000 [==============================] - 9s 145us/step - loss: 1.6191 - acc: 0.4629 Epoch 2/10 60000/60000 [==============================] - 9s 156us/step - loss: 0.9580 - acc: 0.7103 Epoch 3/10 60000/60000 [==============================] - 6s 101us/step - loss: 0.7064 - acc: 0.7934 Epoch 4/10 60000/60000 [==============================] - 8s 141us/step - loss: 0.5749 - acc: 0.8344 Epoch 5/10 60000/60000 [==============================] - 8s 128us/step - loss: 0.4999 - acc: 0.8550 Epoch 6/10 60000/60000 [==============================] - 6s 102us/step - loss: 0.4503 - acc: 0.8689 Epoch 7/10 60000/60000 [==============================] - 6s 99us/step - loss: 0.4130 - acc: 0.8808 Epoch 8/10 60000/60000 [==============================] - 6s 95us/step - loss: 0.3838 - acc: 0.8891 Epoch 9/10 60000/60000 [==============================] - 6s 96us/step - loss: 0.3597 - acc: 0.8969 Epoch 10/10 60000/60000 [==============================] - 6s 96us/step - loss: 0.3408 - acc: 0.9020 10000/10000 [==============================] - 1s 73us/step test loss 0.3126664091944695 test accuracy 0.91 60000/60000 [==============================] - 4s 67us/step train loss 0.326995205249389 train accuracy 0.9060166666666667