Python code for stacking (model fusion) of deep learning models: read it and you will know how to use it


Without further ado, here is the code. The function below implements the first level of a stacking ensemble: 6-fold cross-validation with snapshot ensembling inside each fold, producing out-of-fold predictions for the training set and fold-averaged predictions for the test set.

import os

import numpy as np
from sklearn.model_selection import KFold

# args, config, BATCH_SIZE, get_model, SnapshotCallbackBuilder and mb (the metrics
# helper) are assumed to be defined elsewhere in the original project.


def stacking_first(train, train_y, test):
    savepath = './stack_op{}_dt{}_tfidf{}/'.format(args.option, args.data_type, args.tfidf)
    os.makedirs(savepath, exist_ok=True)

    count_kflod = 0
    num_folds = 6
    kf = KFold(n_splits=num_folds, shuffle=True, random_state=10)
    # predictions on the test set, averaged over the folds
    predict = np.zeros((test.shape[0], config.n_class))
    # out-of-fold predictions on the training set
    oof_predict = np.zeros((train.shape[0], config.n_class))
    scores = []
    f1s = []

    for train_index, test_index in kf.split(train):
        # the training data is split into 6 folds; in each iteration 5 folds are used
        # for training and the remaining fold serves as the validation set

        # labels for this fold
        y_train, y_test = train_y[train_index], train_y[test_index]
        # data for this fold
        kfold_X_train, kfold_X_valid = train[train_index], train[test_index]

        # directory prefix for this fold's model snapshots
        model_prefix = savepath + 'DNN' + str(count_kflod)
        if not os.path.exists(model_prefix):
            os.mkdir(model_prefix)

        M = 4              # number of snapshots
        alpha_zero = 1e-3  # initial learning rate
        snap_epoch = 16
        snapshot = SnapshotCallbackBuilder(snap_epoch, M, alpha_zero)

        # build a model whose input dimensions are derived from the training data, then fit it
        res_model = get_model(train)
        res_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        res_model.fit(kfold_X_train, y_train, batch_size=BATCH_SIZE, epochs=snap_epoch, verbose=1,
                      validation_data=(kfold_X_valid, y_test),
                      callbacks=snapshot.get_callbacks(model_save_place=model_prefix))

        # collect all trained snapshot models in this directory, identified by the '.h5' suffix
        evaluations = []
        for i in os.listdir(model_prefix):
            if '.h5' in i:
                evaluations.append(i)

        # allocate space for this fold's predictions on the test set and on the
        # validation fold, shaped [number of samples, number of classes]
        preds1 = np.zeros((test.shape[0], config.n_class))
        preds2 = np.zeros((len(kfold_X_valid), config.n_class))
        # load each snapshot, predict on the test set and the validation fold,
        # and average the results over the snapshots
        for run, i in enumerate(evaluations):
            res_model.load_weights(os.path.join(model_prefix, i))
            preds1 += res_model.predict(test, verbose=1) / len(evaluations)
            preds2 += res_model.predict(kfold_X_valid, batch_size=128) / len(evaluations)

        # average the test-set predictions over the folds
        predict += preds1 / num_folds
        # store this fold's validation predictions at the matching indices, building up
        # the out-of-fold features for the next stacking level
        oof_predict[test_index] = preds2

        # accuracy and F1 for this fold
        accuracy = mb.cal_acc(oof_predict[test_index], np.argmax(y_test, axis=1))
        f1 = mb.cal_f_alpha(oof_predict[test_index], np.argmax(y_test, axis=1), n_out=config.n_class)
        print('the kfold cv is : ', str(accuracy))
        print('the kfold f1 is : ', str(f1))
        count_kflod += 1

        # keep each fold's metrics so they can be averaged at the end
        scores.append(accuracy)
        f1s.append(f1)
    # the mean of the per-fold metrics is reported as the final result
    print('total scores is ', np.mean(scores))
    print('total f1 is ', np.mean(f1s))
    return predict
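
The snippet relies on a SnapshotCallbackBuilder that is not shown. Below is a minimal sketch of what such a helper could look like, assuming the standard snapshot-ensembling recipe: a cosine-annealed learning rate restarted M times over the training run, with the weights saved at the end of every annealing cycle. The class name and the get_callbacks(model_save_place=...) signature are chosen to match the call in the code above; everything else (the SnapshotSaver helper, the snapshot_*.h5 file names) is illustrative, not the original project's implementation.

import math
import os

from tensorflow.keras.callbacks import Callback, LearningRateScheduler


class SnapshotSaver(Callback):
    """Save the model weights at the end of every cosine-annealing cycle."""

    def __init__(self, save_dir, cycle_len):
        super().__init__()
        self.save_dir = save_dir
        self.cycle_len = cycle_len

    def on_epoch_end(self, epoch, logs=None):
        if (epoch + 1) % self.cycle_len == 0:
            snap_id = (epoch + 1) // self.cycle_len
            self.model.save_weights(
                os.path.join(self.save_dir, 'snapshot_{}.h5'.format(snap_id)))


class SnapshotCallbackBuilder:
    """Cosine-annealed learning rate restarted nb_snapshots times over nb_epochs epochs."""

    def __init__(self, nb_epochs, nb_snapshots, init_lr):
        self.T = nb_epochs
        self.M = nb_snapshots
        self.alpha_zero = init_lr

    def _cosine_anneal(self, epoch, lr=None):
        # restart the cosine schedule at the start of every cycle of T // M epochs
        cycle_len = self.T // self.M
        cos_inner = math.pi * (epoch % cycle_len) / cycle_len
        return float(self.alpha_zero / 2 * (math.cos(cos_inner) + 1))

    def get_callbacks(self, model_save_place='./'):
        return [
            LearningRateScheduler(self._cosine_anneal),
            SnapshotSaver(model_save_place, self.T // self.M),
        ]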

 
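stacking_first only returns the fold-averaged test predictions, while the out-of-fold matrix oof_predict is what the second stacking level is usually trained on. The following usage sketch is hypothetical: it assumes the return statement is changed to "return predict, oof_predict", that train_y is one-hot encoded (as the argmax calls above imply), and it uses scikit-learn's LogisticRegression only as a stand-in for whatever meta-learner the original project uses.

import numpy as np
from sklearn.linear_model import LogisticRegression

# first stacking level: per-class probabilities for the test set (fold-averaged)
# and for the training set (out-of-fold)
test_meta, train_meta = stacking_first(train, train_y, test)

# second stacking level: fit a simple meta-learner on the out-of-fold probabilities
# and apply it to the averaged test probabilities
meta_clf = LogisticRegression(max_iter=1000)
meta_clf.fit(train_meta, np.argmax(train_y, axis=1))
final_labels = meta_clf.predict(test_meta)

Training the meta-learner on out-of-fold predictions rather than in-fold predictions is what keeps the second level from overfitting to labels the base model has already seen.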

