LightGBM in Practice


Dataset: the classic Iris dataset (iris.data)

Classification with the native LightGBM API

First, install the required library: pip install lightgbm
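
A quick sanity check that the install works (a minimal sketch; the printed version depends on your environment):

import lightgbm
print(lightgbm.__version__)  # e.g. '4.x'; any recent release runs the examples below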

from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
import lightgbm as lgb
import numpy as np


# Read the file with ',' as the delimiter; the result is a 2-D array of strings
iris = np.loadtxt('iris.data', dtype=str, delimiter=',', unpack=False, encoding='utf-8')

# The first 4 columns are the features
data = iris[:, :4].astype(float)  # np.float was removed in NumPy 1.24; use the builtin float
# The last column is the label; turn it into a 2-D column array
target = iris[:, -1][:, np.newaxis]

# One-hot encode the labels, then map them back to integer class indices
enc = OneHotEncoder()
target = enc.fit_transform(target).toarray().astype(int)  # np.int was removed in NumPy 1.24
target = [list(oh).index(1) for oh in target]

# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.2, random_state=1)

# Convert to LightGBM's Dataset format
train_data = lgb.Dataset(X_train, label=y_train)
validation_data = lgb.Dataset(X_test, label=y_test)

# Parameters
params = {
    'learning_rate': 0.1,
    'lambda_l1': 0.1,
    'lambda_l2': 0.2,
    'max_depth': 4,
    'objective': 'multiclass',  # objective function
    'num_class': 3,
}

# Train the model
gbm = lgb.train(params, train_data, valid_sets=[validation_data])

# Predict (returns per-class probabilities for multiclass)
y_pred = gbm.predict(X_test)
y_pred = [list(x).index(max(x)) for x in y_pred]  # argmax over the class probabilities
print(y_pred)

# Evaluate the model
print(accuracy_score(y_test, y_pred))

Result

[LightGBM] [Warning] No further splits with positive gain, best gain: -inf
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf
[99] valid_0's multi_logloss: 0.264218
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf
[100] valid_0's multi_logloss: 0.264481
[0, 1, 1, 0, 2, 1, 2, 0, 0, 2, 1, 0, 2, 1, 1, 0, 1, 1, 0, 0, 1, 1, 2, 0, 2, 1, 0, 0, 1, 2]
0.9666666666666667
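
The one-hot encode/decode round trip above is just an indirect way of mapping the string labels to integers; a minimal alternative sketch using sklearn's LabelEncoder:

from sklearn.preprocessing import LabelEncoder

# Maps 'Iris-setosa' -> 0, 'Iris-versicolor' -> 1, 'Iris-virginica' -> 2 directly
le = LabelEncoder()
target = le.fit_transform(iris[:, -1])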

Classification with the sklearn interface

Classification with basic parameters

from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from lightgbm import LGBMClassifier, early_stopping
import joblib  # sklearn.externals.joblib was removed in scikit-learn 0.23
import numpy as np


# Read the file with ',' as the delimiter; the result is a 2-D array of strings
iris = np.loadtxt('iris.data', dtype=str, delimiter=',', unpack=False, encoding='utf-8')

# The first 4 columns are the features
data = iris[:, :4].astype(float)  # np.float was removed in NumPy 1.24; use the builtin float
# The last column is the label; turn it into a 2-D column array
target = iris[:, -1][:, np.newaxis]

# One-hot encode the labels, then map them back to integer class indices
enc = OneHotEncoder()
target = enc.fit_transform(target).toarray().astype(int)  # np.int was removed in NumPy 1.24
target = [list(oh).index(1) for oh in target]

# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.2, random_state=1)

# Train the model
gbm = LGBMClassifier(num_leaves=31, learning_rate=0.05, n_estimators=20)
gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)],
        callbacks=[early_stopping(stopping_rounds=5)])  # early_stopping_rounds= was removed from fit() in LightGBM 4.0

# Save the model
joblib.dump(gbm, 'loan_model.pkl')
# Load the model
gbm = joblib.load('loan_model.pkl')

# Predict with the best iteration found by early stopping
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_)

# Evaluate the model
print('The accuracy of prediction is:', accuracy_score(y_test, y_pred))

# Feature importances
print('Feature importances:', list(gbm.feature_importances_))

Result

[1] valid_0's multi_logloss: 1.04105
Training until validation scores don't improve for 5 rounds
[2] valid_0's multi_logloss: 0.969489
[3] valid_0's multi_logloss: 0.903964
[4] valid_0's multi_logloss: 0.845211
[5] valid_0's multi_logloss: 0.793714
[6] valid_0's multi_logloss: 0.742919
[7] valid_0's multi_logloss: 0.698058
[8] valid_0's multi_logloss: 0.659407
[9] valid_0's multi_logloss: 0.621686
[10] valid_0's multi_logloss: 0.588324
[11] valid_0's multi_logloss: 0.556705
[12] valid_0's multi_logloss: 0.52607
[13] valid_0's multi_logloss: 0.501139
[14] valid_0's multi_logloss: 0.476254
[15] valid_0's multi_logloss: 0.454358
[16] valid_0's multi_logloss: 0.433247
[17] valid_0's multi_logloss: 0.41494
[18] valid_0's multi_logloss: 0.395876
[19] valid_0's multi_logloss: 0.378817
[20] valid_0's multi_logloss: 0.364502
Did not meet early stopping. Best iteration is:
[20] valid_0's multi_logloss: 0.364502
The accuracy of prediction is: 0.9333333333333333
Feature importances: [12, 15, 129, 56]
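
joblib pickles the whole sklearn wrapper; alternatively, the trained Booster inside it can be saved in LightGBM's own text format. A sketch (note that a reloaded native Booster predicts class probabilities, not labels):

import lightgbm as lgb

# Save only the underlying Booster as a portable text file
gbm.booster_.save_model('model.txt')
# Reload it as a native Booster
booster = lgb.Booster(model_file='model.txt')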

Classification with parameter search

from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import OneHotEncoder
from lightgbm import LGBMClassifier
import numpy as np


# Read the file with ',' as the delimiter; the result is a 2-D array of strings
iris = np.loadtxt('iris.data', dtype=str, delimiter=',', unpack=False, encoding='utf-8')

# The first 4 columns are the features
data = iris[:, :4].astype(float)  # np.float was removed in NumPy 1.24; use the builtin float
# The last column is the label; turn it into a 2-D column array
target = iris[:, -1][:, np.newaxis]

# One-hot encode the labels, then map them back to integer class indices
enc = OneHotEncoder()
target = enc.fit_transform(target).toarray().astype(int)  # np.int was removed in NumPy 1.24
target = [list(oh).index(1) for oh in target]

# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.2, random_state=1)

# Grid search for hyperparameter tuning
estimator = LGBMClassifier(num_leaves=31)
param_grid = {
    'learning_rate': [0.01, 0.1, 1],
    'n_estimators': [20, 40]
}
gbm = GridSearchCV(estimator, param_grid)
gbm.fit(X_train, y_train)
print('Best parameters found by grid search are:', gbm.best_params_)

Result

Best parameters found by grid search are: {'learning_rate': 0.1, 'n_estimators': 20}
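
GridSearchCV refits the best parameter combination on the full training set by default, so the fitted searcher can be used for prediction directly; a minimal follow-up sketch:

# Evaluate the refit best model on the held-out test set
y_pred = gbm.predict(X_test)
print('Test accuracy with tuned parameters:', accuracy_score(y_test, y_pred))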

Regression with the native LightGBM API

from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
import lightgbm as lgb
from sklearn.metrics import mean_absolute_error

# Generate a synthetic regression dataset
X, y = make_regression(n_samples=100, n_features=1, noise=20)
# Split into training and test sets
train_X, test_X, train_y, test_y = train_test_split(X, y, test_size=0.25, random_state=1)

# Convert to LightGBM's Dataset format
lgb_train = lgb.Dataset(train_X, train_y)
lgb_eval = lgb.Dataset(test_X, test_y, reference=lgb_train)

# Parameters
params = {
    'task': 'train',
    'boosting_type': 'gbdt',  # boosting type
    'objective': 'regression',  # objective function
    'metric': {'l2', 'auc'},  # evaluation metrics (l2 is the meaningful one for regression)
    'num_leaves': 31,  # number of leaves per tree
    'learning_rate': 0.05,  # learning rate
    'feature_fraction': 0.9,  # fraction of features sampled when building each tree
    'bagging_fraction': 0.8,  # fraction of data sampled for bagging
    'bagging_freq': 5,  # perform bagging every k iterations
    'verbose': 1  # <0: fatal only, =0: errors/warnings, >0: info
}

# Train on the training set, with early stopping on the validation set
my_model = lgb.train(params, lgb_train, num_boost_round=20, valid_sets=lgb_eval,
                     callbacks=[lgb.early_stopping(stopping_rounds=5)])  # early_stopping_rounds= was removed from train() in LightGBM 4.0

# Predict on the test set
predictions = my_model.predict(test_X, num_iteration=my_model.best_iteration)

# Evaluate the predictions (mean absolute error)
print("Mean Absolute Error : " + str(mean_absolute_error(predictions, test_y)))

Result

[LightGBM] [Warning] No further splits with positive gain, best gain: -inf
[9] valid_0's auc: 0.873377 valid_0's l2: 1521.21
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf
[10] valid_0's auc: 0.873377 valid_0's l2: 1448
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf
[11] valid_0's auc: 0.873377 valid_0's l2: 1394.27
Early stopping, best iteration is:
[6] valid_0's auc: 0.873377 valid_0's l2: 1796.72
Mean Absolute Error : 32.371899328245405
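
With only 100 synthetic samples, a single train/test split gives a noisy error estimate; lgb.cv runs built-in k-fold cross-validation instead. A minimal sketch (the exact keys of the result dict vary across LightGBM versions):

# 5-fold cross-validation with the same parameters
cv_results = lgb.cv(params, lgb_train, num_boost_round=20, nfold=5)
print(list(cv_results.keys()))  # per-iteration mean/stdv for each metric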

Regression with the sklearn interface

from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
import lightgbm as lgb
from sklearn.metrics import mean_absolute_error

# Generate a synthetic regression dataset
X, y = make_regression(n_samples=100, n_features=1, noise=20)

# Split into training and test sets
train_X, test_X, train_y, test_y = train_test_split(X, y, test_size=0.25, random_state=1)

# Train the sklearn-style regressor on the training set
# verbosity=2 prints debug messages while boosting
my_model = lgb.LGBMRegressor(objective='regression', num_leaves=31, learning_rate=0.05, n_estimators=20,
                             verbosity=2)

my_model.fit(train_X, train_y)  # the verbose= argument to fit() was removed in LightGBM 4.0

# Predict on the test set
predictions = my_model.predict(test_X)

# Evaluate the predictions (mean absolute error)
print("Mean Absolute Error : " + str(mean_absolute_error(predictions, test_y)))

Result

[LightGBM] [Debug] Dataset::GetMultiBinFromAllFeatures: sparse rate 0.000000
[LightGBM] [Debug] init for col-wise cost 0.000011 seconds, init for row-wise cost 0.000109 seconds
[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000126 seconds.
You can set force_col_wise=true to remove the overhead.
[LightGBM] [Info] Total Bins 27
[LightGBM] [Info] Number of data points in the train set: 75, number of used features: 1
[LightGBM] [Info] Start training from score 10.744539
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf
[LightGBM] [Debug] Trained a tree with leaves = 3 and max_depth = 2
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf
[LightGBM] [Debug] Trained a tree with leaves = 3 and max_depth = 2
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf
[LightGBM] [Debug] Trained a tree with leaves = 3 and max_depth = 2
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf
[LightGBM] [Debug] Trained a tree with leaves = 3 and max_depth = 2
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf
[LightGBM] [Debug] Trained a tree with leaves = 2 and max_depth = 1
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf
[LightGBM] [Debug] Trained a tree with leaves = 3 and max_depth = 2
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf
[LightGBM] [Debug] Trained a tree with leaves = 3 and max_depth = 2
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf
[LightGBM] [Debug] Trained a tree with leaves = 2 and max_depth = 1
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf
[LightGBM] [Debug] Trained a tree with leaves = 3 and max_depth = 2
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf
[LightGBM] [Debug] Trained a tree with leaves = 3 and max_depth = 2
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf
[LightGBM] [Debug] Trained a tree with leaves = 3 and max_depth = 2
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf
[LightGBM] [Debug] Trained a tree with leaves = 2 and max_depth = 1
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf
[LightGBM] [Debug] Trained a tree with leaves = 3 and max_depth = 2
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf
[LightGBM] [Debug] Trained a tree with leaves = 3 and max_depth = 2
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf
[LightGBM] [Debug] Trained a tree with leaves = 2 and max_depth = 1
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf
[LightGBM] [Debug] Trained a tree with leaves = 3 and max_depth = 2
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf
[LightGBM] [Debug] Trained a tree with leaves = 3 and max_depth = 2
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf
[LightGBM] [Debug] Trained a tree with leaves = 2 and max_depth = 1
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf
[LightGBM] [Debug] Trained a tree with leaves = 3 and max_depth = 2
[LightGBM] [Warning] No further splits with positive gain, best gain: -inf
[LightGBM] [Debug] Trained a tree with leaves = 3 and max_depth = 2
Mean Absolute Error : 18.71203698086779
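
As with the classifier earlier, passing an eval_set together with the early-stopping callback stops training once the validation metric stops improving; a sketch assuming LightGBM 4.x:

from lightgbm import early_stopping

my_model = lgb.LGBMRegressor(objective='regression', num_leaves=31, learning_rate=0.05, n_estimators=20)
my_model.fit(train_X, train_y,
             eval_set=[(test_X, test_y)],
             eval_metric='l1',
             callbacks=[early_stopping(stopping_rounds=5)])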

Reference: https://mp.weixin.qq.com/s/75etKylCWXzBGKTplS1qig

