Using LightGBM via the native API (import lightgbm as lgb)
import lightgbm as lgb
from sklearn.metrics import mean_squared_error
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
# Load the dataset
iris = load_iris()
data = iris.data
target = iris.target
# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.2)
print("Train data length:", len(X_train))
print("Test data length:", len(X_test))
# Convert to LightGBM Dataset format
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
# Training parameters
params = {
    'task': 'train',
    'boosting_type': 'gbdt',    # type of boosting
    'objective': 'regression',  # objective function
    'metric': {'l2', 'l1'},     # evaluation metrics (l2 = MSE, l1 = MAE; AUC does not fit a regression objective)
    'num_leaves': 31,           # maximum number of leaves per tree
    'learning_rate': 0.05,      # learning rate
    'feature_fraction': 0.9,    # fraction of features considered per tree
    'bagging_fraction': 0.8,    # fraction of rows sampled per tree
    'bagging_freq': 5,          # perform bagging every k iterations
    'verbose': 1                # <0: fatal only, =0: errors/warnings, >0: info
}
# Train the model (stop early after 5 rounds without improvement on the validation set)
gbm = lgb.train(params, lgb_train, num_boost_round=20, valid_sets=[lgb_eval],
                callbacks=[lgb.early_stopping(stopping_rounds=5)])
# Save the model
gbm.save_model('model.txt')
# Load the model
gbm = lgb.Booster(model_file='model.txt')
# Predict on the test set
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)
# Evaluate the model with RMSE
print('The rmse of prediction is:', mean_squared_error(y_test, y_pred) ** 0.5)
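As an optional extra (a minimal sketch, not part of the original walkthrough), the native API also provides lgb.cv for k-fold cross-validation with the same params dict and Dataset; the exact metric key names in the returned dict vary slightly across LightGBM versions.
# Cross-validation sketch: reuses params and lgb_train defined above
cv_results = lgb.cv(params, lgb_train, num_boost_round=20, nfold=5, stratified=False, seed=42)
# cv_results maps names such as 'valid l2-mean' to per-round scores averaged over the folds
for name, scores in cv_results.items():
    print(name, scores[-1])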
Using LightGBM via the scikit-learn API (from lightgbm import LGBMRegressor)
from lightgbm import LGBMRegressor, early_stopping
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
import joblib
# Load the dataset
iris = load_iris()
data = iris.data
target = iris.target
# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.2)
# Train the model (stop early after 5 rounds without improvement on the validation set)
gbm = LGBMRegressor(objective='regression', num_leaves=31, learning_rate=0.05, n_estimators=20)
gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], eval_metric='l1',
        callbacks=[early_stopping(stopping_rounds=5)])
# Save the model with joblib
joblib.dump(gbm, 'loan_model.pkl')
# Load the model
gbm = joblib.load('loan_model.pkl')
# Predict on the test set
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_)
# Evaluate the model with RMSE
print('The rmse of prediction is:', mean_squared_error(y_test, y_pred) ** 0.5)
# Feature importances
print('Feature importances:', list(gbm.feature_importances_))
# Grid search for hyperparameter tuning
estimator = LGBMRegressor(num_leaves=31)
param_grid = {
    'learning_rate': [0.01, 0.1, 1],
    'n_estimators': [20, 40]
}
gbm = GridSearchCV(estimator, param_grid)
gbm.fit(X_train, y_train)
print('Best parameters found by grid search are:', gbm.best_params_)
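As a short follow-up (a sketch, not part of the original example), the parameters found by the grid search can be plugged back into LGBMRegressor and evaluated on the held-out test set:
# Refit with the best parameters from the grid search and evaluate on the test set (sketch)
best_gbm = LGBMRegressor(num_leaves=31, **gbm.best_params_)
best_gbm.fit(X_train, y_train)
y_pred = best_gbm.predict(X_test)
print('The rmse after grid search is:', mean_squared_error(y_test, y_pred) ** 0.5)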