Linear Regression (LR) Algorithm Code (Python)


Code

  1. Basic usage code
from sklearn.linear_model import LinearRegression


x = [[80, 86], [82, 80], [85, 78], [90, 90],
     [86, 82], [82, 90], [78, 80], [92, 94]]
y = [84.2, 80.6, 80.1, 90, 83.2, 87.6, 79.4, 93.4]
# Instantiate the estimator
estimator = LinearRegression()
# Train with the fit method
estimator.fit(x, y)

# Inspect the learned coefficients
print(estimator.coef_)

print(estimator.predict([[100, 80]]))
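
As a quick sanity check, the prediction above can also be recomputed by hand from the fitted parameters (y_hat = coef_ · x + intercept_). A minimal sketch, assuming the estimator has been fit as in the snippet above:

import numpy as np

# Manually recompute y_hat = w . x + b from the learned parameters
w = estimator.coef_               # learned weights, one per feature
b = estimator.intercept_          # learned bias
x_new = np.array([100, 80])
print(np.dot(w, x_new) + b)       # matches estimator.predict([[100, 80]])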
  2. Boston housing example
from sklearn.datasets import load_boston
# Note: load_boston was deprecated in scikit-learn 1.0 and removed in 1.2;
# on newer versions, substitute another dataset (e.g. fetch_california_housing).
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression, SGDRegressor
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import Ridge, ElasticNet, Lasso
# Ridge is ridge regression, ElasticNet is elastic net, Lasso is Lasso regression


def linear_model1():
    """
    線性回歸:正規方程
    :return:None
    """
    # 1.獲取數據
    data = load_boston()

    # 2.數據集划分
    x_train, x_test, y_train, y_test = train_test_split(data.data, data.target, random_state=22)

    # 3.特征工程-標准化
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.fit_transform(x_test)

    # 4.機器學習-線性回歸(正規方程)
    estimator = LinearRegression()
    estimator.fit(x_train, y_train)

    # 5.模型評估
    # 5.1 獲取系數等值
    y_predict = estimator.predict(x_test)
    print("預測值為:\n", y_predict)
    print("模型中的系數為:\n", estimator.coef_)
    print("模型中的偏置為:\n", estimator.intercept_)

    # 5.2 評價
    # 均方誤差
    error = mean_squared_error(y_test, y_predict)
    print("誤差為:\n", error)


def linear_model2():
    """
    線性回歸:梯度下降法
    :return:None
    """
    # 1.獲取數據
    data = load_boston()

    # 2.數據集划分
    x_train, x_test, y_train, y_test = train_test_split(data.data, data.target, random_state=22)

    # 3.特征工程-標准化
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.fit_transform(x_test)

    # 4.機器學習-線性回歸(特征方程)
    estimator = SGDRegressor(max_iter=1000)
    estimator.fit(x_train, y_train)

    # 5.模型評估
    # 5.1 獲取系數等值
    y_predict = estimator.predict(x_test)
    print("預測值為:\n", y_predict)
    print("模型中的系數為:\n", estimator.coef_)
    print("模型中的偏置為:\n", estimator.intercept_)

    # 5.2 評價
    # 均方誤差
    error = mean_squared_error(y_test, y_predict)
    print("誤差為:\n", error)


def linear_model3():
    """
    線性回歸:嶺回歸
    :return:
    """
    # 1.獲取數據
    data = load_boston()

    # 2.數據集划分
    x_train, x_test, y_train, y_test = train_test_split(data.data, data.target, random_state=22)

    # 3.特征工程-標准化
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.fit_transform(x_test)

    # 4.機器學習-線性回歸(嶺回歸)
    estimator = Ridge(alpha=1)
    # estimator = RidgeCV(alphas=(0.1, 1, 10))
    estimator.fit(x_train, y_train)

    # 5.模型評估
    # 5.1 獲取系數等值
    y_predict = estimator.predict(x_test)
    print("預測值為:\n", y_predict)
    print("模型中的系數為:\n", estimator.coef_)
    print("模型中的偏置為:\n", estimator.intercept_)

    # 5.2 評價
    # 均方誤差
    error = mean_squared_error(y_test, y_predict)
    print("誤差為:\n", error)


if __name__ == '__main__':
    linear_model1()
    linear_model2()
    linear_model3()
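
Lasso and ElasticNet are imported at the top but not exercised above. Since they expose the same estimator interface, a minimal sketch might look like the function below; the name linear_model4 and the alpha / l1_ratio values are illustrative choices, not tuned settings:

def linear_model4():
    """
    Linear regression: Lasso and elastic net (sketch)
    :return: None
    """
    # Same data loading, splitting and scaling as in the functions above
    data = load_boston()
    x_train, x_test, y_train, y_test = train_test_split(data.data, data.target, random_state=22)
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)

    # L1-regularized linear regression
    lasso = Lasso(alpha=0.1)
    lasso.fit(x_train, y_train)
    print("Lasso MSE:\n", mean_squared_error(y_test, lasso.predict(x_test)))

    # Combined L1/L2 regularization
    enet = ElasticNet(alpha=0.1, l1_ratio=0.5)
    enet.fit(x_train, y_train)
    print("ElasticNet MSE:\n", mean_squared_error(y_test, enet.predict(x_test)))

If desired, linear_model4() can be called from the __main__ block alongside the other three functions.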

