Polynomial Regression with sklearn


import numpy as np
import matplotlib.pyplot as plt
np.random.seed(666) #seed before sampling so x and the noise are both reproducible
x=np.random.uniform(-3,3,size=100)
X=x.reshape(-1,1)
y=0.5*x**2+x+2+np.random.normal(0,1,size=100)
plt.scatter(x,y)
plt.show()
#Polynomial regression reduces to linear regression: we simply add new features (powers of x) to the original data
x2=np.hstack([X,X**2])
from sklearn.linear_model import LinearRegression
l=LinearRegression()
l.fit(x2,y)
y_pre=l.predict(x2)
plt.scatter(x,y)
plt.plot(np.sort(x),y_pre[np.argsort(x)],color="r")
plt.show()
print(l.coef_) #≈[1.0,0.5], matching the x and x**2 terms of y=0.5x^2+x+2
print(l.intercept_) #≈2

#Polynomial regression in sklearn via PolynomialFeatures, plus Pipeline
#Polynomial regression on one-dimensional data (a single feature)
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(666)
x=np.random.uniform(-3,3,size=100)
X=x.reshape(-1,1)
y=0.5*x**2+x+2+np.random.normal(0,1,size=100)
plt.scatter(x,y)
plt.show()
from sklearn.preprocessing import PolynomialFeatures
poly=PolynomialFeatures(degree=2) #expand the features up to degree 2
poly.fit(X)
x2=poly.transform(X) #adds columns: with the default include_bias=True the output is [1, x, x**2]
print(x2.shape)
print(x2[:5,:])
from sklearn.linear_model import LinearRegression
l=LinearRegression()
l.fit(x2,y)
y_pre=l.predict(x2)
plt.scatter(x,y)
plt.plot(np.sort(x),y_pre[np.argsort(x)],color="r")
plt.show()
print(l.coef_) #the bias column gets coefficient 0; the rest are ≈1.0 and ≈0.5
print(l.intercept_) #≈2
#Polynomial regression on two-dimensional data (two features)
#degree=2 expands two features x,y into a 6-column matrix: 1, x, y, x^2, xy, y^2
x=np.arange(1,11).reshape(-1,2)
print(x.shape)
poly=PolynomialFeatures(degree=2)
poly.fit(x)
x2=poly.transform(x)
print(x2.shape)
print(x)
print(x2)
#degree=3 expands into a 10-column matrix: 1, x, y, x^2, xy, y^2, x^3, x^2*y, x*y^2, y^3
poly=PolynomialFeatures(degree=3)
poly.fit(x)
x2=poly.transform(x)
print(x2.shape)
print(x)
print(x2)
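#To confirm which column is which, PolynomialFeatures can report its output column names;
#this assumes sklearn>=1.0 (older versions used get_feature_names() instead).
print(poly.get_feature_names_out()) #e.g. ['1' 'x0' 'x1' 'x0^2' 'x0 x1' 'x1^2' ...]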

#Pipeline chains three steps: 1 feature expansion - 2 feature scaling - 3 linear regression
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(666)
x=np.random.uniform(-3,3,size=100)
y=0.5*x**2+x+2+np.random.normal(0,1,size=100)
X=x.reshape(-1,1)
plt.scatter(x,y)
plt.show()
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
#Build a reusable polynomial-regression pipeline
poly_reg=Pipeline([
    ("poly",PolynomialFeatures(degree=2)),   #1 feature expansion
    ("std_scaler",StandardScaler()),         #2 feature scaling
    ("lin_reg",LinearRegression())           #3 linear regression
])
poly_reg.fit(X,y)
y_pre=poly_reg.predict(X)
plt.scatter(x,y)
plt.plot(np.sort(x),y_pre[np.argsort(x)],color="r")
plt.show()
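#The fitted coefficients live inside the pipeline; named_steps retrieves a step by
#the name it was given when the Pipeline was built above:
print(poly_reg.named_steps["lin_reg"].coef_)
print(poly_reg.named_steps["lin_reg"].intercept_)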

#Overfitting and underfitting - model generalization
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(666)
x=np.random.uniform(-3,3,size=100)
y=0.5*x**2+x+2+np.random.normal(0,1,size=100)
X=x.reshape(-1,1)
plt.scatter(x,y)
plt.show()
#Fitting a plain linear regression first: underfitting
from sklearn.linear_model import LinearRegression
l=LinearRegression()
l.fit(X,y)
print(l.score(X,y))
y_pre=l.predict(X)
plt.scatter(x,y)
plt.plot(np.sort(x),y_pre[np.argsort(x)],color="r")
plt.show()
from sklearn.metrics import mean_squared_error
print(mean_squared_error(y,y_pre))
#Polynomial regression: wrap the pipeline in a reusable factory function
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import StandardScaler
def PolynomialRegression(degree):
    return Pipeline([
        ("poly",PolynomialFeatures(degree=degree)),   #1 feature expansion
        ("std_scaler",StandardScaler()),              #2 feature scaling
        ("lin_reg",LinearRegression())                #3 linear regression
    ])
poly=PolynomialRegression(degree=2)
poly.fit(X,y)
print(poly.score(X,y))
y_pre=poly.predict(X)
plt.scatter(x,y)
plt.plot(np.sort(x),y_pre[np.argsort(x)],color="r")
plt.show()
from sklearn.metrics import mean_squared_error
print(mean_squared_error(y,y_pre))
#Overfitting: raising the degree to 100 overfits badly; the training MSE keeps shrinking as the degree grows, but the model becomes far too complex to be useful
poly=PolynomialRegression(degree=100)
poly.fit(X,y)
print(poly.score(X,y))
y_pre=poly.predict(X)
plt.scatter(x,y)
plt.plot(np.sort(x),y_pre[np.argsort(x)],color="r")
plt.show()
from sklearn.metrics import mean_squared_error
print(mean_squared_error(y,y_pre))
#Plotting over an evenly spaced grid makes the overfitting obvious
x1=np.linspace(-3,3,100).reshape(100,1)
y1=poly.predict(x1)
plt.scatter(x,y)
plt.plot(x1[:,0],y1,color="r")
plt.axis([-3,3,-1,10])
plt.show()
#Checking generalization with train_test_split
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(X,y,random_state=666)
l=LinearRegression()
l.fit(x_train,y_train)
y_pre1=l.predict(x_test)
print(mean_squared_error(y_test,y_pre1))
P1=PolynomialRegression(degree=2)
P1.fit(x_train,y_train)
y_pre2=P1.predict(x_test)
print(mean_squared_error(y_test,y_pre2))
P2=PolynomialRegression(degree=10)
P2.fit(x_train,y_train)
y_pre3=P2.predict(x_test)
print(mean_squared_error(y_test,y_pre3))
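#A sketch beyond the original three fits: sweep the degree and watch the test MSE
#fall and then rise again - the classic bias-variance trade-off (degrees are illustrative).
for d in [1,2,5,10,20]:
    p=PolynomialRegression(degree=d)
    p.fit(x_train,y_train)
    print(d,mean_squared_error(y_test,p.predict(x_test)))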
#Wrapping a learning-curve plot in a function
#A learning curve visualizes whether a model is overfitting or underfitting
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(X,y,random_state=666)
def plot_learning_curve(algo,x_train,x_test,y_train,y_test):
    train_score=[]
    test_score=[]
    for i in range(1,len(x_train)):
        algo.fit(x_train[:i],y_train[:i])
        y_train_pre=algo.predict(x_train[:i])
        y_test_pre=algo.predict(x_test)
        train_score.append(mean_squared_error(y_train[:i],y_train_pre))
        test_score.append(mean_squared_error(y_test,y_test_pre))
    plt.figure()
    plt.plot([i for i in range(1,len(x_train))],np.sqrt(train_score),"g",label="train_error")
    plt.plot([i for i in range(1,len(x_train))],np.sqrt(test_score),"r",label="test_error")
    plt.legend()
    plt.axis([0,len(x_train)+1,0,4])
    plt.show()
#Learning curves for linear regression and for polynomial regression
plot_learning_curve(LinearRegression(),x_train,x_test,y_train,y_test) #plain linear regression
plot_learning_curve(PolynomialRegression(degree=1),x_train,x_test,y_train,y_test) #underfitting
plot_learning_curve(PolynomialRegression(degree=2),x_train,x_test,y_train,y_test) #a good fit
plot_learning_curve(PolynomialRegression(degree=20),x_train,x_test,y_train,y_test) #overfitting
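#sklearn also ships a built-in helper that does roughly the same job; a minimal sketch,
#scoring with negative MSE so the curves can be converted to RMSE:
from sklearn.model_selection import learning_curve
sizes,tr,te=learning_curve(PolynomialRegression(degree=2),X,y,cv=5,
                           train_sizes=np.linspace(0.1,1.0,10),
                           scoring="neg_mean_squared_error")
plt.plot(sizes,np.sqrt(-tr.mean(axis=1)),"g",label="train_error")
plt.plot(sizes,np.sqrt(-te.mean(axis=1)),"r",label="test_error")
plt.legend()
plt.show()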
#Cross-validation, demonstrated with the kNN algorithm
#Implementation of the three validation strategies:
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
digits=datasets.load_digits()
x=digits.data
y=digits.target
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.4,random_state=666)
#1-1 Plain train/test search
from sklearn.neighbors import KNeighborsClassifier
best_score=0
best_p=0
best_k=0
for k in range(2,11):
    for p in range(1,6):
        knn1=KNeighborsClassifier(weights="distance",p=p,n_neighbors=k)
        knn1.fit(x_train,y_train)
        score=knn1.score(x_test,y_test)
        if score>best_score:
            best_p=p
            best_k=k
            best_score=score
print("best_score:",best_score)
print("best_k:",best_k)
print("best_p:",best_p)
#1-2 Cross-validation
from sklearn.model_selection import cross_val_score
best_p=0
best_k=0
best_score=0
for k in range(2,11):
    for p in range(1,6):
        knn2=KNeighborsClassifier(weights="distance",p=p,n_neighbors=k)
        scores=cross_val_score(knn2,x_train,y_train,cv=5) #cv is the number of folds; older sklearn defaulted to 3, 0.22+ defaults to 5. cross_val_score clones and fits internally, so no separate fit is needed
        score=np.mean(scores)
        if score>best_score:
            best_p=p
            best_k=k
            best_score=score
print("best_score:",best_score)
print("best_k:",best_k)
print("best_p:",best_p)
knn=KNeighborsClassifier(weights="distance",p=2,n_neighbors=2)
knn.fit(x_train,y_train)
print(knn.score(x_test,y_test))
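#What cross_val_score does under the hood - a simplified sketch using plain KFold
#(sklearn actually defaults to StratifiedKFold for classifiers):
from sklearn.model_selection import KFold
fold_scores=[]
for tr_idx,val_idx in KFold(n_splits=5).split(x_train):
    knn_cv=KNeighborsClassifier(weights="distance",p=2,n_neighbors=2)
    knn_cv.fit(x_train[tr_idx],y_train[tr_idx])
    fold_scores.append(knn_cv.score(x_train[val_idx],y_train[val_idx]))
print(np.mean(fold_scores))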
#1-3 Grid search: GridSearchCV cross-validates on the training set to find the best hyperparameter combination
from sklearn.model_selection import GridSearchCV
knn3=KNeighborsClassifier()
param=[
    {
        "weights":["distance"],
        "n_neighbors":[i for i in range(2,11)],
        "p":[k for k in range(1,6)]
    }
]
grid1=GridSearchCV(knn3,param,verbose=1,cv=5) #cv is the number of folds; older sklearn defaulted to 3, 0.22+ defaults to 5
grid1.fit(x_train,y_train)
print(grid1.best_score_)
print(grid1.best_params_)
kn2=grid1.best_estimator_
print(kn2.score(x_test,y_test))
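#When the grid is large, RandomizedSearchCV samples a fixed number of candidates
#instead of trying them all; a hedged sketch (n_iter here is illustrative):
from sklearn.model_selection import RandomizedSearchCV
rand=RandomizedSearchCV(KNeighborsClassifier(),
                        {"weights":["distance"],"n_neighbors":list(range(2,11)),"p":list(range(1,6))},
                        n_iter=20,cv=5,random_state=666)
rand.fit(x_train,y_train)
print(rand.best_params_,rand.best_score_)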
#Model regularization - constraining the size of the model parameters to reduce variance
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(666)
x=np.random.uniform(-3,3,size=100)
y=0.5*x**2+x+2+np.random.normal(0,1,size=100)
X=x.reshape(-1,1)
plt.scatter(x,y)
plt.show()
lin=LinearRegression() #kept in a variable so the fitted coefficients can be inspected below
def PolynomialRegression(degree):
    return Pipeline([
        ("poly",PolynomialFeatures(degree=degree)),   #1 feature expansion
        ("std_scaler",StandardScaler()),              #2 feature scaling
        ("lin_reg",lin)                               #3 linear regression
    ])
poly=PolynomialRegression(degree=100)
poly.fit(X,y)
print(poly.score(X,y))
y_pre=poly.predict(X)
plt.scatter(x,y)
plt.plot(np.sort(x),y_pre[np.argsort(x)],color="r")
plt.show()
print(lin.coef_) #the coefficients are enormous - exactly the symptom regularization targets
#Regularization techniques
#(1) Ridge regression adds alpha*sum(theta_i^2) (an L2 penalty) to the MSE loss to shrink the coefficients; wrap it in a factory function
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(666)
x=np.random.uniform(-3,3,size=100)
X=x.reshape(-1,1)
y=0.5*x**2+x+2+np.random.normal(0,1,size=100)
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(X,y,random_state=666)
from sklearn.linear_model import Ridge
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import mean_squared_error
def RidgeRegression(degree,alpha):
    return Pipeline([
        ("poly",PolynomialFeatures(degree=degree)),
        ("std_scaler",StandardScaler()),
        ("ridge_reg",Ridge(alpha=alpha))
    ])
r1=RidgeRegression(20,0.01) #as alpha grows the fitted curve becomes flatter and flatter
r1.fit(x_train,y_train)
y11=r1.predict(x_test)
print(mean_squared_error(y_test,y11))
plt.figure()
plt.scatter(X,y)
x1=np.linspace(-3,3,100).reshape(100,1)
y1=r1.predict(x1)
plt.plot(x1,y1,"r")
#plt.axis([-3,3,-1,10])
plt.show()
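#A sketch with illustrative alpha values: sweeping alpha shows the trade-off -
#too small barely regularizes, too large flattens the fit toward a horizontal line.
for a in [0.0001,0.01,1,100,10000]:
    r=RidgeRegression(20,a)
    r.fit(x_train,y_train)
    print(a,mean_squared_error(y_test,r.predict(x_test)))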
#(2) LASSO regression adds alpha*sum(|theta_i|) (an L1 penalty) instead; wrap it in a factory function
#Training and predicting with LASSO
from sklearn.linear_model import Lasso
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import mean_squared_error
def LassoRegression(degree,alpha):
    return Pipeline([
        ("poly",PolynomialFeatures(degree=degree)),
        ("std_scaler",StandardScaler()),
        ("lasso_reg",Lasso(alpha=alpha))
    ])
LA1=LassoRegression(20,1) #as alpha grows from 0 the fitted curve flattens and approaches a straight line; its shape differs from the Ridge curve, which follows from the L1 form of the LASSO penalty
LA1.fit(x_train,y_train)
y11=LA1.predict(x_test)
print(mean_squared_error(y_test,y11))
plt.figure()
plt.scatter(X,y)
x1=np.linspace(-3,3,100).reshape(100,1)
y1=LA1.predict(x1)
plt.plot(x1,y1,"r")
#plt.axis([-3,3,-1,10])
plt.show()
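#LASSO's hallmark is sparsity: the L1 penalty drives many coefficients to exactly zero,
#while Ridge only shrinks them. A quick check on the two fitted pipelines above:
lasso_coef=LA1.named_steps["lasso_reg"].coef_
ridge_coef=r1.named_steps["ridge_reg"].coef_
print(np.sum(lasso_coef==0),"of",lasso_coef.size,"LASSO coefficients are exactly zero")
print(np.sum(ridge_coef==0),"of",ridge_coef.size,"Ridge coefficients are exactly zero")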
#(3) Plain polynomial regression, for comparison with the regularized models
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(666)
x=np.random.uniform(-3,3,size=100)
X=x.reshape(-1,1)
y=0.5*x**2+x+2+np.random.normal(0,1,size=100)
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(X,y,random_state=666)
from sklearn.linear_model import Ridge
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
def PolynomialRegression(degree):
    return Pipeline([
        ("poly",PolynomialFeatures(degree=degree)),
        ("std_scaler",StandardScaler()),
        ("lin_reg",LinearRegression())
    ])
poly2_reg=PolynomialRegression(20)
poly2_reg.fit(x_train,y_train) #fit on the training split so the error is comparable to Ridge and LASSO above
y2=poly2_reg.predict(x_test)
print(mean_squared_error(y_test,y2))
print(poly2_reg.score(x_test,y_test))
plt.figure()
plt.scatter(X,y)
x1=np.linspace(-3,3,100).reshape(100,1)
y11=poly2_reg.predict(x1)
plt.plot(x1,y11,"r")
#plt.axis([-3,3,-1,10])
plt.show()



