The competition data is split into a training set and a test set. Both include features of each house, such as street type, year built, roof type, basement condition, and so on. The feature values are continuous numbers, discrete labels, or even missing values ("na"). Only the training set contains the price of each house, i.e., the label. The datasets can be downloaded from the competition page.
1. Importing packages
%matplotlib inline
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import sys
sys.path.append("..")
import d2lzh_pytorch as d2l
print(torch.__version__)
torch.set_default_tensor_type(torch.FloatTensor)
2. Loading the data
# Load the data
train_data = pd.read_csv('F:/CodeStore/DL_Pytorch/data/kaggle_house/train.csv')
test_data = pd.read_csv('F:/CodeStore/DL_Pytorch/data/kaggle_house/test.csv')
train_data.shape  # output: (1460, 81)
test_data.shape   # output: (1459, 80)
# Look at the first 4 features, the last 2 features, and the label (SalePrice) of the first 4 samples:
train_data.iloc[0:4, [0, 1, 2, 3, -3, -2, -1]]
Output: the first four rows of the selected columns (the table is omitted here).
3. Preprocessing the data
# Concatenate the features of the training and test sets (dropping the Id column and the training label)
all_features = pd.concat((train_data.iloc[:, 1:-1], test_data.iloc[:, 1:]))
# Standardize the continuous numeric features
numeric_features = all_features.dtypes[all_features.dtypes != 'object'].index
all_features[numeric_features] = all_features[numeric_features].apply(
    lambda x: (x - x.mean()) / (x.std()))
# After standardization every numeric feature has mean 0, so missing values can simply be replaced with 0
all_features = all_features.fillna(0)
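The apply call above implements the usual z-score transform: each numeric feature value x is replaced by (x − μ) / σ, where μ and σ are that feature's mean and standard deviation computed over the combined training and test rows. Because the standardized features have mean 0, filling the missing entries with 0, as done above, is equivalent to filling them with the feature's mean.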
# Convert discrete values into indicator (one-hot) features
# dummy_na=True also treats missing values as a valid category and creates an indicator feature for them
all_features = pd.get_dummies(all_features, dummy_na=True)
all_features.shape
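To make the effect of get_dummies concrete, here is a small toy illustration (the column and values are chosen for demonstration only, not taken from the output above): a categorical column with a missing entry is expanded into one indicator column per category plus one for the missing value.
toy = pd.DataFrame({'MSZoning': ['RL', 'RM', None]})
pd.get_dummies(toy, dummy_na=True)  # -> columns MSZoning_RL, MSZoning_RM, MSZoning_nan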
n_train = train_data.shape[0]
train_features = torch.tensor(all_features[:n_train].values, dtype=torch.float)
test_features = torch.tensor(all_features[n_train:].values, dtype=torch.float)
train_labels = torch.tensor(train_data.SalePrice.values, dtype=torch.float).view(-1, 1)
4. Training the model
# Train the model
loss = torch.nn.MSELoss()

def get_net(feature_num):
    net = nn.Linear(feature_num, 1)
    for param in net.parameters():
        nn.init.normal_(param, mean=0, std=0.01)
    return net
The log root-mean-square error (log RMSE) used to evaluate the model is implemented as follows.
def log_rmse(net, features, labels):
    with torch.no_grad():
        # Clamp predictions smaller than 1 to 1 so that taking the logarithm is numerically stable
        clipped_preds = torch.max(net(features), torch.tensor(1.0))
        rmse = torch.sqrt(loss(clipped_preds.log(), labels.log()))
    return rmse.item()
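For reference, the quantity returned by log_rmse is
$$\sqrt{\frac{1}{n}\sum_{i=1}^{n}\left(\log \hat{y}_i - \log y_i\right)^2},$$
where $\hat{y}_i$ is the prediction clipped to be at least 1 and $y_i$ is the true sale price. MSELoss already averages over the n examples, so no extra scaling is needed inside the square root.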
def train(net, train_features, train_labels, test_features, test_labels,
          num_epochs, learning_rate, weight_decay, batch_size):
    train_ls, test_ls = [], []
    dataset = torch.utils.data.TensorDataset(train_features, train_labels)
    train_iter = torch.utils.data.DataLoader(dataset, batch_size, shuffle=True)
    # The Adam optimization algorithm is used here
    optimizer = torch.optim.Adam(params=net.parameters(), lr=learning_rate, weight_decay=weight_decay)
    net = net.float()
    for epoch in range(num_epochs):
        for X, y in train_iter:
            l = loss(net(X.float()), y.float())
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
        train_ls.append(log_rmse(net, train_features, train_labels))
        if test_labels is not None:
            test_ls.append(log_rmse(net, test_features, test_labels))
    return train_ls, test_ls
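As a quick usage sketch (not part of the original notes; the hyperparameter values below are placeholders, not tuned choices), the pieces defined so far can already be wired together to fit the full training set:
net = get_net(train_features.shape[1])
train_ls, _ = train(net, train_features, train_labels, None, None,
                    num_epochs=100, learning_rate=5, weight_decay=0, batch_size=64)
print('final train log rmse:', train_ls[-1])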
5. K-fold cross-validation
# K-fold cross-validation
def get_k_fold_data(k, i, x, y):
    # Return the training and validation data needed for the i-th fold
    assert k > 1
    fold_size = x.shape[0] // k
    x_train, y_train = None, None
    for j in range(k):
        idx = slice(j * fold_size, (j + 1) * fold_size)
        x_part, y_part = x[idx, :], y[idx]
        if j == i:
            x_valid, y_valid = x_part, y_part
        elif x_train is None:
            x_train, y_train = x_part, y_part
        else:
            x_train = torch.cat((x_train, x_part), dim=0)
            y_train = torch.cat((y_train, y_part), dim=0)
    return x_train, y_train, x_valid, y_valid
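get_k_fold_data is typically driven by a loop that trains a fresh model on each fold and averages the final log RMSE values. Below is a minimal sketch of such a loop, assuming the get_net and train functions defined above (the hyperparameter values in the example call are placeholders):
def k_fold(k, x_train, y_train, num_epochs, learning_rate, weight_decay, batch_size):
    train_l_sum, valid_l_sum = 0.0, 0.0
    for i in range(k):
        # Train a fresh model on the i-th split and record the final log RMSE of each part
        data = get_k_fold_data(k, i, x_train, y_train)
        net = get_net(x_train.shape[1])
        train_ls, valid_ls = train(net, *data, num_epochs, learning_rate, weight_decay, batch_size)
        train_l_sum += train_ls[-1]
        valid_l_sum += valid_ls[-1]
        print('fold %d, train rmse %f, valid rmse %f' % (i, train_ls[-1], valid_ls[-1]))
    return train_l_sum / k, valid_l_sum / k

train_l, valid_l = k_fold(5, train_features, train_labels, 100, 5, 0, 64)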