使用PyTorch參加房?jī)r(jià)預(yù)測(cè)比賽,順便給自己寫一個(gè)標(biāo)準(zhǔn)的PyTorch模板。
1. 數(shù)據(jù)預(yù)處理
引入必要的包,tqdm監(jiān)控訓(xùn)練進(jìn)程,colorama彩色打印
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from datetime import datetime
from colorama import Fore, Back
import warnings
warnings.filterwarnings('ignore')
這是一個(gè)簡(jiǎn)單的回歸問(wèn)題,這里只做簡(jiǎn)單的預(yù)處理,對(duì)于數(shù)值型數(shù)據(jù)轉(zhuǎn)化為正態(tài)分布然后缺失填充0,標(biāo)稱型數(shù)據(jù)做onehot編碼
train_df = pd.read_csv('train.csv')
test_df = pd.read_csv('test.csv')
# Column 0 is the row id and the last train column is the label; drop both
# and stack train + test features so they share one preprocessing pipeline.
all_features = pd.concat((train_df.iloc[:, 1:-1], test_df.iloc[:, 1:]))
# Standardize numeric columns to zero mean / unit variance.
numeric_features = all_features.dtypes[all_features.dtypes != 'object'].index
all_features[numeric_features] = all_features[numeric_features].apply(
    lambda col: (col - col.mean()) / col.std())
# After standardization every numeric column has mean 0, so missing values
# can simply be filled with 0.
all_features[numeric_features] = all_features[numeric_features].fillna(0)
# One-hot encode categorical columns; NaN gets its own indicator column.
all_features = pd.get_dummies(all_features, dummy_na=True)
print(f'預(yù)處理之后數(shù)據(jù)形狀: {all_features.shape}')
[out]: 預(yù)處理之后數(shù)據(jù)形狀: (2919, 331)
這里是回歸問(wèn)題,就簡(jiǎn)單的2,8分割訓(xùn)練集與驗(yàn)證集。一般來(lái)說(shuō)訓(xùn)練集還會(huì)被分割為訓(xùn)練集和驗(yàn)證集,這兩個(gè)部分都是有類標(biāo)簽的。測(cè)試集是最終預(yù)測(cè)提交的數(shù)據(jù),因此沒(méi)有類標(biāo)簽。
train_num = train_df.shape[0]  # number of labelled (training) rows
train_data = all_features.iloc[:train_num, :]   # labelled feature rows
test_data = all_features.iloc[train_num:, :]    # unlabelled rows to predict
# Hold out 20% of the labelled rows as a validation set.
train_features, val_features, train_labels, val_labels = train_test_split(
    train_data, train_df.iloc[:, -1], test_size=0.2, random_state=42)
# Convert features to float tensors.
train_features = torch.tensor(train_features.values, dtype=torch.float)
val_features = torch.tensor(val_features.values, dtype=torch.float)
test_features = torch.tensor(test_data.values, dtype=torch.float)
# Labels get a trailing dimension ([N] -> [N, 1]) so their shape matches
# the network output expected by the loss function.
train_labels = torch.tensor(train_labels.values, dtype=torch.float).unsqueeze(1)
val_labels = torch.tensor(val_labels.values, dtype=torch.float).unsqueeze(1)
print(f'訓(xùn)練集數(shù)據(jù): {train_features.shape}')
print(f'驗(yàn)證集數(shù)據(jù): {val_features.shape}')
print(f'測(cè)試集數(shù)據(jù): {test_features.shape}')
[out]:
訓(xùn)練集數(shù)據(jù): torch.Size([1168, 331])
驗(yàn)證集數(shù)據(jù): torch.Size([292, 331])
測(cè)試集數(shù)據(jù): torch.Size([1459, 331])
2. 編寫網(wǎng)絡(luò)
首先,處理輸入數(shù)據(jù)集,pytorch標(biāo)準(zhǔn)寫法,編寫Dataset類,然后將其裝入一個(gè)迭代器
# Dataset class
class myDataset(Dataset):
    """Minimal map-style dataset pairing feature rows with labels.

    Fix: subclass ``torch.utils.data.Dataset`` (imported above) as the
    surrounding text intends — DataLoader duck-types, but subclassing is
    the documented contract for map-style datasets.
    """

    def __init__(self, data, label):
        # data: 2-D feature tensor; label: per-row label tensor.
        self.data = data
        self.label = label

    def __len__(self):
        # Number of samples (one label per row).
        return len(self.label)

    def __getitem__(self, idx):
        # Return one (features, label) pair.
        return self.data[idx, :], self.label[idx]
train_dataset = myDataset(train_features, train_labels)
val_dataset = myDataset(val_features, val_labels)
# Wrap the datasets in DataLoaders.
# Bug fix: the "變?yōu)榈? comment had merged onto the same line as the
# train_iter assignment, turning the whole statement into a comment, so
# train_iter was never defined and the training loop would raise NameError.
train_iter = DataLoader(dataset=train_dataset, batch_size=64, shuffle=True, num_workers=4)
val_iter = DataLoader(dataset=val_dataset, batch_size=64, shuffle=False, num_workers=4)
然后,定義網(wǎng)絡(luò)初始化參數(shù),定義損失函數(shù),優(yōu)化器,自定義得分函數(shù)
# 初始化權(quán)重
def _weight_init(m):
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Network definition.
class Net(nn.Module):
    """Three-layer MLP regressor: 331 -> 512 -> 256 -> 1."""

    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(331, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, 1)
        # Initialize all parameters with the shared init hook.
        self.apply(_weight_init)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
# Custom score: RMSE in log space, which is the competition's metric.
def custom_score(y_true, y_pred):
    """Return the RMSE between log1p(y_true) and log1p(y_pred).

    Fix: the original used ``mean_squared_error(..., squared=False)``;
    the ``squared`` parameter is deprecated and removed in scikit-learn
    >= 1.6.  Computing the same value with NumPy keeps the function
    stable across sklearn versions (identical result).
    """
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    return float(np.sqrt(np.mean((np.log1p(y_true) - np.log1p(y_pred)) ** 2)))
# Bug fix: `device` is used below (and inside train()) but was never
# defined anywhere in the file — pick GPU when available, else CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = Net()
criterion = torch.nn.MSELoss()  # MSE loss for the regression target
net = net.to(device)            # move model and loss to the chosen device
criterion = criterion.to(device)
optimizer = torch.optim.Adam(params=net.parameters(), lr=0.005, weight_decay=0)
接著, 定義訓(xùn)練函數(shù)
# One-epoch runner, shared between the train and val phases.
# train: forward pass + backprop + parameter update.
# val:   forward pass only, gradients disabled.
def train(net, data_iter, phase, criterion, optimizer=None):
    """Run one epoch over ``data_iter`` and return (mean_loss, rmse).

    phase: 'train' enables gradients and optimizer steps; anything else
    runs inference only.  Both metrics are rounded to 4 decimals.
    """
    y_true = []
    y_pred = []
    mean_loss = []
    is_train = phase == 'train'
    # Bug fix: the original called net.train() unconditionally; validation
    # must run in eval mode (matters for dropout/batchnorm layers).
    if is_train:
        net.train()
    else:
        net.eval()
    with torch.set_grad_enabled(is_train):
        for X, y in data_iter:
            X = X.to(device)
            y = y.to(device)
            out = net(X)
            loss = criterion(out, y)  # compute the loss
            mean_loss.append(loss.item())
            if is_train:
                optimizer.zero_grad()  # reset gradients
                loss.backward()        # back propagation
                optimizer.step()       # update the parameters
            # Collect per-step predictions/targets so the epoch metric is
            # computed over the full sample.  Bug fix: reshape(-1) instead
            # of squeeze() — squeeze() on a final batch of size 1 yields a
            # 0-d array whose tolist() is a scalar, breaking extend().
            y_pred.extend(out.detach().cpu().reshape(-1).tolist())
            y_true.extend(y.detach().cpu().reshape(-1).tolist())
    # Whole-epoch RMSE and mean loss, rounded to 4 decimals.
    rmse = np.round(custom_score(y_true, y_pred), 4)
    mean_loss = np.round(np.mean(mean_loss), 4)
    return mean_loss, rmse
最后,調(diào)用訓(xùn)練函數(shù)進(jìn)行訓(xùn)練并提交結(jié)果
epochs = 100
# Fixed message: the original printed "開(kāi)始訓(xùn)練結(jié)束..." ("start training end").
print(f'{datetime.now()} 開(kāi)始訓(xùn)練...')
for epoch in tqdm(range(epochs)):
    train_mean_loss, train_score = train(net=net,
                                         data_iter=train_iter,
                                         phase='train',
                                         criterion=criterion,
                                         optimizer=optimizer)
    # Bug fix: the validation pass iterated train_iter, so the reported
    # "val" metrics were just training metrics and never measured
    # generalization.  Use val_iter.
    val_mean_loss, val_score = train(net=net,
                                     data_iter=val_iter,
                                     phase='val',
                                     criterion=criterion,
                                     optimizer=None)
    # Log every 10 epochs in cyan-on-black so the lines stand out.
    if epoch % 10 == 0:
        print(Fore.CYAN + Back.BLACK, end='')
        tqdm.write(f'Epoch: {epoch} Train loss: {train_mean_loss} Val loss: {val_mean_loss}', end=' ')
        tqdm.write(f'Train score: {train_score} Val score: {val_score}')
print(f'{datetime.now()} 訓(xùn)練結(jié)束...')

訓(xùn)練結(jié)果
# Build the submission file from the test-set predictions.
submission = pd.read_csv('sample_submission.csv')
# Bug fixes: run inference in eval mode without tracking gradients, and
# move test_features to the model's device (the original would crash when
# the net was on GPU and test_features stayed on CPU).
net.eval()
with torch.no_grad():
    predict = net(test_features.to(device))
predict = predict.cpu().squeeze().numpy()
submission['SalePrice'] = predict
submission.to_csv('torch_submission.csv', index=False)
這里可以看出驗(yàn)證集的最終結(jié)果應(yīng)該是0.11,我們將數(shù)據(jù)提交到kaggle得到最終分?jǐn)?shù)