%matplotlib inline
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import sys
sys.path.append("/home/kesci/input")
import d2lzh1981 as d2l
print(torch.__version__)
torch.set_default_tensor_type(torch.FloatTensor)


test_data = pd.read_csv("/home/kesci/input/houseprices2807/house-prices-advanced-regression-techniques/test.csv")
train_data = pd.read_csv("/home/kesci/input/houseprices2807/house-prices-advanced-regression-techniques/train.csv")

all_features = pd.concat((train_data.iloc[:, 1:-1], test_data.iloc[:, 1:]))

# Data preprocessing: standardize the numeric features
numeric_features = all_features.dtypes[all_features.dtypes != 'object'].index
all_features[numeric_features] = all_features[numeric_features].apply(
    lambda x: (x - x.mean()) / (x.std()))
# After standardization every numeric feature has zero mean, so missing values
# can simply be replaced with 0
all_features[numeric_features] = all_features[numeric_features].fillna(0)
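
# A small illustrative sketch (not part of the original pipeline; the toy column
# below is made up): standardization maps a numeric column to zero mean and unit
# variance, and NaNs filled with 0 afterwards sit exactly at the new mean.
toy = pd.Series([1.0, 2.0, 3.0, np.nan])
toy_std = (toy - toy.mean()) / toy.std()
print(toy_std.fillna(0).tolist())  # [-1.0, 0.0, 1.0, 0.0]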


# dummy_na=True treats missing values as a valid category and creates an
# indicator feature for them
all_features = pd.get_dummies(all_features, dummy_na=True)
all_features.shape
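
# A hedged illustration on a made-up toy frame (not from the original data):
# dummy_na=True gives missing categorical values their own indicator column.
toy_cat = pd.DataFrame({'MSZoning': ['RL', 'RM', np.nan]})
print(pd.get_dummies(toy_cat, dummy_na=True))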

n_train = train_data.shape[0]
train_features = torch.tensor(all_features[:n_train].values, dtype=torch.float)
test_features = torch.tensor(all_features[n_train:].values, dtype=torch.float)
train_labels = torch.tensor(train_data.SalePrice.values, dtype=torch.float).view(-1, 1)
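
# Sanity check (an addition, not in the original): train and test must share the
# same feature columns after one-hot encoding. Note that newer pandas versions
# may return bool dummy columns, in which case an explicit cast such as
# all_features.values.astype(np.float32) may be needed before torch.tensor.
assert train_features.shape[1] == test_features.shape[1]
print(train_features.shape, test_features.shape, train_labels.shape)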

# Train the model
loss = torch.nn.MSELoss()

def get_net(feature_num):
    net = nn.Linear(feature_num, 1)
    for param in net.parameters():
        nn.init.normal_(param, mean=0, std=0.01)
    return net


def log_rmse(net, features, labels):
    with torch.no_grad():
        # Clamp predictions below 1 to 1 to stabilize the logarithm
        clipped_preds = torch.max(net(features), torch.tensor(1.0))
        rmse = torch.sqrt(loss(clipped_preds.log(), labels.log()))
    return rmse.item()
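
# A tiny hedged check of log_rmse (not part of the original flow): with an
# identity "net" and predictions equal to the labels, the log-RMSE should be 0.
_toy_labels = torch.tensor([[100.0], [200.0]])
print(log_rmse(lambda x: x, _toy_labels, _toy_labels))  # expected 0.0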


def train(net, train_features, train_labels, test_features, test_labels,
          num_epochs, learning_rate, weight_decay, batch_size):
    train_ls, test_ls = [], []
    dataset = torch.utils.data.TensorDataset(train_features, train_labels)
    train_iter = torch.utils.data.DataLoader(dataset, batch_size, shuffle=True)
    # Use the Adam optimization algorithm
    optimizer = torch.optim.Adam(params=net.parameters(), lr=learning_rate, weight_decay=weight_decay)
    net = net.float()
    for epoch in range(num_epochs):
        for X, y in train_iter:
            l = loss(net(X.float()), y.float())
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
        train_ls.append(log_rmse(net, train_features, train_labels))
        if test_labels is not None:
            test_ls.append(log_rmse(net, test_features, test_labels))
    return train_ls, test_ls
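
# A quick hedged smoke test of train() on a small slice (illustrative settings,
# not the tuned hyperparameters used below); it should finish in a few seconds.
_net = get_net(train_features.shape[1])
_ls, _ = train(_net, train_features[:128], train_labels[:128], None, None,
               num_epochs=2, learning_rate=5, weight_decay=0, batch_size=64)
print('smoke-test train rmse per epoch:', _ls)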


# K-fold cross-validation
def get_k_fold_data(k, i, X, y):
    # Return the training and validation data needed for the i-th fold
    assert k > 1
    fold_size = X.shape[0] // k
    X_train, y_train = None, None
    for j in range(k):
        idx = slice(j * fold_size, (j + 1) * fold_size)
        X_part, y_part = X[idx, :], y[idx]
        if j == i:
            X_valid, y_valid = X_part, y_part
        elif X_train is None:
            X_train, y_train = X_part, y_part
        else:
            X_train = torch.cat((X_train, X_part), dim=0)
            y_train = torch.cat((y_train, y_part), dim=0)
    return X_train, y_train, X_valid, y_valid
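
# Hedged sanity check (an addition, not in the original): with k=5 each
# validation fold holds X.shape[0] // 5 rows and the training split the rest.
_Xt, _yt, _Xv, _yv = get_k_fold_data(5, 0, train_features, train_labels)
print(_Xt.shape, _Xv.shape)  # roughly 4/5 vs 1/5 of the training rows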

def k_fold(k, X_train, y_train, num_epochs,
           learning_rate, weight_decay, batch_size):
    train_l_sum, valid_l_sum = 0, 0
    for i in range(k):
        data = get_k_fold_data(k, i, X_train, y_train)
        net = get_net(X_train.shape[1])
        train_ls, valid_ls = train(net, *data, num_epochs, learning_rate,
                                   weight_decay, batch_size)
        train_l_sum += train_ls[-1]
        valid_l_sum += valid_ls[-1]
        if i == 0:
            d2l.semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'rmse',
                         range(1, num_epochs + 1), valid_ls,
                         ['train', 'valid'])
        print('fold %d, train rmse %f, valid rmse %f' % (i, train_ls[-1], valid_ls[-1]))
    return train_l_sum / k, valid_l_sum / k

# Model selection
k, num_epochs, lr, weight_decay, batch_size = 5, 100, 5, 0, 64
train_l, valid_l = k_fold(k, train_features, train_labels, num_epochs, lr, weight_decay, batch_size)
print('%d-fold validation: avg train rmse %f, avg valid rmse %f' % (k, train_l, valid_l))
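
# A hedged sketch of simple hyperparameter search built on k_fold (the candidate
# grids below are illustrative assumptions, not tuned values from the original);
# left commented out because each combination reruns the full K-fold training.
# for lr_try in (1, 5, 10):
#     for wd_try in (0, 1e-4, 1e-2):
#         _, v = k_fold(k, train_features, train_labels, num_epochs, lr_try,
#                       wd_try, batch_size)
#         print('lr %g, weight_decay %g, avg valid rmse %f' % (lr_try, wd_try, v))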


# Prediction
def train_and_pred(train_features, test_features, train_labels, test_data,
                   num_epochs, lr, weight_decay, batch_size):
    net = get_net(train_features.shape[1])
    train_ls, _ = train(net, train_features, train_labels, None, None,
                        num_epochs, lr, weight_decay, batch_size)
    d2l.semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'rmse')
    print('train rmse %f' % train_ls[-1])
    preds = net(test_features).detach().numpy()
    test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
    submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)
    submission.to_csv('./submission.csv', index=False)

# sample_submission_data = pd.read_csv("../input/house-prices-advanced-regression-techniques/sample_submission.csv")

train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size)
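
# Hedged check of the submission format (an addition, not in the original):
# the file should contain exactly two columns, Id and SalePrice.
print(pd.read_csv('./submission.csv').head())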