The main steps are as follows:
Step 1: import the required packages.
Step 2: load the dataset to be used.
Step 3: preprocess the input data: separate the features from the labels, check the data types of the features, and decide whether standardization or normalization is needed (see the sketch after this list).
Step 4: set up the parameters of the neural network.
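As a rough guide for Step 3, the following is a minimal sketch of how one might inspect the feature types and compare standardization with min-max normalization. The DataFrame name df is a placeholder; only the label column name 'actual' comes from the dataset used below, and this helper is not part of the original script.

import pandas as pd
from sklearn.preprocessing import StandardScaler, MinMaxScaler

def inspect_and_scale(df: pd.DataFrame, label_col: str = 'actual'):
    # object/category columns usually need one-hot encoding before training
    print(df.dtypes)
    # keep only the numeric feature columns
    features = df.drop(columns=[label_col]).select_dtypes('number')
    # if the columns live on very different scales, some scaling is usually worthwhile
    print(features.describe().loc[['min', 'max', 'mean', 'std']])
    # StandardScaler: zero mean, unit variance (a common default for neural networks)
    standardized = StandardScaler().fit_transform(features)
    # MinMaxScaler: squashes every feature into [0, 1]
    normalized = MinMaxScaler().fit_transform(features)
    return standardized, normalized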
When using matplotlib, you need to add
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
to prevent backend-related errors.
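Note that the 'TkAgg' backend requires a desktop GUI. If the script runs on a headless machine (an assumption about the environment, not something covered here), a non-interactive backend that writes figures to disk is a common alternative; the snippet below is only a sketch, and the file name example_plot.png is a placeholder.

import matplotlib
matplotlib.use('Agg')  # non-interactive backend, no display required
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([1, 2, 3], [2, 4, 6])
fig.savefig('example_plot.png')  # save to a file instead of calling plt.show()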
The implementation is as follows:
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
# import the sklearn preprocessing module
from sklearn import preprocessing
import torch
import torch.optim as optim
import datetime
import warnings
warnings.filterwarnings("ignore")
# matplotlib inline
features = pd.read_csv('C:\\Users\\Administrator\\Desktop\\temps.csv')
# take a quick look at the data
print(features.head())
# check the data dimensions
print(features.shape)

# convert the year/month/day columns into datetime objects
years = features['year']
months = features['month']
days = features['day']
dates = [str(int(year))+'-'+str(int(month))+'-'+str(int(day)) for year, month, day in zip(years, months, days)]
dates = [datetime.datetime.strptime(date, '%Y-%m-%d') for date in dates]
print(dates[:2])

# prepare the plots
# set the plotting style
plt.style.use('fivethirtyeight')

# set up a 2x2 subplot layout
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2, figsize = (10,10))
fig.autofmt_xdate(rotation=45)

# actual max temperature (the label values)
ax1.plot(dates, features['actual'])
ax1.set_xlabel(''); ax1.set_ylabel('Temperature'); ax1.set_title('Max Temp')

# yesterday's max temperature
ax2.plot(dates, features['temp_1'])
ax2.set_xlabel(''); ax2.set_ylabel('Temperature'); ax2.set_title('Previous Max Temp')

# max temperature two days ago
ax3.plot(dates, features['temp_2'])
ax3.set_xlabel('Date'); ax3.set_ylabel('Temperature'); ax3.set_title('Two Days Prior Max Temp')

# my goofy friend's estimate
ax4.plot(dates, features['friend'])
ax4.set_xlabel('Date'); ax4.set_ylabel('Temperature'); ax4.set_title('Friend Estimate')

plt.tight_layout(pad=2)
plt.show(block=True)

# one-hot encode the categorical columns (e.g. the day-of-week column)
features = pd.get_dummies(features)
print(features.head())

# labels
labels = np.array(features['actual'])

# remove the label column from the features
features = features.drop('actual', axis=1)

# keep the column names separately in case they are needed later
features_list = list(features.columns)

# convert to a NumPy array
features = np.array(features)
print(features.shape)

# preprocess the data
# the feature scales differ widely, so standardize them
input_features = preprocessing.StandardScaler().fit_transform(features)

# build the network model
x = torch.tensor(input_features, dtype=float)
y = torch.tensor(labels, dtype=float).reshape(-1, 1)  # reshape so y matches the (N, 1) shape of the predictions

# initialize the weight parameters
weights = torch.randn((14, 128), dtype=float, requires_grad=True)
# map the 14 input features to 128 hidden units, i.e. project the features into a higher-dimensional space
biases = torch.randn(128, dtype=float, requires_grad=True)
weights2 = torch.randn((128, 1), dtype=float, requires_grad=True)
biases2 = torch.randn(1, dtype=float, requires_grad=True)

learning_rate = 0.01
losses = []
for i in range(1000):
    # compute the hidden layer
    hidden = x.mm(weights) + biases
    # apply the activation function
    hidden = torch.relu(hidden)
    # prediction
    predictions = hidden.mm(weights2) + biases2
    # compute the loss (mean squared error)
    loss = torch.mean((predictions - y) ** 2)
    losses.append(loss.data.numpy())
    # print the loss value periodically
    if i % 100 == 0:
        print('loss:', loss)
    # backpropagate
    loss.backward()
    # update the parameters
    weights.data.add_(-learning_rate * weights.grad.data)
    biases.data.add_(-learning_rate * biases.grad.data)
    weights2.data.add_(-learning_rate * weights2.grad.data)
    biases2.data.add_(-learning_rate * biases2.grad.data)
    # zero the gradients after every update, otherwise they accumulate
    weights.grad.data.zero_()
    biases.grad.data.zero_()
    weights2.grad.data.zero_()
    biases2.grad.data.zero_()
But building the network by hand like this is tedious; the imported packages already provide these building blocks, so we only need to set the corresponding parameters.
The revised version is as follows:
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
# import the sklearn preprocessing module
from sklearn import preprocessing
import torch
import torch.optim as optim
import datetime
import warnings
warnings.filterwarnings("ignore")
# matplotlib inline
features = pd.read_csv('C:\\Users\\Administrator\\Desktop\\temps.csv')
# take a quick look at the data
print(features.head())
# check the data dimensions
print(features.shape)

# convert the year/month/day columns into datetime objects
years = features['year']
months = features['month']
days = features['day']
dates = [str(int(year))+'-'+str(int(month))+'-'+str(int(day)) for year, month, day in zip(years, months, days)]
dates = [datetime.datetime.strptime(date, '%Y-%m-%d') for date in dates]
print(dates[:2])

# one-hot encode the categorical columns (e.g. the day-of-week column)
features = pd.get_dummies(features)
print(features.head())

# labels
labels = np.array(features['actual'])

# remove the label column from the features
features = features.drop('actual', axis=1)

# keep the column names separately in case they are needed later
features_list = list(features.columns)

# convert to a NumPy array
features = np.array(features)
print(features.shape)

# preprocess the data
# the feature scales differ widely, so standardize them
input_features = preprocessing.StandardScaler().fit_transform(features)

# build the network model
# the manual weight and bias tensors from the previous version are not needed here:
# torch.nn.Sequential creates and manages its own parameters
input_size = input_features.shape[1]
hidden_size = 128
output_size = 1
batch_size = 16
my_nn = torch.nn.Sequential(
    torch.nn.Linear(input_size, hidden_size),
    torch.nn.Sigmoid(),
    torch.nn.Linear(hidden_size, output_size),
)
cost = torch.nn.MSELoss(reduction='mean')
optimizer = torch.optim.Adam(my_nn.parameters(), lr=0.01)  # Adam adjusts the effective step size for each parameter dynamically

# train the network
losses = []
for i in range(1000):
    batch_loss = []
    # iterate over the data in mini-batches
    for start in range(0, len(input_features), batch_size):
        end = start + batch_size if start + batch_size < len(input_features) else len(input_features)
        xx = torch.tensor(input_features[start:end], dtype=torch.float)
        yy = torch.tensor(labels[start:end], dtype=torch.float).reshape(-1, 1)  # match the (batch, 1) prediction shape
        prediction = my_nn(xx)
        loss = cost(prediction, yy)
        optimizer.zero_grad()
        loss.backward()
        # update step
        optimizer.step()
        batch_loss.append(loss.data.numpy())
    # print the loss periodically
    if i % 100 == 0:
        losses.append(np.mean(batch_loss))
        print(i, np.mean(batch_loss))

# make predictions with the trained model
# the data still needs to be converted to torch tensor format
x = torch.tensor(input_features,dtype=torch.float)
# convert the predictions to NumPy format for easier downstream processing
pred = my_nn(x).data.numpy()

# convert the date format
dates = [str(int(year)) + '-' + str(int(month)) + '-' + str(int(day)) for year, month, day in zip(years, months, days)]
dates = [datetime.datetime.strptime(date, '%Y-%m-%d') for date in dates]

# create a dataframe holding the dates and the corresponding true labels
true_date = pd.DataFrame(data={'date': dates, 'actual': labels})

# create a dataframe holding the dates and the corresponding model predictions
months = features[:, features_list.index('month')]
days = features[:, features_list.index('day')]
years = features[:, features_list.index('year')]
test_dates = [str(int(year)) + '-' + str(int(month)) + '-' + str(int(day)) for year, month, day in zip(years, months, days)]
test_dates = [datetime.datetime.strptime(date, '%Y-%m-%d') for date in test_dates]
pred_data = pd.DataFrame(data={'date': test_dates, 'pred': pred.reshape(-1)})

# plot the true values and the predictions
plt.plot(true_date['date'], true_date['actual'], 'b-', label='actual')
plt.plot(pred_data['date'],pred_data['pred'],'ro',label = 'prediction')
plt.xticks(fontsize=15, rotation=45, ha='right')
plt.legend()
plt.show(block=True)
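The training loop above slices input_features into mini-batches by hand. A more idiomatic PyTorch option is to wrap the arrays in a TensorDataset and let a DataLoader handle batching and shuffling. The sketch below reuses the names input_features, labels, my_nn, cost, optimizer, and batch_size defined in the script above; it illustrates the pattern and is not the original author's code.

import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader

# wrap the NumPy arrays as tensors; the labels are reshaped to (N, 1) to match the model output
dataset = TensorDataset(
    torch.tensor(input_features, dtype=torch.float),
    torch.tensor(labels, dtype=torch.float).reshape(-1, 1),
)
loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

for i in range(1000):
    batch_loss = []
    for xx, yy in loader:  # the DataLoader yields ready-made mini-batches
        prediction = my_nn(xx)
        loss = cost(prediction, yy)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_loss.append(loss.item())
    if i % 100 == 0:
        print(i, np.mean(batch_loss))

Shuffling each epoch tends to help slightly here, since the rows in temps.csv are ordered by date.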