# Implementation of deep-learning code from scratch (手撕代码)

# ----- Linear regression model -----
import torch
import torch.nn as nn
import math
import random
import numpy as np

# 计算线性回归模型 梯度
def Cal_SGD_Linear(x, pred, label, lr, k, bias=0):
    """One SGD step for the linear model pred = k * x + bias under MSE loss.

    Args:
        x: input values (sequence).
        pred: model predictions for x (sequence, same length).
        label: ground-truth targets (sequence, same length).
        lr: learning rate.
        k: current slope (weight).
        bias: current bias; 0 means "no bias term" and is passed through.

    Returns:
        dict with the updated 'k' and 'bias'.
    """
    n = len(x)
    grad_k = 0.0
    grad_b = 0.0
    for idx, item in enumerate(pred):
        err = item - label[idx]
        grad_k += err * x[idx]  # d(MSE)/dk accumulates err * x
        grad_b += err           # BUG FIX: d(MSE)/db must NOT be multiplied by x
    # Gradient of MSE = (1/n) * sum((pred - label)^2) w.r.t. each parameter.
    grad_k = (2 * grad_k) / n
    grad_b = (2 * grad_b) / n
    print(k - lr * grad_k)  # kept: original traced the updated slope
    return {'k': k - lr * grad_k,
            'bias': 0 if bias == 0 else bias - lr * grad_b}

def Cal_MSE(pred, label):
    """Return the mean squared error between pred and label.

    BUG FIX: the original computed the loss but returned None (both result
    prints were commented out), so callers could never use the value.
    Returns 0.0 for empty input instead of dividing by zero.
    """
    if len(pred) == 0:
        return 0.0
    total = 0.0
    for idx, item in enumerate(pred):
        total += (item - label[idx]) ** 2
    # RMSE (均方根误差) would be (total / len(pred)) ** 0.5
    return total / len(pred)

def gen_line_data(len_data):
    """Sample len_data points of y = 2x + uniform[0,1) noise, x in [10, 110].

    Returns a dict of column tensors: {'x': (len_data, 1), 'y': (len_data, 1)}.
    """
    xs = torch.unsqueeze(torch.linspace(10, 110, len_data), dim=1)
    noise = torch.rand(xs.size())
    return {'x': xs, 'y': 2 * xs + noise}

class LineRegressionNet(nn.Module):
    """Single-weight linear model y = w * x (no bias) for 1-D regression."""

    def __init__(self):
        # BUG FIX: dropped the incorrect `-> object` annotation; __init__
        # returns None by contract.
        super().__init__()
        # bias=False: the demo fits y = 2x, so only the slope is learned.
        self.liner = nn.Linear(1, 1, bias=False)

    def forward(self, x):
        """Map x of shape (N, 1) to predictions of shape (N, 1)."""
        out = self.liner(x)
        return out

class line_model():
    """Trains a LineRegressionNet with SGD + MSE loss.

    (Class name kept lowercase for backward compatibility with callers.)
    """

    def __init__(self, lr, epoches):
        self.lr = lr
        self.epoches = epoches
        self.init_model()

    def init_model(self):
        """Build the model, optimizer and loss function."""
        self.model = LineRegressionNet()
        self.optimiser = torch.optim.SGD(self.model.parameters(), lr=self.lr)
        self.loss_fn = torch.nn.MSELoss()

    def train_model(self, data, model_save_path="model.ck"):
        """Full-batch SGD for self.epoches epochs, then save the weights."""
        x = data['x']
        y = data['y']
        for th in range(self.epoches):
            # BUG FIX: random.shuffle on tensors corrupts rows (the swap
            # assigns aliased views). Shuffle x and y with one permutation.
            perm = torch.randperm(x.size(0))
            xs, ys = x[perm], y[perm]
            # BUG FIX: zero_grad was missing — PyTorch accumulates gradients
            # across backward() calls by default.
            self.optimiser.zero_grad()
            outputs = self.model(xs)
            loss = self.loss_fn(outputs, ys)
            loss.backward()
            self.optimiser.step()
        self.model_save_path = model_save_path
        torch.save(self.model.state_dict(), model_save_path)

    def test_model(self, data):
        """Print inputs and predictions for the given data dict."""
        x = data['x']
        y = data['y']
        pred = self.model(x)
        print(x, pred)

def _run_linear_demo():
    """Train the linear model on 10 points of y ≈ 2x, then print predictions."""
    train_data = gen_line_data(10)
    test_data = gen_line_data(5)
    learning_rate = 0.0001
    liner_model = line_model(learning_rate, 100)
    liner_model.train_model(train_data)
    liner_model.test_model(test_data)


# Guard the demo so importing this module has no training side effects.
if __name__ == "__main__":
    _run_linear_demo()

'''
loss_function = torch.nn.MSELoss()
optimizer = torch.optim.SGD(liner_model.parameters(), lr=learning_rate)  # 随机梯度下降
x = data['x']
y = data['y']
for i in range(10):
outputs = liner_model(x)
# Cal_MSE(outputs, y)
loss = loss_function(outputs, y)  # 前向传播
pp = liner_model.state_dict()
print('liner.weight', pp['liner.weight'])
Cal_SGD_Linear(x, outputs, y, learning_rate, pp['liner.weight'][0])
loss.backward()  # 反向传播
optimizer.step()  # 优化器参数更新

pp = liner_model.state_dict()
# test_data = torch.unsqueeze(torch.linspace(100, 200, 10) , dim=1)
# print(test_data, liner_model(test_data))'''


# ----- CNN model -----
import torch
import torch.nn as nn
import numpy as np

def gen_line_data(len_data):
    """Sample len_data points of y = 2x + uniform[0,1) noise, x in [0, 100].

    Returns a dict of column tensors: {'x': (len_data, 1), 'y': (len_data, 1)}.
    """
    xs = torch.unsqueeze(torch.linspace(0, 100, len_data), dim=1)
    noise = torch.rand(xs.size())
    return {'x': xs, 'y': 2 * xs + noise}

class CnnNet(nn.Module):
    """Tiny conv network: one conv block, global average pool, linear head.

    Input:  (N, 1, H, W) images.
    Output: (N, 10) scores.
    """

    def __init__(self):
        # BUG FIX: super().__init__() was missing — nn.Module raises on the
        # first submodule assignment without it.
        super().__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.fc = nn.Linear(16, 10)

    def forward(self, x):
        out = self.layer1(x)        # (N, 16, H/2, W/2)
        # BUG FIX: the linear head expects a 16-feature vector per sample;
        # the original applied fc directly to the 4-D conv output. Global
        # average pooling reduces the spatial dims for any input size.
        out = out.mean(dim=(2, 3))  # (N, 16)
        out = self.fc(out)          # (N, 10)
        return out

def _run_cnn_demo():
    """One SGD step of CnnNet on random image-like data, then a prediction.

    NOTE(review): the original fed 1-D linspace data of shape (10, 1) into a
    Conv2d model, which cannot run; the demo now uses (N, 1, H, W) inputs and
    (N, 10) targets so the forward/backward pass actually executes.
    """
    cnn_model = CnnNet()
    learning_rate = 0.02
    loss_function = torch.nn.MSELoss()
    optimizer = torch.optim.SGD(cnn_model.parameters(), lr=learning_rate)

    x = torch.randn(8, 1, 16, 16)  # fake single-channel images
    y = torch.randn(8, 10)         # fake 10-dim targets

    # Forward pass
    outputs = cnn_model(x)
    loss = loss_function(outputs, y)
    # Backward pass and parameter update (zero_grad was missing originally)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    test_data = torch.randn(2, 1, 16, 16)
    print(test_data, cnn_model(test_data))


if __name__ == "__main__":
    _run_cnn_demo()

#-*- coding: UTF-8 -*-
"""Compare a gold-label file against a prediction file and dump mismatches.

Usage: python script.py <gold_file> <pred_file>
  gold_file: lines of "<key>\t<label>"
  pred_file: lines of "<key> <label>"
Mismatched (key, predicted_label) pairs are written to ./badcase.
"""

import sys


def load_label_pairs(path, sep):
    """Return {key: label} parsed from a file of '<key><sep><label>' lines.

    Raises ValueError on malformed lines.
    """
    pairs = {}
    with open(path, 'r', encoding="utf-8") as fin:
        for line in fin:
            arr = line.strip().split(sep)
            # BUG FIX: was a bare `assert`, which is stripped under -O.
            if len(arr) != 2:
                raise ValueError("malformed line: %r" % line)
            pairs[arr[0]] = arr[1]
    return pairs


if __name__ == "__main__":
    src = sys.argv[1]
    out = sys.argv[2]
    print(src, out)
    # BUG FIX: the gold mapping was stored in a variable named `dict`,
    # shadowing the builtin.
    gold = load_label_pairs(src, '\t')
    predictions = load_label_pairs(out, ' ')
    # BUG FIX: `badcase` was never defined, and the output file was
    # re-opened (and truncated) inside the comparison loop. Collect all
    # mismatches first, then write them once.
    badcase = {key: value for key, value in predictions.items()
               if gold.get(key) != value}
    with open('badcase', 'w', encoding="utf-8") as fout:
        for key, value in badcase.items():
            fout.write('\t'.join((key, value)) + '\n')


#### Math utilities (数学板块)

def Mean(nums):
    """Arithmetic mean (均值) of a non-empty sequence of numbers."""
    count = len(nums)
    return sum(nums) / count

'''
Note: x ** 2 and math.pow(x, 2) are equivalent ways to square a number;
the ** operator keeps integers exact, while math.pow always returns a float.
'''
def Var(nums):
    """Population variance (方差): average squared deviation from the mean."""
    center = Mean(nums)
    squared_devs = [(value - center) ** 2 for value in nums]
    return sum(squared_devs) / len(nums)
def SD(nums):
    """Population standard deviation (标准差, sigma — as in the 3-sigma rule)."""
    variance = Var(nums)
    return variance ** 0.5

# posted @ 2022-04-18 01:13  365/24/60  阅读(5)  评论(0)