# Optimizations:
# 1. Added a ModelList class that wraps all layers and their forward/backward passes
# 2. Removed `import matplotlib.pyplot as plt` (no plotting needed for now); removed the
#    global `shuffle` variable, which is now handled inside the DataLoader class
# 3. Added the __repr__ magic method so each layer's size can be inspected quickly
# 4. Accuracy printing is temporarily commented out
# Import the required libraries
import numpy as np
import os
import struct
# Define the data-loading functions
def load_images(path):
with open(path, "rb") as f:
data = f.read()
    magic_number, num_items, rows, cols = struct.unpack(">iiii", data[:16])
    return np.asanyarray(bytearray(data[16:]), dtype=np.uint8).reshape(
        num_items, rows, cols
    )
def load_labels(file):
with open(file, "rb") as f:
data = f.read()
return np.asanyarray(bytearray(data[8:]), dtype=np.int32)
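# Note on the slicing above: IDX image files carry a 16-byte big-endian header
# (magic number, item count, rows, cols as int32) and IDX label files an 8-byte
# header (magic number, item count), so pixel bytes start at offset 16 and
# label bytes at offset 8.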
# Define the sigmoid function (numerically stable, split by sign)
def sigmoid(x):
result = np.zeros_like(x)
positive_mask = x >= 0
result[positive_mask] = 1 / (1 + np.exp(-x[positive_mask]))
negative_mask = x < 0
exp_x = np.exp(x[negative_mask])
result[negative_mask] = exp_x / (1 + exp_x)
return result
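# Illustrative sanity check for the stable sigmoid above (not called by the training
# script): splitting by sign keeps np.exp away from large positive arguments, so even
# extreme inputs stay finite.
def _sigmoid_sanity_check():
    y = sigmoid(np.array([-1000.0, 0.0, 1000.0]))
    assert np.all(np.isfinite(y)) and np.isclose(y[1], 0.5)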
# Define the softmax function
def softmax(x):
max_x = np.max(x, axis=-1, keepdims=True)
x = x - max_x
ex = np.exp(x)
sum_ex = np.sum(ex, axis=1, keepdims=True)
result = ex / sum_ex
    result = np.clip(result, 1e-10, 1e10)  # clip away exact zeros so the cross-entropy log never sees 0
return result
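# Illustrative check (not called anywhere): subtracting the row-wise max keeps np.exp
# from overflowing, and every row of the output still sums to 1.
def _softmax_sanity_check():
    p = softmax(np.array([[1000.0, 1000.0, 1000.0], [1.0, 2.0, 3.0]]))
    assert np.allclose(p.sum(axis=1), 1.0)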
# Define the one-hot encoding function
def make_onehot(labels, class_num):
result = np.zeros((labels.shape[0], class_num))
for idx, cls in enumerate(labels):
result[idx, cls] = 1
return result
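# A vectorized alternative sketch (assumes labels is a 1-D integer array): indexing an
# identity matrix yields the same one-hot encoding without the Python loop.
def _make_onehot_vectorized(labels, class_num):
    return np.eye(class_num)[labels]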
# Define the Dataset class
class Dataset:
def __init__(self, all_images, all_labels):
self.all_images = all_images
self.all_labels = all_labels
def __getitem__(self, index):
image = self.all_images[index]
label = self.all_labels[index]
return image, label
def __len__(self):
return len(self.all_images)
# Define the DataLoader class
class DataLoader:
def __init__(self, dataset, batch_size, shuffle=True):
self.dataset = dataset
self.batch_size = batch_size
self.shuffle = shuffle
self.idx = np.arange(len(self.dataset))
def __iter__(self):
        # Reshuffle the indices at the start of each epoch when shuffle is enabled
if self.shuffle:
np.random.shuffle(self.idx)
self.cursor = 0
return self
def __next__(self):
if self.cursor >= len(self.dataset):
raise StopIteration
        # Slice the (possibly shuffled) indices to gather this batch
batch_idx = self.idx[
self.cursor : min(self.cursor + self.batch_size, len(self.dataset))
]
batch_images = self.dataset.all_images[batch_idx]
batch_labels = self.dataset.all_labels[batch_idx]
self.cursor += self.batch_size
return batch_images, batch_labels
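# Minimal usage sketch for Dataset/DataLoader (not called by the script; the random
# arrays below are stand-ins for real MNIST batches). Note the final batch may be
# smaller than batch_size.
def _dataloader_demo():
    ds = Dataset(np.random.rand(10, 784), np.zeros((10, 10)))
    for batch_images, batch_labels in DataLoader(ds, batch_size=4):
        print(batch_images.shape, batch_labels.shape)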
# Define the Linear (fully connected) layer
class Linear:
def __init__(self, in_features, out_features):
self.info = f"Linear({in_features}, {out_features})" # 打印信息
self.w = np.random.normal(0, 1, size=(in_features, out_features))
self.b = np.random.normal(0, 1, size=(1, out_features))
def __repr__(self):
return self.info
def forward(self, x):
self.x = x
return np.dot(x, self.w) + self.b
    def backward(self, G):
        dw = np.dot(self.x.T, G)
        # Sum (not mean) over the batch: G is already divided by the batch size in Softmax.backward
        db = np.sum(G, axis=0, keepdims=True)
        # The gradient w.r.t. the input must use the weights from the forward pass,
        # so compute it before the in-place update; lr is the global set in __main__
        dx = np.dot(G, self.w.T)
        self.w -= lr * dw
        self.b -= lr * db
        return dx
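# Shape sketch for Linear (illustrative, not called by the script): forward maps
# (N, in_features) -> (N, out_features). Keep in mind that backward reads the global
# learning rate lr defined in the __main__ block, so it only works once lr exists.
def _linear_shape_demo():
    layer = Linear(784, 512)
    assert layer.forward(np.random.rand(3, 784)).shape == (3, 512)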
# Define the Sigmoid activation
class Sigmoid:
    def __init__(self):
        self.info = "Sigmoid()"  # string shown by __repr__
def __repr__(self):
return self.info
def forward(self, x):
self.result = sigmoid(x)
return self.result
def backward(self, G):
return G * self.result * (1 - self.result)
# Define the Tanh activation
class Tanh:
def __init__(self):
self.info = "Tanh()" # 打印信息
def __repr__(self):
return self.info
def forward(self, x):
self.result = 2 * sigmoid(2 * x) - 1
return self.result
def backward(self, G):
return G * (1 - self.result**2)
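# Illustrative identity check (not called anywhere): the forward pass above relies on
# tanh(x) == 2 * sigmoid(2x) - 1, so it should agree with np.tanh.
def _tanh_identity_check():
    x = np.linspace(-5.0, 5.0, 11)
    assert np.allclose(Tanh().forward(x), np.tanh(x))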
# Define the Softmax output layer
class Softmax:
def __init__(self):
self.info = "Softmax()" # 打印信息
def __repr__(self):
return self.info
def forward(self, x):
self.p = softmax(x)
return self.p
    def backward(self, G):
        # G is the one-hot label batch; (p - y) / batch_size is the combined
        # softmax + cross-entropy gradient with respect to the logits
        G = (self.p - G) / len(G)
        return G
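# Illustrative gradient check (not called by the script): the (p - y) / batch_size value
# returned by Softmax.backward should match a finite-difference estimate of the
# batch-averaged cross-entropy loss with respect to a single logit.
def _softmax_ce_grad_check(eps=1e-6):
    logits = np.random.rand(2, 5)
    labels = np.eye(5)[[1, 3]]
    def ce_loss(z):
        return -np.sum(labels * np.log(softmax(z))) / len(z)
    layer = Softmax()
    layer.forward(logits)
    analytic = layer.backward(labels)
    bumped = logits.copy()
    bumped[0, 1] += eps
    numeric = (ce_loss(bumped) - ce_loss(logits)) / eps
    assert np.isclose(analytic[0, 1], numeric, atol=1e-4)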
# Define the ReLU activation
class ReLU:
def __init__(self):
self.info = "ReLU()" # 打印信息
def __repr__(self):
return self.info
def forward(self, x):
self.x = x
return np.maximum(0, x)
def backward(self, G):
grad = G.copy()
grad[self.x <= 0] = 0
return grad
# Define the ModelList container
class ModelList:
def __init__(self, layers):
self.layers = layers
def forward(self, x):
for layer in self.layers:
x = layer.forward(x)
return x
def backward(self, G):
for layer in self.layers[::-1]:
G = layer.backward(G)
def __repr__(self):
info = ""
for layer in self.layers:
info += layer.info + "\n"
return info
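# Illustrative usage sketch (not called by the script): ModelList chains forward calls
# in order and backward calls in reverse; the layer sizes here are arbitrary.
def _modellist_forward_demo():
    tiny = ModelList([Linear(4, 3), ReLU(), Linear(3, 2), Softmax()])
    assert tiny.forward(np.random.rand(5, 4)).shape == (5, 2)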
# Main script entry point
if __name__ == "__main__":
    # Load the training images and labels
train_images = (
load_images(
os.path.join(
"Python", "NLP basic", "data", "minist", "train-images.idx3-ubyte"
)
)
/ 255
)
train_labels = make_onehot(
load_labels(
os.path.join(
"Python", "NLP basic", "data", "minist", "train-labels.idx1-ubyte"
)
),
10,
)
    # Load the test-set images and labels (used as the dev set here)
dev_images = (
load_images(
os.path.join(
"Python", "NLP basic", "data", "minist", "t10k-images.idx3-ubyte"
)
)
/ 255
)
dev_labels = load_labels(
os.path.join("Python", "NLP basic", "data", "minist", "t10k-labels.idx1-ubyte")
)
    # Hyperparameters
epochs = 10
    lr = 0.08  # learning rate adjusted in the V2 version
batch_size = 200
    # Flatten the image data
train_images = train_images.reshape(60000, 784)
dev_images = dev_images.reshape(-1, 784)
    # Build the Dataset and DataLoader instances
train_dataset = Dataset(train_images, train_labels)
train_dataloader = DataLoader(train_dataset, batch_size)
dev_dataset = Dataset(dev_images, dev_labels)
dev_dataloader = DataLoader(dev_dataset, batch_size)
    # Define the model
model = ModelList(
[Linear(784, 512), ReLU(), Linear(512, 256), Tanh(), Linear(256, 10), Softmax()]
)
print(model)
    # Training loop
for e in range(epochs):
for x, l in train_dataloader:
            # Forward pass
x = model.forward(x)
            # Per-sample cross-entropy, averaged over the batch (currently not printed)
            loss = -np.mean(np.sum(l * np.log(x), axis=-1))
            # Backward pass: the one-hot labels go straight into Softmax.backward and
            # every layer updates its parameters in place (ModelList.backward returns nothing)
            model.backward(l)
        # Evaluate on the dev set and compute the prediction accuracy
right_num = 0
for x, batch_labels in dev_dataloader:
x = model.forward(x)
            pre_idx = np.argmax(x, axis=-1)  # predicted class indices
            right_num += np.sum(pre_idx == batch_labels)  # count correct predictions
        acc = right_num / len(dev_images)  # accuracy over the dev set
# print(f"Epoch {e}, Acc: {acc:.4f}")
