FNN Example

import torch
import torch.nn as nn
import torch.optim as optim


# Define the feedforward neural network
class FNN(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)  # input layer -> hidden layer
        self.relu = nn.ReLU()  # activation function
        self.fc2 = nn.Linear(hidden_size, output_size)  # hidden layer -> output layer
        self.sigmoid = nn.Sigmoid()  # output activation (for binary classification)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        out = self.sigmoid(out)
        return out


# Assume 2 input features, 10 hidden neurons, 1 output (a probability)
model = FNN(input_size=2, hidden_size=10, output_size=1)

# Loss function and optimizer
criterion = nn.BCELoss()  # binary cross-entropy (expects probabilities in [0, 1])
optimizer = optim.Adam(model.parameters(), lr=0.01)

# Generate some toy data (the OR logic gate)
X = torch.tensor([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
y = torch.tensor([[0.], [1.], [1.], [1.]])

# Training loop
for epoch in range(1000):
    # Forward pass
    outputs = model(X)
    loss = criterion(outputs, y)

    # Backward pass and parameter update
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if (epoch + 1) % 200 == 0:
        print(f'Epoch [{epoch + 1}/1000], Loss: {loss.item():.4f}')

# Testing
with torch.no_grad():
    test_out = model(X)
    predictions = (test_out > 0.5).float()
    print("预测结果:", predictions.view(-1).tolist())

Output:

Epoch [200/1000], Loss: 0.0075
Epoch [400/1000], Loss: 0.0017
Epoch [600/1000], Loss: 0.0008
Epoch [800/1000], Loss: 0.0004
Epoch [1000/1000], Loss: 0.0003
Predictions: [0.0, 1.0, 1.0, 1.0]
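
A common, numerically more stable variant is to have forward() return raw logits (dropping the final Sigmoid) and use nn.BCEWithLogitsLoss, which fuses the sigmoid into the loss. A minimal sketch of the change:

# In FNN.forward(), return self.fc2(out) directly (raw logits, no sigmoid)
criterion = nn.BCEWithLogitsLoss()  # sigmoid + binary cross-entropy in one step

# At inference time, apply the sigmoid explicitly:
# probs = torch.sigmoid(model(X))
# predictions = (probs > 0.5).float()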

Why it works

  • A feedforward neural network (FNN) is a universal function approximator: with enough hidden neurons it can approximate any continuous function on a bounded domain, which covers logic operations like OR; the XOR sketch after this list shows the same network fitting a function that is not linearly separable.
  • Practical confirmation: the loss steadily decreases and the final predictions are correct.
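
As an illustration of the approximation claim, here is a minimal sketch reusing the FNN class above to fit XOR, which no single linear layer can represent (convergence depends on the random initialization, so the printed result is typical rather than guaranteed):

# XOR is not linearly separable, so the hidden ReLU layer is genuinely needed
X = torch.tensor([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
y = torch.tensor([[0.], [1.], [1.], [0.]])  # XOR labels

model = FNN(input_size=2, hidden_size=10, output_size=1)
criterion = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)

for epoch in range(2000):  # XOR typically needs more epochs than OR
    loss = criterion(model(X), y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

with torch.no_grad():
    print((model(X) > 0.5).float().view(-1).tolist())  # typically [0.0, 1.0, 1.0, 0.0]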