5.1.0 Imports

import torch
from torch import nn
from torch.nn import functional as F

 

5.1.1 Building a network by instantiating nn.Sequential

# Build the network by instantiating nn.Sequential. The model contains a fully
# connected hidden layer with 256 units and a ReLU activation function, followed
# by a fully connected output layer with 10 output units and no activation function.
net = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))
# Define the input features
X = torch.rand(2, 20)
# Model output
Y = net(X)      # net(X) is shorthand for net.__call__(X)
print(Y)
# Output:
# tensor([[-0.0734,  0.1032,  0.0983,  0.1074,  0.0482,  0.2511,  0.1665,  0.1535, 0.0662,  0.0410],
#         [ 0.0207,  0.0912,  0.0752,  0.0009,  0.0232,  0.0717, -0.0337, -0.0010, 0.1217,  0.0413]], grad_fn=<AddmmBackward0>)
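
As a quick sanity check (an addition of mine, not part of the original post), printing a Sequential model lists its registered children in order. The expected output below is what recent PyTorch versions print; the exact format may vary slightly by version.

print(net)
# Expected output (format may differ slightly across PyTorch versions):
# Sequential(
#   (0): Linear(in_features=20, out_features=256, bias=True)
#   (1): ReLU()
#   (2): Linear(in_features=256, out_features=10, bias=True)
# )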

 

5.1.2 A custom module

# A block's job is to chain layers together
class MLP(nn.Module):
    def __init__(self):
        # Call the parent class nn.Module's constructor to perform the necessary initialization
        super().__init__()
        # A fully connected hidden layer with 256 hidden units
        self.hidden = nn.Linear(20, 256)
        # A fully connected output layer with 10 output units
        self.out = nn.Linear(256, 10)

    # Define the model's forward pass: compute the output from the input X
    def forward(self, X):
        return self.out(F.relu(self.hidden(X)))

net = MLP()
Y = net(X)
print(Y)
# Output:
# tensor([[ 0.0168, -0.0403,  0.2232,  0.1695,  0.3364,  0.0075,  0.0732, -0.0419, -0.1076,  0.0864],
#         [ 0.0663, -0.1298,  0.0890,  0.2442,  0.3788, -0.0290,  0.0885, -0.0660, -0.1007,  0.0372]], grad_fn=<AddmmBackward0>)
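
Because hidden and out are assigned as attributes of an nn.Module subclass, they are registered automatically and their parameters become visible to optimizers. A minimal sketch of mine to verify this:

for name, param in net.named_parameters():
    print(name, param.shape)
# hidden.weight torch.Size([256, 20])
# hidden.bias torch.Size([256])
# out.weight torch.Size([10, 256])
# out.bias torch.Size([10])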

 

5.1.3 A custom sequential block

# A sequential block's job is to chain modules together
class MySequential(nn.Module):
    def __init__(self, *args):
        # Call the parent class nn.Module's constructor to perform the necessary initialization
        super().__init__()
        for idx, module in enumerate(args):
            # Register each submodule. _modules is an OrderedDict maintained by
            # nn.Module, so registered modules are visible to parameter
            # initialization, state_dict(), and the optimizer.
            self._modules[str(idx)] = module

    # Define the forward pass: run the input through each module in order
    def forward(self, X):
        for block in self._modules.values():
            X = block(X)
        return X

net = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))
Y = net(X)
print(Y)
# Output:
# tensor([[ 0.0932,  0.1025,  0.0384, -0.0860,  0.1336,  0.0991,  0.3763, -0.1100, -0.0088, -0.1979],
#         [ 0.0882, -0.0685,  0.1821, -0.1365, -0.0244, -0.0578,  0.3142,  0.0109, -0.0583, -0.2758]], grad_fn=<AddmmBackward0>)
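
Storing submodules in self._modules rather than in a plain Python list matters: nn.Module only tracks modules it knows about. A counter-example sketch of mine (BadSequential is a hypothetical name) shows that a plain list hides the parameters:

class BadSequential(nn.Module):
    def __init__(self, *args):
        super().__init__()
        # Hypothetical broken variant: a plain list does NOT register submodules
        self.blocks = list(args)

    def forward(self, X):
        for block in self.blocks:
            X = block(X)
        return X

bad = BadSequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))
print(len(list(bad.parameters())))  # 0 -- an optimizer would find nothing to train
print(len(list(net.parameters())))  # 4 -- MySequential registers two weights and two biases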

 

5.1.4 A custom block that runs its own computation

class FixedHiddenMLP(nn.Module):
    def __init__(self):
        super().__init__()
        # A fixed (constant) weight: requires_grad=False means it is never
        # updated during training, and since it is a plain tensor rather than
        # an nn.Parameter it is not registered as a model parameter
        self.rand_weight = torch.rand((20, 20), requires_grad=False)
        # A fully connected layer
        self.linear = nn.Linear(20, 20)

    # The forward pass can run arbitrary Python code, not just layer calls
    def forward(self, X):
        X = self.linear(X)
        X = F.relu(torch.mm(X, self.rand_weight) + 1)
        # Reuse the same linear layer: both calls share the same parameters
        X = self.linear(X)
        # Control flow: halve X until the sum of its absolute values is at most 1
        while X.abs().sum() > 1:
            X /= 2
        return X.sum()

net = FixedHiddenMLP()
Y = net(X)
print(Y)
# Output:
# tensor(0.4448, grad_fn=<SumBackward0>)
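
As the comments above note, rand_weight is a plain tensor rather than an nn.Parameter, so it never appears among the trainable parameters, and the reused linear layer contributes its weight and bias only once. A quick check of mine:

print([name for name, _ in net.named_parameters()])
# ['linear.weight', 'linear.bias'] -- rand_weight is absent, and the
# twice-called linear layer shows up only once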

 

5.1.5 Nesting one block inside another

class NestMLP(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(), nn.Linear(64, 32), nn.ReLU())
        self.linear = nn.Linear(32, 16)

    def forward(self, X):
        return self.linear(self.net(X))

chimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())
Y = chimera(X)
print(Y)
# Output:
# tensor(-0.0844, grad_fn=<SumBackward0>)
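
Because blocks are themselves modules, they nest to arbitrary depth. Printing chimera (my own check; the exact format may vary by PyTorch version) shows the full tree:

print(chimera)
# Sequential(
#   (0): NestMLP(
#     (net): Sequential(
#       (0): Linear(in_features=20, out_features=64, bias=True)
#       (1): ReLU()
#       (2): Linear(in_features=64, out_features=32, bias=True)
#       (3): ReLU()
#     )
#     (linear): Linear(in_features=32, out_features=16, bias=True)
#   )
#   (1): Linear(in_features=16, out_features=20, bias=True)
#   (2): FixedHiddenMLP(
#     (linear): Linear(in_features=20, out_features=20, bias=True)
#   )
# )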

 

The complete code for this section is as follows

import torch
from torch import nn
from torch.nn import functional as F

# ------------------------------ Building a network by instantiating nn.Sequential ------------------------------

# Build the network by instantiating nn.Sequential. The model contains a fully
# connected hidden layer with 256 units and a ReLU activation function, followed
# by a fully connected output layer with 10 output units and no activation function.
net = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))
# Define the input features
X = torch.rand(2, 20)
# Model output
Y = net(X)      # net(X) is shorthand for net.__call__(X)
print(Y)
# Output:
# tensor([[-0.0734,  0.1032,  0.0983,  0.1074,  0.0482,  0.2511,  0.1665,  0.1535, 0.0662,  0.0410],
#         [ 0.0207,  0.0912,  0.0752,  0.0009,  0.0232,  0.0717, -0.0337, -0.0010, 0.1217,  0.0413]], grad_fn=<AddmmBackward0>)

# ------------------------------ A custom module ------------------------------

# A block's job is to chain layers together
class MLP(nn.Module):
    def __init__(self):
        # Call the parent class nn.Module's constructor to perform the necessary initialization
        super().__init__()
        # A fully connected hidden layer with 256 hidden units
        self.hidden = nn.Linear(20, 256)
        # A fully connected output layer with 10 output units
        self.out = nn.Linear(256, 10)

    # Define the model's forward pass: compute the output from the input X
    def forward(self, X):
        return self.out(F.relu(self.hidden(X)))

net = MLP()
Y = net(X)
print(Y)
# Output:
# tensor([[ 0.0168, -0.0403,  0.2232,  0.1695,  0.3364,  0.0075,  0.0732, -0.0419, -0.1076,  0.0864],
#         [ 0.0663, -0.1298,  0.0890,  0.2442,  0.3788, -0.0290,  0.0885, -0.0660, -0.1007,  0.0372]], grad_fn=<AddmmBackward0>)

# ------------------------------ A custom sequential block ------------------------------

# A sequential block's job is to chain modules together
class MySequential(nn.Module):
    def __init__(self, *args):
        # Call the parent class nn.Module's constructor to perform the necessary initialization
        super().__init__()
        for idx, module in enumerate(args):
            # Register each submodule. _modules is an OrderedDict maintained by
            # nn.Module, so registered modules are visible to parameter
            # initialization, state_dict(), and the optimizer.
            self._modules[str(idx)] = module

    # Define the forward pass: run the input through each module in order
    def forward(self, X):
        for block in self._modules.values():
            X = block(X)
        return X

net = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))
Y = net(X)
print(Y)
# Output:
# tensor([[ 0.0932,  0.1025,  0.0384, -0.0860,  0.1336,  0.0991,  0.3763, -0.1100, -0.0088, -0.1979],
#         [ 0.0882, -0.0685,  0.1821, -0.1365, -0.0244, -0.0578,  0.3142,  0.0109, -0.0583, -0.2758]], grad_fn=<AddmmBackward0>)

# ------------------------------ A custom block that runs its own computation ------------------------------

class FixedHiddenMLP(nn.Module):
    def __init__(self):
        super().__init__()
        # A fixed (constant) weight: requires_grad=False means it is never
        # updated during training, and since it is a plain tensor rather than
        # an nn.Parameter it is not registered as a model parameter
        self.rand_weight = torch.rand((20, 20), requires_grad=False)
        # A fully connected layer
        self.linear = nn.Linear(20, 20)

    # The forward pass can run arbitrary Python code, not just layer calls
    def forward(self, X):
        X = self.linear(X)
        X = F.relu(torch.mm(X, self.rand_weight) + 1)
        # Reuse the same linear layer: both calls share the same parameters
        X = self.linear(X)
        # Control flow: halve X until the sum of its absolute values is at most 1
        while X.abs().sum() > 1:
            X /= 2
        return X.sum()

net = FixedHiddenMLP()
Y = net(X)
print(Y)
# Output:
# tensor(0.4448, grad_fn=<SumBackward0>)

# ------------------------------ Nesting one block inside another ------------------------------

class NestMLP(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(), nn.Linear(64, 32), nn.ReLU())
        self.linear = nn.Linear(32, 16)

    def forward(self, X):
        return self.linear(self.net(X))

chimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())
Y = chimera(X)
print(Y)
# Output:
# tensor(-0.0844, grad_fn=<SumBackward0>)

 
