# 0,1 sequence: train an RNN to generate the language 0^n 1^n

import torch
import torch.nn as nn
import random

# -------------------------
# Hyperparameters
# -------------------------

N_max = 10 # largest n used when building the training set
input_size = 4 # charset size: '0', '1', plus two special tokens (start/end — stripped by the paste)
hidden_size = 64
output_size = input_size
num_layers = 1
lr = 0.01
epochs = 2000

# -------------------------
# 1. Generate training data
# -------------------------

def generate_sequences(N_max):
    """Return the training strings '0'*n + '1'*n for every n in 1..N_max."""
    return ['0' * n + '1' * n for n in range(1, N_max + 1)]

sequences = generate_sequences(N_max)

# Character-to-index mapping.
# NOTE(review): the original paste stripped the special-token strings, leaving
# two duplicate '' keys (so the dict collapsed to 3 entries while index 3 was
# still emitted — an embedding IndexError). Restored here as '<sos>'/'<eos>'.
char_to_idx = {'0': 0, '1': 1, '<sos>': 2, '<eos>': 3}
idx_to_char = {v: k for k, v in char_to_idx.items()}
vocab_size = len(char_to_idx)

def string_to_tensor(s):
    """Convert a string to a 1-D LongTensor of indices, wrapped in <sos>/<eos>."""
    tensor = [char_to_idx['<sos>']] + [char_to_idx[c] for c in s] + [char_to_idx['<eos>']]
    return torch.tensor(tensor, dtype=torch.long)

# All training sequences as index tensors
data = [string_to_tensor(s) for s in sequences]

# -------------------------
# 2. Define the RNN model
# -------------------------

class SimpleRNN(nn.Module):
    """Next-token model: embedding -> GRU -> linear projection to the vocab.

    The paste stripped the double underscores from ``__init__`` (markdown
    mangling); restored here.
    """

    def __init__(self, input_size, hidden_size, output_size, num_layers=1):
        super(SimpleRNN, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # input_size is the vocabulary size; inputs are token indices.
        self.embedding = nn.Embedding(input_size, hidden_size)
        self.rnn = nn.GRU(hidden_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x, hidden=None):
        """x: (batch, L) LongTensor -> (logits of shape (batch, L, output_size), hidden)."""
        embedded = self.embedding(x)
        output, hidden = self.rnn(embedded, hidden)
        logits = self.fc(output)
        return logits, hidden

# Model over the 4-token vocabulary; trained as a next-token predictor
# with cross-entropy loss and Adam.
model = SimpleRNN(vocab_size, hidden_size, vocab_size, num_layers)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=lr)

# -------------------------
# 3. Train the model
# -------------------------

print("开始训练...")
for epoch in range(epochs):
    total_loss = 0
    for seq in data:
        optimizer.zero_grad()
        # Input: every token except the last (the end token need not be fed in)
        inputs = seq[:-1].unsqueeze(0)   # (1, L)
        # Target: every token except the first — teacher-forced next-token prediction
        targets = seq[1:].unsqueeze(0)   # (1, L)

        logits, _ = model(inputs)
        # logits: (1, L, vocab_size) -> flatten to (L, vocab_size) for CrossEntropyLoss
        loss = criterion(logits.view(-1, vocab_size), targets.view(-1))
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    if (epoch + 1) % 500 == 0:
        print(f"Epoch {epoch+1}, Loss: {total_loss:.4f}")

print("训练完成!")

# -------------------------
# 4. Generate a sequence for a given n
# -------------------------

def generate_sequence(n):
    """Sample a sequence from the trained model, starting from the <sos> token.

    Generation stops at <eos>, or after 2*n + 1 tokens (the expected length of
    0^n 1^n plus the terminating <eos>). Returns the generated characters as a
    string. Special-token names restored; the paste had stripped them.
    """
    model.eval()
    with torch.no_grad():
        input_token = torch.tensor([[char_to_idx['<sos>']]], dtype=torch.long)
        hidden = None
        generated = []
        for _ in range(2 * n + 1):
            logits, hidden = model(input_token, hidden)
            # Sample the next token from the softmax distribution over the
            # last (only) time step.
            prob = torch.softmax(logits[0, -1], dim=0)
            next_token = torch.multinomial(prob, 1).item()
            if next_token == char_to_idx['<eos>']:
                break
            generated.append(idx_to_char[next_token])
            input_token = torch.tensor([[next_token]], dtype=torch.long)
        return ''.join(generated)

# Read n from the user and compare the generated sequence with 0^n 1^n

try:
    n = int(input("请输入 n:"))
    if n < 1:
        raise ValueError
    result = generate_sequence(n)
    print(f"生成的序列 (n={n}): {result}")
    # Verify against the ground-truth string
    expected = '0' * n + '1' * n
    print(f"期望序列: {expected}")
    print("✅ 正确!" if result == expected else "❌ 错误!")
# Narrowed from a blanket `except Exception`: only non-integer / non-positive
# input is expected to fail here; anything else should propagate.
except ValueError:
    print("输入无效,请输入正整数。")

# (blog footer) posted @ 2025-11-30 13:13  XiaoguoLu