Simple sequences: generating 0^n1^n with an LSTM

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import random

# ===================== User input parameters =====================

print("===== 训练参数设置 =====")
max_n = int(input("设置训练时n的上限:"))
max_seq_len = 2 * max_n

# Ask whether to fix the n used for generation

specify_n = input("\nFix the n of the generated sequences? (y/n): ").lower()
if specify_n == 'y':
    target_n = int(input(f"Enter the target n (1~{max_n}): "))
    while target_n < 1 or target_n > max_n:
        target_n = int(input(f"Invalid input! Enter a number between 1 and {max_n}: "))
else:
    target_n = None

# ===================== Data generation =====================

# Character-to-index maps; '<PAD>' is the padding token
char_map = {'0': 0, '1': 1, '<PAD>': 2}
idx_to_char = {0: '0', 1: '1', 2: '<PAD>'}

# Generate the training data

def create_data(num_samples=5000):
    data = []
    for _ in range(num_samples):
        n = np.random.randint(1, max_n + 1)
        sequence = '0' * n + '1' * n
        data.append(sequence)
    random.shuffle(data)
    return data

train_data = create_data()
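
# Optional sanity check: print a few samples and confirm that each one really
# has the form 0^n 1^n before building training pairs from them.
for sample in train_data[:3]:
    n = len(sample) // 2
    assert sample == '0' * n + '1' * n
    print(f"sample: {sample} (n={n})")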

# Build (prefix, next character) training pairs

X = []
y = []
for seq in train_data:
    for i in range(1, len(seq)):
        input_seq = seq[:i]     # prefix
        target_char = seq[i]    # next character to predict
        input_idx = [char_map[c] for c in input_seq]
        # Left-pad the prefix so the real characters sit at the end
        input_padded = [char_map['<PAD>']] * (len(seq) - 1 - len(input_idx)) + input_idx
        X.append(input_padded)
        y.append(char_map[target_char])

# Pad all examples to the global max_seq_len

# Left-pad (not right-pad) to max_seq_len: the model reads its prediction from
# the final LSTM timestep, so the last position must hold a real character,
# matching how inputs are padded during generation below
X = [[char_map['<PAD>']] * (max_seq_len - len(x)) + x for x in X]
X = torch.tensor(X, dtype=torch.long)
y = torch.tensor(y, dtype=torch.long)
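
# Optional shape check: X should be (num_pairs, max_seq_len) and y (num_pairs,),
# where num_pairs is the sum over all samples of (len(seq) - 1).
print(f"X: {tuple(X.shape)}, y: {tuple(y.shape)}")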

# Custom dataset

class SeqDataset(Dataset):
    def __init__(self, X, y):
        self.X = X
        self.y = y

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        return self.X[idx], self.y[idx]

dataset = SeqDataset(X, y)
dataloader = DataLoader(dataset, batch_size=64, shuffle=True)
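
# Optional: pull one batch to confirm the DataLoader shapes; batch_X should be
# (64, max_seq_len) for every batch except possibly the last.
batch_X, batch_y = next(iter(dataloader))
print(batch_X.shape, batch_y.shape)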

# ===================== LSTM model =====================

class LSTMSeqModel(nn.Module):
    def __init__(self, vocab_size=3, embed_dim=16, hidden_dim=32):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        # LSTM dropout only acts between stacked layers, so it has no effect
        # on a single-layer LSTM and is omitted here
        self.lstm = nn.LSTM(embed_dim, hidden_dim, batch_first=True)
        self.fc = nn.Linear(hidden_dim, 2)  # binary output: next char is '0' or '1'

    def forward(self, x):
        x = self.embedding(x)
        lstm_out, _ = self.lstm(x)
        out = self.fc(lstm_out[:, -1, :])  # logits from the final timestep
        return out

model = LSTMSeqModel()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=1e-5)  # weight decay = L2 regularization
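
# Optional dry run before training: a forward pass on a few examples should
# yield (4, 2) logits, and the parameter count stays small for this toy task.
with torch.no_grad():
    logits = model(X[:4])
print(f"logits: {tuple(logits.shape)}")
print(f"trainable parameters: {sum(p.numel() for p in model.parameters() if p.requires_grad)}")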

# ===================== Training =====================

epochs = 10
model.train()
best_acc = 0.0
print("\n===== Training =====")
for epoch in range(epochs):
    total_loss = 0.0
    correct = 0
    total = 0
    for batch_X, batch_y in dataloader:
        optimizer.zero_grad()
        outputs = model(batch_X)
        loss = criterion(outputs, batch_y)
        loss.backward()
        optimizer.step()

        total_loss += loss.item()
        _, predicted = torch.max(outputs, 1)
        total += batch_y.size(0)
        correct += (predicted == batch_y).sum().item()

    acc = 100 * correct / total
    if acc > best_acc:
        best_acc = acc
        torch.save(model.state_dict(), "best_seq_model.pth")
    print(f"Epoch {epoch+1}/{epochs}, Loss: {total_loss/len(dataloader):.4f}, "
          f"Accuracy: {acc:.2f}% (Best: {best_acc:.2f}%)")

# Reload the best checkpoint before generating
model.load_state_dict(torch.load("best_seq_model.pth"))
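
# Optional quick check on freshly sampled sequences (the distribution is tiny,
# so overlap with the training data is expected). Note that an all-0s prefix is
# inherently ambiguous (the model cannot know the intended n), so next-character
# accuracy below 100% is expected even for a well-trained model.
model.eval()
test_total, test_correct = 0, 0
with torch.no_grad():
    for seq in create_data(num_samples=200):
        for i in range(1, len(seq)):
            idx = [char_map[c] for c in seq[:i]]
            padded = [char_map['<PAD>']] * (max_seq_len - len(idx)) + idx
            pred = model(torch.tensor([padded], dtype=torch.long)).argmax(dim=1).item()
            test_total += 1
            test_correct += int(pred == char_map[seq[i]])
print(f"Next-char accuracy on fresh samples: {100 * test_correct / test_total:.2f}%")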

# ===================== Sequence generation =====================

def generate_0n1n(model, target_n=None, start='0'):
    model.eval()
    seq = [start]
    with torch.no_grad():
        for _ in range(max_seq_len - 1):
            seq_idx = [char_map[c] for c in seq]
            input_padded = [char_map['<PAD>']] * (max_seq_len - len(seq_idx)) + seq_idx
            input_tensor = torch.tensor(input_padded, dtype=torch.long).unsqueeze(0)
            outputs = model(input_tensor)
            probs = torch.softmax(outputs, dim=1).numpy()[0]
            probs = probs / probs.sum()  # renormalize: float32 rounding can trip np.random.choice's sum check
            next_char_idx = np.random.choice([0, 1], p=probs)
            next_char = idx_to_char[next_char_idx]
            seq.append(next_char)
            if target_n is not None:
                # Fixed n: stop once exactly target_n 0s and target_n 1s appear
                if seq.count('0') == target_n and seq.count('1') == target_n:
                    break
            else:
                # Random n: once the 0s and 1s balance (length >= 2), stop with
                # probability 0.75 to bias generation toward shorter sequences
                if seq.count('0') == seq.count('1') and seq.count('0') >= 1:
                    if random.random() > 0.25:
                        break
    # Clean up invalid output: trim to the canonical 0^n1^n form
    final_seq = ''.join(seq)
    n_0 = final_seq.count('0')
    n_1 = final_seq.count('1')
    valid_n = min(n_0, n_1)
    if valid_n > 0:
        final_seq = '0' * valid_n + '1' * valid_n
    return final_seq
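
# Optional helper for auditing: check whether a string is a well-formed 0^n1^n
# sequence. generate_0n1n already rewrites its output into canonical form, so
# this is mainly useful for inspecting raw samples or summarizing results.
def is_0n1n(s):
    n = len(s) // 2
    return n > 0 and len(s) % 2 == 0 and s == '0' * n + '1' * n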

# ===================== Output =====================

print("\n===== 生成结果 =====")
if target_n is not None:
print(f"生成n={target_n}的0{target_n}1序列:")
for _ in range(5):
print(generate_0n1n(model, target_n=target_n))
else:
print(f"生成随机长度的0n1n序列(1~{max_n}):")
# 生成10个,确保长度分布均匀
generated_seqs = [generate_0n1n(model) for _ in range(10)]

for seq in generated_seqs[:10]:
    print(seq)
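
    # Optional summary using the is_0n1n helper above: count how many of the
    # ten sequences are well-formed and report their n values.
    n_values = sorted(len(s) // 2 for s in generated_seqs if is_0n1n(s))
    print(f"valid: {len(n_values)}/{len(generated_seqs)}, n values: {n_values}")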