[GHCTF 2025] Disappeared Logo WP
Problem: [GHCTF 2025] Disappeared Logo
Approach
- The original paper / reference article: https://www.anquanke.com/post/id/259060#h3-4
- My writeup as the challenge author: https://mp.weixin.qq.com/s/H3l54bICAkvLTFNuPTinKg

Summary
- What this challenge tests:
  - The Unix timestamp floor-divided by 100000 is constant for the day of the challenge, and it serves as the random seed (17402 in the EXP; see the first sketch after this list)
  - Recovering the model architecture from the parameter shapes (see the second sketch after this list)
  - The players' hardware: the model was modified purely for the sake of modifying it, and at the sight of the \(262144 \times 64\) fully connected layer my ASUS ultrabook died on the spot
  - Whether players recognize recycled challenges: I have seen no fewer than 7 versions of this paper; truly a great paper for CTF
- Believe in Brother Chun and you shall live forever
EXP
- The full attack code:
```python
import pickle

import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms


class Lemodel(nn.Module):
    def __init__(self):
        super(Lemodel, self).__init__()
        act = nn.Sigmoid
        self.body = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=3, padding=1, stride=1),
            act(),
            nn.Conv2d(32, 32, kernel_size=3, padding=1, stride=1),
            act(),
            nn.Conv2d(32, 64, kernel_size=3, padding=1, stride=1),
            act(),
        )
        # 64 channels x 64 x 64 spatial positions = 262144 inputs
        self.fc = nn.Sequential(
            nn.Linear(262144, 64),
        )

    def forward(self, x):
        out = self.body(x)
        out = out.view(-1)
        out = self.fc(out)
        return out


def cross_entropy_for_onehot(pred, target):
    return torch.mean(torch.sum(-target * F.log_softmax(pred, dim=-1), 1))


def weights_init(m):
    # The same uniform init the challenge used, so that re-seeding
    # reproduces the exact victim weights
    if hasattr(m, "weight"):
        m.weight.data.uniform_(-0.5, 0.5)
    if hasattr(m, "bias"):
        m.bias.data.uniform_(-0.5, 0.5)


def label_to_onehot(target):
    target = torch.unsqueeze(target, 1)
    onehot_target = torch.zeros(target.size(0), 64, device=target.device)
    onehot_target.scatter_(1, target, 1)
    return onehot_target


# The gradients leaked by the challenge
original_dy_dx = pickle.load(open('model/logo.pkl', 'rb'))

device = "cpu"
criterion = cross_entropy_for_onehot
tt = transforms.Compose([
    transforms.Resize((64, 64)),
    transforms.ToTensor()
])
tp = transforms.ToPILImage()

# Seed derived from the timestamp (see the summary above); with the same
# seed and init we rebuild the exact model the gradients came from
seed = 17402
torch.manual_seed(seed)
model = Lemodel()
model.to(device)
model.apply(weights_init)

# Sanity check: gradient shapes must line up with the leaked ones
# for gx, gy in zip(model.parameters(), original_dy_dx):
#     print(gx.shape, gy.shape)
# exit()

dummy_data = torch.randn(1, 3, 64, 64).to(device).requires_grad_(True)
dummy_onehot_label = label_to_onehot(torch.Tensor([int(0)]).long().to(device))
optimizer = torch.optim.LBFGS([dummy_data])

history = []
for iters in range(300):
    def closure():
        optimizer.zero_grad()
        pred = model(dummy_data)
        dummy_loss = criterion(pred, dummy_onehot_label)
        dummy_dy_dx = torch.autograd.grad(dummy_loss, model.parameters(), create_graph=True)
        # Gradient-matching loss: squared L2 distance between the dummy
        # gradients and the leaked gradients
        grad_diff = 0
        for gx, gy in zip(dummy_dy_dx, original_dy_dx):
            grad_diff += ((gx - gy) ** 2).sum()
        grad_diff.backward()
        return grad_diff

    optimizer.step(closure)
    if iters % 10 == 0:
        current_loss = closure()
        print(iters, "%.4f" % current_loss.item())
        tp(dummy_data[0].detach().cpu()).save(f"{iters//10}.png")
```

