Compute the inference time and model size of a PyTorch model

Script

import time
import sys

import torch
from torchsummary import summary

sys.path.insert(0, sys.path[0] + "/../")
from cls_models.model import ClsModel

if __name__ == '__main__':
    model_name = 'resnet18'
    num_classes = 2
    is_pretrained = False
    model = ClsModel(model_name, num_classes, is_pretrained)

    # Analyze the parameter count and memory footprint of the model
    use_gpu = torch.cuda.is_available()  # fall back to CPU automatically when no GPU is present
    device = torch.device('cuda:0' if use_gpu else 'cpu')
    model = model.to(device)
    summary(model, input_size=(3, 128, 128), batch_size=-1)  # torchsummary prints the table itself

    # Measure the inference time of the model
    model.eval()
    dump_input = torch.ones(1, 3, 128, 128).to(device)

    # Warm-up: the first few forward passes are slower (CUDA context init, kernel caching)
    with torch.no_grad():
        for _ in range(5):
            start = time.time()
            outputs = model(dump_input)
            if use_gpu:
                torch.cuda.synchronize()  # wait for all GPU kernels to finish before stopping the clock
            end = time.time()
            print('Time: {:.2f} ms'.format((end - start) * 1000))

    # Profile one forward pass, operator by operator
    with torch.autograd.profiler.profile(enabled=True, use_cuda=use_gpu, record_shapes=False, profile_memory=False) as prof:
        with torch.no_grad():
            outputs = model(dump_input)
    print(prof.table())
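
As a complement to torchsummary, the model size can also be computed directly from the model's parameters and buffers: numel() times element_size() gives the storage of each tensor in bytes. Below is a minimal sketch; torchvision's resnet18 is used only as a hypothetical stand-in for ClsModel.

import torch
from torchvision.models import resnet18

# Hypothetical stand-in for ClsModel('resnet18', 2, False); swap in your own model.
model = resnet18(num_classes=2)

# Parameter count
num_params = sum(p.numel() for p in model.parameters())

# Size in MB: parameters plus buffers (e.g. BatchNorm running statistics)
param_bytes = sum(p.numel() * p.element_size() for p in model.parameters())
buffer_bytes = sum(b.numel() * b.element_size() for b in model.buffers())
size_mb = (param_bytes + buffer_bytes) / (1024 ** 2)

print('Params: {:,}  Size: {:.2f} MB'.format(num_params, size_mb))

The result roughly matches the size of the saved state_dict, up to serialization overhead.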
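
time.time() together with torch.cuda.synchronize() is good enough for a rough number, but torch.cuda.Event measures elapsed time on the GPU itself and avoids host-side jitter. A minimal sketch, again using torchvision's resnet18 as a hypothetical stand-in for ClsModel and assuming a CUDA device is available:

import torch
from torchvision.models import resnet18

device = torch.device('cuda:0')
model = resnet18(num_classes=2).eval().to(device)
dump_input = torch.ones(1, 3, 128, 128, device=device)

# Warm-up so kernels are compiled/cached before timing
with torch.no_grad():
    for _ in range(5):
        model(dump_input)

starter = torch.cuda.Event(enable_timing=True)
ender = torch.cuda.Event(enable_timing=True)
timings = []
with torch.no_grad():
    for _ in range(20):
        starter.record()
        model(dump_input)
        ender.record()
        torch.cuda.synchronize()                     # wait for the GPU to finish
        timings.append(starter.elapsed_time(ender))  # milliseconds

print('Mean inference time: {:.2f} ms'.format(sum(timings) / len(timings)))

Averaging over a number of iterations after warm-up gives a more stable estimate than a single timed pass.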

