# -*- coding: utf-8 -*-
# 文件名: jmeter_runner.py
# 描述: 通用型JMeter自动化压测及报告生成脚本。
# 功能:
# 1. 在脚本中直接指定要运行的JMX文件路径,无需命令行参数。
# 2. 动态管理 'test_plan.ini',为每个JMX文件自动创建独立的测试计划配置。
# 3. 自动生成包含示例的 'servers.txt'。
# 4. 实时将JMeter日志输出到控制台。
# 5. 自动集成Prometheus,获取服务器硬件指标。
# 6. 为混合场景在报告中增加TOTAL汇总行。
# 7. 生成包含所有原始视图、完整且统一格式的五合一Excel报告。
# 8. Excel报告名称根据JMX脚本名动态生成。
# 9. 混合场景的取样器百分比完全依赖 "test_plan.ini" 中的手动配置。
# 10. 引入 [ScenarioToServerMapping] 配置,由用户明确指定场景与服务器组的监控关系。
# 11. JMETER_HOME 已移回脚本内配置,简化ini文件。
# 12. 彻底重写JMX参数修改逻辑,确保兼容JMeter 5.2至5.6+版本,解决核心运行问题。
# 13. 强化文件名净化逻辑,避免因中文字符编码问题导致路径错误。
# 14. 测试完成后自动清理工作目录中所有0字节的无效文件。
# 15. 修复混合测试JTL文件丢失问题,优化文件重命名逻辑,增强Win7系统兼容性。
# 版本: 20250731-Flexible-v2.5
import os
import shutil
import subprocess
import time
import re
import sys
import xml.etree.ElementTree as ET
import configparser
import csv
import threading
from datetime import datetime
from collections import defaultdict
# ==============================================================================
# --- 0. 依赖库检查 ---
# ==============================================================================
# Fail fast with an actionable install hint instead of a bare ImportError
# somewhere mid-run: all third-party libraries are imported up front.
try:
    import pandas as pd
    from openpyxl import load_workbook
    from openpyxl.drawing.image import Image
    from openpyxl.styles import Font, Alignment, Border, Side, PatternFill
    from openpyxl.utils import get_column_letter
    import requests
    import PIL  # Pillow is imported as PIL
except ImportError:
    # Any one missing package aborts the script with installation guidance.
    print("错误: 脚本运行需要 'pandas', 'openpyxl', 'requests', 和 'Pillow' 库。")
    print("请先在您的命令行中运行以下命令进行安装:")
    print("pip install --upgrade pandas openpyxl requests Pillow")
    sys.exit(1)
# ==============================================================================
# --- 1. Global configuration ---
# ==============================================================================
# (Important) Root directory of the local JMeter installation.
JMETER_HOME = "D:/apache-jmeter/apache-jmeter-5.6.312/apache-jmeter-5.6.3"
# (Important) Full path of the JMX test plan to execute.
ORIGINAL_JMX_FILE = "D:/OneDrive/AI-project/exce_local_xsl/测试/ViewResultsTree.jmx"
# (Optional) Prometheus server base URL used to collect hardware metrics.
PROMETHEUS_URL = "http://192.168.6.241:9099"
# Names of the configuration files this script generates/consumes.
CONFIG_FILE = "test_plan.ini"
SERVER_CONFIG_FILE = "servers.txt"
# ==============================================================================
# --- 2. Load-test strategy ---
# ==============================================================================
STEP_DURATION = 30          # seconds each concurrency step runs
DEFAULT_RAMP_UP = 3         # seconds JMeter uses to start all threads
JMETER_GRACE_PERIOD = 120   # extra seconds allowed before the JMeter process is force-terminated
DETAILED_ERROR_LOGGING = False  # True adds response code/message flags to the JMeter command line
# ==============================================================================
# --- 3. 核心功能函数 ---
# ==============================================================================
def log_message(message):
    """Emit *message* to the console; single choke point for all script logging."""
    print(message)
def get_prometheus_metrics(start_time_unix, end_time_unix, server_instances):
    """Query per-server hardware metrics (CPU/memory/disk %) from Prometheus.

    Args:
        start_time_unix: query-range start, unix seconds.
        end_time_unix:   query-range end, unix seconds.
        server_instances: list of "ip:port" instance labels to query.

    Returns:
        {instance: {'cpu': pct, 'memory': pct, 'disk': pct}} with averages over
        the time range; {} when Prometheus is unreachable, has no hardware
        metrics, or an error occurs. Metrics with no data are simply absent
        from the inner dict.
    """
    if not server_instances or not PROMETHEUS_URL:
        return {}
    try:
        log_message(" - 正在连接Prometheus服务...")
        # Connectivity probe: a trivial 'up' instant query.
        test_url = f"{PROMETHEUS_URL}/api/v1/query"
        test_params = {'query': 'up'}
        test_response = requests.get(test_url, params=test_params, timeout=30)
        if test_response.status_code != 200:
            log_message(f" - Prometheus连接失败: HTTP {test_response.status_code}")
            log_message(" - 请检查Prometheus服务是否正常运行")
            return {}
        log_message(" - Prometheus连接成功")
        # Check whether any hardware-monitoring metric names exist at all.
        metrics_url = f"{PROMETHEUS_URL}/api/v1/label/__name__/values"
        metrics_response = requests.get(metrics_url, timeout=30)
        if metrics_response.status_code != 200:
            log_message(" - 无法获取Prometheus指标列表")
            return {}
        metrics_data = metrics_response.json()
        available_metrics = metrics_data.get('data', [])
        # Heuristic: treat node_exporter-style names as "hardware" metrics.
        hardware_metrics = [m for m in available_metrics if any(keyword in m.lower() for keyword in ['node_', 'cpu', 'memory', 'disk', 'filesystem'])]
        if not hardware_metrics:
            log_message(" - ❌ Prometheus中未发现硬件监控指标")
            log_message(" - 💡 解决方案:")
            log_message(" 1. 在目标服务器上安装并启动node_exporter")
            log_message(" 2. 配置Prometheus抓取node_exporter数据")
            log_message(" 3. 确保防火墙允许相关端口通信")
            log_message(f" - 📊 当前Prometheus中只有以下指标: {', '.join(available_metrics)}")
            return {}
        log_message(f" - ✅ 发现 {len(hardware_metrics)} 个硬件监控指标")
        # Query the requested instances one by one.
        log_message(f" - 正在从Prometheus查询硬件指标... (目标服务器: {', '.join(server_instances)})")
        query_step = 15  # range-query resolution, seconds
        metrics_by_instance = {}
        for target_instance in server_instances:
            # Standard node_exporter expressions for CPU busy %, memory used %
            # and root-filesystem used %.
            promql_queries = {
                'cpu': f'100 - (avg by (instance) (rate(node_cpu_seconds_total{{mode="idle", instance="{target_instance}"}}[1m])) * 100)',
                'memory': f'(1 - (node_memory_MemAvailable_bytes{{instance="{target_instance}"}} / node_memory_MemTotal_bytes{{instance="{target_instance}"}})) * 100',
                'disk': f'(1 - (node_filesystem_avail_bytes{{mountpoint="/", instance="{target_instance}"}} / node_filesystem_size_bytes{{mountpoint="/", instance="{target_instance}"}})) * 100'
            }
            metrics_by_instance[target_instance] = {}
            for metric_name, query in promql_queries.items():
                api_url = f"{PROMETHEUS_URL}/api/v1/query_range"
                params = {'query': query, 'start': start_time_unix, 'end': end_time_unix, 'step': f'{query_step}s'}
                try:
                    response = requests.get(api_url, params=params, timeout=30)
                    response.raise_for_status()
                    data = response.json()
                    log_message(f" - {metric_name}查询状态: {data.get('status', 'unknown')}")
                    if data['status'] == 'success' and data['data']['result']:
                        for result in data['data']['result']:
                            # Prometheus encodes missing points as the string 'NaN'.
                            values = [float(val[1]) for val in result['values'] if val[1] != 'NaN']
                            if values:
                                average_value = sum(values) / len(values)
                                metrics_by_instance[target_instance][metric_name] = average_value
                                log_message(f" - {target_instance} {metric_name}: {average_value:.2f}%")
                            else:
                                log_message(f" - {target_instance} {metric_name}: 无有效数据")
                    else:
                        log_message(f" - {metric_name}查询无结果: {data.get('status', 'unknown')}")
                        # Diagnostic fallback: a simple 'up' query for this instance.
                        simple_query = f'up{{instance="{target_instance}"}}'
                        simple_params = {'query': simple_query}
                        simple_response = requests.get(f"{PROMETHEUS_URL}/api/v1/query", params=simple_params, timeout=30)
                        simple_data = simple_response.json()
                        log_message(f" - 连通性测试 (up指标): {simple_data.get('status', 'unknown')}")
                except requests.RequestException as e:
                    # NOTE(review): this early return discards metrics already
                    # collected for previous instances — confirm intentional.
                    log_message(f"!!! 严重: 连接Prometheus失败: {e}")
                    return {}
                except Exception as e:
                    log_message(f"!!! 严重: 解析Prometheus响应时出错 ({metric_name}): {e}")
        log_message(" - Prometheus指标获取完成。")
        return metrics_by_instance
    except requests.RequestException as e:
        log_message(f"!!! 严重: 连接Prometheus失败: {e}")
        return {}
    except Exception as e:
        log_message(f"!!! 严重: 获取Prometheus指标时出现未知错误: {e}")
        return {}
def get_scenario_identifiers(jmx_path):
    """Return the 'testname' of every ThreadGroup (scenario) in the JMX file.

    Returns an empty list when the file cannot be parsed; logs a warning when
    the plan contains no thread groups at all.
    """
    try:
        document = ET.parse(jmx_path)
        names = [group.get("testname") for group in document.getroot().iter('ThreadGroup')]
        if not names:
            log_message("警告: 在JMX文件中未找到任何线程组。")
        return names
    except ET.ParseError as e:
        log_message(f"!!! 严重错误: JMX文件 '{jmx_path}' 格式无效,无法解析。")
        log_message(f" 解析错误: {e}")
        return []
    except Exception as e:
        log_message(f"!!! 解析JMX以获取场景标识符时出现未知错误: {e}")
        return []
def initialize_config_files(jmx_path):
    """Bootstrap the configuration files for a run of *jmx_path*.

    Creates an example 'servers.txt' on first run, and makes sure
    'test_plan.ini' has a section for this JMX file (plus the
    [ScenarioToServerMapping] and [SamplerPercentages] sections).

    Returns:
        True when configuration is complete and the test may proceed;
        False when the ini was (re)generated and the user must edit it first,
        or when the JMX contains no thread groups.
    """
    # First run: write an example servers.txt the user can adapt.
    if not os.path.exists(SERVER_CONFIG_FILE):
        log_message(f"--- 首次运行,正在生成示例服务器配置文件: {SERVER_CONFIG_FILE} ---")
        server_content = """# 格式: 服务器组全称 IP地址:端口
# 请根据您的实际环境修改此文件
核心业务系统 10.0.0.1:9098
核心业务系统 10.0.0.2:9098
认证服务 10.0.0.3:9098
网关服务 10.0.0.4:9098
"""
        with open(SERVER_CONFIG_FILE, 'w', encoding='utf-8') as f: f.write(server_content)
        log_message(f"'{SERVER_CONFIG_FILE}' 已创建。")
    # allow_no_value lets comment-only "keys" be stored; optionxform=str keeps
    # option names case-sensitive (scenario names may mix cases/CJK).
    config = configparser.ConfigParser(allow_no_value=True, interpolation=None)
    config.optionxform = str
    if os.path.exists(CONFIG_FILE):
        config.read(CONFIG_FILE, encoding='utf-8-sig')
    jmx_filename = os.path.basename(jmx_path)
    config_updated = False
    # New JMX file: create its plan section and seed the scenario mapping.
    if not config.has_section(jmx_filename):
        config_updated = True
        log_message(f"--- 检测到新的JMX文件 '{jmx_filename}',正在更新配置文件 ---")
        identifiers = get_scenario_identifiers(jmx_path)
        if not identifiers:
            log_message(f"!!! 错误: JMX文件 '{jmx_filename}' 中不包含任何线程组 (ThreadGroup),无法生成配置。"); return False
        config.add_section(jmx_filename)
        # Comment lines are stored as valueless options (allow_no_value).
        config.set(jmx_filename, f'# 针对 "{jmx_filename}" 的测试计划')
        config.set(jmx_filename, '# 请在场景名称后填写逗号分隔的并发数, 例如: 场景名 = 1, 5, 10')
        for key in identifiers: config.set(jmx_filename, key, "")
        if not config.has_section('ScenarioToServerMapping'):
            config.add_section('ScenarioToServerMapping')
            config.set('ScenarioToServerMapping', '# (重要) 在此指定每个测试场景需要监控的服务器组')
            config.set('ScenarioToServerMapping', '# 格式: JMX中的场景名 = servers.txt中的服务器组名1, 服务器组名2')
        # Add an empty mapping entry for each scenario not yet present.
        for key in identifiers:
            if not config.has_option('ScenarioToServerMapping', key): config.set('ScenarioToServerMapping', key, "")
    # The sampler-percentage section is global, independent of the JMX section.
    if not config.has_section('SamplerPercentages'):
        config_updated = True
        config.add_section('SamplerPercentages')
        config.set('SamplerPercentages', '# (重要) 请在此手动配置混合场景中取样器的流量百分比')
        config.set('SamplerPercentages', '# 格式: 取样器完整名称 = 百分比 (例如: HTTP请求-登录 = 20%)')
    # Anything changed: persist and ask the user to fill in the blanks first.
    if config_updated:
        with open(CONFIG_FILE, 'w', encoding='utf-8') as configfile: config.write(configfile)
        log_message(f"\n配置文件 '{CONFIG_FILE}' 已更新。")
        log_message(f"--- 请打开该文件,完成 [{jmx_filename}] 和 [ScenarioToServerMapping] 部分的配置。---")
        log_message("--- 编辑完成后,请重新运行此脚本。---")
        return False
    return True
def load_config_section(section_name):
    """Load one section of the ini file as {option: stripped value}.

    Comment-style options (starting with '#') and valueless options are
    skipped; a missing section yields an empty dict.
    """
    parser = configparser.ConfigParser(interpolation=None)
    parser.optionxform = str  # keep option names case-sensitive
    parser.read(CONFIG_FILE, encoding='utf-8-sig')
    result = {}
    if parser.has_section(section_name):
        for option, raw_value in parser.items(section_name):
            if option.startswith('#') or raw_value is None:
                continue
            result[option] = raw_value.strip()
    return result
def load_test_plan_from_config(jmx_path):
    """Read the concurrency plan for *jmx_path* from its ini section.

    Returns {scenario name: [thread counts]}; entries whose value is empty are
    skipped, and malformed values are logged and ignored.
    """
    section = load_config_section(os.path.basename(jmx_path))
    plan = {}
    for key, value_str in section.items():
        if not value_str:
            continue
        try:
            plan[key] = [int(item.strip()) for item in value_str.split(',')]
        except ValueError:
            log_message(f"警告: 配置文件中 '{key}' 的值 '{value_str}' 格式不正确。已跳过。")
    return plan
def load_server_map(file_path):
    """Parse servers.txt into {group name: [instance, ...]}.

    Lines are '<group name> <ip:port>'; blank lines and '#' comments are
    ignored. Returns a plain {} (not a defaultdict) on missing file or error.
    """
    server_map = defaultdict(list)
    if not os.path.exists(file_path):
        log_message(f"警告: 服务器配置文件 '{file_path}' 未找到。")
        return {}
    try:
        with open(file_path, 'r', encoding='utf-8') as handle:
            for raw_line in handle:
                entry = raw_line.strip()
                if not entry or entry.startswith('#'):
                    continue
                fields = entry.split(maxsplit=1)
                if len(fields) == 2:
                    server_map[fields[0]].append(fields[1])
    except Exception as e:
        log_message(f"!!! 读取服务器配置文件 '{file_path}' 时出错: {e}")
        return {}
    log_message(f"--- 成功从 '{file_path}' 加载服务器配置 ---")
    return server_map
def generate_target_servers_map(jmx_scenarios, server_map, scenario_to_server_mapping):
    """Build monitoring targets from the user's explicit ini mapping.

    Returns {scenario: sorted unique instance list}; scenarios without a
    mapping entry are omitted. Empty inputs short-circuit to {}.
    """
    if not server_map or not scenario_to_server_mapping:
        return {}
    target_map = {}
    for scenario in jmx_scenarios:
        groups_value = scenario_to_server_mapping.get(scenario)
        if not groups_value:
            continue
        instances = []
        for group_name in (part.strip() for part in groups_value.split(',')):
            if group_name in server_map:
                instances.extend(server_map[group_name])
            else:
                log_message(f"警告: 场景 '{scenario}' 配置的服务器组 '{group_name}' 在 {SERVER_CONFIG_FILE} 中未找到。")
        if instances:
            target_map[scenario] = sorted(set(instances))
    log_message("--- 已根据 [ScenarioToServerMapping] 配置生成监控目标 ---")
    return target_map
def prepare_workspace(original_jmx_path, output_dir=None):
    """Create an isolated workspace directory for a single test run.

    Copies the JMX (via an ASCII-safe temp copy when the path contains
    non-ASCII characters) plus any CSV data files it references, and rewrites
    the CSV paths inside the workspace JMX to be relative.

    Returns:
        (workspace_dir, workspace_jmx_path, temp_jmx_created, temp_jmx_path);
        (None, None, False, None) on failure.
    """
    import re
    log_message("--- 1. 正在创建隔离的测试工作区... ---")
    # Non-ASCII path handling: copy the JMX to an ASCII-safe temp name first.
    temp_jmx_created = False
    temp_jmx_path = None
    if re.search(r'[^a-zA-Z0-9_\\/:.-]', original_jmx_path):
        log_message("检测到JMX路径包含中文或特殊字符,将创建临时ASCII安全路径")
        jmx_dir = os.path.dirname(original_jmx_path)
        # Millisecond-precision timestamp keeps the temp name unique.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S_%f")[:17]
        temp_jmx_filename = f"temp_jmx_file_{timestamp}.jmx"
        temp_jmx_path = os.path.join(jmx_dir, temp_jmx_filename)
        shutil.copy2(original_jmx_path, temp_jmx_path)
        log_message(f"已创建临时JMX文件: {temp_jmx_path}")
        original_jmx_path = temp_jmx_path
        temp_jmx_created = True
    # Workspace is a timestamped sub-directory under output_dir (or
    # ./my_jmeter_output by default).
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    if output_dir:
        temp_dir = os.path.join(output_dir, f"jmeter_workspace_{timestamp}")
        os.makedirs(temp_dir, exist_ok=True)
    else:
        temp_dir = os.path.join(os.getcwd(), "my_jmeter_output", f"jmeter_workspace_{timestamp}")
        os.makedirs(temp_dir, exist_ok=True)
    log_message(f"测试工作区已创建: {temp_dir}")
    try:
        workspace_jmx = os.path.join(temp_dir, os.path.basename(original_jmx_path))
        # Collect CSV data-file references from the JMX ('filename' props).
        tree = ET.parse(original_jmx_path)
        root = tree.getroot()
        csv_files = {prop.text for prop in root.findall(".//stringProp[@name='filename']") if prop.text}
        # Copy each referenced CSV next to the workspace JMX.
        for csv_path in csv_files:
            csv_abs_path = os.path.join(os.path.dirname(original_jmx_path), csv_path)
            if os.path.exists(csv_abs_path):
                shutil.copy(csv_abs_path, temp_dir)
                log_message(f" - 已复制数据文件: {os.path.basename(csv_path)}")
            else:
                log_message(f" - 警告: 未找到数据文件: {csv_abs_path}")
        # Rewrite CSV references to bare filenames via plain text replacement
        # (preserves the JMX's original formatting).
        with open(original_jmx_path, 'r', encoding='utf-8') as f:
            content = f.read()
        for csv_path in csv_files:
            if csv_path and os.path.sep in csv_path:
                basename = os.path.basename(csv_path)
                content = content.replace(csv_path, basename)
        with open(workspace_jmx, 'w', encoding='utf-8') as f:
            f.write(content)
        log_message(f" - 已复制并更新JMX脚本: {os.path.basename(original_jmx_path)}")
        log_message(" - 工作区内JMX的CSV路径已更新。")
    except Exception as e:
        log_message(f"准备工作区时出错: {e}")
        # Best-effort cleanup of the ASCII-safe temp copy on failure.
        if temp_jmx_created and temp_jmx_path and os.path.exists(temp_jmx_path):
            try:
                os.remove(temp_jmx_path)
                log_message("已清理临时JMX文件")
            except:
                pass
        return None, None, False, None
    return temp_dir, workspace_jmx, temp_jmx_created, temp_jmx_path
def update_jmx_load_values(jmx_path, threads, duration, ramp_up):
    """Rewrite the load parameters of *jmx_path* in place (JMeter 5.2 - 5.6+).

    Two deliberate passes:
      1. ElementTree adds/repairs the scheduler and duration properties on
         every enabled ThreadGroup (older JMX files may lack them entirely).
      2. A text-level regex pass swaps thread count, ramp-up and loop settings,
         which keeps the rest of the file's original formatting intact.

    Args:
        jmx_path: path of the workspace JMX file to modify.
        threads:  number of concurrent threads.
        duration: scheduler duration in seconds.
        ramp_up:  ramp-up period in seconds.

    Returns:
        True (callers abort the step on a falsy result).
    """
    # Pass 1: ensure scheduler/duration properties exist and are correct.
    tree = ET.parse(jmx_path)
    root = tree.getroot()
    changed = False
    for tg in root.iter('ThreadGroup'):
        if tg.get('enabled') == 'true':
            scheduler_prop = tg.find("./boolProp[@name='ThreadGroup.scheduler']")
            if scheduler_prop is None:
                scheduler_prop = ET.SubElement(tg, 'boolProp', {'name': 'ThreadGroup.scheduler'})
            if scheduler_prop.text != 'true':
                scheduler_prop.text = 'true'
                changed = True
            duration_prop = tg.find("./stringProp[@name='ThreadGroup.duration']")
            if duration_prop is None:
                duration_prop = ET.SubElement(tg, 'stringProp', {'name': 'ThreadGroup.duration'})
            if duration_prop.text != str(duration):
                duration_prop.text = str(duration)
                changed = True
    # Only rewrite via ElementTree when something was actually repaired,
    # to avoid needlessly reformatting the whole document.
    if changed:
        tree.write(jmx_path, encoding='UTF-8', xml_declaration=True)
    # Pass 2: regex substitution on the raw text.
    # (The redundant function-local `import re` was removed; `re` is already
    # imported at module level.)
    with open(jmx_path, 'r', encoding='utf-8') as f:
        content = f.read()
    properties_to_update = {
        'ThreadGroup.num_threads': str(threads),
        'ThreadGroup.ramp_time': str(ramp_up),
        'LoopController.continue_forever': 'true',
        'LoopController.loops': '-1',
    }
    for prop_name, prop_value in properties_to_update.items():
        pattern = f'<stringProp name="{prop_name}">([^<]*)</stringProp>'
        content = re.sub(pattern, f'<stringProp name="{prop_name}">{prop_value}</stringProp>', content)
    with open(jmx_path, 'w', encoding='utf-8') as f:
        f.write(content)
    return True
def generate_plugin_output(jtl_file, plugin_type, output_path, is_image=True):
    """Run JMeterPluginsCMD.bat to render a chart PNG (or CSV) from a JTL.

    Picks cmdrunner-2.3.jar when available, falling back to 2.2; returns True
    on success, False on any failure (missing jar, non-zero exit, bad path).
    """
    plugins_cmd_path = os.path.join(JMETER_HOME, "bin", "JMeterPluginsCMD.bat")
    # Prefer the newer cmdrunner jar when both are present.
    candidate_jars = (
        os.path.join(JMETER_HOME, "lib", "cmdrunner-2.3.jar"),
        os.path.join(JMETER_HOME, "lib", "cmdrunner-2.2.jar"),
    )
    for jar in candidate_jars:
        if os.path.exists(jar):
            os.environ["CMDRUNNER_JAR"] = jar
            break
    else:
        log_message(f" - 警告: 未找到cmdrunner-2.2.jar或2.3.jar,请手动放入JMeter lib目录。")
        return False
    output_type_flag = "--generate-png" if is_image else "--generate-csv"
    log_message(f" - 正在为 {os.path.basename(jtl_file)} 生成 {plugin_type} {'图片' if is_image else 'CSV'}...")
    command = [plugins_cmd_path, output_type_flag, output_path, "--input-jtl", jtl_file, "--plugin-type", plugin_type]
    if is_image:
        command.extend(["--width", "800", "--height", "600"])
    try:
        subprocess.run(command, capture_output=True, text=True, encoding='utf-8', errors='ignore', check=True)
        log_message(f" - {plugin_type} 已成功生成: {output_path}")
        return True
    except subprocess.CalledProcessError as e:
        log_message(f"!!! 生成 {plugin_type} 失败。错误: {e.stderr}")
        return False
    except FileNotFoundError:
        log_message(f"!!! 严重: 无法执行命令。请确认 '{plugins_cmd_path}' 是否正确。")
        return False
def parse_jtl_summary(jtl_path):
    """Summarise a CSV-format JTL file into overall TPS/latency/success figures.

    Returns a dict with tps, avg_response_time, total/successful sample counts,
    success_rate and the start/end timestamps (ms); None when the file is
    missing, empty, or unparsable.
    """
    if not os.path.exists(jtl_path) or os.path.getsize(jtl_path) == 0:
        return None
    try:
        results = pd.read_csv(jtl_path, low_memory=False)
        if results.empty:
            return None
        sample_count = len(results)
        ok_count = results['success'].sum()
        # Duration is clamped to at least one second to avoid divide-by-zero.
        window_sec = max((results['timeStamp'].max() - results['timeStamp'].min()) / 1000.0, 1)
        return {
            'tps': float(f"{sample_count / window_sec:.2f}"),
            'avg_response_time': float(f"{results['elapsed'].mean():.2f}"),
            'total_samples': sample_count,
            'successful_samples': ok_count,
            'success_rate': float(f"{ok_count * 100 / sample_count:.2f}"),
            'start_time_ms': results['timeStamp'].min(),
            'end_time_ms': (results['timeStamp'] + results['elapsed']).max()
        }
    except Exception as e:
        log_message(f"!!! 解析JTL摘要时出错 '{jtl_path}': {e}")
        return None
def process_detailed_jtl(jtl_path):
    """Parse a JTL file into per-sampler statistics plus a TOTAL summary row.

    Args:
        jtl_path: path of the CSV-format JTL results file.

    Returns:
        (detailed_df, total_df) pandas DataFrames, or (None, None) when the
        file is missing/empty or cannot be parsed.
    """
    if not os.path.exists(jtl_path) or os.path.getsize(jtl_path) == 0: return None, None
    try:
        jtl_df = pd.read_csv(jtl_path, low_memory=False)
        if jtl_df.empty: return None, None
        # Clamp duration to >= 1s to avoid divide-by-zero on tiny runs.
        duration_sec = max((jtl_df['timeStamp'].max() - jtl_df['timeStamp'].min()) / 1000.0, 1)
        grouped = jtl_df.groupby('label')
        group_sizes = grouped.size()
        # BUGFIX: the throughput columns previously used np.zeros(...) although
        # numpy is never imported in this file, raising NameError whenever a
        # JTL lacked the bytes/sentBytes columns. Likewise the TOTAL row used
        # jtl_df.get('bytes', 0).sum(), which raises AttributeError on the int
        # default. Both are replaced with explicit column checks.
        if 'bytes' in jtl_df.columns:
            recv_kb = ((grouped['bytes'].sum() / 1024) / duration_sec).round(2).values
            total_recv_kb = round((jtl_df['bytes'].sum() / 1024) / duration_sec, 2)
        else:
            recv_kb = [0.0] * len(group_sizes)
            total_recv_kb = 0.0
        if 'sentBytes' in jtl_df.columns:
            sent_kb = ((grouped['sentBytes'].sum() / 1024) / duration_sec).round(2).values
            total_sent_kb = round((jtl_df['sentBytes'].sum() / 1024) / duration_sec, 2)
        else:
            sent_kb = [0.0] * len(group_sizes)
            total_sent_kb = 0.0
        detailed_report = pd.DataFrame({
            '取样器': group_sizes.index, '总样本数': group_sizes.values,
            '成功样本数': grouped['success'].sum().values,
            '成功率(%)': (grouped['success'].sum() / group_sizes * 100).round(2).values,
            '平均响应时间(ms)': grouped['elapsed'].mean().round(0).astype(int).values,
            '中位响应时间(ms)': grouped['elapsed'].median().round(0).astype(int).values,
            '90%响应时间(ms)': grouped['elapsed'].quantile(0.90).round(0).astype(int).values,
            '95%响应时间(ms)': grouped['elapsed'].quantile(0.95).round(0).astype(int).values,
            '99%响应时间(ms)': grouped['elapsed'].quantile(0.99).round(0).astype(int).values,
            '最小响应时间(ms)': grouped['elapsed'].min().values, '最大响应时间(ms)': grouped['elapsed'].max().values,
            'TPS': (group_sizes / duration_sec).round(2).values,
            '接收 KB/sec': recv_kb,
            '发送 KB/sec': sent_kb
        })
        total_samples = len(jtl_df)
        total_successful = jtl_df['success'].sum()
        total_report = pd.DataFrame({
            '取样器': ['TOTAL'], '总样本数': [total_samples], '成功样本数': [total_successful],
            '成功率(%)': [round(total_successful * 100 / total_samples, 2) if total_samples > 0 else 0],
            '平均响应时间(ms)': [int(round(jtl_df['elapsed'].mean()))],
            '中位响应时间(ms)': [int(round(jtl_df['elapsed'].median()))],
            '90%响应时间(ms)': [int(round(jtl_df['elapsed'].quantile(0.90)))],
            '95%响应时间(ms)': [int(round(jtl_df['elapsed'].quantile(0.95)))],
            '99%响应时间(ms)': [int(round(jtl_df['elapsed'].quantile(0.99)))],
            '最小响应时间(ms)': [jtl_df['elapsed'].min()], '最大响应时间(ms)': [jtl_df['elapsed'].max()],
            'TPS': [round(total_samples / duration_sec, 2)],
            '接收 KB/sec': [total_recv_kb],
            '发送 KB/sec': [total_sent_kb]
        })
        return detailed_report, total_report
    except Exception as e:
        log_message(f"!!! 解析详细JTL时出错 '{jtl_path}': {e}\n请检查JTL文件格式或内容,或手动分析该文件。\n如需跳过此文件,请在配置中排除或修正。"); return None, None
def run_fixed_step_test(workspace_dir, workspace_jmx, scenario_id, step):
    """Run a single fixed-load step of *step* threads for one scenario.

    Updates the JMX load values, launches JMeter non-GUI, streams its console
    output, then parses the JTL, renders TPS/RT charts and renames the output
    files back to their human-readable (possibly Chinese) names.

    Returns:
        The summary dict from parse_jtl_summary (augmented with
        'jtl_path', 'tps_chart_path', 'rt_chart_path'), or None on failure.
    """
    log_message(f"\n--- [场景: {scenario_id}] 正在执行测试: {step} 并发线程 ---")
    if not update_jmx_load_values(workspace_jmx, step, STEP_DURATION, DEFAULT_RAMP_UP):
        return None  # abort this step if the JMX update failed
    # BUGFIX: strip every non-ASCII / special character so the file name is
    # valid on any system; the original scenario name is kept for the final
    # (Chinese-readable) JTL name.
    ascii_safe_scenario_id = re.sub(r'[^A-Za-z0-9_]', '_', scenario_id)
    safe_scenario_name = ascii_safe_scenario_id
    jtl_file = os.path.join(workspace_dir, f"{ascii_safe_scenario_id}_{step}threads.jtl")
    # After JMeter finishes, the JTL is renamed to the original readable name.
    jtl_file_chinese = os.path.join(workspace_dir, f"{scenario_id}_{step}threads.jtl")
    jmeter_bat = os.path.join(JMETER_HOME, "bin", "jmeter.bat")
    # JMeter log first uses the ASCII-safe name ...
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    jmeter_log_file = os.path.join(workspace_dir, f"jmeter_{ascii_safe_scenario_id}_{step}threads_{timestamp}.log")
    # ... and is renamed to the readable name afterwards.
    jmeter_log_file_chinese = os.path.join(workspace_dir, f"jmeter_{scenario_id}_{step}threads_{timestamp}.log")
    # Build the JMeter command; path arguments must survive Win7 quoting.
    jmeter_cmd = [
        jmeter_bat, '-n', '-t', workspace_jmx,
        '-l', jtl_file,
        '-j', jmeter_log_file,
        '-Jthreads', str(step),
        '-Jduration', str(STEP_DURATION)
    ]
    if DETAILED_ERROR_LOGGING:
        jmeter_cmd.extend(["-Jjmeter.save.saveservice.response_code=true", "-Jjmeter.save.saveservice.response_message=true"])
    # Log command details for debugging.
    log_message(f" - JMeter可执行文件: {jmeter_bat} ({'存在' if os.path.exists(jmeter_bat) else '不存在'})")
    log_message(f" - JMX文件: {workspace_jmx} ({'存在' if os.path.exists(workspace_jmx) else '不存在'})")
    log_message(f" - JTL输出文件: {jtl_file}")
    log_message(f" - JMeter日志文件: {jmeter_log_file}")
    try:
        # On Win7, shell=True handles special characters in paths better.
        import platform
        use_shell = platform.system() == 'Windows' and platform.release() in ['7', 'Vista']
        if use_shell:
            # For Win7: join the list into a quoted string and use shell=True.
            cmd_str = ' '.join([f'"{arg}"' if ' ' in arg else arg for arg in jmeter_cmd])
            log_message(f" - 使用shell模式执行命令: {cmd_str}")
            process = subprocess.Popen(cmd_str, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                                       text=True, encoding='utf-8', errors='ignore', shell=True)
        else:
            # Other systems: pass the argument list directly.
            process = subprocess.Popen(jmeter_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                                       text=True, encoding='utf-8', errors='ignore')
        log_message(" - JMeter进程已启动,实时日志输出如下...\n" + "-" * 60)
        # Poll-based output reading to avoid waiting forever on the pipe.
        # NOTE(review): 'select' and 'msvcrt' below are imported but never
        # actually used on this code path.
        import select
        import time
        start_time = time.time()
        timeout = STEP_DURATION + JMETER_GRACE_PERIOD
        while True:
            # Has the process finished?
            if process.poll() is not None:
                # Process ended: drain any remaining output.
                remaining_output = process.stdout.read()
                if remaining_output:
                    print(remaining_output, end='')
                break
            # Timed out?
            if time.time() - start_time > timeout:
                log_message(f"\n警告: JMeter进程执行超时({timeout}秒),强制终止进程")
                process.terminate()
                time.sleep(5)  # give the process a chance to exit gracefully
                if process.poll() is None:
                    process.kill()  # force-kill if still alive
                break
            # Non-blocking-ish output read.
            try:
                # Windows-specific module import (kept although unused).
                import msvcrt
                import sys
                # readline blocks only briefly when data is available.
                line = process.stdout.readline()
                if line:
                    print(line, end='')
                else:
                    time.sleep(0.1)  # brief pause to avoid busy-waiting
            except:
                time.sleep(0.1)
        process.stdout.close()
        return_code = process.wait()
        log_message("-" * 60 + f"\n - JMeter进程已结束(返回码: {return_code})。开始分析结果...")
        # Detailed JTL file status check.
        log_message(f" - 检查JTL文件状态:")
        log_message(f" - 文件路径: {jtl_file}")
        log_message(f" - 文件存在: {'是' if os.path.exists(jtl_file) else '否'}")
        if os.path.exists(jtl_file):
            file_size = os.path.getsize(jtl_file)
            log_message(f" - 文件大小: {file_size} 字节")
        else:
            log_message(f" - 错误: JTL文件未生成")
            # Diagnostic: list any other JTL files in the workspace.
            jtl_files = [f for f in os.listdir(workspace_dir) if f.endswith('.jtl')]
            if jtl_files:
                log_message(f" - 发现其他JTL文件: {jtl_files}")
            return None
        if os.path.exists(jtl_file) and os.path.getsize(jtl_file) > 0:
            summary = parse_jtl_summary(jtl_file)
            if summary:
                # Render charts first, still under the ASCII-safe names.
                summary['tps_chart_path'] = os.path.join(workspace_dir, f"{safe_scenario_name}_{step}threads_tps.png")
                summary['rt_chart_path'] = os.path.join(workspace_dir, f"{safe_scenario_name}_{step}threads_rt.png")
                generate_plugin_output(jtl_file, "TransactionsPerSecond", summary['tps_chart_path'])
                generate_plugin_output(jtl_file, "ResponseTimesOverTime", summary['rt_chart_path'])
                # Then rename JTL/chart files to their readable names using
                # copy + delete instead of rename so nothing is lost mid-way.
                try:
                    # Safely rename the JTL file.
                    log_message(f" - 准备重命名JTL文件: {jtl_file} -> {jtl_file_chinese}")
                    log_message(f" - 原始JTL文件存在: {'是' if os.path.exists(jtl_file) else '否'}")
                    if os.path.exists(jtl_file):
                        # No-op when the names are already identical.
                        if jtl_file == jtl_file_chinese:
                            log_message(f" - JTL文件名无需更改: {os.path.basename(jtl_file)}")
                            summary['jtl_path'] = jtl_file
                        else:
                            if os.path.exists(jtl_file_chinese):
                                os.remove(jtl_file_chinese)  # remove stale target first
                            shutil.copy2(jtl_file, jtl_file_chinese)  # copy ...
                            os.remove(jtl_file)  # ... then delete the source
                            log_message(f" - JTL文件已重命名为: {os.path.basename(jtl_file_chinese)}")
                            summary['jtl_path'] = jtl_file_chinese  # update the path reference
                    else:
                        log_message(f" - 警告: 原始JTL文件不存在: {jtl_file}")
                        # Maybe a previous run already renamed it.
                        if os.path.exists(jtl_file_chinese):
                            log_message(f" - 发现目标JTL文件已存在: {jtl_file_chinese}")
                            summary['jtl_path'] = jtl_file_chinese
                        else:
                            log_message(f" - 错误: 无法找到任何JTL文件")
                            summary['jtl_path'] = jtl_file  # keep the original path
                    # Rename the chart images to the readable names as well.
                    tps_chinese = os.path.join(workspace_dir, f"{scenario_id}_{step}threads_tps.png")
                    rt_chinese = os.path.join(workspace_dir, f"{scenario_id}_{step}threads_rt.png")
                    if os.path.exists(summary['tps_chart_path']):
                        if summary['tps_chart_path'] == tps_chinese:
                            log_message(f" - TPS图表文件名无需更改: {os.path.basename(summary['tps_chart_path'])}")
                        else:
                            if os.path.exists(tps_chinese):
                                os.remove(tps_chinese)
                            shutil.copy2(summary['tps_chart_path'], tps_chinese)
                            os.remove(summary['tps_chart_path'])
                            summary['tps_chart_path'] = tps_chinese
                            log_message(f" - TPS图表已重命名为: {os.path.basename(tps_chinese)}")
                    if os.path.exists(summary['rt_chart_path']):
                        if summary['rt_chart_path'] == rt_chinese:
                            log_message(f" - 响应时间图表文件名无需更改: {os.path.basename(summary['rt_chart_path'])}")
                        else:
                            if os.path.exists(rt_chinese):
                                os.remove(rt_chinese)
                            shutil.copy2(summary['rt_chart_path'], rt_chinese)
                            os.remove(summary['rt_chart_path'])
                            summary['rt_chart_path'] = rt_chinese
                            log_message(f" - 响应时间图表已重命名为: {os.path.basename(rt_chinese)}")
                    # And the JMeter log file.
                    if os.path.exists(jmeter_log_file):
                        if jmeter_log_file == jmeter_log_file_chinese:
                            log_message(f" - JMeter日志文件名无需更改: {os.path.basename(jmeter_log_file)}")
                        else:
                            if os.path.exists(jmeter_log_file_chinese):
                                os.remove(jmeter_log_file_chinese)
                            shutil.copy2(jmeter_log_file, jmeter_log_file_chinese)
                            os.remove(jmeter_log_file)
                            log_message(f" - JMeter日志文件已重命名为: {os.path.basename(jmeter_log_file_chinese)}")
                except Exception as e:
                    log_message(f" - 警告: 文件重命名失败: {e}")
                    # Fall back to whichever JTL file actually exists.
                    if os.path.exists(jtl_file_chinese):
                        summary['jtl_path'] = jtl_file_chinese
                    elif os.path.exists(jtl_file):
                        summary['jtl_path'] = jtl_file
                    else:
                        log_message(f" - 严重错误: JTL文件丢失,无法找到 {jtl_file} 或 {jtl_file_chinese}")
                        return None
                return summary
        log_message(" - 警告: JTL结果文件未生成或为空。"); return None
    except subprocess.TimeoutExpired:
        log_message("!!! 严重警告: JMeter进程已超时,已被终止。"); process.kill(); return None
    except Exception as e:
        log_message(f"!!! 执行JMeter时发生未知错误: {e}"); return None
def display_and_save_summary(original_jmx_path, summary_results, detailed_df, totals_df, workspace_dir, sampler_percentages_config=None):
"""生成最终的Excel报告。"""
if detailed_df.empty: log_message("没有详细结果可供生成报告。"); return
log_message("\n--- 正在生成最终版Excel压测报告 (v31) ---")
# 确保 totals_df 包含必要的列,避免 KeyError
if totals_df.empty:
totals_df = pd.DataFrame(columns=['场景名', '并发数', '取样器', '总样本数', '成功样本数', '成功率(%)', '平均响应时间(ms)', '中位响应时间(ms)', '90%响应时间(ms)', '95%响应时间(ms)', '99%响应时间(ms)', '最小响应时间(ms)', '最大响应时间(ms)', 'TPS', '接收 KB/sec', '发送 KB/sec'])
df_overview = pd.DataFrame(summary_results)
df_sheet1 = pd.DataFrame({
'交易名': df_overview['线程名'], '并发用户数': df_overview['并发数'],
'平均响应时间(秒)': (df_overview['avg_response_time'] / 1000.0).round(2),
'平均TPS(笔/秒)': df_overview['tps'], '总笔数': df_overview['total_samples'],
'成功笔数': df_overview['successful_samples'],
'成功率': df_overview['success_rate'].apply(lambda x: f"{x:.2f}%"), '备注': ''
})
def format_metrics(all_metrics, key):
if not all_metrics or not isinstance(all_metrics, dict): return 'N/A'
lines = [f"{all_metrics.get(instance, {}).get(key, 0):.2f}%" for instance in sorted(all_metrics.keys())]
return "\n".join(lines)
df_sheet1['服务器名称'] = df_overview['all_hw_metrics'].apply(lambda x: "\n".join(sorted([inst.split(':')[0] for inst in x.keys()])))
df_sheet1['CPU使用率'] = df_overview['all_hw_metrics'].apply(lambda x: format_metrics(x, 'cpu'))
df_sheet1['内存使用率'] = df_overview['all_hw_metrics'].apply(lambda x: format_metrics(x, 'memory'))
df_sheet1['磁盘使用率'] = df_overview['all_hw_metrics'].apply(lambda x: format_metrics(x, 'disk'))
df_sheet2_source = df_overview.drop_duplicates(subset=['线程名', '并发数'])
df_sheet2 = df_sheet2_source[['线程名', '并发数']].rename(columns={'线程名': '交易名', '并发数': '并发用户数'})
df_sheet2['TPS 趋势图'] = ''; df_sheet2['响应时间趋势图'] = ''
df_sheet2['执行区间'] = df_sheet2_source.apply(lambda row: f"{datetime.fromtimestamp(row['start_time_ms']/1000).strftime('%Y-%m-%d %H:%M:%S')} - {datetime.fromtimestamp(row['end_time_ms']/1000).strftime('%H:%M:%S')}", axis=1)
image_paths = df_sheet2_source[['tps_chart_path', 'rt_chart_path']]
# 直接合并详细数据和TOTAL行,按照用户截图格式重新组织
combined_df = pd.concat([detailed_df, totals_df], ignore_index=True)
# 按照用户截图的格式重新组织数据结构和列名
df_sheet3_formatted = []
# 按场景名和并发数分组处理
for (scenario, concurrency), group in combined_df.groupby(['场景名', '并发数']):
# 为每个场景添加数据行
for _, row in group.iterrows():
formatted_row = {
'场景名': scenario,
'并发数': concurrency, # 所有行都显示并发数
'Label': row['取样器'],
'# Samples': int(row['总样本数']) if pd.notna(row['总样本数']) else 0,
'Average': int(row['平均响应时间(ms)']) if pd.notna(row['平均响应时间(ms)']) else 0,
'Median': int(row['中位响应时间(ms)']) if pd.notna(row['中位响应时间(ms)']) else 0,
'90% Line': int(row['90%响应时间(ms)']) if pd.notna(row['90%响应时间(ms)']) else 0,
'95% Line': int(row['95%响应时间(ms)']) if pd.notna(row['95%响应时间(ms)']) else 0,
'99% Line': int(row['99%响应时间(ms)']) if pd.notna(row['99%响应时间(ms)']) else 0,
'Min': int(row['最小响应时间(ms)']) if pd.notna(row['最小响应时间(ms)']) else 0,
'Max': int(row['最大响应时间(ms)']) if pd.notna(row['最大响应时间(ms)']) else 0,
'Error %': f"{100 - float(row['成功率(%)']):.2f}%" if pd.notna(row['成功率(%)']) else "0.00%",
'TPS': f"{float(row['TPS']):.2f}" if pd.notna(row['TPS']) else "0.00",
'接收 KB/sec': f"{float(row['接收 KB/sec']):.2f}" if pd.notna(row['接收 KB/sec']) else "0.00",
'发送 KB/sec': f"{float(row['发送 KB/sec']):.2f}" if pd.notna(row['发送 KB/sec']) else "0.00"
}
df_sheet3_formatted.append(formatted_row)
# 转换为DataFrame
df_sheet3 = pd.DataFrame(df_sheet3_formatted)
# 确保列顺序与用户截图一致
column_order = ['场景名', '并发数', 'Label', '# Samples', 'Average', 'Median', '90% Line', '95% Line', '99% Line', 'Min', 'Max', 'Error %', 'TPS', '接收 KB/sec', '发送 KB/sec']
if not df_sheet3.empty:
df_sheet3 = df_sheet3[column_order]
mixed_scenarios = totals_df['场景名'].unique()
df_benchmark = detailed_df[(detailed_df['并发数'] == 1) & (~detailed_df['场景名'].isin(mixed_scenarios))]
df_mixed = pd.concat([detailed_df[detailed_df['场景名'].isin(mixed_scenarios)], totals_df[totals_df['场景名'].isin(mixed_scenarios)]]).sort_values(by=['场景名', '并发数'])
def create_simplified_view(df):
if df.empty: return pd.DataFrame()
return pd.DataFrame({
'场景名': df['场景名'], '并发数': df['并发数'], 'Label': df['取样器'],
'平均响应时间(秒)': (df['平均响应时间(ms)'] / 1000.0).round(3),
'TPS(笔/秒)': df['TPS'], '总笔数': df['总样本数'],
'成功笔数': (df['总样本数'] * (df['成功率(%)'] / 100.0)).round(0).astype(int),
'成功率': df['成功率(%)'].apply(lambda x: f"{x:.2f}%")
})
df_sheet4 = create_simplified_view(df_benchmark)
df_sheet5 = create_simplified_view(df_mixed)
timestamp_file = datetime.now().strftime("%Y%m%d_%H%M%S")
jmx_name_prefix = os.path.splitext(os.path.basename(original_jmx_path))[0]
report_filename = os.path.join(workspace_dir, f"{jmx_name_prefix}_测试记录_{timestamp_file}.xlsx")
timestamp_sheet = datetime.now().strftime("%Y%m%d")
with pd.ExcelWriter(report_filename, engine='openpyxl') as writer:
s1, s2, s3, s4, s5 = f'性能概览{timestamp_sheet}', f'性能图表{timestamp_sheet}', f'聚合报告汇总{timestamp_sheet}', '基准测试', '混合测试'
df_sheet1.to_excel(writer, sheet_name=s1, index=False, header=False, startrow=2)
# 为sheet2创建空的工作表,稍后手动填充数据
pd.DataFrame().to_excel(writer, sheet_name=s2, index=False)
df_sheet3.to_excel(writer, sheet_name=s3, index=False)
if not df_sheet4.empty: df_sheet4.to_excel(writer, sheet_name=s4, index=False)
if not df_sheet5.empty: df_sheet5.to_excel(writer, sheet_name=s5, index=False)
wb = load_workbook(report_filename)
header_fill = PatternFill(start_color="4F81BD", end_color="4F81BD", fill_type="solid")
header_font = Font(bold=True, color="FFFFFF")
thin_border = Border(left=Side(style='thin'), right=Side(style='thin'), top=Side(style='thin'), bottom=Side(style='thin'))
def apply_styles(ws, merge_cols=()):
    """Apply the standard report styling to worksheet *ws*.

    - Row 1 gets the blue header fill and bold white font.
    - Every cell gets a thin border and centered alignment.
    - For each column index in *merge_cols*, vertically merge runs of
      consecutive equal cell values starting at data row 2.

    FIX: the default for *merge_cols* was a mutable list literal ([]);
    replaced with an immutable empty tuple (backward compatible — the
    default was never mutated, but a shared mutable default is unsafe).

    NOTE: relies on the enclosing scope's header_fill / header_font /
    thin_border style objects.
    """
    if not ws: return
    for cell in ws[1]: cell.fill, cell.font = header_fill, header_font
    for row in ws.iter_rows():
        for cell in row:
            cell.border = thin_border
            cell.alignment = Alignment(horizontal='center', vertical='center')
    for col_idx in merge_cols:
        start_row = 2
        # Scan one row past max_row so the final run of equal values is flushed.
        for row in range(3, ws.max_row + 2):
            if row > ws.max_row or ws.cell(row=row, column=col_idx).value != ws.cell(row=start_row, column=col_idx).value:
                if row - start_row > 1:  # only merge runs of length >= 2
                    ws.merge_cells(start_row=start_row, start_column=col_idx, end_row=row - 1, end_column=col_idx)
                    ws.cell(row=start_row, column=col_idx).alignment = Alignment(horizontal='center', vertical='center')
                start_row = row
ws1 = wb[s1]
ws1.cell(row=1, column=9, value="硬件资源使用情况").fill, ws1.cell(row=1, column=9, value="硬件资源使用情况").font = header_fill, header_font
ws1.merge_cells('I1:L1')
for col_num, header_text in enumerate(df_sheet1.columns, 1):
ws1.cell(row=2, column=col_num, value=header_text)
if col_num <= 8:
ws1.merge_cells(start_row=1, start_column=col_num, end_row=2, end_column=col_num)
ws1.cell(row=1, column=col_num, value=header_text).alignment = Alignment(horizontal='center', vertical='center', wrap_text=True)
apply_styles(ws1, merge_cols=[1])
ws2 = wb[s2]
# 手动填充sheet2的表头
headers = ['交易名', '并发用户数', 'TPS 趋势图', '响应时间趋势图', '执行区间']
for col_idx, header in enumerate(headers, 1):
ws2.cell(row=1, column=col_idx, value=header)
apply_styles(ws2, merge_cols=[1])
# 设置更宽的列宽以适应图片,避免重叠
ws2.column_dimensions['C'].width = 45 # 增加TPS图片列宽
ws2.column_dimensions['D'].width = 45 # 增加响应时间图片列宽
ws2.column_dimensions['E'].width = 45 # 增加响应时间图片列宽
# 同时填充数据和插入图片,确保在同一行
current_row = 2 # 从第2行开始
# 合并数据填充和图片插入逻辑
for index, row_data in df_sheet2.iterrows():
# 为当前数据行设置足够的行高来容纳图片
ws2.row_dimensions[current_row].height = 185 # 数据行高度,足够容纳缩放后的图片
# 在当前行写入数据
ws2.cell(row=current_row, column=1, value=row_data['交易名'])
ws2.cell(row=current_row, column=2, value=row_data['并发用户数'])
ws2.cell(row=current_row, column=3, value=row_data['TPS 趋势图'])
ws2.cell(row=current_row, column=4, value=row_data['响应时间趋势图'])
ws2.cell(row=current_row, column=5, value=row_data['执行区间'])
# 在同一行插入对应的图片
if index < len(image_paths):
img_row_data = image_paths.iloc[index]
# 插入TPS图片,放在"TPS 趋势图"列(C列)
tps_img_path = img_row_data.get('tps_chart_path', '')
if tps_img_path and os.path.exists(tps_img_path):
tps_img = Image(tps_img_path)
# 缩放图片到合适大小
tps_img.width = int(tps_img.width * 0.4)
tps_img.height = int(tps_img.height * 0.4)
# 将TPS图片放在C列(第3列),与数据行对齐
ws2.add_image(tps_img, f'C{current_row}')
# 插入响应时间图片,放在"响应时间趋势图"列(D列)
rt_img_path = img_row_data.get('rt_chart_path', '')
if rt_img_path and os.path.exists(rt_img_path):
rt_img = Image(rt_img_path)
# 缩放图片到合适大小
rt_img.width = int(rt_img.width * 0.4)
rt_img.height = int(rt_img.height * 0.4)
# 将响应时间图片放在D列(第4列),与数据行对齐
ws2.add_image(rt_img, f'D{current_row}')
# 移动到下一行(紧凑排列,只留1行间距)
current_row += 1
# 如果有混合场景且提供了百分比配置,则在并发数列中填入百分比值
if len(mixed_scenarios) > 0 and sampler_percentages_config:
log_message("正在应用百分比填充到并发数列...")
# 获取百分比配置
sampler_percentages = {}
for sampler, percentage in sampler_percentages_config.items():
sampler_percentages[sampler] = percentage
# 在聚合报告汇总工作表中填入百分比值到并发数列
ws3 = wb[s3]
for row in range(2, ws3.max_row + 1): # 从第2行开始(跳过表头)
label = ws3.cell(row=row, column=3).value # Label列
scenario = ws3.cell(row=row, column=1).value # 场景名列
current_concurrency = ws3.cell(row=row, column=2).value # 当前行的并发数
# 在混合场景中填充百分比或保持原并发数
if scenario in mixed_scenarios:
if label == 'TOTAL':
# TOTAL行保持原有的并发数(不修改)
pass
elif label in sampler_percentages:
# 其他行填充百分比值
ws3.cell(row=row, column=2).value = sampler_percentages[label]
# 如果存在混合测试工作表,也在其中填入百分比值到并发数列
if s5 in wb.sheetnames:
ws5 = wb[s5]
for row in range(2, ws5.max_row + 1): # 从第2行开始(跳过表头)
label = ws5.cell(row=row, column=3).value # Label列
scenario = ws5.cell(row=row, column=1).value # 场景名列
current_concurrency = ws5.cell(row=row, column=2).value # 当前行的并发数
# 在混合场景中填充百分比或保持原并发数
if scenario in mixed_scenarios:
if label == 'TOTAL':
# TOTAL行保持原有的并发数(不修改)
pass
elif label in sampler_percentages:
# 其他行填充百分比值
ws5.cell(row=row, column=2).value = sampler_percentages[label]
# 应用样式,但对于混合场景不合并并发数列
if len(mixed_scenarios) > 0:
# 对于有混合场景的情况,不合并并发数列
apply_styles(wb[s3], merge_cols=[1]) # 只合并场景名列
else:
apply_styles(wb[s3], merge_cols=[1, 2]) # 正常合并场景名和并发数列
if s4 in wb.sheetnames: apply_styles(wb[s4], merge_cols=[1])
if s5 in wb.sheetnames:
if len(mixed_scenarios) > 0:
apply_styles(wb[s5], merge_cols=[1]) # 混合测试工作表不合并并发数列
else:
apply_styles(wb[s5], merge_cols=[1, 2])
wb.save(report_filename)
log_message(f"\n报告已成功生成: {report_filename}")
def cleanup_workspace(workspace_dir):
    """Clean the workspace directory by deleting only 0-byte invalid files.

    Scans *workspace_dir* (non-recursively); directories and non-empty
    files are left untouched. Per-file errors are logged and skipped so
    one locked file cannot abort the cleanup.
    """
    log_message("\n--- 正在清理工作目录中的无效文件... ---")
    files_deleted_count = 0
    for filename in os.listdir(workspace_dir):
        file_path = os.path.join(workspace_dir, filename)
        try:
            if os.path.isfile(file_path) and os.path.getsize(file_path) == 0:
                os.unlink(file_path)
                # FIX: the original logged the literal text "(unknown)"
                # instead of interpolating the deleted file's name.
                log_message(f" - 已删除0字节文件: {filename}")
                files_deleted_count += 1
        except Exception as e:
            log_message(f"!!! 删除文件 '{file_path}' 时出错: {e}")
    log_message(f"--- 清理完成,共删除 {files_deleted_count} 个0字节文件。 ---")
# ==============================================================================
# --- 4. 主程序入口 ---
# ==============================================================================
def main() -> None:
    """Main entry point: orchestrates the whole automated test flow.

    Pipeline: validate JMX path and JMETER_HOME -> initialize/load the
    ini configuration -> prepare a workspace copy of the JMX -> for each
    configured scenario, enable only its ThreadGroup and run every
    concurrency step -> collect JTL results and Prometheus hardware
    metrics -> generate the Excel report -> clean up temp artifacts.
    """
    # JMX path is hard-coded at the top of the script; no CLI arguments.
    original_jmx_path = ORIGINAL_JMX_FILE
    if not os.path.exists(original_jmx_path):
        log_message(f"!!! 错误: JMX文件未找到: {original_jmx_path}"); return
    # Creates test_plan.ini / servers.txt templates on first run; aborts on failure.
    if not initialize_config_files(original_jmx_path): return
    log_message("欢迎使用通用型JMeter全自动压测框架 (v31)"); log_message("=" * 60)
    log_message(f"--- 目标JMX文件: {original_jmx_path} ---")
    if not os.path.isdir(JMETER_HOME):
        log_message(f"!!! 错误: JMeter路径配置不正确或不存在: '{JMETER_HOME}'");
        log_message(f"--- 请修改脚本顶部的 'JMETER_HOME' 变量为正确的路径。"); return
    # Scenario -> list of concurrency steps, keyed to this JMX in the ini file.
    test_plan = load_test_plan_from_config(original_jmx_path)
    if not test_plan:
        log_message(f"测试计划为空或加载失败。请检查'{CONFIG_FILE}'中是否有为'{os.path.basename(original_jmx_path)}'配置的有效场景。"); return
    # ThreadGroup test names actually present in the JMX; used to validate the plan.
    all_jmx_scenarios = get_scenario_identifiers(original_jmx_path)
    if not all_jmx_scenarios:
        log_message(f"!!! 错误: 您提供的 JMX 文件 '{os.path.basename(original_jmx_path)}' 中不包含任何线程组 (ThreadGroup),无法执行测试。"); return
    server_map = load_server_map(SERVER_CONFIG_FILE)
    # Manual sampler percentages and scenario->server-group monitoring mapping from the ini.
    sampler_percentages_config = load_config_section('SamplerPercentages')
    scenario_to_server_mapping = load_config_section('ScenarioToServerMapping')
    target_servers_map = generate_target_servers_map(all_jmx_scenarios, server_map, scenario_to_server_mapping)
    OUTPUT_DIR = None  # None -> default "my_jmeter_output" under the current working directory
    workspace_result = prepare_workspace(original_jmx_path, OUTPUT_DIR if OUTPUT_DIR else None)
    # prepare_workspace may return 2 or 4 values depending on whether a
    # sanitized temp JMX copy was created — presumably for non-ASCII paths;
    # TODO(review): confirm against prepare_workspace's implementation.
    if len(workspace_result) == 4:
        workspace_dir, workspace_jmx, temp_jmx_created, temp_jmx_path = workspace_result
    else:
        workspace_dir, workspace_jmx = workspace_result
        temp_jmx_created, temp_jmx_path = False, None
    if not workspace_dir:
        log_message("准备工作区失败,程序终止。")
        return
    all_summaries, all_detailed_dfs, all_totals_dfs = [], [], []
    for scenario_id, steps in test_plan.items():
        # Skip plan entries that have no matching ThreadGroup in the JMX.
        if scenario_id not in all_jmx_scenarios:
            log_message(f"警告: 配置文件中的场景名 '{scenario_id}' 在JMX文件中不存在,已跳过。")
            continue
        log_message(f"\n>>> 准备执行场景: '{scenario_id}'")
        target_servers = target_servers_map.get(scenario_id, [])
        if not target_servers: log_message(f" - 警告: 未在 [ScenarioToServerMapping] 中找到场景 '{scenario_id}' 的监控配置。")
        # Rewrite the workspace JMX so ONLY this scenario's ThreadGroup is enabled.
        tree = ET.parse(workspace_jmx)
        for tg in tree.getroot().iter('ThreadGroup'):
            tg.set("enabled", "true" if tg.get('testname') == scenario_id else "false")
        tree.write(workspace_jmx, encoding="UTF-8", xml_declaration=True)
        log_message(f" - JMX场景配置完毕,仅启用 '{scenario_id}'。")
        for step in steps:
            # One JMeter run per concurrency step; returns timing + JTL path on success.
            run_summary = run_fixed_step_test(workspace_dir, workspace_jmx, scenario_id, step)
            if run_summary and 'jtl_path' in run_summary:
                # Hardware metrics are queried over the run's time window (ms -> s).
                all_hw_metrics = get_prometheus_metrics(run_summary['start_time_ms']/1000, run_summary['end_time_ms']/1000, target_servers) if target_servers else {}
                run_summary.update({'线程名': scenario_id, '并发数': step, 'all_hw_metrics': all_hw_metrics})
                all_summaries.append(run_summary)
                detailed_df, total_df = process_detailed_jtl(run_summary['jtl_path'])
                if detailed_df is not None:
                    detailed_df['场景名'], detailed_df['并发数'] = scenario_id, step
                    all_detailed_dfs.append(detailed_df)
                    # Keep the TOTAL row only for mixed runs (more than one sampler).
                    if total_df is not None and len(detailed_df) > 1:
                        total_df['场景名'], total_df['并发数'] = scenario_id, step
                        all_totals_dfs.append(total_df)
            else:
                log_message(f"!!! 严重: 场景 '{scenario_id}' 在 {step} 并发下执行失败或未生成有效结果。")
    if all_detailed_dfs:
        master_detailed_df = pd.concat(all_detailed_dfs, ignore_index=True)
        master_totals_df = pd.concat(all_totals_dfs, ignore_index=True) if all_totals_dfs else pd.DataFrame()
        display_and_save_summary(original_jmx_path, all_summaries, master_detailed_df, master_totals_df, workspace_dir, sampler_percentages_config)
        cleanup_workspace(workspace_dir)
    else:
        log_message("所有测试均未产生有效数据,无法生成报告。")
    # Remove the sanitized temp JMX copy, if one was created.
    if temp_jmx_created and temp_jmx_path and os.path.exists(temp_jmx_path):
        try:
            os.remove(temp_jmx_path)
            log_message(f"已清理临时JMX文件: {temp_jmx_path}")
        except Exception as e:
            log_message(f"清理临时JMX文件时出错: {e}")
    log_message("\n" + "=" * 60 + "\n所有测试已完成!\n" + "=" * 60)
if __name__ == "__main__":
    # Force UTF-8 on stdout/stderr so the Chinese log text survives on
    # consoles whose default codec is not UTF-8 (e.g. GBK on Windows).
    current_encoding = sys.stdout.encoding
    if current_encoding and current_encoding.lower() != 'utf-8':
        sys.stdout.reconfigure(encoding='utf-8', errors='ignore')
        sys.stderr.reconfigure(encoding='utf-8', errors='ignore')
    main()