Building an XSS Vulnerability Scanning API on Top of XSStrike
Hello, comrades!
The previous article built a scanning API on top of SqlMap, but detecting SQL injection alone is not enough, so this time I developed an XSS scanning API based on the open-source XSStrike library. Since XSStrike is driven from the command line, we have to modify its source code and add return values to its functions.
Adding return values to the XSStrike source
- First, download the source code locally, find xsstrike.py in it, and add a return value.
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@作者: debugfeng
@文件: xsstrike_tasks.py
@时间: 2025/04/16 08:52:59
@说明:
"""
import sys
import json
from pathlib import Path
import concurrent.futures
from datetime import datetime, timezone
from urllib.parse import urlparse
from app.core.xssTrikeTasks.core import log
from app.core.xssTrikeTasks.core import config
from app.core.xssTrikeTasks.core.config import blindPayload
from app.core.xssTrikeTasks.core.encoders import base64
from app.core.xssTrikeTasks.core.photon import photon
# from core.xssTrikeTasks.core.prompt import prompt
from app.core.xssTrikeTasks.core.utils import (
extractHeaders,
reader,
converter,
get_db_path,
)
from app.core.xssTrikeTasks.modes.bruteforcer import bruteforcer
from app.core.xssTrikeTasks.modes.crawl import crawl
from app.core.xssTrikeTasks.modes.scan import scan
from app.core.xssTrikeTasks.modes.singleFuzz import singleFuzz
from app.schemas.scan_request import XSStrikeParams
__all__ = ["xsstrike_start_scan"]
logger = log.setup_logger()
def xsstrike_start_scan(params: XSStrikeParams):
    # Initialize logging configuration
log.console_log_level = params.console_log_level
log.file_log_level = params.file_log_level
log.log_file = params.log_file
    # Initialize request headers
    if isinstance(params.header, bool):
        headers = {}
    elif isinstance(params.header, str):
        headers = extractHeaders(params.header)
    else:
        from app.core.xssTrikeTasks.core.config import headers
    # Initialize globalVariables
config.globalVariables = {
"headers": headers,
"checkedScripts": set(),
"checkedForms": {},
"definitions": get_db_path("definitions.json"),
"jsonData": params.paramData,
"path": params.path,
"url": params.url,
"file": params.file,
"fuzz": params.fuzz,
"recursive": params.recursive,
"seeds": params.seeds,
"skipDOM": params.skipDOM,
"skip": params.skip,
"delay": params.delay,
"timeout": params.timeout,
"level": params.level,
"threadCount": params.threadCount,
"proxy": params.proxy,
"encode": params.encode,
"blindXSS": params.blindXSS,
}
    # Parameter conversion
if params.path:
params.paramData = converter(params.url, params.url)
elif params.jsonData:
headers["Content-type"] = "application/json"
params.paramData = converter(params.paramData)
    # Initialize the payload and seed lists
payloadList = []
if params.file:
if params.file == "default":
payloadList = config.payloads
else:
payloadList = list(filter(None, reader(params.file)))
seedList = []
if params.seeds:
seedList = list(filter(None, reader(params.seeds)))
encoding = base64 if params.encode and params.encode == "base64" else False
if not params.proxy:
config.proxies = {}
    # Initialize the result structure
    results = {
        "mode": None,  # scan mode
        "scan_summary": {  # scan statistics
            "total_forms_found": 0,  # total number of forms
            "total_js_vulnerabilities_found": 0,  # total number of JS library vulnerabilities
            "total_crawled_pages": 0,  # total number of crawled pages
        },
        "scan_details": {  # detailed per-mode results
            "fuzz_results": [],  # fuzzing mode results
            "bruteforce_results": [],  # bruteforce mode results
            "scan_results": [],  # scan mode results
            "crawl_results": [],  # crawler mode results
        },
        "vulnerabilities": {  # detected vulnerabilities by category
            "dom_based_xss": [],
            "library_vulnerabilities": [],
            "form_based_vulnerabilities": [],
            "other_vulnerabilities": [],
        },
        "js_vulnerabilities": [],  # JS library vulnerability info
        "errors": [],  # error info
    }
    # Choose the scan mode based on the parameters
    try:
        if params.fuzz:
            results["mode"] = "fuzzing mode"
fuzz_result = singleFuzz(
params.url,
params.paramData,
encoding,
headers,
params.delay,
params.timeout,
)
results["scan_details"]["fuzz_results"].append(fuzz_result)
elif not params.recursive and not params.seeds:
if params.file:
results["mode"] = "暴力破解模式"
bruteforce_result = bruteforcer(
params.url,
params.paramData,
payloadList,
encoding,
headers,
params.delay,
params.timeout,
)
results["scan_details"]["bruteforce_results"].append(bruteforce_result)
else:
results["mode"] = "扫描模式"
scan_result = scan(
params.url,
params.paramData,
encoding,
headers,
params.delay,
params.timeout,
params.skipDOM,
params.skip,
)
results["scan_details"]["scan_results"].append(scan_result)
results["vulnerabilities"]["form_based_vulnerabilities"].extend(
scan_result.get("vulnerabilities", [])
)
else:
results["mode"] = "爬虫模式"
crawled_data = [] # 存储爬取的数据
if params.url:
seedList.append(params.url)
for target in seedList:
logger.run("Crawling the url")
scheme = urlparse(target).scheme
logger.debug("Url scheme: {}".format(scheme))
host = urlparse(target).netloc
main_url = scheme + "://" + host
crawlingResult = photon(
target,
headers,
params.level,
params.threadCount,
params.delay,
params.timeout,
params.skipDOM,
)
forms = crawlingResult[0]
domURLs = crawlingResult[1]
                js_vulns = crawlingResult[2]  # JS library vulnerability info
if js_vulns:
results["js_vulnerabilities"].extend(js_vulns)
results["scan_summary"]["total_js_vulnerabilities_found"] += len(
js_vulns
)
                # Pad forms and domURLs to equal length
difference = abs(len(domURLs) - len(forms))
if len(domURLs) > len(forms):
forms.extend([None] * difference)
elif len(forms) > len(domURLs):
domURLs.extend([None] * difference)
                # Crawl concurrently with a thread pool
threadpool = concurrent.futures.ThreadPoolExecutor(
max_workers=params.threadCount
)
futures = [
threadpool.submit(
crawl,
scheme,
host,
main_url,
form,
params.blindXSS,
blindPayload,
headers,
params.delay,
params.timeout,
encoding,
)
for form, domURL in zip(forms, domURLs)
]
crawl_results = []
for i, future in enumerate(concurrent.futures.as_completed(futures)):
crawl_result = future.result()
crawl_results.append(crawl_result)
results["scan_summary"]["total_forms_found"] += len(
crawl_result.get("forms", [])
)
results["vulnerabilities"]["dom_based_xss"].extend(
crawl_result.get("dom_based_xss", [])
)
results["vulnerabilities"]["form_based_vulnerabilities"].extend(
crawl_result.get("form_based_vulnerabilities", [])
)
if i + 1 == len(forms) or (i + 1) % params.threadCount == 0:
logger.info("Progress: %i/%i\r" % (i + 1, len(forms)))
logger.no_format("")
crawled_data.append(crawl_results)
results["scan_summary"]["total_crawled_pages"] += 1
results["scan_details"]["crawl_results"] = crawled_data
except Exception as e:
results["errors"].append(
{
"message": str(e),
"timestamp": datetime.now(timezone.utc)
.isoformat()
.replace("+00:00", "Z"),
}
)
return results
This is the entry file of the source tree: command-line output is replaced with a return value, which is what makes an API response possible. When you run it, other functions in the source will inevitably raise errors, so they also need return values added. You can simply hand this step over to an AI assistant; it is very convenient.
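For instance, a mode function that originally only logged its findings can be made to collect them and return them to the caller. Below is a minimal, self-contained sketch of the pattern; the names (single_fuzz_with_return, fuzz_one_param) are illustrative, not XSStrike's own:
def fuzz_one_param(target, name):
    # stand-in for XSStrike's real per-parameter fuzzing logic
    return "reflected" if name else "none"

def single_fuzz_with_return(target, param_names):
    results = {"target": target, "findings": []}
    for name in param_names:
        # where the original code only printed/logged, also record the outcome
        outcome = fuzz_one_param(target, name)
        results["findings"].append({"param": name, "outcome": outcome})
    return results  # the added return value, consumed by xsstrike_start_scan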
Development
Unlike SqlMap, XSStrike does not ship an official API, and long-running scans would leave the HTTP request waiting far too long. So we put execution behind a Celery queue: a scan request is dropped into the queue right away, and once it finishes you query the result with the task ID.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@作者: debugfeng
@文件: xsstrike_celery.py
@时间: 2025/04/22 18:42:29
@说明: 定义 Celery 任务,包括 XSS 和 SQLMap 扫描
"""
import logging
from celery import Celery
from celery.result import AsyncResult
from app.schemas.scan_request import XSStrikeParams
from app.core.xssTrikeTasks.xsstrike_tasks import xsstrike_start_scan
__all__ = [
"celery_app",
"run_xss_scan",
"format_celery_task_status",
]
# ====================== Celery initialization ======================
celery_app = Celery(
"celery_tasks",
broker="amqp://root:root@11.11.29.14:5672//",
backend="rpc://",
)
# ====================== Celery configuration ======================
celery_app.conf.update(
task_serializer="json",
result_serializer="json",
accept_content=["json"],
timezone="UTC",
enable_utc=True,
task_time_limit=3600,
task_acks_late=False,
task_reject_on_worker_lost=True,
)
# Task routing (route by task name to a dedicated queue)
celery_app.conf.task_routes = {
"xss_scan.run": {"queue": "xss_scan_queue"},
}
celery_app.conf.task_default_queue = "default"
# ====================== XSS scan task definition ======================
@celery_app.task(name="xss_scan.run", queue="xss_scan_queue")
def run_xss_scan(params: dict) -> dict:
"""
Celery 异步运行 XSS 扫描任务
:param params: dict,前端请求参数
:return: 扫描结果或错误信息
"""
try:
model_params = XSStrikeParams(**params)
result = xsstrike_start_scan(model_params)
return result
except Exception as e:
        logging.exception("XSS scan task raised an exception")
return {"error": str(e)}
# ====================== Task status formatting helper ======================
def format_celery_task_status(celery_id: str, task_result: AsyncResult) -> dict:
"""
将 Celery 任务状态统一格式化为结构化响应
:param celery_id: 任务 ID
:param task_result: AsyncResult 实例
:return: dict 格式的任务状态信息
"""
status = task_result.status
result = task_result.result if task_result.ready() else None
response = {
"celery_id": celery_id,
"status": status,
"code": 200,
"msg": "",
"result": {},
}
    status_messages = {
        "PENDING": "Task is waiting to run...",
        "STARTED": "Task is running...",
        "SUCCESS": "Task completed successfully.",
        "FAILURE": "Task failed.",
        "RETRY": "Task is being retried...",
    }
    response["msg"] = status_messages.get(status, f"Unknown status: {status}")
if status == "SUCCESS":
response["result"] = result
elif status == "FAILURE":
response["result"] = str(result) if result else "未知错误"
return response
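One caveat about the configuration above: with the rpc:// backend, results travel back over AMQP and are, by design, meant to be retrieved once by the client that submitted the task; they do not survive a broker restart. If the status endpoint will be polled repeatedly or from several API processes, a persistent result backend such as Redis is worth considering. A minimal sketch; the Redis address is an assumption, not part of the original setup:
from celery import Celery

celery_app = Celery(
    "celery_tasks",
    broker="amqp://root:root@11.11.29.14:5672//",  # same RabbitMQ broker as above
    backend="redis://127.0.0.1:6379/0",            # assumed local Redis instance
)
celery_app.conf.result_expires = 24 * 3600  # keep results queryable for one day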
With the groundwork done, we move on to developing the API routes; the queue task is simply imported and called from the route handlers.
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@作者: debugfeng
@文件: xsstrike.py
@时间: 2025/04/15 11:07:15
@说明:
"""
from celery.result import AsyncResult
from fastapi import APIRouter, Body, HTTPException, status, Query
from app.tasks.celery_task import run_xss_scan, format_celery_task_status, celery_app
from app.schemas.scan_request import XSStrikeParams
xss_router = APIRouter(prefix="/xss", tags=["XSS scan endpoints"])
@xss_router.post(
"/scan",
summary="启动XSS注入扫描任务",
description="启动XSS注入扫描任务,扫描启动后返回任务ID,用户可以使用此ID查询扫描进度和结果",
)
async def api_xss_scan(
request: XSStrikeParams = Body(
...,
        example={
            "url": "target URL, or a file path (depending on seeds)",
            "path": "whether to inject payloads into the URL path",
            "jsonData": "whether the POST data is JSON",
            "paramData": "POST data",
            "encode": "payload encoding",
            "fuzz": "whether to enable fuzzing",
            "timeout": "request timeout",
            "proxy": "proxy settings",
            "recursive": "whether to enable the crawler",
            "file": "payloads file to load",
            "seeds": "crawler seed file to load",
            "level": "crawl depth",
            "header": "request headers",
            "threadCount": "number of threads",
            "delay": "delay between requests, in seconds",
            "skip": "whether to skip confirmation prompts",
            "skipDOM": "whether to skip DOM XSS checks",
            "blindXSS": "whether to test for blind XSS",
            "console_log_level": "console log level",
            "file_log_level": "file log level",
            "log_file": "log file path",
        },
)
):
"""
启动XSS注入扫描任务
参数:
request (XSStrikeParams): 包含XSS扫描配置和目标的请求体
返回:
dict: 包含以下字段的字典:
- code (int): HTTP状态码,成功时为200
- msg (str): 操作结果信息
- data (dict): 包含任务ID的字典
异常处理:
如果任务提交过程中发生异常,抛出HTTP 500错误,并返回错误详情
"""
try:
        datas = request.model_dump()  # serialize the request into a dict
        celery_init = run_xss_scan.delay(datas)  # submit the task to the Celery queue
        # Return a uniformly formatted response
return {
"code": status.HTTP_200_OK,
"msg": "XSS 扫描任务已成功提交",
"data": {"celery_id": celery_init.id},
}
except Exception as e:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=f"任务提交失败: {str(e)}",
)
@xss_router.get(
"/status",
summary="查询XSS扫描任务状态",
description="根据任务ID查询XSS扫描任务的状态和结果",
)
async def api_task_status_data(
    celery_id: str = Query(..., min_length=1, example="celery task ID")
):
"""
根据任务ID查询XSS扫描任务的状态和结果
参数:
task_id (str): 要查询的XSS扫描任务ID
返回:
格式化后的任务状态信息,包含任务ID、状态、进度和结果等信息
异常处理:
如果查询过程中发生异常,抛出HTTP 500错误,并返回错误详情
"""
    try:
        if not celery_id:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST, detail="Task ID must not be empty"
            )
        task_result = AsyncResult(celery_id, app=celery_app)
        return format_celery_task_status(celery_id, task_result)
    except HTTPException:
        raise  # re-raise deliberate HTTP errors instead of masking them as 500s
    except Exception as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to query task status: {str(e)}",
        )
Once the routes are developed, the queue service has to be running before the API starts, so we launch a local worker first.
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@作者: debugfeng
@文件: worker.py
@时间: 2025/07/08 14:47:24
@说明:
"""
import logging
from app.tasks.celery_task import celery_app
def main():
"""
启动 Celery worker,监听指定队列。
本示例使用 solo pool 适合本地调试,生产环境建议使用默认 prefork。
"""
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s")
    argv = [
        "worker",
        "--loglevel=info",
        "--pool=solo",  # solo is best for local debugging; use the default prefork in production
        "--queues=xss_scan_queue",
        "--concurrency=6",  # worker concurrency; tune to the server (note: the solo pool ignores this)
    ]
celery_app.worker_main(argv)
if __name__ == "__main__":
main()
Finally, wire the newly developed routes into the entry file that starts the API service.
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@作者: debugfeng
@文件: main.py
@时间: 2025/04/03 10:42:29
@说明:
"""
import uvicorn
from fastapi import FastAPI
from app.routes import sqlmap, xsstrike
app = FastAPI(
    title="Security Testing API Documentation",
    description="API documentation for XSS and SQL injection scanning",
    version="0.0.1",
)
app.include_router(sqlmap.sqlmap_router)
app.include_router(xsstrike.xss_router)
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8081)
At startup, run worker.py first, then main.py; a quick client-side smoke test is sketched below. With that, our XSS scanning API is complete, and it is already in use in real-world work. I hope it helps you. Thanks!
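A rough sketch of that smoke test, assuming the service runs locally on port 8081 as configured in main.py; the target URL is a placeholder, and the request body depends on which XSStrikeParams fields your schema marks optional:
import time
import requests

BASE = "http://127.0.0.1:8081"  # main.py binds 0.0.0.0:8081

# submit a scan; the payload mirrors XSStrikeParams (values here are placeholders)
submit = requests.post(
    f"{BASE}/xss/scan",
    json={"url": "http://testphp.vulnweb.com/search.php?test=query"},
)
celery_id = submit.json()["data"]["celery_id"]

# poll the status endpoint until the task finishes
while True:
    data = requests.get(f"{BASE}/xss/status", params={"celery_id": celery_id}).json()
    if data["status"] in ("SUCCESS", "FAILURE"):
        break
    time.sleep(5)
print(data["result"])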
Always working hard, and I hope you are too! If you think this was worthwhile, a like and a follow are welcome!
Author: 李锋 || WeChat: LIFENG00078 || Official account: 全栈测试工程师
