Writing a Web Page Vulnerability Discovery Program in Python
Core idea:
After fetching the page content, use the BeautifulSoup module to parse the page and extract every form it contains. From each form, extract the action attribute (the target URL the form submits to), the method attribute, and the input fields, then automatically submit data to those targets to probe for vulnerabilities.
import requests
from bs4 import BeautifulSoup
import sys
from urllib.parse import urljoin  # action attributes are usually relative URLs, so urljoin builds the absolute URL


def request_page(url, data, method):
    """
    Generic page request function: sends a request to the target page
    using the given method (get, post, etc.).
    """
    try:
        if data is None:
            response = requests.get(url)
            return response.text
        if method == 'get':
            response = requests.get(url, params=data)
            return response.text
        if method == 'post':
            response = requests.post(url, data=data)
            return response.text
    except requests.exceptions.ConnectionError:
        # Return None on connection failure
        pass


if __name__ == "__main__":
    target_url = "http://192.168.140.137/mutillidae/index.php?page=dns-lookup.php"
    response = request_page(target_url, None, 'get')
    if response is None:
        print("Nothing captured")
        sys.exit()
    beautified_content = BeautifulSoup(response, 'html.parser')
    form_list = beautified_content.findAll('form')
    if len(form_list) == 0:
        # Exit the program if no form is found on the page
        print("No form is found!")
        sys.exit()
    for form in form_list:
        action = form.get('action')
        action_url_absolute = urljoin(target_url, action)
        # Normalize the method; forms may declare "POST"/"GET" in any case or omit it
        method = (form.get('method') or 'get').lower()
        input_list = form.findAll('input')
        input_dict = {}
        for input_tag in input_list:
            input_name = input_tag.get('name')
            input_type = input_tag.get('type')
            input_value = input_tag.get('value')
            if input_type == 'text':
                # Fill text fields with a test value before submitting
                input_value = 'test'
            input_dict[input_name] = input_value
        res = request_page(action_url_absolute, input_dict, method)
        print(res)
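The script above only prints the raw response for each submitted form; to actually flag a potential vulnerability, the response still has to be inspected. As a minimal sketch under stated assumptions (the marker payload, the helper name, and the detection logic are illustrative and not part of the original script), the hypothetical helper below injects a unique marker into every text field and reports the form as potentially vulnerable to reflected injection (e.g. XSS) if the marker comes back unescaped in the response.

from urllib.parse import urljoin

# Hypothetical probe payload; any unique, easily searched string would work
XSS_MARKER = "<script>alert('xss_test_1337')</script>"

def test_form_for_reflection(target_url, form, request_page):
    """Submit the form with the marker in every text input and look for it in the response."""
    action_url = urljoin(target_url, form.get('action'))
    method = (form.get('method') or 'get').lower()
    data = {}
    for input_tag in form.findAll('input'):
        name = input_tag.get('name')
        if name is None:
            continue
        if input_tag.get('type') == 'text':
            data[name] = XSS_MARKER              # inject the probe into text fields
        else:
            data[name] = input_tag.get('value')  # keep hidden/submit values as-is
    response_text = request_page(action_url, data, method)
    # If the raw marker appears in the response, the input is reflected unescaped
    if response_text and XSS_MARKER in response_text:
        print(f"[+] Possible reflected XSS at {action_url} via fields {list(data)}")
    else:
        print(f"[-] No reflection detected at {action_url}")

In the main loop one could then call test_form_for_reflection(target_url, form, request_page) instead of printing the raw response, reusing the request_page function already defined above.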
STRIVE FOR PROGRESS, NOT FOR PERFECTION
