Batch-scanning websites for backup archives with Python

A Python script that batch-scans a list of sites online for exposed backup archives (.rar, .zip, .gz, .tar.gz); the target domains are saved in url.txt.
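For reference, url.txt is assumed to hold one target per line, either a bare domain or a full URL; entries without a scheme get http:// prepended by the script. A hypothetical example:

example.com
https://example.org
test.example.net:8080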
import logging
import socket
import threading
from binascii import b2a_hex
from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED, FIRST_COMPLETED
from urllib.parse import urlparse
from datetime import datetime

import requests
import inspect
import ctypes
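# Helpers to forcibly stop a running thread by injecting an exception into it
# (CPython-specific trick; not called in the main flow below).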
def _async_raise(tid, exctype):
  """raises the exception, performs cleanup if needed"""
  tid = ctypes.c_long(tid)
  if not inspect.isclass(exctype):
    exctype = type(exctype)
  res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
  if res == 0:
    raise ValueError("invalid thread id")
  elif res != 1:
    # """if it returns a number greater than one, you're in trouble,
    # and you should call it again with exc=NULL to revert the effect"""
    ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
    raise SystemError("PyThreadState_SetAsyncExc failed")
def stop_thread(thread):
  _async_raise(thread.ident, SystemExit)

# Silence urllib3's InsecureRequestWarning, since all requests use verify=False.
requests.packages.urllib3.disable_warnings()


def getIP(domain):
    # Resolve a domain name to its first IP address (helper; not used in the main flow).
    myaddr = socket.getaddrinfo(domain, 'http')
    return myaddr[0][4][0]

def vlun(urltarget, df, req):
    # Probe a single candidate URL with a HEAD request and record it if it looks
    # like a real backup archive (judged by Content-Length).
    mu = urltarget
    try:
        try:
            r = req.head(url=mu, headers=headers, timeout=timeout, allow_redirects=False, stream=True, verify=False)
        except Exception:
            # Connection error: tell the caller to give up on this host.
            return 1
        print(r.status_code)
        rarsize = int(r.headers.get('Content-Length', 0))
        r.close()
        size_kb = rarsize // 1024
        # Human-readable size string for the log line.
        if rarsize >= 1024 ** 3:
            size_str = str(rarsize // 1024 ** 3) + 'G'
        elif rarsize >= 1024 ** 2:
            size_str = str(rarsize // 1024 ** 2) + 'M'
        else:
            size_str = str(size_kb) + 'K'
        if size_kb > 100:
            logging.warning('[*] {}  size:{}'.format(urltarget, size_str))
            # Only archives larger than ~5 MB are written to the result file.
            if size_kb > 5000:
                with open(df, 'a') as f:
                    f.write(str(mu) + '\n')
                return "1"
    except Exception:
        pass




def list_of_groups(init_list, children_list_len):
    # Split init_list into chunks of children_list_len; any remainder becomes a final, shorter chunk.
    groups = zip(*(iter(init_list),) * children_list_len)
    end_list = [list(i) for i in groups]
    count = len(init_list) % children_list_len
    if count != 0:
        end_list.append(init_list[-count:])
    return end_list
def urlcheck(target=None, ulist=None):
    # Normalize a target into a URL ending with '/' and append it to ulist.
    # If the plain http:// form answers with a 301, assume the site wants https.
    if target is not None and ulist is not None:
        if target.startswith('http://') or target.startswith('https://'):
            line = target
        else:
            line = 'http://' + target
            try:
                f1 = requests.head(line, timeout=5)
                if f1.status_code == 301:
                    line = 'https://' + target
            except Exception as e:
                print(e)
        if line.endswith('/'):
            ulist.append(line)
        else:
            ulist.append(line + '/')
        return ulist

def a(u):
    # Build candidate backup-archive URLs for one site and probe them one by one.
    # str.lstrip() strips characters, not a prefix, so cut the scheme off by length instead.
    if u.startswith('http://'):
        ucp = u[len('http://'):]
    elif u.startswith('https://'):
        ucp = u[len('https://'):]
    else:
        ucp = u

    if '/' in ucp:
        ucp = ucp.split('/')[0]
    if ':' in ucp:
        cport = ucp.split(':')[1]
        ucp = ucp.split(':')[0]
    www1 = ucp.split('.')
    wwwlen = len(www1)
    # Hostname with its first label and the dots removed, e.g. www.example.com -> examplecom.
    wwwhost = ''
    for i in range(1, wwwlen):
        wwwhost += www1[i]

    current_info_dic = []  # candidate filenames to try against this host
    suffixFormat = ['rar', 'zip', 'gz', 'tar.gz']
    # Common backup filename prefixes: variations of the domain itself plus frequently used names.
    domainDic = [ucp + '.', ucp.replace('.', '') + '.', 'wz.', 'sql.', 'wwwroot.', 'index.', 'old.', 'web.',
                 'database.', 'upload.', 'website.', 'wangzhan.',
                 'package.', 'test.', 'bin.', 'ftp.', 'output.', 'config.', '网站备份.', '网站.', '数据库.', '数据库备份.', 'sjk.',
                 'shujuku.',
                 'backup.', 'faisunzip.',
                 '1.', '2.', '3.', '4.', '5.', '6.', '7.', '8.', '9.',
                 '666.', '777.', '888.', '999.', '234.', '555.', '333.', '444.',
                 'admin.', 'db.', 'test.', '123.', 'admin.', 'root.', 'data.', '666.',
                 '111.', 'beifen.',
                 'b.', 'template.', 'install.', 'core.', 'about.', 'cache.', 'download.',
                 'runtime.', 'a.', 'img.', 'include.',
                 '000.', '00.', '0.', '012.',
                 'application.', 'server.', 'extend.', 'vendor.', 'app.', 'public.',
                 'bf.', wwwhost + '.',
                 ucp.split('.', 1)[-1] + '.', www1[0] + '.', www1[1] + '.']

    for s in suffixFormat:
        for d in domainDic:
            current_info_dic.append(d + s)
    req = requests.session()
    for info in current_info_dic:
        url = str(u) + str(info)
        print(url)
        abn = vlun(url, datefile, req)
        # vlun returns 1 (int) only when the host could not be reached at all,
        # so stop wasting requests on it.
        if abn == 1:
            req.close()
            return

    req.close()
    return



# Main flow: read targets from url.txt and probe each host concurrently;
# hits are appended to a timestamped result file.
datefile = datetime.now().strftime('%Y%m%d_%H-%M-%S.txt')
headers = {}
timeout = 5
urllist = []
with open('url.txt', encoding='utf-8') as f:
    for line in f:
        c = line.strip()
        if not c:
            continue
        if not c.startswith("http"):
            c = "http://" + c
        urllist.append(c + "/")
print(len(urllist))
pool = ThreadPoolExecutor(max_workers=800)
all_task = [pool.submit(a, u) for u in urllist]
wait(all_task, return_when=ALL_COMPLETED)
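To try it out (the post does not name the script, so assume it is saved as scan.py), put url.txt in the same directory and run:

python3 scan.py

Every candidate URL is printed as it is probed; any archive larger than roughly 5 MB is appended to a timestamped result file such as 20220427_14-37-00.txt in the working directory.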
