SRC Tip: Batch-Querying Website Weight

I. Getting the Scripts

  1. ip2domain
  2. ipInfoSearch
  3. A picked-up script
https://github.com/Sma11New/ip2domain


https://github.com/Potato-py/ipInfoSearch

See each project's GitHub README for usage.
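
Both GitHub projects cover the first step of this workflow: turning a batch of IPs into domains that can then be fed into the weight-check script further down. Purely as an illustration of that step (not a substitute for either tool, and not how they are implemented), a bare-bones PTR lookup over an IP list could look like the sketch below; ips.txt and domains.txt are placeholder file names, and plain reverse DNS will miss most virtually hosted sites:

import socket


def ptr_lookup(ip):
    # Return the PTR hostname for an IP, or None when no reverse record exists
    try:
        hostname, _aliases, _addresses = socket.gethostbyaddr(ip)
        return hostname
    except (socket.herror, socket.gaierror, OSError):
        return None


if __name__ == "__main__":
    with open("ips.txt", "r", encoding="utf-8") as f, \
            open("domains.txt", "w", encoding="utf-8") as out:
        for line in f:
            ip = line.strip()
            if not ip:
                continue
            domain = ptr_lookup(ip)
            if domain:
                out.write(domain + "\n")
                print("[+] {} -> {}".format(ip, domain))
            else:
                print("[-] {}: no PTR record".format(ip))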

The picked-up script

https://mp.weixin.qq.com/s/PLiiXlbBCDs_k4UPPdu2nA
import requests
import urllib3
import argparse
from lxml import etree
from concurrent.futures import ThreadPoolExecutor
# Suppress the InsecureRequestWarning that requests raises when TLS verification is disabled
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)


def query(url):
    # Build the aizhan.com weight-lookup page for the target domain
    url = "https://www.aizhan.com/cha/{}/".format(url)

    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,vi;q=0.7',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
        'Pragma': 'no-cache',
        'Referer': 'https://www.aizhan.com/',
        'Sec-Fetch-Dest': 'document',
        'Sec-Fetch-Mode': 'navigate',
        'Sec-Fetch-Site': 'same-origin',
        'Sec-Fetch-User': '?1',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1',
        'sec-ch-ua': '"Not?A_Brand";v="8", "Chromium";v="108", "Google Chrome";v="108"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': 'linux',
    }

    # verify=False matches the warning suppression above; the timeout keeps a hung request from stalling the pool
    response = requests.get(url=url, headers=headers, verify=False, timeout=10)
    lxml_tree = etree.HTML(response.text)
    # Grab the page title of the target site
    href_name = lxml_tree.xpath(
        '//div[@id="webpage_title"]//text()')
    print("\n-> Title: {0}".format("".join(href_name)))
    # Each search engine's weight badge is an <img>; the numeric value sits in its alt attribute
    br = lxml_tree.xpath(
        '//a[@id="baidurank_br"]//img//@alt')
    mbr = lxml_tree.xpath(
        '//a[@id="baidurank_mbr"]//img//@alt')
    pr = lxml_tree.xpath(
        '//a[@id="360_pr"]//img//@alt')
    sm_pr = lxml_tree.xpath(
        '//a[@id="sm_pr"]//img//@alt')
    sogou_pr = lxml_tree.xpath(
        '//a[@id="sogou_pr"]//img//@alt')
    google_pr = lxml_tree.xpath(
        '//a[@id="google_pr"]//img//@alt')
    print("[+] Weight summary: \n Baidu: {0}\tBaidu mobile: {1}\t360: {2}\tShenma: {3}\tSogou: {4}\tGoogle PR: {5}".format("".join(
        br), "".join(mbr), "".join(pr), "".join(sm_pr), "".join(sogou_pr), "".join(google_pr)))

    # ICP filing information for the domain
    icp = lxml_tree.xpath(
        '//ul[@id="icp"]//text()')
    print("[+] ICP filing: \n", repr(" ".join(icp)).replace(
        "\\n", "").replace("\\t", "").replace("'", ""))


if __name__ == "__main__":
    example_text = """
        python 1.py -u qq.com
        python 1.py -f 1.txt
    """
    try:
        parser = argparse.ArgumentParser(
            description=example_text, formatter_class=argparse.RawTextHelpFormatter
        )
        parser.add_argument("-u", "--url", required=False)
        parser.add_argument("-f", "--files", required=False)
        args = parser.parse_args()
        url = args.url
        files = args.files
        if url:
            query(url=url)
        elif files:
            count = 0
            with open(files, "r", encoding="utf-8") as f:
                # Thread pool capped at 10 concurrent lookups
                with ThreadPoolExecutor(10) as threadPool:
                    for url in f:
                        try:
                            threadPool.submit(query, url.replace("\n", ""))
                            count += 1
                        except Exception as e:
                            print("[-] error: ", e)
                            continue
            print("\ntotal: [{}]".format(count))
        else:
            parser.print_help()
    except Exception as e:
        print("[-] error: ", e)
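
One practical note before running it against a list: the aizhan.com/cha/ URL the script builds expects a bare domain, while reverse-IP output may mix in schemes, ports, paths and duplicates. A small optional cleanup pass along these lines (raw.txt and 1.txt are placeholder file names) can be run first, then the result handed to the script with -f:

from urllib.parse import urlparse


def normalize(line):
    # Reduce a raw line (full URL or bare host) to a lowercase domain without port
    line = line.strip()
    if not line:
        return None
    if "://" not in line:
        line = "http://" + line  # give urlparse a scheme so the host lands in netloc
    host = urlparse(line).netloc.split(":")[0]
    return host.lower() or None


if __name__ == "__main__":
    seen = set()
    with open("raw.txt", "r", encoding="utf-8") as f, \
            open("1.txt", "w", encoding="utf-8") as out:
        for line in f:
            host = normalize(line)
            if host and host not in seen:
                seen.add(host)
                out.write(host + "\n")
    print("kept {} unique domains".format(len(seen)))

After that, python 1.py -f 1.txt (as in the script's own example text) runs the weight check across the whole list.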