访问这个网站可以查看自己ja3信息   

https://tls.browserleaks.com/json    

https://kawayiyi.com/tls

https://tls.peet.ws/api/all

https://browserleaks.com/tls

 

方法一

pip install curl_cffi   # https://github.com/yifeikong/curl_cffi
from curl_cffi import requests

# Note the `impersonate` parameter: it selects the browser TLS fingerprint.
resp = requests.get("https://tls.browserleaks.com/json", impersonate="chrome110")

print(resp.json())
# output: {'ja3_hash': '53ff64ddf993ca882b70e1c82af5da49'
# The reported fingerprint matches the impersonated browser.

# Proxies are supported as well.
proxies = {"https": "http://localhost:3128"}
resp = requests.get(
    "https://tls.browserleaks.com/json",
    impersonate="chrome110",
    proxies=proxies,
)





from curl_cffi import Curl, CurlOpt
from io import BytesIO

# Low-level libcurl-style API: collect the response body into a buffer.
response_buffer = BytesIO()
curl = Curl()
curl.setopt(CurlOpt.URL, b'https://tls.browserleaks.com/json')
curl.setopt(CurlOpt.WRITEDATA, response_buffer)

# Apply the Chrome 101 TLS fingerprint before performing the request.
curl.impersonate("chrome101")

curl.perform()
curl.close()
print(response_buffer.getvalue().decode())  # inspect the JA3 info for this request

curl_cffi异步爬取

import asyncio
import time

from curl_cffi import AsyncSession  # FIX: dropped unused `requests` import
from yscredit_tools.utils import get_proxies


async def fetch(url):
    """Fetch *url* once through an AsyncSession, impersonating Chrome 110.

    Returns the curl_cffi response object.
    """
    async with AsyncSession() as session:
        response = await session.get(
            url,
            impersonate="chrome110",  # present a Chrome browser TLS fingerprint
            headers={"User-Agent": "Mozilla/5.0"},
            verify=False,  # NOTE(review): disables TLS certificate verification
            timeout=10,
            proxies={'http': get_proxies()["http"]},
        )
        return response


async def main(urls):
    """Fetch all *urls* concurrently; responses come back in input order."""
    tasks = [fetch(url) for url in urls]
    return await asyncio.gather(*tasks)


if __name__ == "__main__":
    start_time = int(time.time() * 1000)
    # FIX: build the URL list directly instead of empty-list + extend.
    urls = ["https://www.baidu.com/" for _ in range(100)]
    results = asyncio.run(main(urls))
    print(int(time.time() * 1000) - start_time)  # elapsed wall time in ms
    # print(results)

curl_cffi 过ssl

from curl_cffi import requests

# Browser-like request headers mimicking Chrome 115 on Windows.
chrome_headers = {
    'authority': 'tls.browserleaks.com',
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
    'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7,da;q=0.6',
    'cache-control': 'no-cache',
    'pragma': 'no-cache',
    'sec-ch-ua': '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"',
    'sec-fetch-dest': 'document',
    'sec-fetch-mode': 'navigate',
    'sec-fetch-site': 'cross-site',
    'sec-fetch-user': '?1',
    'upgrade-insecure-requests': '1',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
}

# Combine realistic headers with the impersonated TLS fingerprint.
resp = requests.get('https://tls.browserleaks.com/json', headers=chrome_headers, impersonate="chrome110")
print(resp.text)
可以看到,akamai_hash 和 akamai_text 都有值了;而直接用 requests 请求时,akamai_hash 和 akamai_text 是空的。

# Impersonation targets supported by curl_cffi (backed by curl-impersonate).
edge99 = "edge99"
edge101 = "edge101"
chrome99 = "chrome99"
chrome100 = "chrome100"
chrome101 = "chrome101"
chrome104 = "chrome104"
chrome107 = "chrome107"
chrome110 = "chrome110"
chrome116 = "chrome116"
chrome99_android = "chrome99_android"
safari15_3 = "safari15_3"
safari15_5 = "safari15_5"
ff102 = "ff102"

  

方法二

pip install tls-client   # https://github.com/FlorianREGAZ/Python-Tls-Client
import tls_client

# Available `client_identifier` values (tls_client uses underscore names):
# Chrome  --> chrome_103, chrome_104, chrome_105, chrome_106, chrome_107,
#             chrome_108, chrome_109, chrome_110, chrome_111, chrome_112
# Firefox --> firefox_102, firefox_104, firefox_108, firefox_110
# Opera   --> opera_89, opera_90
# Safari  --> safari_15_3, safari_15_6_1, safari_16_0
# iOS     --> safari_ios_15_5, safari_ios_15_6, safari_ios_16_0
# iPadOS  --> safari_ios_15_6
# Android --> okhttp4_android_7, okhttp4_android_8, okhttp4_android_9,
#             okhttp4_android_10, okhttp4_android_11, okhttp4_android_12,
#             okhttp4_android_13

# Create a session that mimics the Chrome 120 fingerprint.
# (FIX: removed the duplicate `import tls_client` and a stray machine-generated
# citation artifact that had leaked into the comments.)
session = tls_client.Session(
    client_identifier="chrome_120",   # browser fingerprint identifier
    random_tls_extension_order=True   # randomize TLS extension order for extra anonymity
)

# Issue a GET request.
response = session.get(
    "https://httpbin.org/headers",
    headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"}
)
print(response.status_code)
print(response.text)

方法三

pip install pyhttpx  # https://github.com/zero3301/pyhttpx
import pyhttpx

# pyhttpx ships its own TLS stack, so requests carry a non-default fingerprint.
session = pyhttpx.HttpSession()

# GET with custom headers and cookies.
resp = session.get('https://httpbin.org/get', headers={'User-Agent': '3301'}, cookies={'k': '3301'})

# POST with an empty form body.
resp = session.post('https://httpbin.org/get', data={})

# POST through an authenticated proxy.
proxies = {'https': 'http://username:password@host:port'}
resp = session.post('https://httpbin.org/get', proxies=proxies)

print(resp.status_code)
print(resp.request.raw)  # raw bytes of the request that was actually sent

方法四

镜像有点大,不过有完整的编译环境

docker pull geekbyte1/pyantitls:v1.0

docker run -it -d geekbyte1/pyantitls:v1.0

方法五

Q佬的Dockerfile版

https://mp.weixin.qq.com/s/UZlLuzlQZrI7w82HI7zGuw

方法六

https://github.com/synodriver/pycurl/blob/master/special.markdown

2.git clone https://github.com/ycq0125/pycurl.git

3.进入 pycurl 目录即可完成。可以把 ./pycurl/requests_curl 整个移动到 site-packages 目录下,就可以全局使用了

方法七

https://pypi.org/project/requests-curl-antitls/ 内有详细步骤

另外下面这个是 VMware 的镜像,已经编译好的环境,可以自己拿来玩玩

链接:https://pan.baidu.com/s/1_R02JKqvmA8Km4QNAKYfYg

提取码:curl

 方法八  使用 requests 请求网站的时候,修改 JA3指纹

import random

from requests.adapters import HTTPAdapter

# FIX: `requests.packages` is a deprecated compatibility alias; import from
# urllib3 directly and keep the old path only as a fallback.
try:
    from urllib3.util.ssl_ import create_urllib3_context
except ImportError:
    from requests.packages.urllib3.util.ssl_ import create_urllib3_context

# Baseline OpenSSL cipher string; shuffling its entries changes the JA3 hash.
ORIGIN_CIPHERS = ('ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
                  'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES')


class DESAdapter(HTTPAdapter):
    """Transport adapter that randomises the TLS cipher order.

    Each adapter instance shuffles the cipher list once at construction
    time, so mounting a fresh instance yields a different JA3 fingerprint.
    """

    def __init__(self, *args, **kwargs):
        shuffled = ORIGIN_CIPHERS.split(':')
        random.shuffle(shuffled)
        # Append exclusions for anonymous/NULL ciphers and MD5.
        self.CIPHERS = ':'.join(shuffled) + ':!aNULL:!eNULL:!MD5'
        super().__init__(*args, **kwargs)

    def init_poolmanager(self, *args, **kwargs):
        # Install an SSL context built from this instance's cipher order.
        kwargs['ssl_context'] = create_urllib3_context(ciphers=self.CIPHERS)
        return super().init_poolmanager(*args, **kwargs)

    def proxy_manager_for(self, *args, **kwargs):
        # Same cipher order applies to proxied connections.
        kwargs['ssl_context'] = create_urllib3_context(ciphers=self.CIPHERS)
        return super().proxy_manager_for(*args, **kwargs)


import requests

headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36 Edg/92.0.902.67'}
session = requests.Session()
session.headers.update(headers)

# Mount a fresh adapter on every iteration so each request goes out with a
# newly shuffled cipher list — the reported JA3 changes every time.
for _ in range(5):
    session.mount('https://tls.browserleaks.com', DESAdapter())
    print(session.get('https://tls.browserleaks.com/json').json())

  

二:tls相关原理可以看这篇文章

python完美突破tls/ja3 # https://mp.weixin.qq.com/s/GU2AgushvIEtLHtYR7BQzg
为什么随机 IP、随机 UA 也逃不掉被反爬虫的命运 # https://mp.weixin.qq.com/s/Qx7PjnBgrTR30oCurU6CGw
Python 如何突破反爬虫指纹 JA3 # https://mp.weixin.qq.com/s/7VJHCl2ht4pjkgIdcOKc5w
JS逆向之猿人学第十九题突破ja3指纹验证 # https://mp.weixin.qq.com/s?__biz=MzU0MjUwMTA2OQ==&mid=2247484137&idx=1&sn=ccfa46a45a09e7fde284dfba281fd719&chksm=fb18f34bcc6f7a5d49ee3050887aa909708ede268cb5046bcd80d43ffdc7c9f948d428c65ec4&scene=21#wechat_redirect
深度剖析ja3指纹及突破 # https://mp.weixin.qq.com/s?__biz=MzU0MjUwMTA2OQ==&mid=2247484649&idx=1&sn=42eb5319db1ca830ca81d75218e4c0e4&chksm=fb18f54bcc6f7c5de60395d03650aa7c6a30e37407989c604c31ffa1076d071a32afcb0556c4&scene=21#wechat_redirect
ja3指纹补充说明 # https://mp.weixin.qq.com/s?__biz=MzU0MjUwMTA2OQ==&mid=2247484522&idx=1&sn=1d47898130e689413c40fecc4b0a2b39&chksm=fb18f5c8cc6f7cde416ae1c5f93555b66be6bfdc01b9d715b2b4c5662900c95f39a1036312ea&scene=21#wechat_redirect

  

三:(SSLError(1, '[SSL: SSLV3_ALERT_HANDSHAKE_FAILURE] sslv3 alert handshake failure (_ssl.c:997)')))

from curl_cffi import requests

全方位TLS指纹管理方案

import undetected_chromedriver as uc
from playwright.sync_api import sync_playwright
import requests_random_user_agent  # 随机UA
import tls_client  # 专用TLS客户端库
import random
import time

# 方案1: 使用undetected_chromedriver
# Option 1: undetected_chromedriver
def scrape_with_undetected_chrome(url):
    """Fetch *url* in a real (undetected) Chrome and return the page HTML.

    Args:
        url: The page to load.

    Returns:
        The rendered page source as a string.
    """
    options = uc.ChromeOptions()
    options.add_argument("--disable-gpu")

    driver = uc.Chrome(options=options)
    # FIX: quit the browser even if navigation or scripting raises,
    # otherwise every failure leaks a Chrome process.
    try:
        driver.get(url)

        # Crude human-behaviour simulation: pause, scroll, pause.
        time.sleep(random.uniform(1, 3))
        driver.execute_script(f"window.scrollTo(0, {random.randint(100, 1000)});")
        time.sleep(random.uniform(1, 3))

        return driver.page_source
    finally:
        driver.quit()

# 方案2: 使用Playwright
# Option 2: Playwright
def scrape_with_playwright(url):
    """Fetch *url* with Playwright Chromium and return the page HTML.

    Args:
        url: The page to load.

    Returns:
        The rendered page content as a string.
    """
    with sync_playwright() as p:
        browser = p.chromium.launch(
            headless=False,
            args=["--disable-blink-features=AutomationControlled"]
        )
        context = browser.new_context(
            viewport={"width": 1920, "height": 1080},
            locale="en-US"
        )
        page = context.new_page()
        page.goto(url)

        # Simulate human behaviour: move the mouse, scroll, pause.
        page.mouse.move(random.randint(100, 800), random.randint(100, 600))
        # FIX: Mouse.wheel(delta_x, delta_y) requires BOTH deltas; calling it
        # with only delta_y raised a TypeError.
        page.mouse.wheel(0, random.randint(100, 300))
        page.wait_for_timeout(random.uniform(1000, 3000))

        content = page.content()
        browser.close()
        return content

# 方案3: 使用专用TLS客户端库
# Option 3: dedicated TLS-client library
def scrape_with_tls_client(url):
    """Fetch *url* with tls_client, presenting a Chrome 110 TLS fingerprint.

    Args:
        url: The page to fetch.

    Returns:
        The response body as text.
    """
    # FIX: tls_client identifiers use underscores ("chrome_110"); "chrome110"
    # is not in the library's documented identifier list.
    session = tls_client.Session(
        client_identifier="chrome_110",
        random_tls_extension_order=True
    )

    response = session.get(url)
    return response.text

# 综合方案: 随机选择不同方法并使用代理
# Combined approach: pick a random method; optionally route tls_client via proxy.
def adaptive_scraping(url, proxies=None):
    """Scrape *url* with a randomly chosen method, falling back on failure.

    Args:
        url: Target URL.
        proxies: Optional list of "host:port" proxy strings; only honoured
            for the tls_client method.

    Returns:
        The page content returned by whichever method succeeded.

    Raises:
        Exception: re-raised from the first method if no backup succeeds.
    """
    all_methods = [
        scrape_with_undetected_chrome,
        scrape_with_playwright,
        scrape_with_tls_client,
    ]
    method = random.choice(all_methods)

    def _run(scraper):
        # BUG FIX: the original assigned `session.proxies` here, but no
        # `session` existed in this scope (NameError), and the proxy never
        # reached the request. Build a dedicated tls_client session so the
        # proxy is actually applied.
        if proxies and scraper is scrape_with_tls_client:
            proxy = random.choice(proxies)
            session = tls_client.Session(
                client_identifier="chrome_110",
                random_tls_extension_order=True,
            )
            session.proxies = {
                "http": f"http://{proxy}",
                "https": f"https://{proxy}"
            }
            return session.get(url).text
        return scraper(url)

    try:
        return _run(method)
    except Exception as e:
        print(f"Error with {method.__name__}: {e}")
        # On failure, retry with one of the remaining methods.
        backup_methods = [m for m in all_methods if m != method]
        if backup_methods:
            return _run(random.choice(backup_methods))
        raise