# Daily Python spider example: "Aipan search" interface (xiongdipan.com, via xunjiso.com)


import re

import requests


# Browser-mimicking request headers: a Chrome 137 on Windows fingerprint
# (UA, sec-ch-ua client hints, sec-fetch-* navigation metadata) plus a
# referer from xunjiso.com, so the target site treats us as a normal
# cross-site navigation rather than a script.
headers = {
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "accept-language": "zh-CN,zh;q=0.9",
    "cache-control": "no-cache",
    "pragma": "no-cache",
    "priority": "u=0, i",
    "referer": "https://xunjiso.com/",
    "sec-ch-ua": "\"Google Chrome\";v=\"137\", \"Chromium\";v=\"137\", \"Not/A)Brand\";v=\"24\"",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"Windows\"",
    "sec-fetch-dest": "document",
    "sec-fetch-mode": "navigate",
    "sec-fetch-site": "cross-site",
    "sec-fetch-user": "?1",
    "upgrade-insecure-requests": "1",
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36"
}
# Search endpoint of the target pan-search site.
url = "https://xiongdipan.com/search"
# Query string: k = search keyword ("凡人修仙传"), s = page/offset "0",
# t = resource-type filter "-1" (presumably "all types" — TODO confirm
# against the site's own query parameters).
params = {
    "k": "凡人修仙传",
    "s": "0",
    "t": "-1"
}
# Step 1: issue the search request, then re-fetch the final (possibly
# redirected) URL — mirrors the original two-step flow, but now sends the
# browser headers on the second request too and bounds both with timeouts.
response = requests.get(url, headers=headers, params=params, timeout=15)
response = requests.get(response.url, headers=headers, timeout=15)
print(response.text)

# Step 2: collect detail-page links. The original called
# etree.HTML(...).xpath('//van-row/a/@href') without ever importing lxml
# (NameError at runtime); a stdlib regex over the same
# <van-row ...><a ... href="..."> structure replaces it.
# NOTE(review): regex HTML extraction is an approximation — confirm the
# result list matches the xpath on a live page.
href = re.findall(r'<van-row[^>]*>\s*<a[^>]*\bhref="([^"]+)"', response.text)
for h in href:
    # Fetch each detail page off the site root.
    detail_resp = requests.get('https://xiongdipan.com' + h, headers=headers, timeout=15)
    print(detail_resp.text)
    # Step 3: pull download URLs out of inline window.open("...") handlers.
    # Fixes of the original pattern: raw string, and '.' escaped so it no
    # longer matches any character; the trailing comma is kept as-is since
    # it reflects the page's observed JS ("window.open(...)," in a list).
    down_url = re.findall(r'window\.open\("(.*?)"\),', detail_resp.text)
    print(down_url)


# Source blog footer — posted @ 2025-09-10 21:35 by 我不是萧海哇~~~ (30 reads, 0 comments)