# Crawl your own Bilibili followings list (爬取自己B站关注列表)

import requests

# Endpoint for the Bilibili "followings" relation API
url = "https://api.bilibili.com/x/relation/followings"
# .strip() removes stray whitespace/newlines that commonly come along when
# pasting a uid or cookie string; a trailing newline in the Cookie header
# would make requests reject it as an invalid header value.
uid = input("your uid?: ").strip()
cookies = input("your cookies?: ").strip()

# Pagination state
pn = 1  # current page number, starting from page 1
ps = 24  # page size (entries requested per page)
total_count = 0  # running total of followings written out

# Walk the paginated followings list page by page, writing each user's
# display name and space URL to follows.txt (and echoing to the console),
# until the API returns an empty page or an error.
with open('follows.txt', 'w', encoding='utf-8') as file:
    # The headers never change between pages, so build them once
    # outside the loop instead of on every iteration.
    header = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
        "Referer": "",
        "Cookie": f"{cookies}",
    }

    while True:
        # Query parameters for the current page
        params = {
            "order": "desc",
            "order_type": "",
            "vmid": f"{uid}",
            "pn": pn,
            "ps": ps
        }

        # Fetch the current page; the timeout prevents the script from
        # hanging forever on a stalled connection.
        resp = requests.get(url, headers=header, params=params, timeout=10)

        # Guard clause: bail out on any non-200 HTTP response.
        if resp.status_code != 200:
            print(f"请求失败,状态码:{resp.status_code}")
            break

        data = resp.json()

        # The API reports errors (expired cookie, rate limiting, ...) via a
        # non-zero "code" and a null "data" field; the original code would
        # crash with a TypeError on data['data']['list'] in that case.
        if data.get('code', 0) != 0:
            print(f"接口返回错误,code={data.get('code')},message={data.get('message')}")
            break

        # Empty page list means we've walked past the last follower.
        page_list = (data.get('data') or {}).get('list') or []
        if not page_list:
            print("爬取结束")
            break

        # Write each user on this page to the file and the console.
        for user in page_list:
            total_count += 1  # running total across all pages
            uname = user['uname']
            mid = user['mid']
            # Build the user's personal space URL from their mid
            uid_url = f"https://space.bilibili.com/{mid}"
            file.write(f"{total_count}、name:{uname}\n")
            file.write(f"uid: {uid_url}\n")
            file.write("\n")  # blank line between entries
            print(f"{total_count}、name:{uname}")
            print(f"uid: {uid_url}\n")

        # Advance to the next page
        pn += 1

input("按任意键结束。")

# posted @ 2025-09-06 23:12  擎天柱的舍利子  阅读(17)  评论(0)