2021/2/5-1

爬取贴吧的标题和链接 (Scrape thread titles and links from a Baidu Tieba forum.)

import requests
from lxml import etree
class Tieba(object):
    """Scrape thread titles and links from a Baidu Tieba forum, page by page."""

    def __init__(self, name):
        """:param name: forum keyword (the ``kw=`` query parameter)."""
        # First listing page; subsequent pages come from the "next page" link.
        self.url = "https://tieba.baidu.com/f?kw={}&ie=utf-8&pn=0".format(name)
        # NOTE: with some modern desktop User-Agents Tieba serves the thread
        # list wrapped in HTML comments; parse_data strips the comment markers
        # before parsing, so a current Chrome UA works here.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.146 Safari/537.36'}

    def get_data(self, url):
        """Fetch *url* and return the raw response body as bytes."""
        response = requests.get(url, headers=self.headers)
        return response.content

    def parse_data(self, data):
        """Parse one listing page.

        :param data: raw page bytes from :meth:`get_data`.
        :return: ``(data_list, next_url)`` where *data_list* is a list of
            ``{'title', 'link'}`` dicts and *next_url* is the absolute URL of
            the next page, or ``None`` on the last page.
        """
        # The listing markup is hidden inside HTML comments; removing the
        # comment markers exposes it to the parser.
        data = data.decode().replace('<!--', "").replace('-->', "")
        html = etree.HTML(data)
        el_list = html.xpath('//li[@class=" j_thread_list clearfix thread_item_box"]/div/div[2]/div[1]/div[1]/a')
        data_list = []
        for el in el_list:
            temp = {}
            temp['title'] = el.xpath("./text()")[0]
            temp['link'] = 'https://tieba.baidu.com/' + el.xpath("./@href")[0]
            data_list.append(temp)
        # Locate the "next page" anchor by its text rather than by position;
        # the last page has no such anchor, so the [0] lookup raises IndexError.
        try:
            next_url = 'https:' + html.xpath('//a[contains(text(),"下一页>")]/@href')[0]
        except IndexError:
            next_url = None
        return data_list, next_url

    def save_data(self, data_list):
        """Output the scraped records (one dict per line, to stdout)."""
        for data in data_list:
            print(data)

    def run(self):
        """Crawl every listing page starting from ``self.url`` until the last."""
        next_url = self.url
        while next_url is not None:
            # Fetch, extract (records + next-page URL), then output.
            data = self.get_data(next_url)
            data_list, next_url = self.parse_data(data)
            self.save_data(data_list)
            print(next_url)


if __name__ == '__main__':
    # Crawl the "ps" forum and print every thread's title and link.
    spider = Tieba("ps")
    spider.run()

 

posted @ 2021-02-05 14:20  路人刘  阅读(73)  评论(0)    收藏  举报