import requests
from fake_useragent import UserAgent
from lxml import etree
from time import sleep
from random import randint
def get_html(url):
    """Fetch a page with a random Firefox User-Agent through a proxy, pausing between requests."""
    headers = {
        'User-Agent': UserAgent().firefox
    }
    proxies = {
        "http": "http://35.236.158.232:8080"
    }
    # Random 3-9 second delay to avoid hammering the server.
    sleep(randint(3, 9))
    html_response = requests.get(url, headers=headers, proxies=proxies)
    html_response.encoding = 'utf-8'
    if html_response.status_code == 200:
        return html_response.text
    # Report the failure and return None so callers can skip this page.
    print(html_response.status_code)
    return None
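# A minimal retry sketch around get_html, assuming transient proxy or network
# failures are worth retrying; the attempt count and backoff are illustrative
# additions, not part of the original script.
def get_html_with_retry(url, attempts=3):
    """Call get_html up to `attempts` times, backing off between failed tries."""
    for attempt in range(attempts):
        try:
            text = get_html(url)
        except requests.RequestException as exc:
            print('request failed: {}'.format(exc))
            text = None
        if text is not None:
            return text
        # Wait a little longer after each failure (1s, 2s, 4s, ...).
        sleep(2 ** attempt)
    return None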
def parse_html(html):
    """Extract the title (the text of <em> inside <h1>) from a detail page."""
    e = etree.HTML(html)
    name = e.xpath('//h1/em/text()')
    all_info = {
        'names': name
    }
    return all_info
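# A self-contained check of the XPath used above, assuming the title sits in an
# <em> inside the page's <h1>; the sample markup is made up for illustration.
def _demo_parse_html():
    sample = '<html><body><h1><em>Sample Title</em></h1></body></html>'
    print(parse_html(sample))  # expected: {'names': ['Sample Title']}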
def parse_get_url(index_url):
    """Collect detail-page links from the ranking page."""
    html_r = get_html(url=index_url)
    e = etree.HTML(html_r)
    movie_urls = e.xpath("//div/a[@class='name']/@href")
    # The hrefs are protocol-relative, so prepend the scheme.
    return ['https:{}'.format(movie_url) for movie_url in movie_urls]
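# An alternative, hypothetical way to build absolute links with urljoin, which
# handles both protocol-relative and site-relative hrefs; a sketch, not a
# drop-in replacement for parse_get_url.
def absolutize(base_url, hrefs):
    """Resolve each scraped href against the page it came from."""
    from urllib.parse import urljoin
    return [urljoin(base_url, href) for href in hrefs]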
def main():
    index_url = 'https://www.qidian.com/rank'
    print(index_url)
    movie_url_list = parse_get_url(index_url=index_url)
    print(movie_url_list)
    for movie_url in movie_url_list:
        response = get_html(movie_url)
        if response is None:
            continue  # skip pages that failed to download
        outcome = parse_html(response)
        print(outcome)
if __name__ == '__main__':
    main()
    print('Crawler finished')