import urllib.request
import re
# Scraping a novel is the most basic kind of crawler; once you learn the approach, you can build more advanced crawlers. The idea is the same, only the libraries, JavaScript handling, or async details differ.
url = "https://www.qb5200.tw/xiaoshuo/36/36143/"#爬取的小说网址
with urllib.request.urlopen(url) as doc:
    html = doc.read()  # read the raw bytes of the index page
    html = html.decode("gbk")  # the site serves gbk-encoded pages
title = re.findall(r'<meta property="og:title" content="(.*?)"/>', html)[0]  # book title from the og:title meta tag
fb = open('%s.txt' % title, 'w', encoding='gbk')  # one output file for the whole book, named after the title
urls = re.findall(r'<dd><a href ="(.*?)">(.*?)</a></dd>', html)  # (relative chapter URL, chapter title) pairs from the TOC
for chapter_url, chapter_name in urls:  # unpack each (relative URL, chapter title) pair
    chapter_url = "https://www.qb5200.tw%s" % chapter_url  # TOC links are relative, so prepend the site root
    chapter_html = urllib.request.urlopen(chapter_url).read()  # fetch the chapter page
    chapter_html = chapter_html.decode("gbk")  # decode("gbk") or "utf-8", depending on the page's own encoding
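    # If a chapter page contains characters outside gbk, decode("gbk", errors="ignore") is one possible fallback.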
    chapter_content = re.findall(r'<div id="content" class="showtxt">(.*?)</div>', chapter_html, re.S)[0]  # chapter body; re.S lets . match across newlines in case the div spans multiple lines
    chapter_content = chapter_content.replace("&nbsp;", "")  # strip the &nbsp; entities the site uses for indentation
    chapter_content = chapter_content.replace("<br /><br />", "\n")  # turn the <br /> pairs into real line breaks
    fb.write(chapter_name)  # write the chapter title to the txt file
    fb.write(chapter_content)
    fb.write('\n')  # newline so the next chapter starts on its own line
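fb.close()  # close the output file so all buffered chapters are flushed to disk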