# Crawl news lists with the requests and BeautifulSoup4 libraries.
"""Scrape the campus news list from news.gzcc.cn and print the first article.

Fetches the news-list page, then for the first <li> that carries a
``.news-list-title`` extracts the title, link, publish date, source, and the
full article body, prints them, and stops (the trailing ``break``).
"""
import requests
from bs4 import BeautifulSoup
from datetime import datetime

# wangzhan ("website"): the campus news-list page to crawl.
wangzhan = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
res = requests.get(wangzhan)
res.encoding = 'utf-8'  # site serves UTF-8; requests may mis-detect otherwise
soup = BeautifulSoup(res.text, 'html.parser')


def getdetail(url):
    """Fetch one news-detail page and return its article text.

    Args:
        url: absolute URL of the article page.

    Returns:
        Text content of the first ``.show-content`` element.
        Raises IndexError if the page has no such element.
    """
    resd = requests.get(url)
    resd.encoding = 'utf-8'
    soupd = BeautifulSoup(resd.text, 'html.parser')
    return soupd.select('.show-content')[0].text


for news in soup.select('li'):
    # Only <li> items that contain a title element are real news entries.
    if len(news.select('.news-list-title')) > 0:
        title = news.select('.news-list-title')[0].text
        url = news.select('a')[0]['href']
        # .news-list-info holds [date, source]. Renamed from `time` to avoid
        # shadowing the stdlib `time` module name.
        pub_date = news.select('.news-list-info')[0].contents[0].text
        dt = datetime.strptime(pub_date, '%Y-%m-%d')
        source = news.select('.news-list-info')[0].contents[1].text
        detail = getdetail(url)
        print(dt, title, url, source, detail)
        break  # demo: process only the first article
"""Scrape headline anchors from the Baidu News front page and print each
title with its URL."""
import requests
from bs4 import BeautifulSoup
from datetime import datetime  # NOTE(review): unused in this snippet; kept from the original

wangzhan = 'http://news.baidu.com/'
res = requests.get(wangzhan)
res.encoding = 'utf-8'  # force UTF-8 decoding of the response body
soup = BeautifulSoup(res.text, 'html.parser')

for news in soup.select('strong'):
    anchors = news.select('a')
    # Guard: a <strong> tag with no <a> child would make the original
    # raise IndexError on anchors[0]; skip such entries instead.
    if not anchors:
        continue
    title = anchors[0].text
    url = anchors[0]['href']
    print(title, url)