# First crawler program (第一个爬虫程序)


from urllib import  request
from urllib import parse
from bs4 import BeautifulSoup
# Fetch a recipe page from xinshipu.com and extract the recipe headline text
# into `reup`. Browser-like headers are sent because the site serves
# different/blocked content to obvious bot requests.
# NOTE(review): the hard-coded JSESSIONID cookie is a stale session capture —
# it will expire; confirm whether the page still loads without it.
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
    'Cache-Control': 'max-age=0',
    'Connection': 'keep-alive',
    'Cookie': 'JSESSIONID=7527E0F61B460FD3DFB60BC50DB9B1F0; _ga=GA1.2.1916127465.1503760815; _gid=GA1.2.132850422.1503760815; Visited="49391,685809,685823,598786,685825,685726"',
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0',
}
req = request.Request("http://www.xinshipu.com/zuofa/49391", headers=headers)

# Use the response as a context manager so the underlying socket is closed
# even if read()/decode() raises (the original leaked the connection).
with request.urlopen(req) as response:
    html = response.read().decode('utf8', errors='replace')

soup = BeautifulSoup(html, "html.parser")
# Guard against layout/selector drift: select() returns [] when nothing
# matches, and indexing [0] on that would raise IndexError.
matches = soup.select(".font16.ml10.col")
reup = matches[0].text if matches else ""

 

 
# Originally posted 2017-08-27 15:40 by 惜照 (blog footer; kept as a comment so the file parses)