7-7


from urllib.request import urlopen
from bs4 import BeautifulSoup

# Fetch the page and parse it with Python's built-in html.parser
res = urlopen('http://pythonscraping.com/pages/page1.html')
bs = BeautifulSoup(res.read(), 'html.parser')
print(bs.h1)  # print the first <h1> tag on the page
BeautifulSoup takes the raw HTML plus the name of a parser; res.status is 200 when the request succeeds, and the common parser choices are html.parser (built in), lxml, and html5lib.
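A minimal sketch of those two points: check res.status before parsing, then hand the same HTML to different parsers. lxml and html5lib are optional third-party parsers that have to be installed separately (pip install lxml html5lib); this is only an illustration, not part of the examples above.

from urllib.request import urlopen
from bs4 import BeautifulSoup

res = urlopen('http://pythonscraping.com/pages/page1.html')
print(res.status)  # 200 means the request succeeded

html = res.read()
# html.parser ships with Python; lxml is faster, html5lib is the most
# lenient with broken markup -- both are third-party packages.
print(BeautifulSoup(html, 'html.parser').h1)
print(BeautifulSoup(html, 'lxml').h1)      # assumes lxml is installed
print(BeautifulSoup(html, 'html5lib').h1)  # assumes html5lib is installed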
from urllib.request import urlopen
from urllib.error import HTTPError
from bs4 import BeautifulSoup

def getTitle(url):
    # Return the page's <h1> inside <body>, or None if the request
    # fails or the tag is missing.
    try:
        html = urlopen(url)
    except HTTPError as e:
        print(e)
        return None
    try:
        bsObj = BeautifulSoup(html, "html.parser")
        title = bsObj.body.h1
    except AttributeError:
        # bsObj.body or .h1 was None, so the attribute lookup failed
        return None
    return title

title = getTitle("http://www.pythonscraping.com/exercises/exercise1.html")
if title is None:
    print("Title could not be found")
else:
    print(title)
Use try/except for error handling: HTTPError covers a failed request, and AttributeError covers a missing tag (e.g. when the page has no <body> or <h1>).
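The snippet above only catches HTTPError on the request; if the server cannot be reached at all, urlopen raises URLError instead (HTTPError is a subclass of URLError, so it must be caught first). A minimal sketch with a placeholder URL:

from urllib.request import urlopen
from urllib.error import HTTPError, URLError

try:
    html = urlopen("https://example.invalid/page.html")  # placeholder URL
except HTTPError as e:
    print("The server returned an HTTP error:", e)
except URLError as e:
    print("The server could not be reached:", e)
else:
    print("Request succeeded")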
from urllib.request import urlopen
from bs4 import BeautifulSoup
import datetime
import random
import re

# Seed the random number generator with the current time so each run
# wanders through a different chain of articles.
random.seed(datetime.datetime.now().timestamp())

def getLinks(articleUrl):
    # Collect every in-article link: hrefs that start with /wiki/ and
    # contain no colon, which excludes special pages such as Category: or File:
    html = urlopen("http://en.wikipedia.org" + articleUrl)
    bsObj = BeautifulSoup(html, "html.parser")
    return bsObj.find("div", {"id": "bodyContent"}).findAll(
        "a", href=re.compile("^(/wiki/)((?!:).)*$"))

# Start at the Kevin Bacon article and keep following a random link.
links = getLinks("/wiki/Kevin_Bacon")
while len(links) > 0:
    newArticle = links[random.randint(0, len(links) - 1)].attrs["href"]
    print(newArticle)
    links = getLinks(newArticle)
random.seed() sets the seed of the random number generator; seeding with the current time makes each crawl follow a different chain of links.
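A quick illustration of what the seed does, separate from the crawl above: a fixed seed (42 here is an arbitrary choice) reproduces the same sequence every run, which is why a time-based seed is used when each run should differ.

import random

random.seed(42)  # fixed seed: the sequence below is the same on every run
print([random.randint(0, 9) for _ in range(5)])

random.seed(42)
print([random.randint(0, 9) for _ in range(5)])  # identical to the list above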
Natural Language Toolkit (NLTK): a toolkit for natural language processing and one of the most commonly used Python libraries in the NLP field.
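A minimal sketch of typical NLTK usage, assuming the library is installed (pip install nltk) and the tokenizer data has been downloaded; the sample sentence is made up for illustration.

import nltk
from nltk import FreqDist
from nltk.tokenize import word_tokenize

nltk.download('punkt')  # one-time download of the tokenizer data
                        # (newer NLTK versions may ask for 'punkt_tab')

text = "Kevin Bacon is an American actor. Bacon has appeared in many films."
tokens = word_tokenize(text)   # split the text into word tokens
freq = FreqDist(tokens)        # count how often each token appears
print(freq.most_common(3))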

 
