# Web crawler — Lianjia (locating elements with BeautifulSoup)


from bs4 import BeautifulSoup
import requests

# Scrape the second-hand housing listings for one community on Lianjia
# (the `sug=` query string is the URL-encoded community name) and print
# the title, address, flood (building/location) and follow-info line of
# each listing card.
url = 'https://bj.lianjia.com/ershoufang/c1111027378138/?sug=%E6%B5%81%E6%98%9F%E8%8A%B1%E5%9B%AD%E4%B8%89%E5%8C%BA'

# NOTE(review): many sites reject requests' default User-Agent — send a
# browser-like one. A timeout prevents the script from hanging forever.
headers = {'User-Agent': 'Mozilla/5.0'}
response = requests.get(url, headers=headers, timeout=10)
response.raise_for_status()  # fail loudly on 4xx/5xx instead of parsing an error page

# with open('lianjia.html', 'wb') as f:
#     f.write(response.content)

soup = BeautifulSoup(response.text, 'lxml')
# One <li class="clear LOGCLICKDATA"> per listing card; each card keeps
# its text fields inside <div class="info clear">.
for li_tag in soup.select('li.clear.LOGCLICKDATA'):
    for field in ('title', 'address', 'flood', 'followInfo'):
        # select_one returns None when a field is absent, so a listing
        # with a missing div no longer crashes the whole run.
        div = li_tag.select_one('div.info.clear > div.' + field)
        print(div.text if div is not None else '')

# posted on 2018-08-23 21:22  luwanhe  阅读(558)  评论(0)    收藏  举报

# 导航