Simulated Login + Data Scraping (Python + Selenium)

The following code scrapes the work experience of some scholars from LinkedIn and is for reference only. Warning: do not scrape a large volume in one go or your account will get banned — don't ask me how I know.

# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from bs4 import BeautifulSoup

# username, password and scholar_name below are placeholders — supply your own values
driver = webdriver.Chrome()
driver.get('https://www.linkedin.com/')
# Wait for the page to finish loading
time.sleep(1)
# Simulated login
driver.find_element_by_id('login-email').send_keys(username)
driver.find_element_by_id('login-password').send_keys(password)
# Submit the login form
driver.find_element_by_id('login-submit').send_keys(Keys.ENTER)
time.sleep(1)
# Search: the search box is the first <input> on the page
driver.find_element_by_tag_name('input').send_keys(scholar_name)
driver.find_element_by_tag_name('input').send_keys(Keys.ENTER)
time.sleep(1)
# Collect every candidate profile on the current results page
soup = BeautifulSoup(driver.page_source, 'lxml')
items = soup.find_all('div', {'class': 'search-result__wrapper'})
n = 0
for item in items:
    n += 1
    # Relative link to this candidate's profile page
    href = item.find('div', {'class': 'search-result__image-wrapper'}).find('a')['href']
    driver.get('https://www.linkedin.com' + href)
    time.sleep(3)
    profile_soup = BeautifulSoup(driver.page_source, 'lxml')
    # Each <li> of this class is one experience entry on the profile
    entries = profile_soup.find_all('li', {'class': 'pv-profile-section__card-item pv-position-entity ember-view'})
    print(str(n) + ':')
    for entry in entries:
        print(entry.find('div', {'class': 'pv-entity__summary-info'}).get_text().replace('\n', ''))
driver.close()
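
The fixed time.sleep() calls above are fragile: if the page loads slowly the element lookups fail, and if it loads fast you wait for nothing. Selenium's explicit waits handle this better. Below is a minimal sketch of the login step using WebDriverWait, assuming Selenium 4 (where find_element_by_* was removed in favor of By locators); the element IDs are the 2017-era ones from the code above, and LinkedIn's markup has changed since, so treat them as placeholders.

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Sketch only: 'login-email' etc. are the 2017-era IDs used above, kept as placeholders
driver = webdriver.Chrome()
driver.get('https://www.linkedin.com/')
wait = WebDriverWait(driver, 10)  # poll for up to 10 seconds

# Block until the login form actually exists instead of sleeping a fixed second
email_box = wait.until(EC.presence_of_element_located((By.ID, 'login-email')))
email_box.send_keys(username)
driver.find_element(By.ID, 'login-password').send_keys(password)
driver.find_element(By.ID, 'login-submit').send_keys(Keys.ENTER)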
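
On the ban warning at the top: rapid, evenly spaced requests are easy for the site to flag. A simple mitigation is to randomize the pause between profile visits and cap the volume per session. A minimal sketch follows; the 3–8 second range is an arbitrary assumption, not a known-safe threshold.

import random
import time

def polite_pause(low=3.0, high=8.0):
    # Sleep a random interval so requests don't arrive on a fixed beat
    time.sleep(random.uniform(low, high))

# In the profile loop above, replace time.sleep(3) with polite_pause(),
# and visit only a handful of profiles per session, e.g. items[:20].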
