Assignment 5
Task ①:
Requirements:
Become proficient with Selenium: locating HTML elements, scraping Ajax-rendered pages, and waiting for HTML elements to load.
Use the Selenium framework to crawl product information and images for one category of goods on JD.
Candidate site: http://www.jd.com/
Keyword: 手机 (mobile phone)
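The listing below mostly relies on fixed `time.sleep()` calls; for the "waiting for HTML elements" part of the requirement, a minimal explicit-wait sketch (reusing the JD search-box id `key` that the listing also uses) could look like this:

```python
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome()
driver.get("http://www.jd.com/")
# Poll every 0.5 s, for at most 10 s, until the search box exists in the DOM,
# instead of sleeping for a fixed interval.
keyInput = WebDriverWait(driver, 10, 0.5).until(
    EC.presence_of_element_located((By.ID, "key")))
keyInput.send_keys("手机")
```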
Code:
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
import urllib.request
import urllib.parse
import threading
import sqlite3
import os
import datetime
import time

class MySpider:
    headers = {"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre"}
    imagePath = "download"

    def startUp(self, url, key):
        chrome_options = Options()
        # chrome_options.add_argument('--headless')
        # chrome_options.add_argument('--disable-gpu')
        self.driver = webdriver.Chrome(options=chrome_options)
        # Initializing variables
        self.threads = []
        self.No = 0
        self.imgNo = 0
        self.page_num = 1  # current page counter
        # Initializing database
        try:
            self.con = sqlite3.connect("phones.db")
            self.cursor = self.con.cursor()
            try:
                # Drop the table if it already exists
                self.cursor.execute("drop table phones")
            except:
                pass
            try:
                # Create a fresh table
                sql = "create table phones (mNo varchar(32) primary key, mMark varchar(256), mPrice varchar(32), mNote varchar(1024), mFile varchar(256))"
                self.cursor.execute(sql)
            except:
                pass
        except Exception as err:
            print(err)
        # Recreate an empty image directory
        try:
            if not os.path.exists(MySpider.imagePath):
                os.mkdir(MySpider.imagePath)
            images = os.listdir(MySpider.imagePath)
            for img in images:
                s = os.path.join(MySpider.imagePath, img)
                os.remove(s)
        except Exception as err:
            print(err)
        self.driver.get(url)
        keyInput = self.driver.find_element_by_id("key")
        keyInput.send_keys(key)
        keyInput.send_keys(Keys.ENTER)

    def closeUp(self):
        try:
            self.con.commit()
            self.con.close()
            self.driver.close()
        except Exception as err:
            print(err)

    def insertDB(self, mNo, mMark, mPrice, mNote, mFile):
        try:
            sql = "insert into phones (mNo,mMark,mPrice,mNote,mFile) values (?,?,?,?,?)"
            self.cursor.execute(sql, (mNo, mMark, mPrice, mNote, mFile))
        except Exception as err:
            print(err)

    def showDB(self):
        try:
            con = sqlite3.connect("phones.db")
            cursor = con.cursor()
            print("%-8s%-16s%-8s%-16s%s" % ("No", "Mark", "Price", "Image", "Note"))
            cursor.execute("select mNo,mMark,mPrice,mFile,mNote from phones order by mNo")
            rows = cursor.fetchall()
            for row in rows:
                print("%-8s %-16s %-8s %-16s %s" % (row[0], row[1], row[2], row[3], row[4]))
            con.close()
        except Exception as err:
            print(err)

    def download(self, src1, src2, mFile):
        # Try the src URL first, then fall back to the data-lazy-img URL
        data = None
        if src1:
            try:
                req = urllib.request.Request(src1, headers=MySpider.headers)
                resp = urllib.request.urlopen(req, timeout=10)
                data = resp.read()
            except:
                pass
        if not data and src2:
            try:
                req = urllib.request.Request(src2, headers=MySpider.headers)
                resp = urllib.request.urlopen(req, timeout=10)
                data = resp.read()
            except:
                pass
        if data:
            print("download begin", mFile)
            fobj = open(os.path.join(MySpider.imagePath, mFile), "wb")
            fobj.write(data)
            fobj.close()
            print("download finish", mFile)

    def processSpider(self):
        try:
            time.sleep(1)
            print(self.driver.current_url)
            lis = self.driver.find_elements_by_xpath("//div[@id='J_goodsList']//li[@class='gl-item']")
            for li in lis:
                # The image URL is either in the src or in the data-lazy-img attribute
                try:
                    src1 = li.find_element_by_xpath(".//div[@class='p-img']//a//img").get_attribute("src")
                except:
                    src1 = ""
                try:
                    src2 = li.find_element_by_xpath(".//div[@class='p-img']//a//img").get_attribute("data-lazy-img")
                except:
                    src2 = ""
                try:
                    price = li.find_element_by_xpath(".//div[@class='p-price']//i").text
                except:
                    price = "0"
                try:
                    note = li.find_element_by_xpath(".//div[@class='p-name p-name-type-2']//em").text
                    mark = note.split(" ")[0]
                    mark = mark.replace(",", "")
                    note = note.replace(",", "")
                except:
                    note = ""
                    mark = ""
                self.No = self.No + 1
                no = str(self.No).zfill(6)  # zero-pad the record number to six digits
                print(no, mark, price)
                if src1:
                    src1 = urllib.parse.urljoin(self.driver.current_url, src1)
                    p = src1.rfind(".")
                    mFile = no + src1[p:]
                elif src2:
                    src2 = urllib.parse.urljoin(self.driver.current_url, src2)
                    p = src2.rfind(".")
                    mFile = no + src2[p:]
                if src1 or src2:
                    # Download each image in its own (non-daemon) thread
                    T = threading.Thread(target=self.download, args=(src1, src2, mFile))
                    T.daemon = False
                    T.start()
                    self.threads.append(T)
                else:
                    mFile = ""
                self.insertDB(no, mark, price, note, mFile)
            # Keep paging until the "next" button is disabled or enough pages are done
            try:
                self.driver.find_element_by_xpath("//span[@class='p-num']//a[@class='pn-next disabled']")
            except:
                nextPage = self.driver.find_element_by_xpath("//span[@class='p-num']//a[@class='pn-next']")
                time.sleep(10)
                if self.page_num <= 4:
                    self.page_num += 1
                    nextPage.click()
                    self.processSpider()
        except Exception as err:
            print(err)

    def executeSpider(self, url, key):
        starttime = datetime.datetime.now()
        print("Spider starting......")
        self.startUp(url, key)
        print("Spider processing......")
        self.processSpider()
        print("Spider closing......")
        self.closeUp()
        for t in self.threads:
            t.join()
        print("Spider completed......")
        endtime = datetime.datetime.now()
        elapsed = (endtime - starttime).seconds
        print("Total ", elapsed, " seconds elapsed")

url = "http://www.jd.com"
spider = MySpider()
while True:
    print("1. Crawl")
    print("2. Show")
    print("3. Exit")
    s = input("Choose (1, 2, 3): ")
    if s == "1":
        spider.executeSpider(url, "手机")
        continue
    elif s == "2":
        spider.showDB()
        continue
    elif s == "3":
        break
Result:
Reflection: this part is mainly a reproduction of the code from the textbook; once understood, it is not particularly difficult overall.
Task ②:
Requirements:
Become proficient with Selenium: locating HTML elements, scraping Ajax-rendered pages, and waiting for HTML elements to load.
Use the Selenium framework plus a MySQL storage pipeline to crawl stock data for the three boards 沪深A股, 上证A股, and 深证A股.
Candidate site: Eastmoney (东方财富网): http://quote.eastmoney.com/center/gridlist.html#hs_a_board
Code:
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import sqlite3
import time

class Spider:
    header = {
        "User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072531 Minefield/3.0.2pre"
    }

    def start(self, url):
        # Open the page in a headless simulated browser
        chrome_options = Options()
        chrome_options.add_argument("--headless")
        chrome_options.add_argument("--disable-gpu")
        self.driver = webdriver.Chrome(options=chrome_options)
        self.count = 0
        try:
            self.con = sqlite3.connect("stock.db")
            self.cursor = self.con.cursor()
            try:
                # Drop the table if it already exists
                self.cursor.execute("drop table stock")
            except:
                pass
            try:
                sql = "create table stock(count varchar(256), stockname varchar(256), num varchar(256), lastest_pri varchar(64), ddf varchar(64), dde varchar(64), cjl varchar(64), cje varchar(32), zf varchar(32), top varchar(32), low varchar(32), today varchar(32), yestd varchar(32))"
                self.cursor.execute(sql)
            except:
                pass
        except Exception as err:
            print(err)
        self.driver.get(url)
        time.sleep(3)  # give the Ajax-rendered table time to load

    def closeUp(self):
        # Close the database connection and the browser
        try:
            self.con.commit()
            self.con.close()
            self.driver.close()
        except Exception as err:
            print(err)

    def insertDB(self, count, stockname, num, lastest_pri, ddf, dde, cjl, cje, zf, top, low, today, yestd):
        # Insert one crawled row into the database
        try:
            sql = "insert into stock (count,stockname,num,lastest_pri,ddf,dde,cjl,cje,zf,top,low,today,yestd) values (?,?,?,?,?,?,?,?,?,?,?,?,?)"
            self.cursor.execute(sql, (count, stockname, num, lastest_pri, ddf, dde, cjl, cje, zf, top, low, today, yestd))
        except Exception as err:
            print(err)

    def showDB(self):
        try:
            con = sqlite3.connect("stock.db")
            cursor = con.cursor()
            # Header row shown on the console
            print("count", "stockname", "num", "lastest_pri", "ddf", "dde", "cjl", "cje", "zf", "top", "low", "today", "yestd")
            # Fetch all rows, ordered by insertion number
            cursor.execute("select count,stockname,num,lastest_pri,ddf,dde,cjl,cje,zf,top,low,today,yestd from stock order by count")
            rows = cursor.fetchall()
            for row in rows:
                print(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10], row[11], row[12])
            con.close()
        except Exception as err:
            print(err)

    def execute(self, url):
        print("Starting......")
        self.start(url)
        print("Processing......")
        self.process()
        print("Closing......")
        self.closeUp()
        print("Completed......")

    def process(self):
        try:
            lis = self.driver.find_elements_by_xpath("//div[@class='listview full']/table[@id='table_wrapper-table']/tbody/tr")
            for li in lis:
                stockname = li.find_element_by_xpath(".//td[@class='mywidth']/a[@href]").text
                num = li.find_element_by_xpath(".//td[position()=2]/a[@href]").text
                lastest_pri = li.find_element_by_xpath(".//td/span[position()=1]").text
                ddf = li.find_element_by_xpath(".//td[position()=6]/span").text
                dde = li.find_element_by_xpath(".//td[position()=7]/span").text
                cjl = li.find_element_by_xpath(".//td[position()=8]").text
                cje = li.find_element_by_xpath(".//td[position()=9]").text
                zf = li.find_element_by_xpath(".//td[position()=10]").text
                top = li.find_element_by_xpath(".//td[position()=11]/span").text
                low = li.find_element_by_xpath(".//td[position()=12]/span").text
                today = li.find_element_by_xpath(".//td[position()=13]/span").text
                yestd = li.find_element_by_xpath(".//td[position()=14]").text
                self.count = self.count + 1
                self.insertDB(self.count, stockname, num, lastest_pri, ddf, dde, cjl, cje, zf, top, low, today, yestd)
        except Exception as err:
            print(err)

url = "http://quote.eastmoney.com/center/gridlist.html#hs_a_board"
spider = Spider()
while True:
    print("1. Crawl")
    print("2. Show")
    print("3. Exit")
    s = input("Choose (1, 2, 3): ")
    if s == "1":
        spider.execute(url)
        continue
    elif s == "2":
        spider.showDB()
        continue
    elif s == "3":
        break
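Note that the listing above only crawls the 沪深A股 board, while the assignment also asks for 上证A股 and 深证A股. One way to cover all three is to iterate over the board URLs, as in the sketch below; the `sh_a_board` / `sz_a_board` fragments are assumed by analogy with the `hs_a_board` fragment in the assignment URL, and a refresh is forced because changing only the `#fragment` may not reload the grid:

```python
import time

# Board URLs; sh_a_board / sz_a_board are assumed from the hs_a_board pattern.
boards = [
    "http://quote.eastmoney.com/center/gridlist.html#hs_a_board",
    "http://quote.eastmoney.com/center/gridlist.html#sh_a_board",
    "http://quote.eastmoney.com/center/gridlist.html#sz_a_board",
]
spider = Spider()
spider.start(boards[0])      # opens the browser and resets the stock table
spider.process()             # crawl 沪深A股
for board_url in boards[1:]:
    spider.driver.get(board_url)   # changing only the #fragment...
    spider.driver.refresh()        # ...may not reload the grid, so force it
    time.sleep(3)                  # crude wait for the Ajax-rendered table
    spider.process()               # crawl 上证A股 / 深证A股
spider.closeUp()
```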
Result:
Using pymysql kept producing the following error:
so I switched to sqlite3:
Reflection: this further deepened my understanding of Selenium; the main difficulty in this part was that pymysql would not run properly on my machine......
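For reference, the MySQL route the assignment asks for would look roughly like the sketch below with pymysql (the connection parameters are placeholders for a local MySQL server; untested, since pymysql would not run on my machine):

```python
import pymysql

# Placeholder connection parameters for a local MySQL server.
con = pymysql.connect(host="127.0.0.1", port=3306, user="root",
                      password="password", database="stocks", charset="utf8mb4")
cursor = con.cursor()
cursor.execute("""
    CREATE TABLE IF NOT EXISTS stock (
        count VARCHAR(32), stockname VARCHAR(256), num VARCHAR(32),
        lastest_pri VARCHAR(64), ddf VARCHAR(64), dde VARCHAR(64),
        cjl VARCHAR(64), cje VARCHAR(32), zf VARCHAR(32), top VARCHAR(32),
        low VARCHAR(32), today VARCHAR(32), yestd VARCHAR(32)
    )
""")
# pymysql uses %s placeholders where sqlite3 uses ?
sql = ("INSERT INTO stock (count,stockname,num,lastest_pri,ddf,dde,cjl,cje,"
       "zf,top,low,today,yestd) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)")
cursor.execute(sql, tuple(str(i) for i in range(13)))  # dummy row for illustration
con.commit()
con.close()
```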
Task ③:
Requirements:
Become proficient with Selenium: locating HTML elements, simulating user login, scraping Ajax-rendered pages, and waiting for HTML elements to load.
Use the Selenium framework plus MySQL to crawl course information from the China MOOC site (course number, course name, school, lead teacher, team members, enrollment count, course schedule, course description).
Candidate site: China MOOC (中国大学MOOC): https://www.icourse163.org
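The listing below crawls a channel page that does not require logging in, so the simulated-login part of the requirement is not exercised there. A rough sketch of what it could look like is given here; every locator (`iframe`, `phone`, `password`, `loginBtn`) and credential is a hypothetical placeholder, not the real icourse163 DOM:

```python
from selenium import webdriver
from selenium.webdriver.common.by import By
import time

driver = webdriver.Chrome()
driver.get("https://www.icourse163.org")
time.sleep(2)
# Login forms on pages like this are often rendered inside an iframe, so
# switch into it before locating the inputs; all locators below are
# hypothetical placeholders.
driver.switch_to.frame(driver.find_element(By.TAG_NAME, "iframe"))
driver.find_element(By.ID, "phone").send_keys("13800000000")  # placeholder account
driver.find_element(By.ID, "password").send_keys("secret")    # placeholder password
driver.find_element(By.ID, "loginBtn").click()                # placeholder button
driver.switch_to.default_content()
time.sleep(3)  # crude wait for the logged-in page to render
```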
Code:
import re
import time
import sqlite3
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait

class MySpider:
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre"}

    def startUp(self, url):
        chrome_options = Options()
        chrome_options.add_argument('--headless')
        chrome_options.add_argument('--disable-gpu')
        chrome_options.add_argument('--no-sandbox')
        self.driver = webdriver.Chrome(options=chrome_options)
        self.driver.get(url)
        self.count = 0  # number of records crawled
        self.page = 1   # number of pages crawled
        # Wait until the course list has been rendered by the page's JavaScript
        locator = (By.XPATH, "//div[@id='app']/div/div/div[2]/div[2]/div/div[2]/div[1]")
        WebDriverWait(self.driver, 10, 0.5).until(expected_conditions.presence_of_element_located(locator))
        # Connect to the SQLite database and create the cursor self.cursor
        try:
            self.con = sqlite3.connect("mooc.db")
            self.cursor = self.con.cursor()
            try:
                self.cursor.execute("drop table mooc")
            except:
                pass
            try:
                sql = "create table mooc(Id int, cCourse varchar(32), cCollege varchar(32), cTeacher varchar(32), cTeam varchar(32), cCount varchar(32), cProcess varchar(32), cBrief varchar(1024), primary key (Id))"
                self.cursor.execute(sql)
            except:
                pass
        except Exception as err:
            print(err)

    def showDB(self):
        try:
            con = sqlite3.connect("mooc.db")
            con.row_factory = sqlite3.Row  # allows access to columns by name
            cursor = con.cursor()
            # Formatted console output
            fm = "{0:^4}\t{1:^20}\t{2:^16}\t{3:^16}\t{4:^18}\t{5:^12}\t{6:^30}\t{7:^50}"
            titles = ["课程号", "课程名称", "学校名称", "主讲教师", "团队成员", "参加人数", "课程进度", "课程简介"]
            print(fm.format(*titles))
            cursor.execute("SELECT * FROM mooc")
            rows = cursor.fetchall()
            for row in rows:
                print(fm.format(row["Id"], row["cCourse"], row["cCollege"], row["cTeacher"],
                                row["cTeam"], row["cCount"], row["cProcess"], row["cBrief"]))
            con.close()
            print("Database connection closed")
        except Exception as err:
            print(err)

    def closeUp(self):
        try:
            self.con.commit()
            self.con.close()
            self.driver.close()
            print("Crawled", self.count, "records in total")
        except Exception as err:
            print(err)

    def processSpider(self):
        try:
            divs = self.driver.find_elements_by_xpath(
                "//div[@class='_1aoKr']/div[@class='_1gBJC']/div[@class='_2mbYw']")
            for div in divs:
                try:
                    course = div.find_element_by_xpath(".//div[@class='WFpCn']/h3").text
                    college = div.find_element_by_xpath(".//div[@class='WFpCn']/p").text
                    teacher = div.find_element_by_xpath(".//div[@class='WFpCn']/div").text
                    count = re.sub(r"\D", "", div.find_element_by_xpath(".//div[@class='jvxcQ']//span").text)
                    # Click the course card to open its detail page in a new tab
                    self.driver.execute_script("arguments[0].click();",
                                               div.find_element_by_xpath(".//div[@class='_3KiL7']"))
                    # Switch to the newly opened tab
                    self.driver.switch_to.window(self.driver.window_handles[-1])
                    # Wait for the new page to load
                    time.sleep(2)
                    process = self.driver.find_element_by_xpath(
                        "//div[@class='course-enroll-info_course-info_term-info_term-time']/span[2]").text
                    brief = self.driver.find_element_by_xpath("//div[@id='j-rectxt2']").text
                    team = self.driver.find_elements_by_xpath(
                        "//div[@class='um-list-slider_con_item']//h3[@class='f-fc3']")
                    cTeam = ""
                    for t in team:
                        cTeam += " " + t.text
                    # Close the detail tab and return to the list page
                    self.driver.close()
                    self.driver.switch_to.window(self.driver.window_handles[0])
                    self.count += 1
                    id = self.count
                    self.insertDB(id, course, college, teacher, cTeam, count, process, brief)
                except:
                    pass
            print("Page", self.page, "done")
            # Click the next-page button up to four times (five pages in total)
            if self.page <= 4:
                self.page += 1
                nextPage = self.driver.find_element_by_xpath(
                    "//div[@id='app']/div/div/div[2]/div[2]/div/div[2]/div[2]/div/a[10]")
                nextPage.click()
                time.sleep(10)
                self.processSpider()
        except Exception as err:
            print(err)

    def insertDB(self, id, course, college, teacher, cTeam, count, process, brief):
        try:
            sql = "insert into mooc(Id,cCourse,cCollege,cTeacher,cTeam,cCount,cProcess,cBrief) values(?,?,?,?,?,?,?,?)"
            self.cursor.execute(sql, (id, course, college, teacher, cTeam, count, process, brief))
        except Exception as err:
            print(err)
            print("Insert failed")

    def executeSpider(self, url):
        print("Spider starting......")
        self.startUp(url)
        print("Spider processing......")
        self.processSpider()
        print("Spider closing......")
        self.closeUp()

url = "https://www.icourse163.org/channel/3008.htm"
spider = MySpider()
while True:
    print("1. Crawl")
    print("2. Show")
    print("3. Exit")
    s = input("Choose (1, 2, 3): ")
    if s == "1":
        spider.executeSpider(url)
        continue
    elif s == "2":
        spider.showDB()
        continue
    elif s == "3":
        break