# -*- coding: utf-8 -*-
"""
Created on Sun Sep 13 21:32:25 2020
@author: ydc
"""
import re
import requests
from urllib import error
from bs4 import BeautifulSoup
import os

num = 0
numPicture = 0
file = ''
List = []


def Find(url, A):
    global List
    print('Counting how many images there are in total, please wait...')
    t = 0
    i = 1
    s = 0
    while t < 1000:
        Url = url + str(t)
        try:  # tweaked here
            Result = A.get(Url, timeout=7, allow_redirects=False)
        except BaseException:
            t = t + 60
            continue
        else:
            result = Result.text
            # Use a regular expression to find the image urls first
            pic_url = re.findall('"objURL":"(.*?)",', result, re.S)
            s += len(pic_url)
            if len(pic_url) == 0:
                break
            else:
                List.append(pic_url)
                t = t + 60
    return s
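# Usage note (illustrative, not part of the original script): the flip endpoint
# pages through results via the trailing pn value in steps of 60, which is why
# t advances by 60 above; Find() returns the total count and, as a side effect,
# appends each page's url list to the global List. A standalone call could look
# like this (the keyword 'cat' is just an example):
#   total = Find('https://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word=cat&pn=', requests.Session())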
def recommend(url):
    Re = []
    try:
        html = requests.get(url, allow_redirects=False)
    except error.HTTPError as e:
        return Re  # return the empty list so callers can still iterate over it
    else:
        html.encoding = 'utf-8'
        bsObj = BeautifulSoup(html.text, 'html.parser')
        div = bsObj.find('div', id='topRS')
        if div is not None:
            listA = div.findAll('a')
            for i in listA:
                if i is not None:
                    Re.append(i.get_text())
        return Re
def dowmloadPicture(html, keyword):
    global num
    # t = 0
    # Use a regular expression to find the image urls first
    pic_url = re.findall('"objURL":"(.*?)",', html, re.S)
    print('Found images for keyword ' + keyword + ', starting download...')
    for each in pic_url:
        print('Downloading image ' + str(num + 1) + ', image url: ' + str(each))
        try:
            if each is not None:
                pic = requests.get(each, timeout=7)
            else:
                continue
        except BaseException:
            print('Error: the current image cannot be downloaded')
            continue
        else:
            # os.path.join replaces the original file + r'\\' concatenation so
            # the path also works outside Windows
            string = os.path.join(file, keyword + '_' + str(num) + '.jpg')
            fp = open(string, 'wb')
            fp.write(pic.content)
            fp.close()
            num += 1
            if num >= numPicture:
                return


if __name__ == '__main__':  # main entry point
    ###############################  # this part was added
    headers = {
        'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0',
        'Upgrade-Insecure-Requests': '1'
    }
    A = requests.Session()
    A.headers = headers
    ###############################
    word = input('Enter a search keyword (a name, a place, etc.): ')
    # add = 'http://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word=%E5%BC%A0%E5%A4%A9%E7%88%B1&pn=120'
    url = 'https://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word=' + word + '&pn='  # tweaked here
    tot = Find(url, A)
    Recommend = recommend(url)  # record the related suggestions
    print('Detected %d images in total for "%s"' % (tot, word))
    numPicture = int(input('How many images do you want to download? '))
    file = input('Name a folder to store the images: ')
    y = os.path.exists(file)
    if y == 1:
        print('That folder already exists, please enter another name')
        file = input('Name a folder to store the images: ')
        os.mkdir(file)
    else:
        os.mkdir(file)
    t = 0
    tmp = url
    while t < numPicture:
        try:
            url = tmp + str(t)  # tweaked here
            result = A.get(url, timeout=10, allow_redirects=False)
        except error.HTTPError as e:
            print('Network error, please check the connection and try again')
            t = t + 60
        else:
            dowmloadPicture(result.text, word)
            t = t + 60
    print('Search finished, thanks for using this script')
    print('You may also like:')
    for r in Recommend:  # renamed from re so the re module is not shadowed
        print(r, end=' ')
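For quick testing, here is a minimal non-interactive sketch of the same objURL extraction on a single flip page. It reuses the regex from Find() and dowmloadPicture() above; the keyword 'cat' and pn=0 are only example inputs, and Baidu may change or block this endpoint at any time.

# Minimal sketch: fetch one flip page and list the objURLs it contains.
import re
import requests

headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0'}
page = requests.get('https://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word=cat&pn=0',
                    headers=headers, timeout=7, allow_redirects=False)
pic_urls = re.findall('"objURL":"(.*?)",', page.text, re.S)  # same pattern as above
print('found %d urls' % len(pic_urls))
for u in pic_urls[:3]:
    print(u)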
Scraping Bing images
import os
import re
import time

import requests
from lxml import etree
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}


# Save a single image
def save_img(url):
    img_name = url[-10:]
    name = re.sub('/', '', img_name)  # img_name may contain '/', replace it with nothing
    try:
        res = requests.get(url, headers=headers)
    except OSError:
        print('An error occurred; the failing url is:', url)
    else:
        with open('img/' + name, 'wb') as f:
            try:
                f.write(res.content)
            except OSError:
                print('Could not save; the url is:', url)


# Collect all image urls from one result page
def parse_img(url):
    response = requests.get(url, headers=headers)
    response.encoding = response.apparent_encoding
    data = response.content.decode('utf-8', 'ignore')
    html = etree.HTML(data)
    conda_list = html.xpath('//a[@class="iusc"]/@m')
    all_url = []  # holds every url that was found
    for i in conda_list:
        img_url = re.search('"murl":"(.*?)"', i).group(1)
        all_url.append(img_url)
    return all_url
# Main function
def main():
    os.makedirs('img', exist_ok=True)  # save_img() writes into img/, so make sure it exists
    for i in range(0, 120, 35):
        url = 'https://cn.bing.com/images/async?q=%E7%9F%BF%E6%B3%89%E6%B0%B4%E7%93%B6&first=' + str(i) + '&count=35&relp=35&scenario=ImageBasicHover&datsrc=N_I&layout=RowBased&mmasync=1'
        img_data = parse_img(url)
        for img_url in img_data:
            save_img(img_url)
            print(img_url)
        time.sleep(10)


if __name__ == '__main__':
    main()
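Each m attribute that parse_img() scrapes is itself a JSON object, so the "murl" regex can be swapped for json.loads. A sketch under that assumption (same iusc markup and headers as above; parse_img_json is a hypothetical name, not part of the original script):

# Sketch: parse the m attribute as JSON instead of regexing out "murl".
import json

import requests
from lxml import etree

headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}


def parse_img_json(url):
    response = requests.get(url, headers=headers)
    html = etree.HTML(response.content.decode('utf-8', 'ignore'))
    return [json.loads(m)['murl'] for m in html.xpath('//a[@class="iusc"]/@m')]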