#file-name: pdf_download.py
import os
import requests
from bs4 import BeautifulSoup
def download_file(url, index):
    """Stream *url* to a file in the current directory and return its name.

    The filename is the last path segment of *url* with URL-encoded spaces
    ('%20') replaced by underscores.  *index* is unused but kept so existing
    callers that pass a position string keep working.

    Raises requests.HTTPError on a non-2xx response instead of silently
    saving an HTML error page under a .pdf name.
    """
    # Derive a filesystem-friendly name from the URL's last segment.
    local_filename = url.split('/')[-1].replace('%20', '_')
    # stream=True avoids loading the whole file into memory; the context
    # manager guarantees the connection is released even on error
    # (the original never closed the response).
    with requests.get(url, stream=True) as r:
        r.raise_for_status()
        with open(local_filename, 'wb') as f:
            for chunk in r.iter_content(chunk_size=8192):
                if chunk:  # filter out keep-alive chunks
                    f.write(chunk)
            # no per-chunk flush: let the buffered layer batch writes;
            # the with-block flushes and closes on exit
    return local_filename
root_link = "https://rockchip.fr/"
# Site sections to mirror; each becomes a local directory.
sub = ["ampak", "chipspark", "firefly", "geekbox", "marsboard", "radxa",
       "RK312X TRM", "RK3288 TRM"]

for i in sub:
    # Local directory name: spaces are not filesystem-friendly.
    directory = i.replace(" ", "_")
    # BUG FIX: the original tested os.path.exists(i) but created
    # i.replace(" ", "_"), so re-runs crashed with FileExistsError for the
    # names containing spaces.  Check the name that is actually created,
    # and do it before fetching so a skipped section costs no request.
    if os.path.exists(directory):
        continue  # already mirrored on a previous run
    r = requests.get(root_link + i)
    os.mkdir(directory)
    os.chdir(directory)
    print(root_link + i)
    if r.status_code == 200:
        # Explicit parser: bs4 without one warns and picks whatever backend
        # is installed, making results environment-dependent.
        soup = BeautifulSoup(r.text, "html.parser")
        index = 1
        for link in soup.find_all('a'):
            href = link.get('href')
            if href is None:
                continue  # anchor without href would crash the concatenation
            new_link = root_link + i + '/' + href
            # endswith accepts a tuple: one call instead of an or-chain.
            if new_link.endswith((".pdf", ".DSN", ".apk")):
                file_path = download_file(new_link, str(index))
                print("downloading:" + new_link + " -> " + file_path)
                index += 1
        print(i + " download finished")
    else:
        print("errors occur.")
        # exit() is a site-module convenience not guaranteed outside the
        # REPL; SystemExit is the proper way to abort a script.
        raise SystemExit(1)
    os.chdir("..")
print("all download finished")