# Download a large file
import requests

# file_url should already hold the URL of the file to download
chapters = requests.get(file_url, stream=True)  # stream=True so the body is fetched chunk by chunk instead of all at once
with open("xxx.tar.gz", 'wb') as f:
    for chunk in chapters.iter_content(chunk_size=1024):
        if chunk:  # skip keep-alive chunks
            f.write(chunk)
print('Download finished...')
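# A minimal sketch (not part of the original) of the same streaming download with a
# progress readout; it assumes the server sends a Content-Length header, and the
# name download_with_progress is hypothetical.
def download_with_progress(url, path):
    with requests.get(url, stream=True) as r:
        r.raise_for_status()
        total = int(r.headers.get('Content-Length', 0))
        done = 0
        with open(path, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024 * 1024):
                if chunk:
                    f.write(chunk)
                    done += len(chunk)
                    if total:
                        print("%.1f%% downloaded" % (100.0 * done / total))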
# Download a small file
import requests

image_url = "https://www.python.org/static/community_logos/python-logo-master-v3-TM.png"
r = requests.get(image_url)  # create HTTP response object
with open("python_logo.png", 'wb') as f:
    f.write(r.content)  # small enough to hold in memory, so write it in one call
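# A hedged variant of the small-file download (added here, not from the original):
# the same request with a timeout and a status-code check before writing to disk.
r = requests.get(image_url, timeout=30)
if r.status_code == 200:
    with open("python_logo.png", 'wb') as f:
        f.write(r.content)
else:
    print("Request failed with status %d" % r.status_code)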
# Batch file download
import requests
from bs4 import BeautifulSoup

archive_url = "http://www-personal.umich.edu/~csev/books/py4inf/media/"

def get_video_links():
    # Fetch the archive page and collect the links of all .mp4 files it lists
    r = requests.get(archive_url)
    soup = BeautifulSoup(r.content, 'html5lib')
    links = soup.find_all('a')
    video_links = [archive_url + link['href'] for link in links
                   if link.get('href', '').endswith('mp4')]
    return video_links

def download_video_series(video_links):
    for link in video_links:
        file_name = link.split('/')[-1]
        print("Downloading file:%s" % file_name)
        r = requests.get(link, stream=True)
        # download started
        with open(file_name, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024 * 1024):
                if chunk:
                    f.write(chunk)
        print("%s downloaded!\n" % file_name)
    print("All videos downloaded!")
    return
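# A possible extension (an assumption, not part of the original): a more defensive
# batch loop that reports and skips a failed request instead of aborting the run.
def download_video_series_safe(video_links):
    for link in video_links:
        file_name = link.split('/')[-1]
        try:
            r = requests.get(link, stream=True, timeout=60)
            r.raise_for_status()
        except requests.exceptions.RequestException as exc:
            print("Skipping %s: %s" % (file_name, exc))
            continue
        with open(file_name, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024 * 1024):
                if chunk:
                    f.write(chunk)
        print("%s downloaded!" % file_name)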
if __name__ == "__main__":
    video_links = get_video_links()
    download_video_series(video_links)