Home > Software design >  Download multiple files from array, and place into desired directory using Python3
Download multiple files from array, and place into desired directory using Python3

Time:11-03

#Import desired libarary's -- Make HTTP Requests / Query DOM ELEMENTS

# Import desired libraries -- make HTTP requests / query DOM elements
import requests
from bs4 import BeautifulSoup as bs
import zipfile

# Request the NGA downloads page; the response (DOM) is stored in r
r = requests.get('https://earth-info.nga.mil/index.php?dir=coordsys&action=gars-20x20-dloads')

# Parse the response with Beautiful Soup and the default HTML parser
soup = bs(r.content, 'html.parser')
# Output is pure raw HTML DOM
# print(soup)

# Scan the DOM tree and collect each <area> tag's href as an absolute
# download URL.  (Fixed: the '+' between the base URL and the href was missing.)
files = ['https://earth-info.nga.mil/' + i['href'] for i in soup.select('area')]
# print(files)

# Download single file from the list
# firstUrl = files[0]

# Download every file in the list
for file in files:
    resp = requests.get(file, stream=True)
    # Name each archive after the last path segment of its URL.
    # (Fixed: '%s' % r embedded the Response repr in the filename, and the
    #  write used the undefined name 'downloadedfile'.)
    save_path = '/Users/iga0779/Downloads/%s.zip' % file.split('/')[-1]
    # 'with' guarantees the file handle is closed even if the write fails
    with open(save_path, 'wb') as filex:
        filex.write(resp.content)

I'm currently a little hung up on the next steps. I have selected the Downloads directory as the place where I would like the files to go, but I'm new to this and not sure how to write the files to that directory correctly.

CodePudding user response:

You can use open() as a context manager and download your files in chunks:

# Stream each archive to disk in 8 KiB chunks so large files
# never have to fit in memory all at once.
for url in files:
    with requests.get(url, stream=True) as resp:
        # Abort on any non-2xx status instead of writing an error page to disk
        resp.raise_for_status()
        # Name the local archive after the last path segment of the URL
        target = f'tmpZip/{url.split("/")[-1]}.zip'
        with open(target, 'wb') as out:
            for piece in resp.iter_content(chunk_size=8192):
                out.write(piece)

Example

import requests
from bs4 import BeautifulSoup as bs
import zipfile

# Request the NGA downloads page; the response (DOM) is stored in r
r = requests.get('https://earth-info.nga.mil/index.php?dir=coordsys&action=gars-20x20-dloads')

# Parse the response with Beautiful Soup and the default HTML parser
soup = bs(r.content, 'html.parser')
# Output is pure raw HTML DOM
# print(soup)

# Collect each <area> tag's href as an absolute download URL.
# (Fixed: the '+' between the base URL and the href was missing.)
files = ['https://earth-info.nga.mil/' + i['href'] for i in soup.select('area')]
# print(files)

def download_file(file):
    """Stream-download *file* into tmpZip/<name>.zip and return a status line.

    Raises requests.HTTPError (via raise_for_status) on a non-2xx response.
    Assumes the tmpZip/ directory already exists.
    """
    with requests.get(file, stream=True) as r:
        r.raise_for_status()
        with open(f'tmpZip/{file.split("/")[-1]}.zip', 'wb') as f:
            # 8 KiB chunks keep memory usage flat for large archives
            for chunk in r.iter_content(chunk_size=8192):
                f.write(chunk)
    return f'File: {file.split("/")[-1]}.zip -> downloaded'

# files sliced to the first three URLs from the result; delete [:3] to get all
for file in files[:3]:
    print(download_file(file))

Output

File: 180W60N.zip -> downloaded
File: 180W40N.zip -> downloaded
File: 180W20N.zip -> downloaded

CodePudding user response:

You can also try this

# Import desired libraries -- make HTTP requests / query DOM elements
import requests
from bs4 import BeautifulSoup as bs
import zipfile
import os
from zipfile import ZipFile
from io import BytesIO


# Browser-like headers so the download server does not reject the requests
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0",
    "Accept-Encoding": "*",
    "Connection": "keep-alive"
}
# Request the NGA downloads page; the response (DOM) is stored in r
r = requests.get('https://earth-info.nga.mil/index.php?dir=coordsys&action=gars-20x20-dloads')

# Parse the response with Beautiful Soup and the default HTML parser
soup = bs(r.content, 'html.parser')
# Output is pure raw HTML DOM
# print(soup)

# Collect each <area> tag's href as an absolute download URL.
# (Fixed: the '+' between the base URL and the href was missing.)
files = ['https://earth-info.nga.mil/' + i['href'] for i in soup.select('area')]
# print(files)

# Destination root for the extracted archives.
# (Fixed: in a raw string single backslashes suffice; r'C:\\...' would
#  produce literal doubled backslashes in the path.)
mydirname = r'C:\Users\User\Documents\Downloads'

for url in files:
    r = requests.get(url, headers=headers, stream=True)
    if r.status_code == 200:
        # One subfolder per archive, named after the last URL path segment
        newfoldername = r.url.split('/')[-1]
        path_ = os.path.join(mydirname, newfoldername)
        # (Fixed: the original os.mkdir created the folder in the current
        #  working directory, while extractall targeted path_ under
        #  mydirname, which might not exist.)
        os.makedirs(path_, exist_ok=True)
        # The zip is held in memory and unpacked straight into path_
        zipfile.ZipFile(BytesIO(r.content)).extractall(path_)


print('Finished...')
  • Related