Home > OS >  Scrapy downloading json-files from site?
Scrapy downloading json-files from site?

Time:12-21

I tried to create a Scrapy spider to download some JSON files from a site.

This is my Scrapy spider. (I first tested the spider so that it only outputs the link to the JSON file, which works fine — see the commented code below.) But I want to download the JSON files to a folder on my PC.

import scrapy

class spiderWords(scrapy.Spider):
  # Spider that walks kaikki.org's Spanish word index two levels deep and
  # tries to hand the per-page JSON download link to Scrapy's FilesPipeline.
  name = 'spiderWords'
  allowed_domains = ['kaikki.org']
  start_urls = ['https://kaikki.org/dictionary/Spanish/words.html']

  def parse(self, response):
    # First level: follow every link in the second <ul> to its detail page.
    tmpLinks = response.xpath("(//ul)[2]/li/a/@href").getall()
    for l in tmpLinks:
      l = response.urljoin(l)
      request = scrapy.Request(l,
                              callback=self.parseDetails)
      yield request

  def parseDetails(self, response):
    # Second level: same list structure, follow to the download page.
    tmpLinks2 = response.xpath("(//ul)[2]/li/a/@href").getall()
    for l2 in tmpLinks2:
      l2 = response.urljoin(l2)
      request = scrapy.Request(l2,
                              callback=self.parseDownload)
      yield request

  def parseDownload(self, response):
    # NOTE: this item class is re-defined on every call; it only needs to
    # be declared once.
    class DownfilesItem(scrapy.Item):
      file_urls = scrapy.Field()
      # BUG: missing call parentheses — this assigns the Field *class*
      # itself, not a Field() instance (compare the corrected answer code).
      files = scrapy.Field

    tmpDownloadLink = response.xpath("//p[contains(text(), 'JSON')]/a/@href").get()
    tmpDownloadLink = response.urljoin(tmpDownloadLink)
    item = DownfilesItem()
    # BUG: FilesPipeline expects 'file_urls' to be a *list* of URLs; a bare
    # string makes the pipeline silently skip the download.
    item['file_urls'] = tmpDownloadLink
    yield item
    # yield {
    #   "link": tmpDownloadLink,
    # }

And these are the changes I made in settings.py:

# Enable Scrapy's built-in FilesPipeline so that items carrying 'file_urls'
# are downloaded automatically.
ITEM_PIPELINES = {
  'scrapy.pipelines.files.FilesPipeline': 1,
}
# BUG: FilesPipeline reads the FILES_STORE setting, not IMAGES_STORE
# (IMAGES_STORE belongs to ImagesPipeline), so no files are ever saved here.
IMAGES_STORE = r'C:\DOWNLOAD\DATASETS\Spanish'

But unfortunately the download of the JSON files is not working.

How can I download the JSON files to the defined folder?

CodePudding user response:

You have two problems.

  1. item['file_urls'] should be a list.
  2. IMAGES_STORE should be FILES_STORE
import scrapy


class spiderWords(scrapy.Spider):
    """Crawl kaikki.org's Spanish word index and download the per-page JSON files.

    Requires in settings.py:
        ITEM_PIPELINES = {'scrapy.pipelines.files.FilesPipeline': 1}
        FILES_STORE = r'C:\DOWNLOAD\DATASETS\Spanish'   # FILES_STORE, not IMAGES_STORE
    """

    name = 'spiderWords'
    allowed_domains = ['kaikki.org']
    start_urls = ['https://kaikki.org/dictionary/Spanish/words.html']

    class DownfilesItem(scrapy.Item):
        # Declared once at class level instead of being re-created on every
        # parseDownload() call. FilesPipeline downloads each URL listed in
        # 'file_urls' and records the download results in 'files'.
        file_urls = scrapy.Field()
        files = scrapy.Field()

    def parse(self, response):
        """First level: follow each link in the second <ul> to its detail page."""
        for href in response.xpath("(//ul)[2]/li/a/@href").getall():
            yield scrapy.Request(response.urljoin(href),
                                 callback=self.parseDetails)

    def parseDetails(self, response):
        """Second level: follow each link to the page holding the JSON download."""
        for href in response.xpath("(//ul)[2]/li/a/@href").getall():
            yield scrapy.Request(response.urljoin(href),
                                 callback=self.parseDownload)

    def parseDownload(self, response):
        """Yield an item whose file_urls points at this page's JSON download link.

        Pages without a matching link are skipped instead of crashing on
        urljoin(None).
        """
        href = response.xpath("//p[contains(text(), 'JSON')]/a/@href").get()
        if href is None:
            return
        item = self.DownfilesItem()
        # FilesPipeline requires a list of URLs here, not a bare string.
        item['file_urls'] = [response.urljoin(href)]
        yield item
  • Related