Scraping several webpages from a website (newspaper archive) using RSelenium


I managed to scrape one page from a newspaper archive by following the explanations here.

Now I am trying to automate the process so that a whole list of pages can be scraped with a single script. Building the list of URLs was easy, since the newspaper's archive follows a consistent link pattern:

https://en.trend.az/archive/2021-XX-XX

The problem is writing a loop that scrapes data such as the title, date, time, and category. For simplicity, I tried to work only with article headlines from 2021-09-30 to 2021-10-02.

## Setting the date range

    d1 <- as.Date("2021-09-30")
    d2 <- as.Date("2021-10-02")

    list_of_url <- character()   # or str_c()

## Generating the subpage list
 
    for (i in format(seq(d1, d2, by = "days"), format = "%Y-%m-%d")) {
      list_of_url[i] <- str_c("https://en.trend.az", "/archive/", i)

      # Launching browser
      driver <- rsDriver(browser = c("firefox"))  # Version 93.0 (64-bit)
      remDr <- driver[["client"]]
      remDr$errorDetails
      remDr$navigate(list_of_url[i])

      remDr$findElement(using = "xpath", value = '/html/body/div[1]/div/div[1]/h1')$clickElement()

      webElem <- remDr$findElement("css", "body")

      # Scrolling to the end of the webpage, to load all articles
      for (i in 1:25) {
        Sys.sleep(2)
        webElem$sendKeysToElement(list(key = "end"))
      }

      page <- read_html(remDr$getPageSource()[[1]])

      # Scraping article headlines
      get_headline <- page %>%
        html_nodes('.category-article') %>%
        html_nodes('.article-title') %>%
        html_text()

      # Scraping publication times and keeping the HH:MM part
      get_time <- page %>%
        html_nodes('.category-article') %>%
        html_nodes('.article-date') %>%
        html_text()
      get_time <- str_sub(get_time, start = -5)

      length(get_time)
    }

In total, the length should have been 157 + 166 + 140 = 463. In fact, I did not manage to collect all the data even from one page (length(get_time) = 126).

I assumed that after the first set of commands in the loop I would obtain three remDr objects, one for each of the three dates specified, but they were not recognised independently later on.

Because of that, I tried to start a second loop inside the initial one, before or after `page <-`:

    for (remDr0 in remDr) {
      page <- read_html(remDr0$getPageSource()[[1]])
      # substituted all remDr-s below with remDr0

or

    page <- read_html(remDr$getPageSource()[[1]])
    for (page0 in page)
      # substituted all page-s below with page0

However, these attempts ended with different errors.

I would appreciate help from specialists, as this is my first time using R for such a task.

I hope it is possible to correct the existing loop I made, or perhaps to suggest a shorter path, for example by turning the steps into a function.

CodePudding user response:

Here is a slight broadening of your approach, which also works for scraping multiple categories:

    library(RSelenium)
    library(dplyr)
    library(rvest)
    library(stringr)   # needed for str_sub() below

First, set the date period:

    d1 <- as.Date("2021-09-30")
    d2 <- as.Date("2021-10-02")
    dt <- seq(d1, d2, by = "days")  # the date sequence
    
    #launch browser 
    driver <- rsDriver(browser = c("firefox"))  
    remDr <- driver[["client"]]
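
If `rsDriver()` fails because the default port is already in use (common after an earlier session was not shut down), an explicit free port can be passed instead. A minimal sketch, assuming port 4545 happens to be free on your machine:

    # Sketch: launch on an explicit port (4545 is an arbitrary assumption)
    driver <- rsDriver(browser = "firefox", port = 4545L)
    remDr <- driver[["client"]]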
    
### `get_headline`  Function for newspaper headlines 

    get_headline <- function(x){
      link <- paste0('https://en.trend.az/archive/', x)
      remDr$navigate(link)
      remDr$findElement(using = "xpath", value = '/html/body/div[1]/div/div[1]/h1')$clickElement()
      webElem <- remDr$findElement("css", "body")
      # Scrolling to the end of the webpage, to load all articles
      for (i in 1:25){
        Sys.sleep(1)
        webElem$sendKeysToElement(list(key = "end"))
      }

      headlines <- remDr$getPageSource()[[1]] %>%
        read_html() %>%
        html_nodes('.category-article') %>%
        html_nodes('.article-title') %>%
        html_text()
      return(headlines)
    }
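
As a quick sanity check, the function can be run for a single date before looping. The expected count below comes from the question and assumes the page fully loads while scrolling:

    # Hypothetical single-date check
    headlines_0930 <- get_headline("2021-09-30")
    length(headlines_0930)   # the question expects 157 for this date
    head(headlines_0930, 3)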

### `get_time`  Function for the time of publishing

    get_time <- function(x){
      link <- paste0('https://en.trend.az/archive/', x)
      remDr$navigate(link)
      remDr$findElement(using = "xpath", value = '/html/body/div[1]/div/div[1]/h1')$clickElement()
      webElem <- remDr$findElement("css", "body")
      # Scrolling to the end of the webpage, to load all articles
      for (i in 1:25){
        Sys.sleep(1)
        webElem$sendKeysToElement(list(key = "end"))
      }

      # Addressing the selector of the publication time on the website
      time <- remDr$getPageSource()[[1]] %>%
        read_html() %>%
        html_nodes('.category-article') %>%
        html_nodes('.article-date') %>%
        html_text() %>%
        str_sub(start = -5)
      return(time)
    }
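
The `str_sub(start = -5)` step keeps only the last five characters of each scraped date string, i.e. the HH:MM stamp. The exact raw format used on the site is an assumption in this small illustration:

    # str_sub() with a negative start counts from the end of the string
    str_sub("30 September 2021 18:42", start = -5)
    #> [1] "18:42"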

### `get_number`  Function for numbering all articles from one page/day

    get_number <- function(x){
      link <- paste0('https://en.trend.az/archive/', x)
      remDr$navigate(link)
      remDr$findElement(using = "xpath", value = '/html/body/div[1]/div/div[1]/h1')$clickElement()
      webElem <- remDr$findElement("css", "body")
      # Scrolling to the end of the webpage, to load all articles
      for (i in 1:25){
        Sys.sleep(1)
        webElem$sendKeysToElement(list(key = "end"))
      }

      # Addressing the selectors of the headlines on the website
      headline <- remDr$getPageSource()[[1]] %>%
        read_html() %>%
        html_nodes('.category-article') %>%
        html_nodes('.article-title') %>%
        html_text()
      number <- seq_along(headline)  # safer than seq(1:length(headline))
      return(number)
    }
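
Note that the three functions above repeat the same navigate-and-scroll steps, so each date is loaded three times. A possible refactor, not part of the original answer, is to load each page once and parse all fields from the same HTML; a sketch reusing the same selectors and the 25-scroll heuristic:

    # Sketch: load and scroll one archive page, returning the parsed HTML
    load_archive_page <- function(x){
      remDr$navigate(paste0('https://en.trend.az/archive/', x))
      remDr$findElement(using = "xpath", value = '/html/body/div[1]/div/div[1]/h1')$clickElement()
      webElem <- remDr$findElement("css", "body")
      for (i in 1:25){          # scroll to trigger lazy loading
        Sys.sleep(1)
        webElem$sendKeysToElement(list(key = "end"))
      }
      read_html(remDr$getPageSource()[[1]])
    }

    # Sketch: one pass over the parsed page for all three fields
    get_data_table_onepass <- function(x){
      page     <- load_archive_page(x)
      articles <- page %>% html_nodes('.category-article')
      headline <- articles %>% html_nodes('.article-title') %>% html_text()
      time     <- articles %>% html_nodes('.article-date') %>%
        html_text() %>% str_sub(start = -5)
      tibble(Num = seq_along(headline), Article = headline, Time = time)
    }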

### `get_data_table`  Collection of all functions into a tibble

    get_data_table <- function(x){

      # Extract the basic information from the HTML
      headline <- get_headline(x)
      time <- get_time(x)
      headline_number <- get_number(x)

      # Combine into a tibble and return it explicitly
      combined_data <- tibble(Num = headline_number,
                              Article = headline,
                              Time = time)
      return(combined_data)
    }

Finally, use `lapply` to loop through all the dates in `dt`:

    df <- lapply(dt, get_data_table)
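
`lapply()` returns a list with one tibble per date. If a single table is preferred, the list can be combined, and the browser and Selenium server shut down once scraping is done. A small cleanup sketch (`bind_rows()` is from dplyr, already attached above):

    # Combine the per-date tibbles, keeping the date as an id column
    df_all <- bind_rows(setNames(df, as.character(dt)), .id = "Date")

    # Close the browser and stop the Selenium server
    remDr$close()
    driver$server$stop()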