doParallel instead of apply


I have been using apply to pass each row of a data.frame to a function in R on Windows 10. This approach works. However, as the function has become more complex and the data set larger, it has become infeasible, so I would like to run the function on each row of the data.frame using parallel processing.

I have used doParallel before, both on my Windows 10 laptop and on a Unix cluster, but only to run multiple external R files, never to pass individual rows of a data.frame to a function within one R file. Can someone show me how to achieve the latter? If I have to, I suppose I could create a separate R file for each row of the data.frame master.iter in the code below, but there must be an easier way.

Here is a trivial example that runs with apply together with the desired result:

master.iter <- read.table(text = '
    scenario  aaa   bbb   ccc   ddd   eee
       1       1     5     0     20    10
       2       1    10     0   2000  1000
', header = TRUE, stringsAsFactors = FALSE)

master.function <- function(scenario, aaa, bbb, ccc, ddd, eee) {

  scenario <- as.numeric(c(scenario))
  aaa <- as.numeric(c(aaa))
  bbb <- as.numeric(c(bbb))
  ccc <- as.numeric(c(ccc))
  ddd <- as.numeric(c(ddd))
  eee <- as.numeric(c(eee))
  AAA <- seq(aaa,bbb,1)
  BBB <- AAA * ddd
  CCC <- AAA * eee

  my.table <- data.frame(AAA = AAA,
                         BBB = BBB,
                         CCC = CCC)

  output.list <- list(scenario = scenario,
                      aaa = aaa, bbb = bbb, ccc = ccc, ddd = ddd, eee = eee,
                      my.table = my.table)

  master_output <- do.call(cbind, output.list)

  return(list(master_output = master_output))
}

function.output <- apply(master.iter, 1, function(x) {master.function( x[1],  x[2],  x[3],  x[4],  x[5],  x[6])})

master.df <- do.call("rbind", lapply(function.output, as.data.frame))
colnames(master.df) <- names(function.output[[1]]$master_output)

desired.result <- read.table(text = '
   scenario aaa bbb ccc  ddd  eee my.table.AAA my.table.BBB my.table.CCC
          1   1   5   0   20   10            1           20           10
          1   1   5   0   20   10            2           40           20
          1   1   5   0   20   10            3           60           30
          1   1   5   0   20   10            4           80           40
          1   1   5   0   20   10            5          100           50
          2   1  10   0 2000 1000            1         2000         1000
          2   1  10   0 2000 1000            2         4000         2000
          2   1  10   0 2000 1000            3         6000         3000
          2   1  10   0 2000 1000            4         8000         4000
          2   1  10   0 2000 1000            5        10000         5000
          2   1  10   0 2000 1000            6        12000         6000
          2   1  10   0 2000 1000            7        14000         7000
          2   1  10   0 2000 1000            8        16000         8000
          2   1  10   0 2000 1000            9        18000         9000
          2   1  10   0 2000 1000           10        20000        10000
', header = TRUE)

Here is the R code I typically use to submit separate R files to an Ubuntu cluster. I have tried to modify it for the problem described above, but I have not been able to arrive at a solution.

setwd('/home/ubuntu/')
library(doParallel)

# Use all available cores for the cluster
my.AWS.n.cores <- detectCores()
registerDoParallel(my.cluster <- makeCluster(my.AWS.n.cores))

# Source every R file in the folder, one file per parallel task
folderName <- 'R_files_a'
files <- list.files(folderName, full.names = TRUE)

start.time <- Sys.time()
foreach(file = files, .errorhandling = "remove") %dopar% {
  source(file)
}
stopCluster(my.cluster)

end.time <- Sys.time()
total.time.c <- end.time - start.time
total.time.c

CodePudding user response:

df <- master.iter

library(doParallel)

# Leave one core free for the OS
ncores <- detectCores() - 1
cl <- parallel::makeCluster(ncores)
registerDoParallel(cl)

# Each iteration passes one row of df to master.function;
# v is a list with one element per row
v <- foreach(i = 1:nrow(df)) %dopar% {
  master.function(df[i, 1], df[i, 2], df[i, 3], df[i, 4], df[i, 5], df[i, 6])
}
stopCluster(cl)
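
Since each element of v has the same structure as the elements of function.output in the question, the question's own rbind step should turn the parallel output into the desired data.frame (an untested sketch that simply reuses the asker's post-processing code):

# Combine the per-row results into one data.frame, as in the serial version
master.df <- do.call("rbind", lapply(v, as.data.frame))
colnames(master.df) <- names(v[[1]]$master_output)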

CodePudding user response:

We may use dapply from the collapse package

library(collapse)
dapply(master.iter, MARGIN = 1, function(x) {
  master.function(x[1], x[2], x[3], x[4], x[5], x[6])
}, parallel = TRUE)

-output

 [1]     1     1     1     1     1     1     1     1     1     1     5     5     5     5     5     0     0     0     0     0    20    20    20    20    20
 [26]    10    10    10    10    10     1     2     3     4     5    20    40    60    80   100    10    20    30    40    50     2     2     2     2     2
 [51]     2     2     2     2     2     1     1     1     1     1     1     1     1     1     1    10    10    10    10    10    10    10    10    10    10
 [76]     0     0     0     0     0     0     0     0     0     0  2000  2000  2000  2000  2000  2000  2000  2000  2000  2000  1000  1000  1000  1000  1000
[101]  1000  1000  1000  1000  1000     1     2     3     4     5     6     7     8     9    10  2000  4000  6000  8000 10000 12000 14000 16000 18000 20000
[126]  1000  2000  3000  4000  5000  6000  7000  8000  9000 10000
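
For reference, here is a sketch (not from the posted answers) that stays close to the original apply() call by using base R's parallel package. It assumes master.function and master.iter are defined in the global environment before the cluster is created, and it reuses the question's own rbind step to build master.df:

library(parallel)

# PSOCK cluster (the default on Windows); workers start with an empty
# workspace, so the function and the data must be exported explicitly
cl <- makeCluster(detectCores() - 1)
clusterExport(cl, c("master.function", "master.iter"))

function.output <- parLapply(cl, seq_len(nrow(master.iter)), function(i) {
  master.function(master.iter[i, 1], master.iter[i, 2], master.iter[i, 3],
                  master.iter[i, 4], master.iter[i, 5], master.iter[i, 6])
})
stopCluster(cl)

# Post-process exactly as in the serial version of the question
master.df <- do.call("rbind", lapply(function.output, as.data.frame))
colnames(master.df) <- names(function.output[[1]]$master_output)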