I have several improperly formatted CSVs that are tab-separated but have a double quote bounding each row. I can read them in and ignore the stray " characters
with:
library(data.table)
files = list.files(pattern="*.csv")
dt = lapply(files, fread, sep="\t", quote="")
setattr(dt, 'names', gsub(".csv", "", files))
but is there an R data.table
way of handling the quotes, beyond separate commands to strip them from the first and last columns?
# sample table
DT = data.table(V1=paste0("\"", 1:5), V2=c(1,2,5,6,8),
                V3=c("a\"","b\"","c\"","d\"","e\""))
dt = list(DT, DT, DT)
# these work but aren't using data.table
dt = lapply(dt, function(i) {
  i[[1]] = gsub('"', '', i[[1]])
  i[[ncol(i)]] = gsub('"', '', i[[ncol(i)]])
  i
})
# magical mystery operation that doesn't work???
dt = lapply(dt, function(i) {
  i[, .SD := gsub('"', '', rep(.SD)), .SDcols = names(i)[c(1, ncol(i))]]
})
CodePudding user response:
Use either the index or the column names to assign:
library(data.table)
lapply(dt, \(x) {
  # get the names of the first and last columns by position
  nm1 <- names(x)[c(1, length(x))]
  # loop over the Subset of Data.table (.SD) with `gsub`,
  # selecting the columns of interest via .SDcols,
  # and assign the output back to those columns (nm1)
  x[, (nm1) := lapply(.SD, gsub, pattern = '"', replacement = ''),
    .SDcols = nm1][]
})
-output
[[1]]
       V1    V2     V3
   <char> <num> <char>
1:      1     1      a
2:      2     2      b
3:      3     5      c
4:      4     6      d
5:      5     8      e

[[2]]
       V1    V2     V3
   <char> <num> <char>
1:      1     1      a
2:      2     2      b
3:      3     5      c
4:      4     6      d
5:      5     8      e

[[3]]
       V1    V2     V3
   <char> <num> <char>
1:      1     1      a
2:      2     2      b
3:      3     5      c
4:      4     6      d
5:      5     8      e
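If the goal is to avoid a separate cleanup pass altogether, the same idea can be folded into the original read step so each file is cleaned as it is read. A minimal sketch, assuming the tab-separated, row-quoted files and the .csv naming from the question:

library(data.table)
files <- list.files(pattern = "\\.csv$")
dt <- lapply(files, function(f) {
  x <- fread(f, sep = "\t", quote = "")
  nm1 <- names(x)[c(1, length(x))]
  # strip the bounding quotes from the first and last columns right after reading
  x[, (nm1) := lapply(.SD, gsub, pattern = '"', replacement = ''), .SDcols = nm1][]
})
setattr(dt, "names", gsub("\\.csv$", "", files))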
Another option is set, which modifies the columns by reference:
lapply(dt, \(x) {
  nm1 <- names(x)[c(1, length(x))]
  for (j in nm1) set(x, i = NULL, j = j, value = gsub('"', '', x[[j]]))
})
-output
dt
[[1]]
       V1    V2     V3
   <char> <num> <char>
1:      1     1      a
2:      2     2      b
3:      3     5      c
4:      4     6      d
5:      5     8      e

[[2]]
       V1    V2     V3
   <char> <num> <char>
1:      1     1      a
2:      2     2      b
3:      3     5      c
4:      4     6      d
5:      5     8      e

[[3]]
       V1    V2     V3
   <char> <num> <char>
1:      1     1      a
2:      2     2      b
3:      3     5      c
4:      4     6      d
5:      5     8      e
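One thing to keep in mind: because set updates each table by reference, the for loop version returns NULL from each call, which is why dt itself is printed afterwards. If the result of the lapply should be the cleaned list, return the table at the end of the function. A small sketch of that variant (the name cleaned is only illustrative):

cleaned <- lapply(dt, \(x) {
  nm1 <- names(x)[c(1, length(x))]
  for (j in nm1) set(x, i = NULL, j = j, value = gsub('"', '', x[[j]]))
  # x has already been modified in place; returning it lets lapply collect it
  x[]
})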