I have a list of values (these are weights) and two target values (each of which should be the sum of a selection of these weights). Each weight should be assigned to one of the two target values, so that the sum of the weights assigned to each target approximates that target value. The approximation error should be minimised for both target values simultaneously.
For example, this is a list of weights
Weight |
---|
4.528 |
4.773 |
4.253 |
4.688 |
4.21 |
3.841 |
4.005 |
4.545 |
3.825 |
5.123 |
4.757 |
and there are two target values:
Values |
---|
22.08 |
21.37 |
The sum of a selection of weights will probably not be exactly equal to the target value, so I need an approximation.
As far as I know, Excel Solver can do this for one target value at a time, but I need it to handle both target values simultaneously. Does anyone have an idea how to tackle this? Preferably in R, but Python or Excel are also fine.
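For concreteness, here is a sketch of the quantity I want to minimise, written as a small R function over an assignment of each weight to group 1 or group 2 (the squared-error criterion and the function name are just one plausible, illustrative choice, not a hard requirement):
# illustrative objective: total squared deviation of the two group sums
# from their targets, given an assignment of each weight to group 1 or 2
objective <- function(assignment, weights, targets) {
  sum1 <- sum(weights[assignment == 1])
  sum2 <- sum(weights[assignment == 2])
  (targets[1] - sum1)^2 + (targets[2] - sum2)^2
}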
CodePudding user response:
Here is an exhaustive-search alternative that assigns every weight to one of the two groups:
Weight <- c(4.528, 4.773, 4.253, 4.688, 4.21, 3.841,
            4.005, 4.545, 3.825, 5.123, 4.757)
Values <- c(22.08, 21.37)

n <- length(Weight)
# all index combinations of size 2 to n-1 (one matrix per size)
cms <- lapply(2:(n - 1), function(x) combn(n, x))
# for each combination: sum the selected and unselected weights
# and compute the total squared error against the two targets
res <- lapply(cms, function(cm)
  apply(cm, 2, function(sel) {
    sum1 <- sum(Weight[sel])
    sum2 <- sum(Weight[-sel])
    sum((Values - c(sum1, sum2))^2)
  }))
# per combination size: column index and value of the minimum squared error
mins <- sapply(res, function(re) {
  wm <- which.min(re)
  minv <- re[wm]
  c(wm = wm, minv = minv)
})
mins
# [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9]
#wm 18.0000 81.000 169.00000 300.0000 163.00000 162.00000 85.0000 38.0000 2.000
#minv 447.1174 212.036 68.92069 14.0989 12.99698 54.91097 184.7123 406.2839 719.574
which.min(mins[2,])
#[1] 5
mins[, which.min(mins[2,])]
# wm minv
#163.00000 12.99698
# cms[[5]] holds the size-6 combinations (sizes start at 2); column 163 is the winner
(sel <- cms[[5]][, 163]) # group 1
#[1] 1 3 5 6 7 9
(1:n)[-sel] # group 2
#[1] 2 4 8 10 11
c(sum(Weight[sel]), sum(Weight[-sel])) # group sums
#[1] 24.662 23.886
Values
#[1] 22.08 21.37
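A sanity check on the size of that residual: because this approach assigns every weight, the two group sums always total sum(Weight) = 48.548, while the targets total only 43.45. With the total fixed, the squared error is smallest when the excess is split evenly between the two groups, which gives a lower bound that this minimal sketch computes (it uses only the Weight and Values vectors defined above):
# lower bound on the squared error under a full assignment: the excess
# total weight is unavoidable and is best split evenly across both groups
excess <- sum(Weight) - sum(Values)
excess^2 / 2
#[1] 12.9948
So the 12.99698 found above is essentially the best possible as long as every weight must be used.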
CodePudding user response:
library(tidyverse)

weight <- c(4.528, 4.773, 4.253, 4.688, 4.21, 3.841,
            4.005, 4.545, 3.825, 5.123, 4.757)
values <- c(22.08, 21.37)
# a heuristic cap: I'm never going to sum more than this many entries to hit a target
(most_to_sum <- ceiling(max(values) / min(weight)))
#[1] 6
# make all feasible (size of set 1, size of set 2) pairs
# (enumerated here, though the search below does not use this table directly)
combs_to_do <- expand_grid(
  set_1 = seq_len(most_to_sum),
  set_2 = seq_len(most_to_sum)
) |>
  rowwise() |>
  mutate(rsum = sum(set_1, set_2)) |>
  filter(rsum <= length(weight)) |>
  ungroup()
# all candidate first sets of size 1..most_to_sum, flattened into one list
unique_sets <- map(seq_len(most_to_sum),
                   ~combn(weight, .x, simplify = FALSE)) |> flatten()
# absolute deviation of each candidate's sum from the first target
unique_sets_evals <- map_dbl(unique_sets, ~abs(values[[1]] - sum(.x)))
# opportunity here to trade accuracy for speed/memory: keep only the
# candidates whose deviation falls below this quantile
(set1_reduced <- quantile(unique_sets_evals, 1)) # 1 = exhaustive search; I got the
# correct result reducing to 0.01, and even good approximations with less
set1_reduced_sets <- unique_sets[which(unique_sets_evals <= set1_reduced)]
do_second_set <- function(first_sets, size_of_second_set, weight, values) {
  # candidate second sets drawn from the weights not used by each first set
  # (setdiff works on values, so this assumes the weights are all distinct)
  second_sets <- map(first_sets, ~{
    available <- setdiff(weight, .x)
    if (length(available) < size_of_second_set) return(Inf)
    combn(available, size_of_second_set, simplify = FALSE)
  })
  # for each first set, pick the second set minimising the joint distance
  # to the two targets
  coeval <- map2(first_sets, second_sets,
                 ~{
                   x <- .x
                   y <- .y
                   xsum <- sum(x)
                   ysums <- map_dbl(y, sum)
                   evals <- map_dbl(ysums,
                                    ~sqrt((values[[1]] - xsum)^2 + (values[[2]] - .x)^2))
                   best_y <- y[which.min(evals)]
                   list(best_second = best_y,
                        eval = evals[[which.min(evals)]])
                 })
  map2(first_sets, coeval, ~c(list(set1 = .x), .y))
}
# run the second-set search for every second-set size, then flatten
almost_ <- map(seq_len(most_to_sum),
               ~do_second_set(set1_reduced_sets, .x, weight = weight, values = values)) |>
  flatten()
all_evals <- map_dbl(almost_, ~.x$eval)
(best_eval_num <- which.min(all_evals))
almost_[[best_eval_num]]
$set1
[1] 4.528 4.773 4.253 4.688 3.825
$best_second
$best_second[[1]]
[1] 4.210 3.841 4.005 4.545 4.757
$eval
[1] 0.01769181
> sum(4.528, 4.773, 4.253, 4.688, 3.825)
[1] 22.067
> sum(4.210, 3.841, 4.005, 4.545, 4.757)
[1] 21.358
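One thing worth noting about this result: the search does not require every weight to be used, and in fact the weight 5.123 ends up in neither set, which is why the error here is so much smaller than in the exhaustive full-assignment search in the other answer. A quick check using the objects defined above:
# the best solution uses only 10 of the 11 weights: 5.123 is left out
best <- almost_[[best_eval_num]]
setdiff(weight, c(best$set1, best$best_second[[1]]))
#[1] 5.123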