I am trying to fit experimental data to a set of 3 ODEs:
dX/dt = mumax.(S/(Ks + S)).(1-X/Xm).X
dS/dt = -1/Y_x/s * dX/dt - m_s.X - k_LD * dL/dt
dL/dt = -kl * L
When I run the code below I get the error:
"Error in chol.default(object$hessian) : the leading minor of order 6 is not positive definite"
and the fit terminates with:
"Reason terminated: Relative error in the sum of squares is at most `ftol'."
I don't know if the problem lies in the models or the data.
Please find the code below. Thanks
library(deSolve)
library(ggplot2)
library(minpack.lm)
library(reshape2)
time <- c(0,2,4,5,6,7,8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40,42)
X <- c(0.010044683, 0.011653262, 0.013306524, 0.014155496, 0.014959786,
0.015630027, 0.016434316, 0.018132261, 0.019651475, 0.021394102,
0.023002681, 0.024432529, 0.026085791, 0.027828418, 0.029347632,
0.031090259, 0.032698838, 0.034262735, 0.035915996, 0.037569258,
0.039311886, 0.040786416, 0.042350313, 0.04409294)
S <- c(0.585443495, 0.537584046, 0.505306743, 0.476368471, 0.464125356,
0.451882241, 0.441308642, 0.426283001, 0.403466287, 0.394005698,
0.37508452, 0.371188984, 0.353937322, 0.355050332, 0.33668566,
0.343920228, 0.328338082, 0.333346629, 0.319433998, 0.314425451,
0.309416904, 0.302182336, 0.294391263, 0.288269706)
L <- c(0.372443284, 0.370216418, 0.361865672, 0.362979104, 0.357968657,
0.359638806, 0.355741791, 0.345720896, 0.34961791, 0.334029851,
0.329019403, 0.334029851, 0.327349254, 0.322338806, 0.319555224,
0.315101493, 0.31398806, 0.305080597, 0.302297015, 0.297286567,
0.295059701, 0.289492537, 0.282255224, 0.27668806)
data <- data.frame(time,X,S,L)
attach(data)
Hybrid <- function(t,c,parms) {
k1 <- parms$k1 # mumax
k2 <- parms$k2 # Ks
k3 <- parms$k3 # Xm
k4 <- parms$k4 # Y_x/s
k5 <- parms$k5 # m_s
k6 <- parms$k6 # k_LD
k7 <- parms$k7 # k_l
r <- numeric(length(c))
r[1] <- k1 * (c["S"] / ( k2 + c["S"] )) * c["X"] * (1-c["X"]/k3) # r[1] = dX/dt = mumax.(S/(Ks + S)).(1-X/Xm).X
r[2] <- -1/k4 * r[1] - k5 * c["X"] - k6 * r[3] # r[2] = dS/dt = -1/Y_x/s * dX/dt - m_s.X - k_LD * dL/dt
r[3] <- -k7*c["L"] # dL/dt = -kl * L
return(list(r))
}
residuals <- function(parms){
cinit <- c( X = data[1,2], S = data[1,3], L = data[1,4] )
t <- sort(unique(c(seq(0, 42, 1), data$time)))
k1 <- parms[1]
k2 <- parms[2]
k3 <- parms[3]
k4 <- parms[4]
k5 <- parms[5]
k6 <- parms[6]
k7 <- parms[7]
out <- ode( y = cinit,
times = t,
func = Hybrid,
parms = list( k1 = k1, k2 = k2, k3 = k3, k4 = k4, k5 = k5, k6 = k6, k7 = k7) )
out_data <- data.frame(out)
out_data <- out_data[out_data$time %in% data$time,]
pred_data <- melt(out_data,id.var="time",variable.name="Substance",value.name="Conc")
exp_data <- melt(data,id.var="time",variable.name="Substance",value.name="Conc")
residuals <- pred_data$Conc-exp_data$Conc
return(residuals)
}
parms <- c( k1=0.8, # mumax
k2=1.2, # Ks
k3=0.06, # Xm
k4=0.05, # Y_x/s
k5=0.001,# m_s
k6=0.05, # k_LD
k7=0.003)# kl
fitval <- nls.lm(par=parms,fn=residuals)
summary(fitval)
fitval
parest=as.list(coef(fitval))
cinit <- c( X=data[1,2], S=data[1,3], L=data[1,4] )
t<-seq(0, 42, 1)
parms<-as.list(parest)
out<-ode(y=cinit,times=t,func=Hybrid,parms=parms)
out_data<-data.frame(out)
names(out_data)<-c("time","X_pred","S_pred","L_pred")
tmppred<-melt(out_data,id.var=c("time"),variable.name="Substance",value.name="Concentration")
tmpexp<-melt(data,id.var=c("time"),variable.name="Substance",value.name="Concentration")
p<-ggplot(data=tmppred,aes(x=time,y=Concentration,color=Substance,linetype=Substance)) + geom_line()
print(p)
p<-p + geom_line(data=tmpexp,aes(x=time,y=Concentration,color=Substance,linetype=Substance))
p<-p + geom_point(data=tmpexp,aes(x=time,y=Concentration,color=Substance))
p<-p + scale_linetype_manual(values=c(0,1,0,1,0,1))
p<-p + scale_color_manual(values=rep(c("red","blue","purple"),each=2)) + theme_bw()
print(p)
CodePudding user response:
Firstly, if we extract the hessian matrix from the model object we can see:
fitval$hessian
# k1 k2 k3 k4 k5 k6 k7
# k1 3.568033e-09 -1.851988e-09 1.541311e-03 -2.863371e-04 7.213943e-05 0 -3.474817e-10
# k2 -1.851988e-09 9.612743e-10 -8.000189e-04 1.486235e-04 -3.744407e-05 0 -1.831356e-10
# k3 1.541311e-03 -8.000189e-04 1.836024e+03 -2.876605e+02 1.210877e+02 0 -6.701843e-05
# k4 -2.863371e-04 1.486235e-04 -2.876605e+02 4.643049e+01 -1.841829e+01 0 -1.367424e-05
# k5 7.213943e-05 -3.744407e-05 1.210877e+02 -1.841829e+01 8.765142e+00 0 -1.851183e-05
# k6 0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00 0 0.000000e+00
# k7 -3.474817e-10 -1.831356e-10 -6.701843e-05 -1.367424e-05 -1.851183e-05 0 1.215381e+03
Notice that the k6 row and column are all zero? Your Hessian is therefore singular, and when the Cholesky decomposition is run you get the error:
chol(fitval$hessian) # Error in chol.default(fitval$hessian) : the leading minor of order 6 is not positive definite
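To see the connection, here is a minimal sketch (a made-up 2x2 matrix, not taken from this fit) of how an all-zero row and column makes chol() fail in exactly this way:
m <- matrix(c(1, 0,
              0, 0), nrow = 2)  # second row/column all zero, like the k6 entries
det(m)   # 0, so the matrix is singular
chol(m)  # errors with "the leading minor of order 2 is not positive definite"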
This can happen in many ways. In this case, the error is in the function Hybrid: you use r[3] to compute r[2], but r[3] has not been assigned yet at that point. Because r is created with numeric(length(c)), r[3] is still 0 when r[2] is evaluated, so k6 is always multiplied by zero, never affects the residuals, and its row and column of the Hessian come out zero. If you exchange the two lines (compute r[3] first and then r[2]), as in:
Hybrid <- function(t,c,parms) {
k1 <- parms$k1 # mumax
k2 <- parms$k2 # Ks
k3 <- parms$k3 # Xm
k4 <- parms$k4 # Y_x/s
k5 <- parms$k5 # m_s
k6 <- parms$k6 # k_LD
k7 <- parms$k7 # k_l
r <- numeric(length(c))
r[1] <- k1 * (c["S"] / ( k2 + c["S"] )) * c["X"] * (1-c["X"]/k3) # r[1] = dX/dt = mumax.(S/(Ks + S)).(1-X/Xm).X
r[3] <- -k7*c["L"] # dL/dt = -kl * L
r[2] <- -1/k4 * r[1] - k5 * c["X"] - k6 * r[3] # r[2] = dS/dt = -1/Y_x/s * dX/dt - m_s.X - k_LD * dL/dt
return(list(r))
}
Then, after re-running the fit with the corrected Hybrid, the error disappears.
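A minimal sketch of the refit (simply re-using the starting values and the residuals function from the question):
parms <- c( k1=0.8, k2=1.2, k3=0.06, k4=0.05, k5=0.001, k6=0.05, k7=0.003)
fitval <- nls.lm(par=parms, fn=residuals)  # refit with the corrected Hybrid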
summary(fitval)
# Parameters:
# Estimate Std. Error t value Pr(>|t|)
# k1 1.958e+03 9.348e+06 0.000 0.9998
# k2 3.738e+03 1.785e+07 0.000 0.9998
# k3 3.041e-02 1.393e-03 21.835 <2e-16 ***
# k4 1.188e-01 6.311e-02 1.882 0.0644 .
# k5 3.078e-02 3.795e-01 0.081 0.9356
# k6 -9.697e-01 5.823e+00 -0.167 0.8683
# k7 6.626e-03 1.525e-04 43.459 <2e-16 ***
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
#
# Residual standard error: 0.005309 on 65 degrees of freedom
# Number of iterations to termination: 37
# Reason for termination: Conditions for `info = 1' and `info = 2' both hold.