## OR_Sim_not_conv_cases.R
## forked from bbolker/testing_bias_distribution
## (GitHub page chrome and line-number gutter removed from this extract;
## the script itself follows)
# Sys.setenv(LANG = "en")
# remotes::install_github("bbolker/bbmle")
library(dplyr)
library(tidyr)
library(ggplot2); theme_set(theme_bw())
library(viridis)
library(bbmle)
library(broom)
# source("mle2_tidy.R")
# Set Seeds
## Fixed seed: reproducibility depends on the exact order of the
## runif() calls below (T_B first, then T_Y, Y_0, r inside mutate)
set.seed(13519)
## Random parameter values
## BMB: good to be consistent about <- or = for assignment
## see e.g. https://lintr.r-lib.org/
## https://www.tidyverse.org/blog/2025/02/air/
# number of tests (simulation runs); 2n parameter sets are drawn so
# rows 1:n give true values and rows (n+1):(2n) give starting values
n <- 1000
### Constants for all tests
N <- 1e6 ## pop size
tmax <- 39 ## max simulation time/number of observations
### uninfected testing prob, from 0 to .25
T_B <- runif(2*n,0,0.25)
# 2n: Generate both true values and initial values for fitting
param_mat <- (expand.grid(T_B=T_B)
%>% as_tibble()
# Odds ratio part
## infected testing prob: must be larger than T_B, smaller than 1
## BMB: you can chain mutate expressions
%>% mutate(T_Y = runif(2*n,T_B,1),
## B: odds of testing while uninfected; Phi: odds ratio of testing
## (infected vs uninfected); the model is fitted on the log scale
B= T_B/(1-T_B),
Phi = (T_Y/(1-T_Y))/B,
log_B = log(B),
log_Phi = log(Phi),
## Infection dynamic part
Y_0 = runif(2*n,0,25e-3),
log_Y0 = log(Y_0),
## initial value from 0 to .025 of population
## logistic growth rate
r = log(2)/runif(2*n,1,5)
## random doubling time from 1 to 5
)
)
## BMB: I probably wouldn't bother with dplyr::slice here
## (just param_mat[1:n])
## separate simulation and fit values
param_true <- dplyr::slice(param_mat,1:n)
## initial values for fitting
param_fit <- dplyr::slice(param_mat,(n+1):(2*n))
## Simulate the observed data
## Simulate one observed time series of tests and positive tests.
## param_vec: one-row data frame / list with elements
##   T_B (uninfected testing prob), T_Y (infected testing prob),
##   r (logistic growth rate), Y_0 (initial prevalence fraction)
## tmax: last observation time (observations at t = 0, ..., tmax)
## N: population size (binomial trials per time point)
## Returns a tibble with columns t, pY, T_prop, pos, OT, OP.
dat_func <- function(param_vec, tmax, N) {
  ## BMB: could use with(param_vec, { ... } ) (at slight loss of debugging capability)
  T_B <- param_vec$T_B
  T_Y <- param_vec$T_Y
  r <- param_vec$r
  Y_0 <- param_vec$Y_0
  t <- 0:tmax
  n_obs <- length(t) ## number of observation times
  dat <- tibble(t=t
  , pY = 1/(1+(1/Y_0-1)*exp(-r*t)) ## Prevalence based on Logistic growth
  , T_prop = (1-pY)*T_B+pY*T_Y ## Expected test proportion
  , pos = pY*T_Y/T_prop ## Expected test positivity
  ## rbinom's first argument is the *number of draws*; pass it
  ## explicitly instead of relying on rbinom(t, ...) silently using
  ## length(t) when t is a vector
  , OT = rbinom(n_obs,N,T_prop) ## Observed number of tests
  , OP = rbinom(n_obs,OT,pos) ## Observed number of positive tests
  )
  return(dat)
}
## BMB: changed logY_0 to log_Y0 for consistency (could also be
## log_Y0 or log_Y_0, but aim for consistency in any case)
### function to calculate negative log-likelihood:
## Negative log-likelihood of an observed testing time series.
## Parameters are on transformed scales:
##   log_B   - log odds of testing while uninfected
##   log_Phi - log odds ratio of testing (infected vs uninfected)
##   log_Y0  - log initial prevalence
##   r       - logistic growth rate
## dat must have columns OT (observed tests) and OP (observed positives).
## debug_plot and plot_sleep are accepted for interface compatibility
## but are not used in this implementation.
LL <- function(log_B, log_Phi, log_Y0, r, dat, tmax, N, debug = FALSE,
               debug_plot = FALSE, plot_sleep = 1) {
  ## back-transform to natural scale
  Y_0 <- exp(log_Y0)
  B <- exp(log_B)
  Phi <- exp(log_Phi)
  T_B <- B/(1 + B)
  T_Y <- B*Phi/(1 + B*Phi)
  times <- 0:tmax
  ## deterministic trajectory: logistic prevalence, expected testing
  ## proportion, expected positivity among those tested
  prev <- 1/(1 + (1/Y_0 - 1)*exp(-r*times))
  test_prop <- (1 - prev)*T_B + prev*T_Y
  positivity <- prev*T_Y/test_prop
  ## binomial NLL for number tested and for positives among tested
  nll_tests <- -sum(dbinom(dat$OT, N, test_prop, log = TRUE))
  nll_pos <- -sum(dbinom(dat$OP, dat$OT, positivity, log = TRUE))
  total <- nll_tests + nll_pos
  if (debug) {
    cat(B, Phi, log_Y0, r, nll_tests, nll_pos,
        total, "\n")
  }
  total
}
### fitting procedure
### Fit the model to one simulated data set by maximum likelihood
### (Nelder-Mead via bbmle::mle2).
### dat: simulated data (columns OT, OP)
### param_fit: one-row data frame supplying starting values; extra
###   columns beyond log_B, log_Phi, log_Y0, r are ignored
### Returns the mle2 fit object.
fit_proc <- function(dat,param_fit,tmax,N,debug=FALSE){
  ## mle2/LL only optimizes over these four parameters; subset them
  ## directly rather than unpacking and re-packing each one
  start_vals <- as.list(param_fit[c("log_B", "log_Phi", "log_Y0", "r")])
  mle2(LL,
       start = start_vals,
       data = list(dat = dat,
                   N = N,
                   tmax = tmax,
                   debug = debug),
       control = list(maxit = 15000, reltol = 1e-10),
       method = "Nelder-Mead")
}
## All 10 cases that fail to converge to true values
## (run indices into param_true/param_fit, 1..n, recorded from a
## previous fitting run)
NA_case <- c(227,313,343,436,660,711,725,760,882,956)
## plot joint distributions of starting values and true values
## Reshape a parameter table to long format: one row per
## (run, parameter), dropping the redundant log-scale columns.
## x: parameter tibble (param_true or param_fit)
## lab: name of the value column in the output
plong <- function(x, lab = "true_val") {
  (x
  |> select(-c(log_Phi, log_B, log_Y0))
  ## row_number() is safe for zero-row input, unlike seq(n())
  ## (seq(0) returns c(1, 0))
  |> mutate(run = row_number())
  |> pivot_longer(-run,
                  names_to = "param",
                  values_to = lab)
  )
}
## Join starting and true values by run and parameter
comb <- full_join(plong(param_fit, "fit_val"),
                  plong(param_true, "true_val"),
                  by = c("run", "param"))
## comb is in long format (one row per run x parameter), so
## comb[NA_case, ] would pick arbitrary early *rows*, not the
## non-converged runs; select by run index instead
NA_comb <- comb[comb$run %in% NA_case, ]
ggplot(comb, aes(fit_val, true_val)) +
  geom_point(alpha = 0.3) +
  facet_wrap(~param, scale = "free") +
  geom_point(data = NA_comb, col = "red", size = 3)
param_fit[NA_case[1],]
### Checking cases
## inspect the first non-converged case in detail
case <- NA_case[1]
## compare true vs starting parameter values (log_Y0 dropped:
## redundant with Y_0)
select(param_true[case,],-log_Y0)
select(param_fit[case,],-log_Y0)
## fit_logLik[case]
# true_logLik[case]
### fit again with debug on
## NOTE(review): dat_func draws fresh binomial noise here, so this
## data set differs from whatever produced the recorded
## non-convergence (see comment below)
dat <- dat_func(param_true[case,], tmax, N)
# fitting result might change due to randomness in data generating
# matplot(dat$t, dat[,-1], type = "l", log = "y")
# legend("center", col = 1:4, lty = 1:4,
# legend = names(dat)[-1])
vars <- c("log_B", "log_Phi", "log_Y0","r")
## do.call: the named elements of param_true[case, vars] match LL's
## named arguments; dat, tmax, N then fill the remaining positional
## arguments (dat, tmax, N) in order
true_logLik <- do.call(LL, c(param_true[case, vars], list(dat, tmax, N)))
fit_case<- fit_proc(dat, param_fit[case,], tmax, N,debug=TRUE)
## logLik() returns the maximized log-likelihood; negate it to get a
## negative log-likelihood comparable with true_logLik
fit_logLik <- -logLik(fit_case)
true_logLik
fit_logLik
true_value <- param_true[case, vars]
### error starts here due to ill-behaved or missing hessian
## tidy(..., conf.int = TRUE) needs a usable Hessian for the
## confidence intervals, which these non-converged fits lack
tt <- tidy(fit_case, conf.int = TRUE)
results_case <- tt |>
full_join(data.frame(term = names(true_value),
true.value = unlist(true_value)),
by = "term") |>
select(term, estimate, true.value, conf.low, conf.high)
# results_case
# inCI_case <- min(results_case$true.value<=results_case$conf.high & results_case$true.value>=results_case$conf.low)
# inCI_case
# ggplot(results_case, aes(y = term)) +
# geom_pointrange(aes(x = estimate, xmin = conf.low, xmax = conf.high)) +
# geom_point(aes(x=true.value), colour = "red") +
# facet_wrap(~term, ncol = 1, scale = "free")