From 6a4b2da0e089e0b1b8089fca60f39fd5443e2856 Mon Sep 17 00:00:00 2001 From: Guillem Hurault Date: Sun, 11 Feb 2024 21:04:38 +0000 Subject: [PATCH] Import project --- .Rprofile | 1 + .gitignore | 46 + Dockerfile | 33 + EczemaTreat.Rproj | 13 + R/ScoradPred.R | 654 +++++++ R/data_processing.R | 192 ++ R/powerprior.R | 145 ++ R/recommendations.R | 33 + R/utils.R | 132 ++ README.md | 70 + analysis/00_init.R | 22 + analysis/01_check_models.R | 83 + analysis/02_run_fit.R | 131 ++ analysis/03_check_fit.Rmd | 668 +++++++ analysis/04a_run_validation.R | 360 ++++ analysis/04b_run_validation_reference.R | 157 ++ analysis/05_check_performance.Rmd | 223 +++ analysis/06_analyse_recommendations.Rmd | 586 ++++++ analysis/07_plot_data.R | 136 ++ analysis/07_plot_fit.R | 186 ++ analysis/07_plot_performance.R | 151 ++ analysis/07_plot_powerprior.R | 114 ++ analysis/generate_reports.R | 73 + analysis/view_reports.Rmd | 36 + data-raw/save_posterior.R | 89 + data/par_POSCORAD.rds | Bin 0 -> 68571 bytes data/par_full.rds | Bin 0 -> 7886 bytes models/FullModel.stan | 320 ++++ models/include/data_calibration.stan | 13 + models/include/data_dailytreat.stan | 14 + models/include/data_powerprior.stan | 10 + models/include/functions_OrderedRW.stan | 33 + models/include/get_ragged_bounds.stan | 24 + models/include/get_ts_length.stan | 17 + models/include/gq_decl_calibration.stan | 5 + models/include/gq_decl_dailytreat.stan | 5 + models/include/gq_state_calibration.stan | 10 + models/include/gq_state_dailytreat.stan | 21 + models/include/model_calibration.stan | 13 + models/include/model_dailytreat.stan | 34 + models/include/model_powerprior.stan | 20 + models/include/parameters_calibration.stan | 4 + models/include/parameters_dailytreat.stan | 8 + models/include/tdata_decl_calibration.stan | 4 + models/include/tdata_decl_dailytreat.stan | 5 + models/include/tdata_state_calibration.stan | 9 + models/include/tdata_state_dailytreat.stan | 31 + .../include/tparameters_decl_calibration.stan | 6 + .../include/tparameters_decl_dailytreat.stan | 8 + .../tparameters_state_calibration.stan | 12 + .../include/tparameters_state_dailytreat.stan | 24 + renv.lock | 1680 +++++++++++++++++ renv/.gitignore | 6 + renv/activate.R | 902 +++++++++ renv/settings.dcf | 10 + 55 files changed, 7582 insertions(+) create mode 100644 .Rprofile create mode 100644 .gitignore create mode 100644 Dockerfile create mode 100644 EczemaTreat.Rproj create mode 100644 R/ScoradPred.R create mode 100644 R/data_processing.R create mode 100644 R/powerprior.R create mode 100644 R/recommendations.R create mode 100644 R/utils.R create mode 100644 README.md create mode 100644 analysis/00_init.R create mode 100644 analysis/01_check_models.R create mode 100644 analysis/02_run_fit.R create mode 100644 analysis/03_check_fit.Rmd create mode 100644 analysis/04a_run_validation.R create mode 100644 analysis/04b_run_validation_reference.R create mode 100644 analysis/05_check_performance.Rmd create mode 100644 analysis/06_analyse_recommendations.Rmd create mode 100644 analysis/07_plot_data.R create mode 100644 analysis/07_plot_fit.R create mode 100644 analysis/07_plot_performance.R create mode 100644 analysis/07_plot_powerprior.R create mode 100644 analysis/generate_reports.R create mode 100644 analysis/view_reports.Rmd create mode 100644 data-raw/save_posterior.R create mode 100644 data/par_POSCORAD.rds create mode 100644 data/par_full.rds create mode 100644 models/FullModel.stan create mode 100644 models/include/data_calibration.stan create mode 100644 
models/include/data_dailytreat.stan create mode 100644 models/include/data_powerprior.stan create mode 100644 models/include/functions_OrderedRW.stan create mode 100644 models/include/get_ragged_bounds.stan create mode 100644 models/include/get_ts_length.stan create mode 100644 models/include/gq_decl_calibration.stan create mode 100644 models/include/gq_decl_dailytreat.stan create mode 100644 models/include/gq_state_calibration.stan create mode 100644 models/include/gq_state_dailytreat.stan create mode 100644 models/include/model_calibration.stan create mode 100644 models/include/model_dailytreat.stan create mode 100644 models/include/model_powerprior.stan create mode 100644 models/include/parameters_calibration.stan create mode 100644 models/include/parameters_dailytreat.stan create mode 100644 models/include/tdata_decl_calibration.stan create mode 100644 models/include/tdata_decl_dailytreat.stan create mode 100644 models/include/tdata_state_calibration.stan create mode 100644 models/include/tdata_state_dailytreat.stan create mode 100644 models/include/tparameters_decl_calibration.stan create mode 100644 models/include/tparameters_decl_dailytreat.stan create mode 100644 models/include/tparameters_state_calibration.stan create mode 100644 models/include/tparameters_state_dailytreat.stan create mode 100644 renv.lock create mode 100644 renv/.gitignore create mode 100644 renv/activate.R create mode 100644 renv/settings.dcf diff --git a/.Rprofile b/.Rprofile new file mode 100644 index 0000000..81b960f --- /dev/null +++ b/.Rprofile @@ -0,0 +1 @@ +source("renv/activate.R") diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..f4ed586 --- /dev/null +++ b/.gitignore @@ -0,0 +1,46 @@ +# History files +.Rhistory +.Rapp.history + +# Session Data files +.RData + +# User-specific files +.Ruserdata + +# Example code in package build process +*-Ex.R + +# Output files from R CMD build +/*.tar.gz + +# Output files from R CMD check +/*.Rcheck/ + +# RStudio files +.Rproj.user/ + +# produced vignettes +vignettes/*.html +vignettes/*.pdf + +# OAuth2 token, see https://github.com/hadley/httr/releases/tag/v0.3 +.httr-oauth + +# knitr and R markdown default cache directories +*_cache/ +/cache/ + +# Temporary files created by R markdown +*.utf8.md +*.knit.md + +# R Environment Variables +.Renviron + +# Custom +models/*.rds +results/* +results* +docs/* +*.html diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..765ec7c --- /dev/null +++ b/Dockerfile @@ -0,0 +1,33 @@ +# Take inspiration from +# https://github.com/jrnold/docker-stan/blob/master/rstan/Dockerfile + +FROM rocker/tidyverse:4.1.3 + +RUN apt-get update \ + && apt-get install -y --no-install-recommends apt-utils ed libnlopt-dev \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/ + +# Global site-wide config -- neeeded for building packages +RUN mkdir -p $HOME/.R/ \ + && echo "CXXFLAGS=-O3 -mtune=native -march=native -Wno-unused-variable -Wno-unused-function -flto -ffat-lto-objects -Wno-unused-local-typedefs \n" >> $HOME/.R/Makevars \ + && echo "CXXFLAGS=-O3 -mtune=native -march=native -Wno-unused-variable -Wno-unused-function -flto -ffat-lto-objects -Wno-unused-local-typedefs -Wno-ignored-attributes -Wno-deprecated-declarations\n" >> $HOME/.R/Makevars + +# Install rstan +# Instead, we can install rstan by restoring package library with renv (remove dependency on EczemaPredPOEM first) +RUN install2.r --error --deps TRUE \ + rstan \ + rstantools \ + && rm -rf /tmp/downloaded_packages/ /tmp/*.rds + +# Create a user 
variable +ENV USER=rstudio + +# Create project directory and set it as working directory +WORKDIR /home/$USER/EczemaTreat + +# Install R packages to local library using renv +COPY [".Rprofile", "renv.lock", "./"] +COPY renv/activate.R ./renv/ +RUN chown -R rstudio . \ + && sudo -u rstudio R -e 'renv::restore(confirm = FALSE, exclude = "TanakaData")' diff --git a/EczemaTreat.Rproj b/EczemaTreat.Rproj new file mode 100644 index 0000000..8e3c2eb --- /dev/null +++ b/EczemaTreat.Rproj @@ -0,0 +1,13 @@ +Version: 1.0 + +RestoreWorkspace: Default +SaveWorkspace: Default +AlwaysSaveHistory: Default + +EnableCodeIndexing: Yes +UseSpacesForTab: Yes +NumSpacesForTab: 2 +Encoding: UTF-8 + +RnwWeave: Sweave +LaTeX: pdfLaTeX diff --git a/R/ScoradPred.R b/R/ScoradPred.R new file mode 100644 index 0000000..1ccc622 --- /dev/null +++ b/R/ScoradPred.R @@ -0,0 +1,654 @@ +# Model class ------------------------------------------------------------- + +#' ScoradPred constructor +#' +#' Multivariate ordered logistic random walk model with two distributions and optional +#' - power prior +#' - trend +#' - calibration +#' +#' @param independent_items Whether to have diagonal correlations matrices or not +#' @param a0 Forgetting factor for the power prior +#' @param include_trend Whether to include trend in the latent dynamics +#' @param include_calibration Whether we use calibration data +#' @param precision_cal Ratio of calibration std to measurement std +#' @param include_treatment Whether to use treatment data +#' @param treatment_names Vector of treatment names +#' @param include_recommendations Whether to make treatment recommendations (`include_treatment` must be TRUE) +#' @param prior Named list of the model's priors. If `NULL`, uses the default prior for the model (see [default_prior()]). 
+#' +#' @return Object of class "ScoradPred" and "EczemaModel" +ScoradPred <- function(independent_items = FALSE, + a0 = 0, + include_trend = FALSE, + include_calibration = FALSE, + precision_cal = ifelse(include_calibration, 0.5, 1), + include_treatment = FALSE, + treatment_names = c(), + include_recommendations = FALSE, + prior = NULL) { + + for (x in list(independent_items, + include_trend, + include_calibration, + include_treatment, + include_recommendations)) { + stopifnot(is_scalar(x), + is.logical(x)) + } + + stopifnot(is_scalar(a0), + is.numeric(a0), + between(a0, 0, 1), + is_scalar(precision_cal), + is.numeric(precision_cal), + between(precision_cal, 0, 1), + include_treatment || !include_recommendations) + + if (include_treatment) { + stopifnot(is.vector(treatment_names, mode = "character"), + length(treatment_names) > 0) + } else { + treatment_names <- c() + } + + M1 <- 100 + M2 <- 3 + item_dict <- ScoradPred_items() + D1 <- item_dict %>% filter(Distribution == 1) %>% nrow() + D <- nrow(item_dict) + D2 <- D - D1 + include_powerprior <- (a0 > 0) + + mdl_name <- "ScoradPred" + suff <- list( + ifelse(include_powerprior, paste0("h", sprintf("%03d", round(a0 * 100))), ""), + ifelse(!independent_items, "corr", ""), + ifelse(include_calibration, "cal", ""), + ifelse(include_treatment, "treat", ""), + ifelse(include_trend, "trend", "") + ) %>% + setdiff("") %>% + paste(collapse = "+") + if (nchar(suff) > 0) { + mdl_name <- paste0(c(mdl_name, suff), collapse = "+") + } + + x <- structure( + list( + name = mdl_name, + stanmodel = here("models", paste0("FullModel", ".stan")), + M1 = M1, + D1 = D1, + M2 = M2, + D2 = D2, + D = D1 + D2, + discrete = TRUE, + item_spec = item_dict, + independent_items = independent_items, + a0 = a0, + include_powerprior = include_powerprior, + include_trend = include_trend, + include_calibration = include_calibration, + precision_cal = precision_cal, + include_treatment = include_treatment, + treatment_names = treatment_names, + include_recommendations = include_recommendations + ), + class = c("ScoradPred", "EczemaModel") + ) + + if (include_recommendations) { + x$actions <- get_actions(treatment_names) + } + + x$prior <- c(default_prior(x), + default_prior_calibration(D), + default_prior_treatment(D), + default_prior_trend(D)) + x <- replace_prior(x, prior = prior) + validate_prior(x) + + return(x) + +} + +#' Item characteristics of ScoradPred model +#' +#' - Distribution 1 corresponds to `M1 = 100` (extent and subjective symptoms) +#' - Distribution 2 corresponds to `M2 = 3` (intensity signs) +#' +#' @return Dataframe from `detail_POSCORAD()` with additional columns +#' - `weight_B` +#' - `weight_C` +#' - `weight_oSCORAD` +#' - `weight_SCORAD` +#' - `ItemID` +#' - `Distribution` (1 or 2) +ScoradPred_items <- function() { + + item_dict <- detail_POSCORAD("Items") %>% + mutate(M = Maximum / Resolution, + Component = case_when(Name %in% c("extent") ~ "Extent", + Name %in% c("itching", "sleep") ~ "Subjective symptoms", + Name %in% detail_POSCORAD("Intensity signs")$Name ~ "Intensity signs"), + weight_B = as.numeric(Component == "Intensity signs"), + weight_C = as.numeric(Component == "Subjective symptoms"), + weight_oSCORAD = case_when(Name == "extent" ~ 0.2, + TRUE ~ 0) + 3.5 * weight_B, + weight_SCORAD = weight_oSCORAD + weight_C,) %>% + mutate(across(starts_with("weight_"), ~(.x * Resolution))) + + scores1 <- detail_POSCORAD(c("extent", "Subjective symptoms"))$Name + + item_dict <- item_dict %>% + mutate(ItemID = 1:nrow(.), + Distribution = case_when(Name 
%in% scores1 ~ 1, + TRUE ~ 2)) + + return(item_dict) +} + +# Default priors ---------------------------------------------------------- + +#' Default prior for a "multivariate" OrderedRW model +#' +#' @param max_score Maximum value that the scores can take (same for all items) +#' @param D Number of components +#' +#' @return List +#' @export +default_prior_OrderedMRW <- function(max_score, D) { + + dp <- EczemaModel("OrderedRW", max_score = max_score) %>% + default_prior() + + list( + delta = matrix(2, nrow = D, ncol = max_score - 1), + sigma_meas = matrix(dp$sigma_meas, nrow = 2, ncol = D, byrow = FALSE), + sigma_lat = matrix(dp$sigma_lat, nrow = 2, ncol = D, byrow = FALSE), + Omega = 10, + Omega0 = 10, + mu_y0 = matrix(dp$mu_y0, nrow = 2, ncol = D, byrow = FALSE), + sigma_y0 = matrix(dp$sigma_y0, nrow = 2, ncol = D, byrow = FALSE) + ) + +} + +#' Default prior for ScoradPred model +#' +#' The priors are the same as "OrderedMRW" models +#' +#' @param model Object of class "ScoradPred" +#' @param ... Arguments to pass to other methods +#' +#' @return List +default_prior.ScoradPred <- function(model, ...) { + + prior <- default_prior_OrderedMRW(max_score = 2, D = model$D1 + model$D2) # max_score does not matter here + prior1 <- default_prior_OrderedMRW(max_score = model$M1, D = model$D1) + prior2 <- default_prior_OrderedMRW(max_score = model$M2, D = model$D2) + + prior$delta <- NULL + prior <- c(prior, + list(delta1 = prior1$delta, + delta2 = prior2$delta)) + + return(prior) + +} + +#' Default priors for calibration parameters +#' +#' - `bias0 ~ normal(x1, x2)` (`bias0` is on a normalised scale) +#' - `tau_bias ~ lognormal(x1, x2)` +#' +#' NB: there is an identifiability issue when `tau_bias << 1` (bias converges to 0), +#' so we must assume that the order of magnitude of `tau_bias` is more than 1 day. +#' +#' @param D Number of items +#' +#' @return Named list +default_prior_calibration <- function(D) { + list( + bias0 = matrix(rep(c(0, 0.1), D), nrow = D, byrow = TRUE) %>% t(), + tau_bias = matrix(rep(c(1.2 * log(10), 0.6 * log(10)), D), nrow = D, byrow = TRUE) %>% t() + ) +} + +#' Default priors for treatment parameters +#' +#' If there are multiple treatments, the priors are the same regardless of the treatment. 
+#' +#' - `mu_logit_p10 ~ normal(x1, x2)` +#' - `mu_logit_p01 ~ normal(x1, x2)` +#' - `sigma_logit_p10 ~ normal(x1, x2)` +#' - `sigma_logit_p01 ~ normal(x1, x2)` +#' +#' @param D Number of items +#' +#' @return Named list +default_prior_treatment <- function(D) { + list( + mu_logit_p01 = c(0, 1), + mu_logit_p10 = c(0, 1), + sigma_logit_p01 = c(0, 1.5), + sigma_logit_p10 = c(0, 1.5), + ATE = matrix(rep(c(0, 0.1), D), nrow = D, byrow = TRUE) + ) +} + +#' Default prior for the trend parameters +#' +#' - `beta ~ beta(x1, x2)` +#' The element `beta` is +#' +#' @param D Number of items +#' +#' @return Named list with element `beta` that is a matrix with 2 rows (x1 and x2) and D columns +default_prior_trend <- function(D) { + list( + beta = matrix(c(1, 3), nrow = 2, ncol = D, byrow = FALSE) + ) +} + +# List parameters --------------------------------------------------------- + +#' List available parameters for a "multivariate" OrderedRW model +#' +#' @param max_score Maximum values that the scores can take +#' @param D Number of components +#' @param full_names Whether to return the full names of multi-dimensional parameters +#' +#' @return Named list +list_parameters_OrderedMRW <- function(max_score, D, full_names = FALSE) { + + stopifnot(HuraultMisc::is_scalar(full_names), + is.logical(full_names)) + + out <- list_parameters("OrderedRW") + out$Population <- c("Omega", "Omega0", out$Population) + out$PatientTime <- c(out$PatientTime, "ytot_rep") + + if (full_names) { + + omg <- expand_grid("i" = 1:D, "j" = 1:D) %>% + filter(.data$i < .data$j) %>% + mutate(omg = paste0("Omega[", .data$i, ",", .data$j, "]")) %>% + pull(omg) + omg0 <- gsub("Omega", "Omega0", omg) + + ct <- expand_2d_parname("ct", D, max_score) + + out$Population <- setdiff(out$Population, c("Omega", "Omega0", "ct")) + out$Population <- c(out$Population, omg, omg0, ct) + + } + + return(out) + +} + +#' List available parameters for ScoradPred model +#' +#' @param model Object of class "ScoradPred" +#' @param full_names Whether to return the full names of multi-dimensional parameters +#' @param ... Arguments to pass to other methods +#' +#' @return Named list +list_parameters.ScoradPred <- function(model, full_names = FALSE, ...) 
{ + + param <- list_parameters_OrderedMRW(max_score = 2, + D = model$D1 + model$D2, + full_names = full_names) # max_score does not matter here + + param$Population <- setdiff(param$Population, c("delta", "ct")) + param$Population <- c(param$Population, "ct1", "ct2", "delta1", "delta2", "sigma_reltot") + param$Test <- setdiff(param$Test, "cum_err") + param$PatientTime <- c(setdiff(param$PatientTime, c("ytot_rep")), "agg_rep") + + if (full_names) { + param$Population <- param$Population[!grepl("^ct\\[\\d+,\\d+\\]$", param$Population)] + ct1 <- expand_2d_parname("ct1", model$D1, model$M1) + ct2 <- expand_2d_parname("ct2", model$D2, model$M2) + param$Population <- c(param$Population, ct1, ct2) + } + + if (model$include_trend) { + param <- merge_lists(list(param, list_parameters_trend())) + } + + if (model$include_calibration) { + param <- merge_lists(list(param, list_parameters_calibration())) + } + + if (model$include_treatment) { + param <- merge_lists(list(param, list_parameters_treatment())) + } + + if (model$include_recommendations) { + param$Misc <- c("y_rec", "agg_rec") + } + + return(param) + +} + +list_parameters_calibration <- function() { + list( + Population = c("bias0", "tau_bias"), # "bias0_abs" + PatientTime = c("y_cal_rep", "agg_cal_rep") + ) +} + +list_parameters_treatment <- function() { + list( + Population = c("mu_logit_p01", "mu_logit_p10", "sigma_logit_p01", "sigma_logit_p10", "ATE", "ATE_agg"), + Patient = c("p01", "p10", "ss1"), + PatientTime = c("p_treat", "treat_rep", "treat2_rep") + ) +} + +list_parameters_trend <- function() { + list( + Population = "beta", + PatientTime = "trend" + ) +} + +# Prefill Stan data ------------------------------------------------------- + +#' Prefill Stan data +#' +#' Prefill with item spec, prior, power prior, trend +#' +#' @param model Object of class ScoradPred +#' @param file_historical File posterior summary statistics from historical data +#' +#' @return List +prefill_standata_FullModel <- function(model, file_historical = here("data/par_POSCORAD.rds")) { + + # Prefill + out <- list( + independent_items = as.numeric(model$independent_items), + M1 = model$M1, + M2 = model$M2, + D1 = model$D1, + D2 = model$D2, + distribution_id = model$item_spec$Distribution, + N_agg = 4, + agg_weights = as.matrix(model$item_spec[c("weight_B", "weight_C", "weight_oSCORAD", "weight_SCORAD")]) + ) %>% + add_prior(model$prior) + + # Power prior + par_Derexyl <- readRDS(here("data/par_POSCORAD.rds")) %>% + filter(Distribution == "Posterior - Derexyl") %>% + select(-Distribution) %>% + filter(Variable != "ct") + par_historical <- model$item_spec %>% + select(Name, Label, ItemID, Distribution) %>% + right_join(par_Derexyl, by = c("Name" = "Item")) + power_prior <- process_powerprior_FullModel(par_historical) + out <- out %>% + add_prior(power_prior, prefix = "historical_") %>% + c(list(a0 = model$a0)) + + # Trend + out <- c(out, + prepare_data_trend(D = model$D, include_trend = model$include_trend)) + + # Calibration + if (model$include_calibration) { + include_bias <- as.numeric(!(model$item_spec$Name %in% c("itching", "sleep"))) # don't calibration subjective symptoms + precision_cal <- include_bias * model$precision_cal + (1 - include_bias) * 1 + } else { + include_bias <- rep(0, model$D) + precision_cal <- rep(1, model$D) + } + out <- c(out, + list(include_bias = include_bias, + precision_cal = precision_cal)) + + # NB: for treatment, D_treat inputted in prepare_standata + + # Recommendations + if (model$include_recommendations) { + data_rec <- 
list(N_actions = nrow(model$actions), + actions = as.matrix(model$actions[, model$treatment_names])) + } else { + data_rec <- list(N_actions = 0, + actions = matrix(numeric(0), + nrow = 0, + ncol = ifelse(model$include_treatment, length(model$treatment_names), 1))) + # D_treat=1 when include_treatment=FALSE (implemented as treatment never used) + } + out <- c(out, data_rec) + + return(out) + +} + +#' List of trend inputs to pass to the Stan sampler +#' NB: need to rename as this goes in the prefill +#' +#' @param D Number of items +#' @param include_trend Whether to include trend or not +#' +#' @return List with elements `trend_known` and `beta_data` +prepare_data_trend <- function(D, include_trend = FALSE) { + + if (include_trend) { + out <- list( + trend_known = 0, + beta_data = matrix(numeric(0), nrow = 0, ncol = D) + ) + } else { + out <- list( + trend_known = 1, + beta_data = matrix(rep(0, D), nrow = 1, ncol = D) + ) + } + + return(out) +} + +# Prepare Stan data ------------------------------------------------------- + +#' Prepare the data list to pass to the Stan sampler +#' +#' This function only prepares the time-series data. +#' +#' @param model Object of class ScoradPred +#' @param train Training dataframe +#' @param test Testing dataframe +#' @param cal Calibration dataframe +#' @param treat Treatment dataframe +#' @param rec Recommendation dataframe +#' @param ... Arguments to pass to other methods +#' +#' @return List to serve as input to the Stan sampler +prepare_standata.ScoradPred <- function(model, train, test = NULL, cal = NULL, treat = NULL, rec = NULL, ...) { + + stopifnot(all(train[["ItemID"]] %in% 1:model$D)) + + tmp_train <- train %>% + left_join(model$item_spec, by = "ItemID") + sc1 <- tmp_train %>% + filter(Distribution == 1) %>% + pull(Score) + stopifnot(all(sc1 %in% 0:model$M1)) + sc2 <- tmp_train %>% + filter(Distribution == 2) %>% + pull(Score) + stopifnot(all(sc2 %in% 0:model$M2)) + + out <- prepare_data_lgtd(train = train, test = test, max_score = 1 + max(model$M1, model$M2), discrete = TRUE) + out$M <- NULL + + out$d_obs <- train[["ItemID"]] + out$d_test <- vector() + + out$run <- 1 # do that outside this function for consistency? 
+ + if (!is.null(test)) { + stopifnot(all(test[["ItemID"]] %in% 1:model$D)) + out$d_test <- array(test[["ItemID"]]) + } + + # Calibration + if (model$include_calibration) { + if (is.null(cal)) { + warning("include_calibration=TRUE but cal is not supplied") + } + } else { + if (!is.null(cal)) { + warning("cal is supplied but include_calibration=FALSE will take precedent") + } + cal <- NULL + } + data_cal <- prepare_data_calibration(df = cal) + if (length(data_cal$t_cal) > 0 && length(out$t_test) > 0 && max(data_cal$t_cal) >= min(out$t_test)) { + warning("cal may overlap with test") + } + out <- c(out, data_cal) + + # Recommendations + if (model$include_recommendations) { + if (is.null(rec)) { + warning("include_recommendations=TRUE but rec is not supplied") + } + } else { + if (!is.null(rec)) { + warning("rec is supplied but include_recommendations=FALSE will take precedent") + } + rec <- NULL + } + data_rec <- prepare_data_recommendations(rec) + out <- c(out, data_rec) + + # Treatment + if (model$include_treatment && !is.null(treat)) { + stopifnot(max(treat[["Treatment"]]) == length(model$treatment_names)) + data_treat <- prepare_data_treatment(treat) + } else { + + if (model$include_treatment && is.null(treat)) { + warning("include_treatment=TRUE but treat is not supplied") + } + if (!model$include_treatment && !is.null(treat)) { + warning("treat is supplied but include_treatment=FALSE will take precedent") + } + + id <- bind_rows(train, test, cal, treat, rec) %>% + get_index() + data_treat <- list( + N_treat2 = nrow(id), + D_treat = 1, + k_treat2 = id[["Patient"]], + t_treat2 = id[["Time"]], + d_treat2 = rep(1, nrow(id)), + treat2_obs = rep(0, nrow(id)) + ) + + } + out <- c(out, data_treat) + + return(out) +} + +#' List of calibration inputs to pass to Stan sampler +#' +#' @param df (Optional) Dataframe with columns Patient, ItemID, Time and Score +#' +#' @return List +prepare_data_calibration <- function(df = NULL) { + + if (is.null(df)) { + + out <- list( + N_cal = 0, + k_cal = vector(), + d_cal = vector(), + t_cal = vector(), + y_cal = vector() + ) + + } else { + + stopifnot( + is.data.frame(df), + all(c("Patient", "ItemID", "Time", "Score") %in% colnames(df)), + all(vapply(c("Patient", "ItemID", "Time", "Score"), function(x) {is.numeric(df[[x]])}, logical(1))), + all(is_wholenumber(df[["Patient"]])), + all(is_wholenumber(df[["ItemID"]])), + all(is_wholenumber(df[["Time"]])) + ) + + out <- list( + N_cal = nrow(df), + k_cal = df[["Patient"]], + d_cal = df[["ItemID"]], + t_cal = df[["Time"]], + y_cal = df[["Score"]] + ) + } + + return(out) + +} + +#' List of treatment inputs to pass to the Stan sampler +#' +#' @param df Dataframe with columns Patient, Time, Treatment and UsageWithinThePast2Days +#' +#' @return List +prepare_data_treatment <- function(df) { + + cnames <- c("Patient", "Time", "Treatment", "UsageWithinThePast2Days") + stopifnot( + is.data.frame(df), + all(cnames %in% colnames(df)), + all(vapply(cnames, function(x) {is.numeric(df[[x]])}, logical(1))), + all(vapply(cnames, function(x) {all(is_wholenumber(df[[x]]))}, logical(1))), + all(df[["UsageWithinThePast2Days"]] %in% c(0, 1)) + ) + + list( + N_treat2 = nrow(df), + D_treat = max(df[["Treatment"]]), + k_treat2 = df[["Patient"]], + t_treat2 = df[["Time"]], + d_treat2 = df[["Treatment"]], + treat2_obs = df[["UsageWithinThePast2Days"]] + ) +} + +#' List of recommendation inputs to pass to the Stan sampler +#' +#' @param df Dataframe containing (Patient, Time) of recommendations +#' +#' @return List 
+prepare_data_recommendations <- function(df = NULL) { + + if (is.null(df)) { + out <- list( + N_rec = 0, + k_rec = vector(), + t_rec = vector() + ) + } else { + stopifnot(is.data.frame(df), + all(c("Patient", "Time") %in% colnames(df)), + all(vapply(c("Patient", "Time"), function(x) {is.numeric(df[[x]])}, logical(1))), + all(vapply(c("Patient", "Time"), function(x) {all(is_wholenumber(df[[x]]))}, logical(1)))) + + out <- list( + N_rec = nrow(df), + k_rec = df[["Patient"]], + t_rec = df[["Time"]] + ) + } + + return(out) +} diff --git a/R/data_processing.R b/R/data_processing.R new file mode 100644 index 0000000..e494ff7 --- /dev/null +++ b/R/data_processing.R @@ -0,0 +1,192 @@ +# Fixing treatment in PFDC dataset ---------------------------------------- + +#' Impute patterns "0-NA-0", "0-NA-NA-0" by 0 in "treatment usage within the past two days" +#' +#' @param x Vector of binary values corresponding to the time-series of "treatment usage within the past two days" +#' +#' @return x after imputation +impute_tw2_missing <- function(x) { + + tibble(x = x) %>% + mutate(Missing = is.na(x), + is0 = (x == 0), + Impute0 = (Missing & lag(is0) & lead(is0)) | + (Missing & lag(is0) & lead(Missing) & lead(is0, 2)) | + (Missing & lag(Missing) & lag(is0, 2) & lead(is0)), + Impute0 = replace_na(Impute0, FALSE), + x = replace(x, Impute0, 0)) %>% + pull(x) + +} + +#' Find pattern in string and return logical vector of matches +#' +#' @param x String +#' @param pattern Pattern to match in `x` +#' +#' @return Return logical vector of length `nchar(x)` indicating the matches +#' +#' @examples +#' find_pattern(x = "0010110100", pattern = "010") +find_pattern <- function(x, pattern) { + + stopifnot(is_scalar_character(x), + is_scalar_character(pattern)) + + matches <- gregexpr(pattern, x)[[1]] + + out <- rep(FALSE, nchar(x)) + for (i in seq_along(matches)) { + if (matches[i] > 0) { + out[matches[i]:(matches[i] + attr(matches, "match.length")[i] - 1)] <- TRUE + } + } + + return(out) + +} + +#' Replace contradictions in "treatment usage within the past two days" time-series by missing values +#' +#' @param x Vector of binary values corresponding to the time-series of treatment usage within the past two days +#' +#' @return `x` after replacing contradictions with NA +replace_tw2_contradictions <- function(x) { + + txt <- replace(x, is.na(x), "n") %>% + paste0(collapse = "") + contr <- find_pattern(txt, "010") | find_pattern(txt, "0((1[1n])|[1n]1)0") + x <- replace(x, contr, NA) + + return(x) + +} + +#' Fix treatment time-series in PFDC dataset +#' +#' For both localTreatment/emollientCream within the past two days: +#' - Impute missing values in patterns "0-NA-0" and "0-NA-NA-0" to 0 +#' - Set illogical patterns "0-1-0" and "0-1-1-0" (with potentially one of the 1s missing) to missing +#' - Reimpute patterns "0-NA-0" and "0-NA-NA-0" to 0 +#' +#' On average, 10% of values are changed. +#' Note the output dataset as more rows than the input dataset (inferred treatment when originally absent). 
+#' +#' @param df POSCORAD PFDC dataset +#' +#' @return POSCORAD PFDC dataset with updated treatment columns +fix_treatment <- function(df) { + + lapply(unique(df[["Patient"]]), + function(pid) { + + sub_df <- df %>% + filter(Patient == pid) + + for (tname in c("localTreatmentWithinThePast2Days", "emollientCreamWithinThePast2Days")) { + + obs <- sub_df %>% + rename(tw2 = all_of(tname)) %>% + select(Patient, Day, tw2) + + mis <- tibble(Patient = pid, + Day = setdiff(1:max(obs[["Day"]]), obs[["Day"]]), + tw2 = NA) + + tmp <- bind_rows(obs, mis) %>% + arrange(Day) %>% + mutate(tw2 = tw2 %>% + impute_tw2_missing() %>% + replace_tw2_contradictions() %>% + impute_tw2_missing()) %>% + drop_na() + + sub_df <- full_join(sub_df, tmp, by = c("Patient", "Day")) %>% + select(!all_of(tname)) %>% + rename_with(~replace(.x, .x == "tw2", tname)) + + } + + return(sub_df) + + }) %>% + bind_rows() %>% + # Calculate date associated with new observations + group_by(Patient) %>% + mutate(Date = case_when(is.na(Date) ~ min(Date, na.rm = TRUE) + Day - 1, + TRUE ~ Date)) %>% + ungroup() + +} + +# Processing PFDC dataset ------------------------------------------------- + +load_PFDC <- function() { + # Load PFDC dataset + # - Process treatment data + # - Only consider patients with POSCORAD data + # - Recompute day from by considering start date in POSCORAD and SCORAD datasets + # - Remove patients with less than 5 observations + # - Reindex patients between 1 and length(Patient) + # + # Args: none + # + # Returns: + # Named list containing POSCORAD and SCORAD datasets + + poscorad <- TanakaData::POSCORAD_PFDC %>% + fix_treatment() %>% + arrange(Patient, Day) + scorad <- TanakaData::SCORAD_PFDC + + poscorad <- factor_to_numeric(poscorad, "Patient") + + # Only consider patients with POSCORAD + pt_poscorad <- unique(poscorad[["Patient"]]) + scorad <- scorad %>% + filter(Patient %in% pt_poscorad) + + # Find start date + start_date_scorad <- scorad %>% + group_by(Patient) %>% + summarise(StartDate = min(Date, na.rm = TRUE)) %>% + ungroup() + start_date_poscorad <- poscorad %>% + group_by(Patient) %>% + summarise(StartDate = min(Date, na.rm = TRUE)) %>% + ungroup() + start_date <- bind_rows(start_date_scorad, start_date_poscorad) %>% + group_by(Patient) %>% + summarise(StartDate = min(StartDate)) %>% + ungroup() + + # Recompute Day in POSCORAD dataset + poscorad <- full_join(poscorad, start_date, by = "Patient") %>% + group_by(Patient) %>% + mutate(Day = as.numeric(Date - StartDate + 1)) %>% + ungroup() + + # Compute Day in SCORAD dataset + scorad <- left_join(scorad, start_date, by = "Patient") %>% + group_by(Patient) %>% + mutate(Day = as.numeric(Date - StartDate + 1)) %>% + ungroup() + + # Filter out patients and reindex them + poscorad <- poscorad %>% + group_by(Patient) %>% + filter(n() >= 5) %>% + mutate(NewID = cur_group_id()) %>% + ungroup() + patient_id <- poscorad %>% + select(Patient, NewID) %>% + distinct() + scorad <- full_join(scorad, patient_id, by = "Patient") %>% + mutate(Patient = NewID) %>% + select(-NewID) + poscorad <- poscorad %>% + mutate(Patient = NewID) %>% + select(-NewID) + + return(list(POSCORAD = poscorad, SCORAD = scorad)) +} diff --git a/R/powerprior.R b/R/powerprior.R new file mode 100644 index 0000000..e13a16c --- /dev/null +++ b/R/powerprior.R @@ -0,0 +1,145 @@ +# Processing power prior ------------------------------------------------------------- + +#' Extract power prior from historical posterior summary statistics and put in nice format for univariate models +#' +#' @param 
par_historical Dataframe of summary statistics of population parameters (for only one model) +#' +#' @return Named list (prefixed by "historical_") of parameters in par_historical: +#' - the first element/column correspond to the mean +#' - the second element/column corresponds to the standard deviation. +extract_powerprior_uni <- function(par_historical) { + # Can be useful to integrate to EczemaPred + + stopifnot(is.data.frame(par_historical), + all(c("Variable", "Mean", "sd") %in% colnames(par_historical))) + + par_names <- unique(par_historical[["Variable"]]) + power_prior <- lapply(par_names, + function(x) { + tmp <- par_historical %>% + filter(Variable %in% x) %>% + arrange(Index) + out <- cbind(tmp[["Mean"]], tmp[["sd"]]) + if (nrow(tmp) == 1) { + out <- c(out) + } + return(out) + }) + names(power_prior) <- paste0(par_names) + + stopifnot(all(do.call(rbind, power_prior)[, 2] >= 0)) + + return(power_prior) + +} + +#' Extract and process power prior for multivariate models +#' +#' @param par_historical Dataframe containing posterior summary statistics for all items. +#' It should notably associate each item to a unique Item ID and Distribution ID. +#' +#' @return List containing power prior to add to Stan data input +process_powerprior_FullModel <- function(par_historical) { + + stopifnot(is.data.frame(par_historical), + all(c("Distribution", "ItemID") %in% colnames(par_historical))) + # Other columns are checked in extract_powerprior_uni + + comp1 <- par_historical %>% filter(Distribution == 1) %>% distinct(ItemID) %>% pull() + D1 <- length(comp1) + comp2 <- par_historical %>% filter(Distribution == 2) %>% distinct(ItemID) %>% pull() + D2 <- length(comp2) + D <- D1 + D2 + + # Extract power prior for each item + lpp <- lapply(1:D, + function(d) { + par_historical %>% + filter(ItemID == d) %>% + extract_powerprior_uni() + }) + + # Everything but delta + power_prior <- lapply(lpp, + function(x) { + x[["delta"]] <- NULL + return(x) + }) %>% + merge_lists(along = 0) + + # Deal with delta + tmp <- list(lpp[comp1], lpp[comp2]) + pp_delta <- lapply(1:length(tmp), + function(i) { + lapply(tmp[[i]], + function(x) { + out <- x[c("delta")] + names(out) <- paste0(names(out), i) + return(out) + }) %>% + merge_lists(along = 0) + }) %>% + unlist(recursive = FALSE) + + power_prior <- c(pp_delta, power_prior) + + return(power_prior) +} + +# Power prior influence --------------------------------------------------- + +#' Associate forgetting factor `a0` to the relative importance `rho` of new dataset in the final posterior +#' +#' @param rho Relative importance of new dataset in the final posterior +#' @param n_new Number of observation in the new dataset. +#' Default to number of observations in PFDC dataset. +#' @param n_historical Number of observations in the historical dataset. +#' Default to the number of observations in the Derexyl dataset (=9408). +#' This value can be obtained with `nrow(load_dataset("Derexyl"))` +#' in [EczemaPredPOSCORAD](https://github.com/ghurault/EczemaPredPOSCORAD). +#' +#' @return Vector +rho_to_a0 <- function(rho, + n_new = nrow(load_PFDC()$POSCORAD), + n_historical = 9408) { + n_new / n_historical * (1 - rho) / rho +} + +#' Associate the relative importance `rho` of new dataset in the final posterior to the forgetting factor `a0` +#' +#' @param a0 Forgetting factor +#' @param n_new Number of observation in the new dataset. +#' Default to number of observations in PFDC dataset. +#' @param n_historical Number of observations in the historical dataset. 
+#' Default to the number of observations in the Derexyl dataset (=9408). +#' This value can be obtained with `nrow(load_dataset("Derexyl"))` +#' in [EczemaPredPOSCORAD](https://github.com/ghurault/EczemaPredPOSCORAD). +#' +#' @return Vector +a0_to_rho <- function(a0, + n_new = nrow(load_PFDC()$POSCORAD), + n_historical = 9408) { + n_new / (n_new + a0 * n_historical) +} + +#' Plot influence of forgetting factor when using a power prior +#' +#' Plot the relative importance `rho` of new dataset in the final posterior as a function of the forgetting factor `a0`. +#' +#' @param n_new Number of observation in the new dataset. +#' Default to number of observations in PFDC dataset. +#' @param n_historical Number of observations in the historical dataset. +#' Default to the number of observations in the Derexyl dataset (=9408). +#' This value can be obtained with `nrow(load_dataset("Derexyl"))` +#' in [EczemaPredPOSCORAD](https://github.com/ghurault/EczemaPredPOSCORAD). +#' +#' @return Ggplot +plot_powerprior_influence <- function(n_new = nrow(load_PFDC()$POSCORAD), + n_historical = 9408) { + tibble(a0 = seq(0, 1, .01)) %>% + mutate(rho = a0_to_rho(a0)) %>% + ggplot(aes(x = a0, y = rho)) + + geom_line() + + coord_cartesian(xlim = c(0,1 ), ylim = c(0, 1), expand = FALSE) + + theme_bw(base_size = 15) +} diff --git a/R/recommendations.R b/R/recommendations.R new file mode 100644 index 0000000..183555e --- /dev/null +++ b/R/recommendations.R @@ -0,0 +1,33 @@ +# Recommendations --------------------------------------------------------- + +#' Action dataframe +#' +#' @param names Vector of action names +#' +#' @return Dataframe with column from `names` and a column `Action` +get_actions <- function(names) { + stopifnot(is.vector(names, mode = "character")) + out <- lapply(seq_along(names), function(x) {0:1}) %>% + expand.grid() + colnames(out) <- names + out[["Action"]] <- 1:nrow(out) + return(out) +} + +#' Dictionary of recommendation result files +#' +#' @param outcome Score to predict +#' @param model Model name +#' @param dataset Dataset name +#' @param val_horizon Validation horizon +#' @param root_dir Root directory. Default to working directory. +#' +#' @return Named list with elements "RecDir" and "RecFile" +get_recommendation_files <- function(outcome, model, dataset, val_horizon, root_dir = ".") { + check_input <- get_results_files(outcome = outcome, model = model, dataset = dataset, val_horizon = val_horizon) + + out <- list(RecDir = file.path(root_dir, "results", glue::glue("rec{val_horizon}_{outcome}_{model}_{dataset}"))) + out$RecFile <- paste0(out$RecDir, ".rds") + + return(out) +} diff --git a/R/utils.R b/R/utils.R new file mode 100644 index 0000000..77fae8b --- /dev/null +++ b/R/utils.R @@ -0,0 +1,132 @@ +# Utility functions ----------------------------------------------------------- + +#' Full names of a two-dimensional parameter +#' +#' @param par_name Name of the parameter +#' @param n1 First dimension +#' @param n2 Second dimension +#' +#' @return Character vector +expand_2d_parname <- function(par_name, n1, n2) { + expand_grid(i = 1:n1, j = 1:n2) %>% + mutate(x = paste0(par_name, "[", i, ",", j, "]")) %>% + pull(x) +} + +#' Merge list of lists by names +#' +#' Only elements with the same names are considered. +#' List's elements with no names in `ll` are discarded. 
+#' +#' When `along = 0`, arrays are bind on a new dimension before the first: +#' for example, if the list all contains an element "a" which is a vector, +#' element "a" of the new list will be a matrix where rows will indicate the original list ID. +#' +#' @param ll List of lists +#' @param ... Arguments to pass to [abind::abind()] +#' +#' @return List +merge_lists <- function(ll, ...) { + + stopifnot(is.list(ll)) + + lnames <- lapply(ll, names) %>% do.call(c, .) %>% unique() + + out <- lapply(lnames, + function(nm) { + lapply(ll, + function(x) { + x[[nm]] + }) %>% + abind::abind(...) + }) + names(out) <- lnames + + return(out) +} + +#' Recombine results in different files +#' +#' Read files corresponding to different dataframe, concatenate them and save it +#' +#' @param dir_name Directory containing intermediate files +#' @param output_file File to write the recombined results +#' @param reading_function Function used to read the files +#' @param expected_number_of_files (optional) expected number of files in `dir_name` +#' +#' @return NULL +recombine_results <- function(dir_name, output_file, reading_function = readRDS, expected_number_of_files = NULL) { + + stopifnot(is_scalar_character(dir_name), + dir.exists(dir_name), + is_scalar(output_file), + is.function(reading_function)) + + files <- list.files(dir_name, full.names = TRUE) + + if (!is.null(expected_number_of_files)) { + stopifnot(is_scalar_wholenumber(expected_number_of_files)) + if (length(files) != expected_number_of_files) { + warning(glue::glue("Number of files (={length(files)}) is different from the number of expected files (={expected_number_of_files}).")) + } + } + + res <- lapply(files, reading_function) %>% + bind_rows() + saveRDS(res, file = output_file) + + return(NULL) +} + +#' Extract index of `var_name` in `par` +#' +#' @param par Dataframe +#' @param var_name Character of the variable name to extract +#' @param dim_names Character vector of dimension names +#' +#' @return `par` with additional columns `dim_names` +extract_par_indexes <- function(par, var_name, dim_names) { + + stopifnot(is.data.frame(par), + all(c("Variable") %in% colnames(par)), + is_scalar_character(var_name), + is.character(dim_names)) + + rows_id <- grepl(paste0("^", var_name, "\\["), par[["Variable"]]) + sub_par <- filter(par, rows_id) + + id_sub <- HuraultMisc::extract_index_nd(sub_par[["Variable"]], dim_names = dim_names) + + sub_par[, c("Variable", "Index", dim_names)] <- NULL + sub_par <- bind_cols(sub_par, id_sub) + par <- par %>% + filter(!rows_id) %>% + bind_rows(sub_par) + + return(par) +} + +# Available models -------------------------------------------------------- + +#' List models that are investigated +#' +#' @return Dataframe +available_models <- function() { + data.frame( + a0 = c(0, 0.04, 0.04, 0.04, 0.04, 0.04), + independent_items = c(TRUE, TRUE, FALSE, FALSE, FALSE, FALSE), + include_calibration = c(FALSE, FALSE, FALSE, TRUE, TRUE, TRUE), + include_treatment = c(FALSE, FALSE, FALSE, FALSE, TRUE, TRUE), + include_trend = c(FALSE, FALSE, FALSE, FALSE, FALSE, TRUE) + ) %>% + mutate(Score = "SCORAD", + Dataset = "PFDC", + Args = pmap(list(a0 = a0, + independent_items = independent_items, + include_calibration = include_calibration, + include_treatment = include_treatment, + include_trend = include_trend), + list), + Args = map(Args, ~c(.x, list(treatment_names = c("localTreatment", "emollientCream")))), + Model = map(Args, ~do.call(ScoradPred, .x)$name) %>% unlist()) +} diff --git a/README.md b/README.md new file mode 
100644 index 0000000..4a55200 --- /dev/null +++ b/README.md @@ -0,0 +1,70 @@ +# EczemaTreat + +This repository contains the code developed for the paper "Data-driven personalised recommendations for eczema treatment using a Bayesian model of severity dynamics" (submitted for publication, [preprint here](https://doi.org/10.1101/2024.01.21.24301575)). + +The code is written in the R language for statistical computing and the probabilistic programming language [Stan](https://mc-stan.org/) for the models. + +## File structure + +This repository is organised as a [research compendium](https://doi.org/10.1080/00031305.2017.1375986), with a structure similar to that of an R package. +Nevertheless, this project is not a literal package with a DESCRIPTION file; instead, it uses [renv](https://rstudio.github.io/renv/index.html) to manage package dependencies (see the [Reproducibility section](#Reproducibility)) and git tags for version control. + +Functions specific to this project are located in the [`R/`](R/) directory. + +### Stan models + +All Stan models are implemented using a single Stan file, [FullModel.stan](models/FullModel.stan), with optional features that can be switched on and off to evaluate the contribution of the different model components. +The model is manipulated using `ScoradPred` objects (inheriting from the class `EczemaModel` defined in [EczemaPred](https://ghurault.github.io/EczemaPred/)). + +### Analysis scripts + +The analysis code is located in the [`analysis/`](analysis/) directory: + +- [`01_check_models.R`](analysis/01_check_models.R): conduct prior predictive checks and fake data checks. +- [`02_run_fit.R`](analysis/02_run_fit.R): fit the model to data. +- [`03_check_fit.Rmd`](analysis/03_check_fit.Rmd): diagnose the fit, inspect the posterior and perform posterior predictive checks. +- [`04a_run_validation.R`](analysis/04a_run_validation.R): run the validation process (forward chaining). +- [`04b_run_validation_reference.R`](analysis/04b_run_validation_reference.R): run the validation process (forward chaining) for the reference (univariate) models. +- [`05_check_performance.Rmd`](analysis/05_check_performance.Rmd): analyse validation results and predictive performance. +- [`06_analyse_recommendations.Rmd`](analysis/06_analyse_recommendations.Rmd): generate and analyse treatment recommendations. + +- Scripts to generate figures for the paper: + - [`07_plot_data.R`](analysis/07_plot_data.R) + - [`07_plot_fit.R`](analysis/07_plot_fit.R) + - [`07_plot_performance.R`](analysis/07_plot_performance.R) + - [`07_plot_powerprior.R`](analysis/07_plot_powerprior.R) + +In addition, [`generate_reports.R`](analysis/generate_reports.R) renders reports from [`03_check_fit.Rmd`](analysis/03_check_fit.Rmd) and [`05_check_performance.Rmd`](analysis/05_check_performance.Rmd) for all models and severity items/scores. +[`view_reports.Rmd`](analysis/view_reports.Rmd) creates an HTML document to easily browse these reports. + +### Note on the terminology + +- "ScoradPred" refers to the base model (independent state-space models for all severity items, defined by an ordered logistic measurement distribution and latent random walk dynamics). +- Modifications/improvements of the "ScoradPred" model are referred to as "ScoradPred+improvement". +For example, the model consisting of an ordered logistic measurement distribution for all severity items and a latent multivariate random walk (i.e. modelling the correlations between changes in latent severity) is denoted as "ScoradPred+corr". 
+- The different flavours of the base ScoradPred model are implemented in a model class also called ScoradPred (see the short usage sketch at the end of this README). +- The Stan file implementing all of these models is called "FullModel.stan". + +## Reproducibility + +This project uses [renv](https://rstudio.github.io/renv/index.html) to manage R package dependencies. +The details of the packages needed to reproduce the analysis are stored in [`renv.lock`](renv.lock), while the renv configuration files and the project library (ignored by git) are stored in [`renv/`](renv/). +After installing `renv` itself (`install.packages("renv")`), the project library can be restored by calling `renv::restore(exclude = "TanakaData")`. +Note that this command explicitly avoids installing the `TanakaData` package, a proprietary (unavailable) package containing the data used in this project. +The data is not available at the time of writing. + +In addition, we provide a [Dockerfile](Dockerfile) to fully reproduce the computational environment with [Docker](https://www.docker.com/): + +- Building the image: `docker build . -t eczematreat`. +In addition to installing R packages using renv, the Docker image will also install the correct version of R and the system dependencies required to use Stan. +- Running the container: `docker run -d --rm -p 1212:8787 -e ROOT=TRUE -e DISABLE_AUTH=true -v ${PWD}:/home/rstudio/EczemaTreat -v /home/rstudio/EczemaTreat/renv eczematreat`. +This command launches an RStudio Server session (without authentication, giving the user root access) accessible at [`http://localhost:1212/`](http://localhost:1212/), while mounting the current directory into the container. + +After that, to reproduce the analysis, we suggest opening the RStudio project (`.Rproj` file) and running the [analysis scripts](analysis/) in the order indicated by their prefix. +Intermediate and output files are saved to a `results/` directory. + +NB: this project relies on [EczemaPred version v0.3.0](https://github.com/ghurault/EczemaPred/releases/tag/v0.3.0). + +## License + +The open-source version of this project is licensed under the GPLv3 license, which can be found in the [LICENSE](LICENSE.md) file. 
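As a quick illustration of the naming convention described in the terminology section, here is a minimal sketch. It assumes the project library has been restored and the project functions are loaded (e.g. by sourcing `analysis/00_init.R`, which itself requires the proprietary `TanakaData` package); the expected names shown in the comments follow from the constructor logic in `R/ScoradPred.R`.

```r
# Model names are assembled from the constructor flags (see R/ScoradPred.R)
source(here::here("analysis", "00_init.R"))  # libraries + project functions in R/

ScoradPred(independent_items = FALSE)$name
#> [1] "ScoradPred+corr"

ScoradPred(a0 = 0.04,
           independent_items = FALSE,
           include_calibration = TRUE,
           include_treatment = TRUE,
           treatment_names = c("localTreatment", "emollientCream"))$name
#> [1] "ScoradPred+h004+corr+cal+treat"

# Model configurations investigated in the project (see available_models() in R/utils.R)
available_models()$Model
```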
diff --git a/analysis/00_init.R b/analysis/00_init.R new file mode 100644 index 0000000..e331c41 --- /dev/null +++ b/analysis/00_init.R @@ -0,0 +1,22 @@ +# Libraries and variables ------------------------------------------------- + +library(HuraultMisc) # Functions shared across projects +library(TanakaData) +library(EczemaPred) +library(EczemaPredPOSCORAD) +library(tibble) +library(dplyr) +library(tidyr) +library(ggplot2) +theme_set(theme_bw(base_size = 15)) +library(purrr) +library(magrittr) +library(here) +library(cowplot) + +library(rstan) +rstan_options(auto_write = TRUE) # Save compiled model +options(mc.cores = parallel::detectCores()) # Parallel computing + +lapply(list.files(here("R"), pattern = ".R$", full.names = TRUE), + source) diff --git a/analysis/01_check_models.R b/analysis/01_check_models.R new file mode 100644 index 0000000..68d9027 --- /dev/null +++ b/analysis/01_check_models.R @@ -0,0 +1,83 @@ +# Notes ------------------------------------------------------------------- + +# Prior predictive check +# NB: assume no power prior, no trend, no calibration data, no treatment data but correlations between signs + +# Initialisation ---------------------------------------------------------- + +rm(list = ls()) # Clear Workspace (better to restart the session) + +set.seed(2021) # Reproducibility (Stan use a different seed) + +source(here::here("analysis", "00_init.R")) # Load libraries, variables and functions + +score <- "SCORAD" + +#### OPTIONS +model <- ScoradPred(independent_items = FALSE) + +n_pt <- 16 +n_dur <- rpois(n_pt, 50) + +run_prior <- TRUE +n_chains <- 4 +n_it <- 2000 +#### + +stopifnot( + is_scalar_wholenumber(n_pt), + n_pt > 0, + all(is_wholenumber(n_dur)), + all(n_dur > 0), + is_scalar_logical(run_prior), + is_scalar_wholenumber(n_chains), + n_chains > 0, + is_scalar_wholenumber(n_it), + n_it > 0 +) + +## Files +file_dict <- get_results_files(outcome = score, + model = model$name) + +if (run_prior) { + compiled_model <- stan_model(model$stanmodel) +} + +## Parameters +param <- list_parameters(model) +param2 <- list_parameters(model, full_names = TRUE) +param[c("PatientTime", "Test")] <- NULL + +id <- get_index2(n_dur) + +# Prepare Stan input ------------------------------------------------------ + +l <- make_empty_data(N_patient = n_pt, t_max = n_dur, max_score = max(model$M1, model$M2), discrete = TRUE) +l$Training$ItemID <- 1 +l$Testing$ItemID <- 1 +data_stan <- prepare_standata(model, train = l$Training, test = l$Testing) +data_stan[c("N_obs", "d_obs", "k_obs", "t_obs", "y_obs", "run")] <- NULL +data_stan <- c(data_stan, + list(N_obs = 0, + d_obs = vector(), + k_obs = vector(), + t_obs = vector(), + y_obs = vector(), + run = 0)) + +data_prior <- c(prefill_standata_FullModel(model), + data_stan) + +# Prior predictive check ------------------------------------------------- + +if (run_prior) { + fit_prior <- sampling(compiled_model, + data = data_prior, + pars = unlist(param), + iter = n_it, + chains = n_chains) + saveRDS(fit_prior, file = here(file_dict$PriorFit)) + par0 <- HuraultMisc::summary_statistics(fit_prior, pars = unlist(param)) + saveRDS(par0, file = here(file_dict$PriorPar)) +} diff --git a/analysis/02_run_fit.R b/analysis/02_run_fit.R new file mode 100644 index 0000000..e8d4870 --- /dev/null +++ b/analysis/02_run_fit.R @@ -0,0 +1,131 @@ +# Notes ------------------------------------------------------------------- + +# Fit multivariate model + +# Initialisation ---------------------------------------------------------- + +rm(list = ls()) # Clear 
Workspace (better to restart the session) + +set.seed(2021) # Reproducibility (Stan use a different seed) + +source(here::here("analysis", "00_init.R")) # Load libraries, variables and functions + +score <- "SCORAD" +dataset <- "PFDC" + +#### OPTIONS +model <- ScoradPred(a0 = 0.04, # 0.04 + independent_items = FALSE, + include_calibration = TRUE, + include_treatment = TRUE, + treatment_names = c("localTreatment", "emollientCream"), + include_trend = FALSE, + include_recommendations = FALSE) + +run <- FALSE +n_chains <- 4 +n_it <- 2000 +#### + +stopifnot( + is_scalar_logical(run), + is_scalar_wholenumber(n_chains), + n_chains > 0, + is_scalar_wholenumber(n_it), + n_it > 0 +) + +## Files +file_dict <- get_results_files(outcome = score, + model = model$name, + dataset = dataset, + root_dir = here()) + +## Parameters +param <- list_parameters(model) +param2 <- list_parameters(model, full_names = TRUE) + +# Data -------------------------------------------------------------------- + +l <- load_PFDC() + +# Prepare POSCORAD time-series +POSCORAD <- l$POSCORAD %>% + rename(Time = Day) +df <- POSCORAD %>% + select(one_of("Patient", "Time", model$item_spec$Label)) %>% + pivot_longer(cols = all_of(model$item_spec$Label), names_to = "Label", values_to = "Score") %>% + drop_na() %>% + left_join(model$item_spec[, c("Label", "ItemID")], by = c("Label")) +train <- df %>% + mutate(Resolution = case_when(Label %in% detail_POSCORAD("Subjective symptoms")$Label ~ 0.1, + TRUE ~ 1), + Score = round(Score / Resolution)) %>% + select(-Resolution) + +# Prepare SCORAD calibration data +if (model$include_calibration) { + scorad <- l$SCORAD %>% + rename(Time = Day) %>% + select(one_of("Patient", "Time", model$item_spec$Label)) %>% + pivot_longer(cols = all_of(model$item_spec$Label), names_to = "Label", values_to = "Score") %>% + drop_na() %>% + left_join(model$item_spec[, c("Label", "ItemID")], by = c("Label")) + cal <- scorad %>% + mutate(Resolution = case_when(Label %in% detail_POSCORAD("Subjective symptoms")$Label ~ 0.1, + TRUE ~ 1), + Score = round(Score / Resolution)) %>% + select(-Resolution) +} else { + cal <- NULL +} + +# Prepare treatment data +treatment_lbl <- paste0(model$treatment_names, "WithinThePast2Days") +if (model$include_treatment) { + treat <- POSCORAD %>% + select(all_of(c("Patient", "Time", treatment_lbl))) %>% + pivot_longer(cols = all_of(treatment_lbl), names_to = "Treatment", values_to = "UsageWithinThePast2Days") %>% + mutate(Treatment = vapply(Treatment, function(x) {which(x == treatment_lbl)}, numeric(1)) %>% as.numeric()) %>% + drop_na() +} else { + treat <- NULL +} + +# Prepare recommendation data +if (model$include_recommendations) { + # Dataset where Time correspond to the time when the action is made, and the scores correspond to Time + 1 + df_rec <- POSCORAD %>% + group_by(Patient) %>% + filter(Time == max(Time)) %>% + ungroup() %>% + mutate(Time = Time - 1) %>% + mutate(Recommendation = 1:nrow(.)) +} else { + df_rec <- NULL +} + +pt <- unique(df[["Patient"]]) + +# Stan input +data_stan <- c(prefill_standata_FullModel(model), + prepare_standata(model, train = train, test = NULL, cal = cal, treat = treat, rec = df_rec)) + +id <- get_index(bind_rows(train, cal, treat, df_rec)) +df <- left_join(df, id, by = c("Patient", "Time")) + +# Fitting ----------------------------------------------------------------- + +if (run) { + cat("Running model:", model$name, "\n") + fit <- stan(file = model$stanmodel, + data = data_stan, + pars = unlist(param), + iter = n_it, + chains = n_chains, + control 
= list(adapt_delta = .9), + init = 0) + saveRDS(fit, file = file_dict$Fit) + par <- HuraultMisc::summary_statistics(fit, pars = unlist(param)) + saveRDS(par, file = file_dict$FitPar) +} diff --git a/analysis/03_check_fit.Rmd b/analysis/03_check_fit.Rmd new file mode 100644 index 0000000..e6246f7 --- /dev/null +++ b/analysis/03_check_fit.Rmd @@ -0,0 +1,668 @@ +--- +title: "Fit ScoradPred to data" +author: "Guillem Hurault" +date: "`r format(Sys.time(), '%d %B %Y')`" +output: html_document +params: + a0: 0.04 + independent_items: FALSE + include_calibration: TRUE + include_treatment: TRUE + include_trend: FALSE +--- + +```{r setup, include=FALSE} +knitr::opts_chunk$set(echo = FALSE, + warning = FALSE, + fig.height = 5, + fig.width = 8, + dpi = 200) + +set.seed(2021) # Reproducibility (Stan use a different seed) + +source(here::here("analysis", "00_init.R")) # Load libraries, variables and functions + +score <- "SCORAD" +dataset <- "PFDC" + +model <- ScoradPred(independent_items = params$independent_items, + a0 = params$a0, + include_trend = params$include_trend, + include_calibration = params$include_calibration, + include_treatment = params$include_treatment, + treatment_names = c("localTreatment", "emollientCream"), + include_recommendations = FALSE) + +param <- list_parameters(model) +param2 <- list_parameters(model, full_names = TRUE) + +``` + +```{r load-data, include=FALSE} +# NB: atm copy-pasted from `run_fit.R` because we need `df`, `scorad` and especially `id` (when dealing with treatment) + +l <- load_PFDC() + +POSCORAD <- l$POSCORAD %>% + rename(Time = Day) +df <- POSCORAD %>% + select(one_of("Patient", "Time", model$item_spec$Label)) %>% + pivot_longer(cols = all_of(model$item_spec$Label), names_to = "Label", values_to = "Score") %>% + drop_na() %>% + left_join(model$item_spec[, c("Label", "ItemID")], by = c("Label")) + +# Prepare SCORAD calibration data +if (model$include_calibration) { + cal <- scorad <- l$SCORAD %>% + rename(Time = Day) %>% + select(one_of("Patient", "Time", model$item_spec$Label)) %>% + pivot_longer(cols = all_of(model$item_spec$Label), names_to = "Label", values_to = "Score") %>% + drop_na() %>% + left_join(model$item_spec[, c("Label", "ItemID")], by = c("Label")) +} else { + cal <- NULL +} + +# Prepare treatment data +treatment_lbl <- paste0(model$treatment_names, "WithinThePast2Days") +if (model$include_treatment) { + treat <- POSCORAD %>% + select(all_of(c("Patient", "Time", treatment_lbl))) %>% + pivot_longer(cols = all_of(treatment_lbl), names_to = "Treatment", values_to = "UsageWithinThePast2Days") %>% + mutate(Treatment = vapply(Treatment, function(x) {which(x == treatment_lbl)}, numeric(1)) %>% as.numeric()) %>% + drop_na() +} else { + treat <- NULL +} + +# NB: assume no recommendation (at least outside time-series) + +pt <- unique(df[["Patient"]]) + +id <- get_index(bind_rows(df, cal, treat)) +df <- left_join(df, id, by = c("Patient", "Time")) +``` + +```{r load-results, include=FALSE} +file_dict <- get_results_files(outcome = score, + model = model$name, + dataset = dataset, + root_dir = here()) +file_dict$PriorPar <- get_results_files(outcome = score, + model = "ScoradPred+corr", + dataset = dataset, + root_dir = here())$PriorPar + +fit <- readRDS(file_dict$Fit) +par <- readRDS(file_dict$FitPar) +par0 <- readRDS(file_dict$PriorPar) +``` + +# Model specifications: `r model$name` + +- Random walk `r if (params$include_trend) {"**with trend**"}` latent dynamic +- `r ifelse(params$independent_items, "**No correlation**", "**Correlation**")` between 
intensity items +`r if (params$include_calibration) {"- Calibration with SCORAD measurements"}` +`r if (params$include_treatment) {"- Using treatment data"}` + +# Diagnostics + +```{r stan-diagnostics} +check_hmc_diagnostics(fit) + +par %>% + select(Rhat) %>% + drop_na() %>% + summarise(max(Rhat), + all(Rhat < 1.1)) + +lapply(1, # :model$D, + function(d) { + pairs(fit, pars = paste0(c("sigma_meas", "sigma_lat", "rho2", "sigma_tot", "mu_y0", "sigma_y0"), "[", d, "]")) + }) +plot(fit, pars = param2$Population[1], plotfun = "trace") +# print(fit, pars = param$Population) +``` + +# Posterior estimates + +## Sensitivity to prior + +```{r prior-posterior} +HuraultMisc::plot_prior_influence(par0, par, pars = c(param2$Population, param2$Patient)) + + # coord_cartesian(xlim = c(-1, 1)) + + theme(legend.position = "none") +``` + +## Measurement vs latent noise + +```{r estimates-dyn1-std, message=FALSE} +plot_grid( + plot(fit, pars = "sigma_reltot") + + coord_cartesian(xlim = c(0, .25)) + + labs(title = "Normalised total standard deviation"), + plot(fit, pars = "rho2") + + coord_cartesian(xlim = c(0, 1)) + + labs(title = "Relative importance of measurement variance to total variance"), + nrow = 1) +``` + +```{r estimates-dyn2-std} +tmp <- lapply(c("sigma_lat", "sigma_meas"), + function(x) { + + smp <- rstan::extract(fit, pars = x)[[1]] + + out <- model$item_spec %>% + mutate(Variable = x, + Samples = lapply(1:nrow(.), function(i) {smp[, i]}), + Samples = map2(Samples, M, ~(.x / .y)), + Mean = map(Samples, mean) %>% unlist(), + Lower = map(Samples, ~quantile(.x, probs = .05)) %>% unlist(), + Upper = map(Samples, ~quantile(.x, probs = .95)) %>% unlist()) %>% + select(-Samples) + + return(out) + + }) %>% + bind_rows() + +tmp %>% + mutate(Variable = recode(Variable, + "sigma_lat" = "Latent dynamic", + "sigma_meas" = "Measurement"), + Component = gsub(" ", "\n", Component)) %>% + ggplot(aes(x = Name, y = Mean, ymin = Lower, ymax = Upper, colour = Variable)) + + facet_grid(rows = vars(Component), scales = "free", space = "free") + + geom_pointrange(position = position_dodge(width = .5)) + + coord_flip(ylim = c(0, .25)) + + scale_colour_manual(values = HuraultMisc::cbbPalette) + + labs(x = "", y = "Estimate (normalised)", colour = "Standard deviation:") + + theme(legend.position = "top") +# ggsave(here("results", "dynamics_std.jpg"), width = 13, height = 8, units = "cm", dpi = 300, scale = 2) +``` + +## Measurement distribution cut-offs + +```{r estimates-cutoffs} +plot(fit, pars = "ct1") + labs(title = "Cut-offs for extent, itching and sleep") +plot(fit, pars = "ct2") + labs(title = "Cut-offs for intensity signs") +plot(fit, pars = "delta1") + labs(title = "Normalised difference between cut-offs for extent, itching and sleep") +``` + + +## Expected correlation matrices + +- Correlation of latent initial condition +- Correlation of changes in latent scores + +```{r estimates-correlation, results='asis'} +if (!model$independent_items) { + # Correlation matrix (expected value) + + lapply(c("Omega0", "Omega"), + function(x) { + omg <- rstan::extract(fit, pars = x)[[1]] + + tmp <- list(Mean = apply(omg, c(2, 3), mean), + SD = apply(omg, c(2, 3), sd), + Lower = apply(omg, c(2, 3), function(x) {quantile(x, probs = .05)}), + Upper = apply(omg, c(2, 3), function(x) {quantile(x, probs = .95)}), + pval = apply(omg, c(2, 3), function(x) {empirical_pval(x, 0)})) + tmp <- lapply(tmp, + function(x) { + colnames(x) <- model$item_spec$Name + rownames(x) <- model$item_spec$Name + return(x) + }) + + 
corrplot::corrplot.mixed(tmp$Mean, lower = "number", upper = "ellipse", p.mat = tmp$pval, sig.level = 0.1) + + # corrplot::corrplot(tmp$Mean, method = "ellipse") %>% + # corrplot::corrRect(name = c("extent", "itching", "dryness", "thickening")) + + NULL + }) + } +``` + +## Calibration + +### Estimates + +- `bias0` corresponds to the initial bias. +The value is reported as a proportion of the maximum value that the score can take. +`bias0 > 0` means that the clinician scores higher than the patient. +- `tau_bias` is the time constant associated with the learning of the patient (i.e. whether the bias is reduced with time); an illustrative sketch of this decay is given just before the trajectory plots below. + - `tau_bias >> 1` means that the patient does not learn and the bias stays constant. + - `tau_bias << 1` means that the bias decays to zero very quickly. + +```{r estimates-calibration} +if (model$include_calibration) { + + # + Visualise calibration time in PPC plot + + p1_cal <- par %>% + filter(Variable == "bias0") %>% + rename(ItemID = Index) %>% + left_join(model$item_spec, by = "ItemID") %>% + filter(!(Name %in% c("sleep", "itching"))) %>% + ggplot(aes(x = Name, y = Mean, ymin = `5%`, ymax = `95%`)) + + facet_grid(rows = vars(Component), scales = "free", space = "free") + + geom_pointrange() + + geom_hline(yintercept = 0, linetype = "dashed") + + coord_flip() + + scale_y_continuous(limits = c(-.5, .5)) + + labs(x = "", + y = "Initial bias (normalised)") + p2_cal <- par %>% + filter(Variable == "tau_bias") %>% + rename(ItemID = Index) %>% + left_join(model$item_spec, by = "ItemID") %>% + filter(!(Name %in% c("sleep", "itching"))) %>% + ggplot(aes(x = Name, y = Mean, ymin = `5%`, ymax = `95%`)) + + facet_grid(rows = vars(Component), scales = "free", space = "free") + + geom_pointrange() + + scale_y_log10(breaks = 10^(0:3)) + + coord_flip() + + labs(x = "", y = "Characteristic learning time") + + if (FALSE) { + ggsave(filename = here("results", "calibration_learningtime.jpg"), + plot = p2_cal, + width = 13, height = 8, units = "cm", dpi = 300, scale = 2) + } + + plot_grid(p1_cal, + p2_cal + + theme(axis.text.y = element_blank(), + axis.ticks.y = element_blank()), + nrow = 1, + rel_widths = c(.55, .45)) + +} +``` + +### Observed PO-SCORAD trajectories overlaid with a posteriori SCORAD trajectories + +A posteriori SCORAD (i.e. the SCORAD that the clinician would have scored) tends to be higher than the observed PO-SCORAD. +However, the bias is not constant over time, as the breakdown of SCORAD may change and learning may happen. + +NB: the width of the posterior distribution of SCORAD follows the assumption that the standard deviation of SCORAD measurements is half that of PO-SCORAD measurements.
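+As a purely illustrative sketch of how `bias0` and `tau_bias` (described under Estimates above) interact, the code below assumes an exponential decay of the bias; this functional form is an assumption made for illustration only (the actual parameterisation is defined in the Stan model) and the block is not an executed chunk:
+
+```r
+library(dplyr)
+library(ggplot2)
+
+bias0 <- 0.2  # made-up initial (normalised) bias
+
+expand.grid(t = 0:100, tau_bias = c(1, 10, 1000)) %>%
+  mutate(bias = bias0 * exp(-t / tau_bias),  # assumed decay: bias(t) = bias0 * exp(-t / tau_bias)
+         tau_bias = factor(tau_bias)) %>%
+  ggplot(aes(x = t, y = bias, colour = tau_bias)) +
+  geom_line() +
+  labs(x = "Days since the first clinician assessment",
+       y = "Bias (normalised)",
+       colour = "tau_bias")
+```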
+ +```{r calibration-trajectories} +if (model$include_calibration) { + + aggcal <- rstan::extract(fit, pars = "agg_cal_rep")[[1]] + aggcal <- aggcal[, , 4] # SCORAD + + ### Plot observed PO-SCORAD and inferred SCORAD as a fanchart + pl <- lapply(sort(sample(pt, 4)), + function(pid) { + + tmp <- POSCORAD %>% + filter(Patient == pid) + + plot_post_traj_fanchart(aggcal, id = id, patient_id = pid, max_score = 103) + + add_broken_pointline(tmp, aes_x = "Time", aes_y = "SCORAD", colour = "Observed PO-SCORAD") + + scale_colour_manual(values = c("Observed PO-SCORAD" = "black")) + + labs(fill = "Inferred\nSCORAD\nprobabilities", colour = "", title = paste0("Patient ", pid)) + + theme(legend.position = "none", + legend.title = element_text(size = 12)) + + }) + + plot_grid(get_legend(pl[[1]] + theme(legend.position = "top")), + plot_grid(plotlist = pl, ncol = 2), + ncol = 1, rel_heights = c(.1, .9)) + +} +``` + +## Treatment + +```{r processing-treatment-estimates, include=FALSE} +if (model$include_treatment) { + + param_treat <- list_parameters_treatment() + + # Process par: Treatment usage parameters + for (x in param_treat$Patient) { + par <- extract_par_indexes(par, var_name = x, dim_names = c("Patient", "Treatment")) + } + + # Process par: Treatment effects + par <- extract_par_indexes(par, var_name = "ATE", dim_names = c("ItemID", "Treatment")) + +} +``` + +### Daily treatment usage + +#### Parameters + +```{r estimates-dailytreat} +if (model$include_treatment) { + + # Plot patient parameters + par %>% + filter(Variable %in% param_treat$Patient) %>% + mutate(Treatment = model$treatment_names[Treatment], + Patient = factor(Patient, levels = pt)) %>% + ggplot(aes(x = Patient, y = Mean, ymin = `5%`, ymax = `95%`, colour = Treatment)) + + facet_grid(cols = vars(Variable)) + + geom_pointrange(position = position_dodge(width = .5)) + + coord_flip(ylim = c(0, 1)) + + scale_colour_manual(values = cbbPalette[c(2, 1)]) + + labs(y = x, colour = "") + + theme(legend.position = "top") + + # Plot distribution of patient parameters + lapply(param_treat$Patient, + function(x) { + tmp <- rstan::extract(fit, pars = x)[[1]] + lapply(1:2, + function(d) { + PPC_group_distribution(tmp[, , d], x, nDraws = 50) + + coord_cartesian(xlim = c(0, 1)) + + labs(title = model$treatment_names[d]) + }) %>% + plot_grid(plotlist = ., nrow = 1) + }) %>% + plot_grid(plotlist = ., ncol = 1) + +} +``` + +#### Average treatment usage + +```{r estimates-dailytreat-avg} +if (model$include_treatment) { + # Average treatment usage for each patient + ptreat <- rstan::extract(fit, pars = "p_treat")[[1]] + lapply(pt, + function(pid) { + data.frame(Patient = pid, + Treatment = model$treatment_names, + AverageUsage = apply(ptreat[, id %>% filter(Patient == pid) %>% pull(Index), ], 3, mean)) + }) %>% + bind_rows() %>% + pivot_wider(names_from = "Treatment", values_from = "AverageUsage") %>% + ggplot(aes(x = localTreatment, y = emollientCream)) + + geom_point() + + coord_cartesian(xlim = c(0, 1), ylim = c(0, 1)) +} +``` + +```{r estimates-dailytreat-distribution , eval=FALSE} +if (model$include_treatment) { + # Distribution of frequency of treatment usage + lapply(pt, + function(pid) { + avg_use <- apply(ptreat[, id %>% filter(Patient == pid) %>% pull(Index), ], c(1, 3), mean) %>% + reshape2::melt(., varnames = c("Sample", "Treatment"), value.name = "AverageUsage") %>% + mutate(Treatment = model$treatment_names[Treatment]) + ggplot(data = avg_use, + aes(x = AverageUsage, colour = Treatment)) + + geom_density() + + coord_cartesian(xlim = c(0, 1), 
expand = FALSE) + + labs(colour = "") + + theme(legend.position = "top") + }) %>% + plot_grid(plotlist = ., ncol = 4) +} +``` + +### Treatment effects + +The average treatment effect is reported as a proportion of the maximum value that the score can take. +Negative values indicate that using treatment reduces severity. +For example, if ATE=-0.05 for extent (defined in [0, M=100]), it means that using treatment would reduce, on average, the severity of extent by 5 points. + +NB: this should be compared to the total noise `sigma_reltot`, which yields a signal-to-noise ratio of approximately 0.2. +With such a small effect size, it would be hard to detect a difference in predictive performance or to assess treatment recommendations. + +```{r estimates-ATE} +if (model$include_treatment) { + # Prob(TreatEffect < 0) + apply(rstan::extract(fit, pars = "ATE")[[1]], c(2, 3), function(x) {mean(x < 0)}) + + # Plot treatment effects + par %>% + filter(Variable == "ATE") %>% + mutate(Treatment = model$treatment_names[Treatment]) %>% + left_join(model$item_spec, by = "ItemID") %>% + ggplot(aes(x = Name, y = Mean, ymin = `5%`, ymax = `95%`, colour = Treatment)) + + facet_grid(rows = vars(Component), scales = "free", space = "free") + + geom_pointrange(position = position_dodge(width = .5)) + + geom_hline(yintercept = 0, linetype = "dashed") + + coord_flip(ylim = c(-.05, .05)) + + scale_colour_manual(values = cbbPalette[c(2, 1)]) + + labs(x = "", y = "Treatment effect", colour = "") + + theme(legend.position = "top") + # plot(fit, pars = "ATE_agg") +} +``` + +## Trend + +- `beta` is the trend smoothing factor (see the toy sketch below). +If `beta = 0`, the trend is constant (it does not change over time). + +```{r estimates-trend1} +if (model$include_trend) { + plot(fit, pars = "beta") + coord_cartesian(xlim = c(0, 1)) +} +``` + +The plots further below show the expected trend for four patients.
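+First, as a toy illustration of the role of `beta` (the Holt-type update below is an assumption made for illustration only, not the model's exact parameterisation, which is defined in the Stan files), not an executed chunk:
+
+```r
+# Assumed form for illustration: the trend is an exponentially smoothed
+# version of the latest change in the latent score,
+#   trend[t] = beta * (y[t] - y[t - 1]) + (1 - beta) * trend[t - 1],
+# so beta = 0 keeps the trend constant and larger beta tracks recent changes.
+simulate_trend <- function(y, beta, trend0 = 0) {
+  trend <- numeric(length(y))
+  trend[1] <- trend0
+  for (t in 2:length(y)) {
+    trend[t] <- beta * (y[t] - y[t - 1]) + (1 - beta) * trend[t - 1]
+  }
+  trend
+}
+
+set.seed(1)
+y <- cumsum(rnorm(50))         # made-up latent trajectory
+simulate_trend(y, beta = 0)    # trend stays at trend0 (constant)
+simulate_trend(y, beta = 0.2)  # trend adapts slowly to recent changes
+```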
+ +```{r estimates-trend} +if (model$include_trend) { + + expected_trajectory <- function(fit, par_name, id) { + traj <- rstan::extract(fit, pars = par_name)[[1]] + + mean_traj <- apply(traj, c(2, 3), mean) + mean_traj <- as.data.frame(mean_traj) + colnames(mean_traj) <- paste0("Item_", 1:model$D) + mean_traj <- bind_cols(id, mean_traj) %>% + pivot_longer(cols = starts_with("Item_"), names_to = "ItemID", values_to = par_name) %>% + mutate(ItemID = gsub("Item_", "", ItemID) %>% as.numeric()) + + return(mean_traj) + } + + mean_trend <- expected_trajectory(fit, "trend", id) %>% + left_join(model$item_spec, by = "ItemID") %>% + mutate(trend = trend / M) + + p_trend <- lapply(sort(sample(pt, 4)), + function(pid) { + mean_trend %>% + filter(Patient == pid) %>% + ggplot(aes(x = Time, y = trend, colour = Name)) + + geom_line() + + coord_cartesian(ylim = c(-1, 1)) + + labs(title = paste0("Patient ", pid), colour = "") + + theme(legend.position = "none") + }) + + plot_grid(get_legend(p_trend[[1]] + theme(legend.position = "top")), + plot_grid(plotlist = p_trend, ncol = 2), + ncol = 1, rel_heights = c(.1, .9)) +} +``` + +```{r range-trend, message=FALSE} +if (model$include_trend) { + + mean_trend %>% + group_by(Patient, Name) %>% + summarise(Min = min(trend), Max = max(trend)) %>% + mutate(Patient = factor(Patient)) %>% + ggplot(aes(x = Patient, ymin = Min, ymax = Max, colour = Name)) + + geom_errorbar(position = position_dodge(width = .7)) + + coord_flip(ylim = c(-.01, .01)) + + labs(y = "Range of the (normalised) expected trend", colour = "") + # ggsave(here("results", "trend_range.jpg"), width = 13, height = 8, units = "cm", dpi = 300, scale = 2.5) + +} +``` + +# Posterior predictive checks + +```{r ppc, eval=FALSE} +yrep <- rstan::extract(fit, pars = "y_rep")[[1]] +aggrep <- rstan::extract(fit, pars = "agg_rep")[[1]] +df_agg <- POSCORAD %>% + rename(Score = all_of(detail_POSCORAD(score)$Label)) %>% + select(Patient, Time, Score) %>% + drop_na() +pl <- lapply(pt, + function(pid) { + + if (model$include_calibration) { + cal_time <- scorad %>% filter(Patient == pid) %>% pull(Time) + } + + # Breakdown + pl <- lapply(1:model$D, + function(d) { + tmp <- model$item_spec %>% filter(ItemID == d) + reso <- tmp[["Resolution"]] + M <- tmp[["Maximum"]] / tmp[["Resolution"]] + yrep_d <- yrep[, , d] * reso + sub_df <- df %>% filter(ItemID == d, Patient == pid) + if (M < 20) { + p <- plot_post_traj_pmf(yrep_d, + id = id, + patient_id = pid, + max_score = tmp[["Maximum"]]) + } else { + p <- plot_post_traj_fanchart(yrep_d, + id = id, + patient_id = pid, + max_score = tmp[["Maximum"]], + legend_fill = "discrete", + CI_level = seq(.1, .9, .2)) + } + p <- p + + geom_point(data = sub_df, + aes(x = Time, y = Score)) + + geom_path(data = sub_df, + aes(x = Time, y = Score)) + + labs(y = tmp$Label) + + if (model$include_calibration) { + p <- p + geom_vline(xintercept = cal_time, colour = "black") + } + + return(p) + }) + # Aggregate + sub_df_agg <- filter(df_agg, Patient == pid) + p_agg <- plot_post_traj_fanchart(aggrep[, , 4], # SCORAD + id = id, + patient_id = pid, + max_score = detail_POSCORAD(score)$Maximum, + legend_fill = "discrete", + CI_level = seq(.1, .9, .2)) + + geom_point(data = sub_df_agg, + aes(x = Time, y = Score)) + + geom_path(data = sub_df_agg, + aes(x = Time, y = Score)) + + labs(y = score) + + if (model$include_calibration) { + p_agg <- p_agg + geom_vline(xintercept = cal_time, colour = "black") + } + + plot_title <- ggdraw() + + draw_label(paste0("Patient ", pid), + fontface = "bold", + size = 20, + x 
= .5, + hjust = 0) + + theme(plot.margin = margin(0, 0, 0, 7)) + + plot_grid(plot_title, + plot_grid(plotlist = pl, ncol = 1, align = "v"), + p_agg, + ncol = 1, + rel_heights = c(.5, 8, 2), align = "v") + }) + +if (FALSE) { + for (i in seq_along(pt)) { + ggsave(filename = here("results", paste0(score, "_", model$name, "_", sprintf("%02d", pt[i]), ".jpg")), + plot = pl[[i]], + width = 10, + height = 15, + units = "cm", + dpi = 300, + scale = 3.5, + bg = "white") + } +} +``` + +```{r ppc-scorad-only, eval=FALSE} +aggrep <- rstan::extract(fit, pars = "agg_rep")[[1]] +df_agg <- POSCORAD %>% + rename(Score = all_of(detail_POSCORAD(score)$Label)) %>% + select(Patient, Time, Score) %>% + drop_na() + +pl <- lapply(pt, + function(pid) { + sub_df_agg <- filter(df_agg, Patient == pid) + + p_agg <- plot_post_traj_fanchart(aggrep[, , 4], # SCORAD + id = id, + patient_id = pid, + max_score = 55, + legend_fill = "discrete", + CI_level = seq(.1, .9, .2)) + + geom_point(data = sub_df_agg, + aes(x = Time, y = Score)) + + geom_path(data = sub_df_agg, + aes(x = Time, y = Score)) + + labs(y = score, title = paste0("Patient ", pid)) + }) + + +plot_grid(plotlist = pl[sort(sample(pt, 4))], + ncol = 2) + + +if (FALSE) { + for (i in seq_along(pt)) { + ggsave(filename = here("results", paste0(score, "-only_", model$name, "_", sprintf("%02d", pt[i]), ".jpg")), + plot = pl[[i]], + width = 12, + height = 7, + units = "cm", + dpi = 300, + scale = 1.8, + bg = "white") + } +} + +``` + +```{r ppc-links2, results='asis'} +img <- data.frame(Patient = pt) %>% + mutate(File = file.path("results", paste0(score, "_", model$name, "_", sprintf("%02d", Patient), ".jpg"))) %>% + filter(file.exists(here(File))) %>% + mutate(Link = file.path("..", File), + Link = gsub("\\+", "%2B", Link), + # Link = paste0("![](", Link, ")")) %>% + Link = paste0("- [Patient ", Patient, "](", Link, ")")) %>% + pull(Link) %>% + cat(sep = "\n") +# NB: printing of images does not always work, so just put the links +``` + +NB: the plots are not generated during the report generation but beforehand. +If nothing appears, it may be because the plots are not saved (files not found). 
diff --git a/analysis/04a_run_validation.R b/analysis/04a_run_validation.R new file mode 100644 index 0000000..44e009b --- /dev/null +++ b/analysis/04a_run_validation.R @@ -0,0 +1,360 @@ +# Notes ------------------------------------------------------------------- + +# Run validation for multivariate models +# NB: use t_horizon=4 for model comparison but t_horizon=1 for recommendations + +# Initialisation ---------------------------------------------------------- + +rm(list = ls()) # Clear Workspace (better to restart the session) + +set.seed(2021) # Reproducibility (Stan use a different seed) + +source(here::here("analysis", "00_init.R")) # Load libraries, variables and functions +library(foreach) +library(doParallel) + +score <- "SCORAD" +dataset <- "PFDC" + +#### OPTIONS +model <- ScoradPred(a0 = 0.04, # 0.04 + independent_items = FALSE, + include_calibration = TRUE, + include_treatment = TRUE, + treatment_names = c("localTreatment", "emollientCream"), + include_trend = FALSE, + include_recommendations = TRUE) +# set include_recommendations the same as include_treatment + +run <- FALSE +t_horizon <- 4 +n_chains <- 4 +n_it <- 2000 +n_cluster <- 4 +#### + +stopifnot( + is_scalar_logical(run), + is_scalar_wholenumber(n_chains), + n_chains > 0, + is_scalar_wholenumber(n_it), + n_it > 0, + is_scalar_wholenumber(t_horizon), + t_horizon > 0, + is_scalar_wholenumber(n_cluster), + between(n_cluster, 1, floor((parallel::detectCores() - 2) / n_chains)) +) + +## Parameters +param <- c("lpd", "agg_rep", "y_pred") +if (model$include_recommendations) { + param <- c(param, "y_rec", "agg_rec", "p_treat") +} + +## Files +outcomes <- detail_POSCORAD()$Name +# Validation files +file_dict <- lapply(outcomes, + function(x) { + get_results_files(outcome = x, + model = model$name, + dataset = dataset, + val_horizon = t_horizon, + root_dir = here()) + }) +names(file_dict) <- outcomes +# Recommendation files +rec_files <- get_recommendation_files(outcome = score, + model = model$name, + dataset = dataset, + val_horizon = t_horizon, + root_dir = here()) + +if (run) { + compiled_model <- rstan::stan_model(model$stanmodel) +} + +# Prepare Stan input ------------------------------------------------------ + +l <- load_PFDC() + +POSCORAD <- l$POSCORAD %>% + rename(Time = Day) + +# Prefill Stan input +data_stan0 <- prefill_standata_FullModel(model) + +# Prepare dataset (model$item_spec controls the indexing of items) +df <- POSCORAD %>% + select(one_of("Patient", "Time", model$item_spec$Label)) %>% + pivot_longer(cols = all_of(model$item_spec$Label), names_to = "Label", values_to = "Score") %>% + drop_na() %>% + left_join(model$item_spec[, c("Label", "ItemID")], by = c("Label")) %>% + select(-Label) + +if (model$include_calibration) { + # Format SCORAD + scorad <- l$SCORAD %>% + rename(Time = Day) %>% + select(one_of("Patient", "Time", model$item_spec$Label)) %>% + pivot_longer(cols = all_of(model$item_spec$Label), names_to = "Label", values_to = "Score") %>% + drop_na() %>% + left_join(model$item_spec[, c("Label", "ItemID")], by = c("Label")) + scorad <- scorad %>% mutate(Iteration = get_fc_iteration(Time, horizon = t_horizon)) +} +df <- df %>% mutate(Iteration = get_fc_iteration(Time, horizon = t_horizon)) + +if (model$include_treatment) { + + treatment_lbl <- paste0(model$treatment_names, "WithinThePast2Days") + + treat <- POSCORAD %>% + select(all_of(c("Patient", "Time", treatment_lbl))) %>% + pivot_longer(cols = all_of(treatment_lbl), names_to = "Treatment", values_to = "UsageWithinThePast2Days") %>% + 
mutate(Treatment = vapply(Treatment, function(x) {which(x == treatment_lbl)}, numeric(1)) %>% as.numeric()) %>% + drop_na() + + treat <- treat %>% mutate(Iteration = get_fc_iteration(Time, horizon = t_horizon)) + +} + +# Nothing to prepare for recommendation (or trend) + +pt <- unique(df[["Patient"]]) +t_max <- df %>% + group_by(Patient) %>% + summarise(LastTime = max(Time)) %>% + ungroup() + +# Forward chaining -------------------------------------------------------- + +train_it <- get_fc_training_iteration(df[["Iteration"]]) +fc_it <- detail_fc_training(df, horizon = t_horizon) + +if (run) { + + cl <- makeCluster(n_cluster, outfile = "") + registerDoParallel(cl) + + for (j in 1:length(file_dict)) { + dir.create(file_dict[[j]]$ValDir) + } + if (model$include_recommendations) { + dir.create(rec_files$RecDir) + } + + out <- foreach(i = rev(seq_along(train_it))) %dopar% { + it <- train_it[i] + + # Need to reload functions and libraries + source(here::here("analysis", "00_init.R")) + + duration <- Sys.time() + cat(glue::glue("Starting iteration {it}"), sep = "\n") + + #### + + # Split dataset + split <- lapply(1:model$D, + function(d) { + df %>% + filter(ItemID == d) %>% + select(-ItemID) %>% + split_fc_dataset(df = ., it) %>% + lapply(function(x) { + x %>% mutate(ItemID = d) + }) + }) + train <- lapply(split, function(x) {x$Training}) %>% bind_rows() + test <- lapply(split, function(x) {x$Testing}) %>% bind_rows() + + # Deal with reso=0.1 + d_subj <- model$item_spec %>% filter(Resolution == 0.1) %>% pull(ItemID) + l <- lapply(list(train, test), + function(x) { + x %>% + mutate(Resolution = case_when(ItemID %in% d_subj ~ 0.1, + TRUE ~ 1), + Score = round(Score / Resolution)) %>% + select(-Resolution) + }) + + # Calibration data + if (model$include_calibration) { + train_cal <- scorad %>% filter(Iteration <= it) %>% + mutate(Resolution = case_when(Label %in% detail_POSCORAD("Subjective symptoms")$Label ~ 0.1, + TRUE ~ 1), + Score = round(Score / Resolution)) %>% + select(-Resolution) + } else { + train_cal <- NULL + } + + # Treatment data + if (model$include_treatment) { + train_treat <- treat %>% filter(Iteration <= it) + } else { + train_treat <- NULL + } + + # Add recommendations input + if (model$include_recommendations) { + # Make recommendation at the last time of the training iteration (whether there is a training observation, or observed outcome) + df_rec <- data.frame(Patient = pt, + Time = fc_it %>% filter(Iteration == it) %>% pull(LastTime)) %>% + full_join(t_max, by = "Patient") %>% + filter(Time <= LastTime) %>% + select(-LastTime) + } else { + df_rec <- NULL + } + + data_stan <- c(data_stan0, + prepare_standata(model, + train = l[[1]], + test = l[[2]], + cal = train_cal, + treat = train_treat, + rec = df_rec)) + + id <- bind_rows(l[[1]], l[[2]], train_cal, train_treat, df_rec) %>% + get_index() + + fit <- sampling(compiled_model, + data = data_stan, + pars = param, + control = list(adapt_delta = 0.9), + init = 0, + iter = n_it, + chains = n_chains, + refresh = 0) + + ## Performance of individual signs + pred <- rstan::extract(fit, pars = "y_pred")[[1]] + smp <- lapply(1:ncol(pred), function(i) {pred[, i]}) + perf <- test %>% + mutate(lpd0 = extract_lpd(fit), + Samples = smp) + for (d in 1:model$D) { + perf %>% + filter(ItemID == d) %>% + select(-ItemID) %>% + mutate(Samples = map(Samples, ~(.x * model$item_spec$Resolution[d]))) %>% + add_metrics2_d(support = seq(0, model$item_spec$Maximum[d], model$item_spec$Resolution[d])) %>% + select(-lpd) %>% + rename(lpd = lpd0) %>% + 
saveRDS(file = here(file_dict[[model$item_spec$Name[d]]]$ValDir, + paste0("val_", it, ".rds"))) + } + + ## Performance of aggregates + pred_agg <- rstan::extract(fit, pars = "agg_rep")[[1]] + agg_names <- gsub("weight_", "", colnames(data_stan$agg_weights)) + for (d in 1:length(agg_names)) { + # Obtain test set for aggregate + agg_dict <- detail_POSCORAD() %>% + filter(Name == agg_names[d]) + test_agg <- POSCORAD %>% + rename(Score = all_of(agg_dict$Label)) %>% + select(Patient, Time, Score) %>% + mutate(Iteration = get_fc_iteration(Time, t_horizon)) %>% + split_fc_dataset(df = ., it) + test_agg <- test_agg$Testing + # Extract predictive samples + id_test <- left_join(test_agg, id, by = c("Patient", "Time")) %>% pull(Index) + smp_agg_d <- lapply(seq_along(id_test), function(i) {pred_agg[, id_test[i], d]}) + perf_agg <- test_agg %>% + mutate(Samples = smp_agg_d) # replace by EczemaPred::samples_to_list(pred_agg[, id_test, d]) + if (agg_names[d] %in% c("SCORAD", "oSCORAD")) { + perf_agg <- perf_agg %>% + add_metrics2_c(., add_samples = 0:agg_dict$Maximum, bw = 0.5) + } else { + perf_agg <- perf_agg %>% + add_metrics2_d(., support = seq(0, agg_dict$Maximum, agg_dict$Resolution)) + } + # Save validation results (better to save in the loop in case something breaks) + saveRDS(perf_agg, file = here(file_dict[[agg_names[d]]]$ValDir, + paste0("val_", it, ".rds"))) + } + + ## Recommendations + if (model$include_recommendations) { + + aggrec <- rstan::extract(fit, pars = "agg_rec")[[1]] + yrec <- rstan::extract(fit, pars = "y_rec")[[1]] + + # Add severity item samples to pred_rec + pred_rec <- df_rec + for (d in 1:nrow(model$item_spec)) { + tmp <- model$item_spec[d, ] + pred_rec[[tmp$Label]] <- lapply(1:nrow(pred_rec), + function(j) { + yrec[, , j, tmp$ItemID] + }) + } + # Add aggregates samples to pred_rec + for (d in seq_along(agg_names)) { + pred_rec[[detail_POSCORAD(agg_names[d])$Label]] <- lapply(1:nrow(pred_rec), + function(j) { + aggrec[, , j, d] + }) + } + # Add p_treat to pred_rec + df_rec <- left_join(df_rec, id, by = c("Patient", "Time")) + ptreat <- rstan::extract(fit, pars = "p_treat")[[1]] + ptreat <- ptreat[, df_rec[["Index"]], ] + for (i in seq_along(model$treatment_names)) { + pred_rec[[paste0(model$treatment_names[i], "_post")]] <- lapply(1:nrow(pred_rec), + function(j) { + ptreat[, j, i] + }) + } + + # Save recommendation results + saveRDS(list(Predictions = pred_rec, Actions = model$actions), + file = here(rec_files$RecDir, paste0("rec_", it, ".rds"))) + } + + #### + + duration <- Sys.time() - duration + cat(glue::glue("Ending iteration {it} after {round(duration, 1)} {units(duration)}"), sep = "\n") + + # Return + NULL + } + stopCluster(cl) + + # Recombine validation results + for (j in 1:length(file_dict)) { + recombine_results(dir_name = file_dict[[j]]$ValDir, + output_file = file_dict[[j]]$Val, + expected_number_of_files = length(train_it)) + } + + # Recombine recommendation results + if (model$include_recommendations) { + + # Check actions dataframes + files <- list.files(rec_files$RecDir, full.names = TRUE) + list_actions <- lapply(files, + function(x) { + tmp <- readRDS(x) + return(tmp[["Actions"]]) + }) + all_same_actions <- all(vapply(list_actions, function(x) {all.equal(x, list_actions[[1]])}, logical(1))) + if (!all_same_actions) { + warning("The actions dataframes are not the same across iterations.") + } + + recombine_results(dir_name = rec_files$RecDir, + output_file = rec_files$RecFile, + reading_function = function(x) {readRDS(x)[["Predictions"]]}, + 
expected_number_of_files = length(train_it)) + res_rec <- readRDS(rec_files$RecFile) + saveRDS(list(Predictions = res_rec, Actions = list_actions[[1]]), + file = rec_files$RecFile) + + } + +} diff --git a/analysis/04b_run_validation_reference.R b/analysis/04b_run_validation_reference.R new file mode 100644 index 0000000..1a50659 --- /dev/null +++ b/analysis/04b_run_validation_reference.R @@ -0,0 +1,157 @@ +# Notes ------------------------------------------------------------------- + +# Run validation for univariate reference models (uniform, historical forecast) + +# Initialisation ---------------------------------------------------------- + +rm(list = ls()) # Clear Workspace (better to restart the session) + +set.seed(2021) # Reproducibility (Stan use a different seed) + +source(here::here("analysis", "00_init.R")) +library(foreach) +library(doParallel) + +dataset <- "PFDC" + +#### OPTIONS +score <- "SCORAD" +mdl_name <- "historical" + +run <- FALSE +t_horizon <- 4 +n_chains <- 4 +n_it <- 2000 +n_cluster <- 2 # floor((parallel::detectCores() - 2) / n_chains) +#### + +item_dict <- detail_POSCORAD() + +score <- match.arg(score, item_dict[["Name"]]) +mdl_name <- match.arg(mdl_name, c("uniform", "historical")) + +item_dict <- item_dict %>% filter(Name == score) +item_lbl <- as.character(item_dict[["Label"]]) +max_score <- item_dict[["Maximum"]] +reso <- item_dict[["Resolution"]] +M <- round(max_score / reso) + +is_continuous <- (score %in% c("SCORAD", "oSCORAD")) + +## Files +file_dict <- get_results_files(outcome = score, + model = mdl_name, + dataset = dataset, + val_horizon = t_horizon, + root_dir = here()) + +# Data --------------------------------------------------------------------- + +POSCORAD <- load_PFDC()$POSCORAD + +# Subset dataset +df <- POSCORAD %>% + rename(Time = Day, Score = all_of(item_lbl)) %>% + select(Patient, Time, Score) %>% + drop_na() + +pt <- unique(df[["Patient"]]) + +# Forward chaining -------------------------------------------------------- + +df <- df %>% mutate(Iteration = get_fc_iteration(Time, t_horizon)) +train_it <- get_fc_training_iteration(df[["Iteration"]]) + +if (run) { + + cl <- makeCluster(n_cluster, outfile = "") + registerDoParallel(cl) + + dir.create(file_dict$ValDir) + + out <- foreach(i = rev(seq_along(train_it))) %dopar% { + it <- train_it[i] + + # Need to reload functions and libraries + source(here::here("analysis", "00_init.R")) + + duration <- Sys.time() + cat(glue::glue("Starting iteration {it}"), sep = "\n") + + #### + + split <- split_fc_dataset(df, it) + train <- split$Training + test <- split$Testing + + # Uniform forecast + if (mdl_name == "uniform" && !is_continuous) { + perf <- test %>% + mutate(Score = round(Score / reso)) %>% + add_uniform_pred(test = ., + max_score = M, + discrete = TRUE, + include_samples = FALSE) %>% + mutate(Score = Score * reso) + } + if (mdl_name == "uniform" && is_continuous) { + perf <- test %>% + add_uniform_pred(test = ., + max_score = max_score, + discrete = FALSE, + include_samples = TRUE, + n_samples = 2 * max_score) + } + + # Historical forecast + if (mdl_name == "historical" && !is_continuous) { + perf <- test %>% + mutate(Score = round(Score / reso)) %>% + add_historical_pred(test = ., + train = mutate(train, Score = round(Score / reso)), + max_score = M, + discrete = TRUE, + add_uniform = TRUE, + include_samples = FALSE) %>% + mutate(Score = Score * reso) + } + if (mdl_name == "historical" && is_continuous) { + perf <- test %>% + add_historical_pred(test = ., + train = train, + max_score = 
max_score, + discrete = FALSE, + add_uniform = TRUE, + include_samples = TRUE) + } + + perf <- perf %>% + select(-LastTime, -LastScore) + + # Save results (better to save in the loop in case something breaks) + saveRDS(perf, file = here(file_dict$ValDir, paste0("val_", it, ".rds"))) + + #### + + duration <- Sys.time() - duration + cat(glue::glue("Ending iteration {it} after {round(duration, 1)} {units(duration)}"), sep = "\n") + + # Return + NULL + } + stopCluster(cl) + + # Recombine results + files <- list.files(file_dict$ValDir, full.names = TRUE) + if (length(files) < length(train_it)) { + warning(glue::glue("Number of files (={length(files)}) less than the number of unique iterations (={length(train_it)}). + Some runs may have failed.")) + } + res <- lapply(files, + function(f) { + readRDS(f) + }) %>% + bind_rows() + saveRDS(res, file = file_dict$Val) + +} diff --git a/analysis/05_check_performance.Rmd b/analysis/05_check_performance.Rmd new file mode 100644 index 0000000..cdb21c3 --- /dev/null +++ b/analysis/05_check_performance.Rmd @@ -0,0 +1,223 @@ +--- +title: "Predictive performance of `r params$score`" +author: "Guillem Hurault" +date: "`r format(Sys.time(), '%d %B %Y')`" +output: html_document +params: + score: "SCORAD" + t_horizon: 4 + pred_horizon: 4 + max_horizon: 14 + acc_thr: 5.0 + p_thr: 0.95 +--- + +```{r setup, include=FALSE} +knitr::opts_chunk$set(echo = FALSE, + message = FALSE, + warning = FALSE, + fig.height = 5, + fig.width = 12, + dpi = 200) + +# Params: +# - t_horizon: horizon that was used for forward chaining +# - pred_horizon: prediction horizon to plot +# - max_horizon: remove predictions where horizon > max_horizon +# - acc_thr: accuracy threshold +# - p_thr: probability threshold for quantile error + +source(here::here("analysis", "00_init.R")) + +dataset <- "PFDC" +item_dict <- detail_POSCORAD() +score <- match.arg(params$score, c(item_dict[["Name"]])) +intensity_signs <- detail_POSCORAD("Intensity signs")$Name + +max_score <- item_dict %>% filter(Name == score) %>% pull(Maximum) + +if (params$t_horizon == 1) { + mdl_names <- c("uniform", "historical", "ScoradPred+h004+corr+cal+treat") + comp_mdl <- "historical" +} else { + mdl_names <- c("uniform", "historical", + "ScoradPred", + "ScoradPred+h004", + "ScoradPred+h004+corr", + "ScoradPred+h004+corr+cal", + "ScoradPred+h004+corr+cal+treat", + "ScoradPred+h004+corr+cal+treat+trend") + # NB: order important for successive lpd_diff comparison + comp_mdl <- "ScoradPred" # common reference model for lpd_diff ("historical" is used for lpd_diff vs y) +} + +res_files <- vapply(mdl_names, + function(m) { + get_results_files(outcome = score, + model = m, + dataset = dataset, + val_horizon = params$t_horizon, + root_dir = here())$Val + }, + character(1)) + +stopifnot(params$t_horizon > 0, + params$pred_horizon > 0, + params$max_horizon > params$pred_horizon, + all(file.exists(res_files)), + length(comp_mdl) == 1, + comp_mdl %in% mdl_names) + +fc_it <- load_PFDC()$POSCORAD %>% + rename(Time = Day) %>% + detail_fc_training(df = ., params$t_horizon) +``` + +## Learning curves and performance change for increasing prediction horizon + +```{r perf-curves} +if (score %in% c("SCORAD", "oSCORAD")) { + metrics <- c("lpd", "CRPS", "Accuracy", "QE") +} else { + metrics <- c("lpd", "RPS") +} + +pl <- lapply(metrics, + function(metric) { + + perf <- lapply(1:length(mdl_names), + function(i) { + # NB: Can have problem predicting from prior predictive distribution for RW models + res <- readRDS(res_files[i]) + if (metric == 
"Accuracy") { + res <- res %>% + mutate(Accuracy = compute_accuracy(res[["Score"]], res[["Samples"]], params$acc_thr)) + } + if (metric == "QE") { + res <- res %>% + mutate(QE = compute_quantile_error(res[["Score"]], res[["Samples"]], params$p_thr)) + } + res %>% + filter(Horizon <= params$max_horizon, + Iteration > 0 | mdl_names[i] != "RW") %>% + estimate_performance(metric, ., fc_it, adjust_horizon = !(mdl_names[i] %in% c("historical", "uniform"))) %>% + mutate(Model = mdl_names[i]) + }) %>% + bind_rows() %>% + mutate(Model = factor(Model, levels = mdl_names)) + + p1 <- perf %>% + filter(Variable == "Fit", + Horizon == params$pred_horizon) %>% + plot_learning_curves(perf = ., + metric = ifelse(metric == "lpd" && score %in% c("SCORAD", "oSCORAD"), "", metric), + fc_it = fc_it) + + labs(y = metric) + + p2 <- perf %>% + filter(Variable == "Horizon") %>% + plot_horizon_change() + + plot_grid(p1 + theme(legend.position = "none"), + p2 + theme(legend.position = "none"), + get_legend(p1 + theme(legend.position = "right")), + nrow = 1, rel_widths = c(4, 3, 1), labels = c("A", "B", "")) + + }) +names(pl) <- metrics +pl + +if (FALSE) { + # Save plots + for (i in 1:length(metrics)) { + ggsave(plot = pl[[i]], + filename = here("results", paste0(score, "_", metrics[i], ".jpg")), + width = 13, height = 8, dpi = 300, units = "cm", scale = 2) + } +} +``` + +## $\Delta$ lpd (observation-level) + +```{r perf-deltalpd} +res <- lapply(1:length(mdl_names), + function(i) { + # NB: Can have problem predicting from prior predictive distribution for RW models + readRDS(res_files[i]) %>% + filter(Horizon <= params$max_horizon, + Iteration > 0 | mdl_names[i] != "RW") %>% + mutate(Model = mdl_names[i]) + }) %>% + bind_rows() %>% + mutate(Model = factor(Model, levels = mdl_names)) + +# lpd_diff vs training iteration +# alternative to meta-model: no need to control for prediction horizon or patient with non-constant forecast +brk <- c(1.1, 1.25, 1.5, 2, 10, 100) +brk <- c(signif(rev(1 / brk), 2), 1, brk) +p3 <- res %>% + filter(!(Model %in% c("uniform", "historical"))) %>% + compute_skill_scores(., ref_mdl = comp_mdl, metrics = "lpd") %>% + filter(Horizon <= params$max_horizon, + abs(lpd_diff) < Inf) %>% + group_by(Model, Iteration) %>% + summarise(Mean = mean(lpd_diff), SD = sd(lpd_diff), SE = SD / sqrt(n())) %>% + ungroup() %>% + drop_na() %>% + left_join(., fc_it, by = "Iteration") %>% + plot_learning_curves(perf = ., metric = paste0("lpd - lpd(", comp_mdl, ")"), fc_it = fc_it) + + scale_y_continuous(breaks = log(brk), labels = paste0("log(", brk, ")")) +# p3 + +# lpd_diff (ref is historical) vs y +# "error" relative to a historical forecast +p4 <- res %>% + compute_skill_scores(., ref_mdl = "historical", metrics = "lpd") %>% + filter(Horizon <= params$max_horizon, + abs(lpd_diff) < Inf, + Iteration > 0) %>% + plot_perf_vs_score(perf = ., metric = "lpd_diff", discrete = (score %in% intensity_signs), max_score = max_score) + + scale_y_continuous(breaks = log(brk), labels = paste0("log(", brk, ")")) +# p4 + + +tryCatch({ + plot_grid(p3, p4, nrow = 1, labels = "AUTO") +}, +error = function(e) { + p3 +}) +# ggsave(here("results", paste0(score, "_lpd_diff.jpg")), width = 13, height = 8, dpi = 300, units = "cm", scale = 2.5) +``` + +### $\Delta$ lpd between successive model improvements + +```{r perf-deltalpd-successive} +# Only consider ScoradPred models (start at index 3) +tmp <- lapply(3:(length(mdl_names) - 1), + function(i) { + res %>% + filter(Model %in% mdl_names[c(i, i + 1)]) %>% + compute_skill_scores(., 
ref_mdl = mdl_names[i], metrics = "lpd") %>% + filter(Model == mdl_names[i + 1]) %>% + mutate(Label = paste0(mdl_names[i + 1], " vs. ", mdl_names[i])) + # mutate(Label = gsub(mdl_names[i], "", mdl_names[i + 1], fixed = TRUE)) + }) %>% + bind_rows() + +tmp %>% + group_by(Label) %>% + summarise(Mean = mean(lpd_diff), + SE = sd(lpd_diff) / sqrt(n())) %>% + ggplot(aes(x = Label, y = Mean, ymin = Mean - SE, ymax = Mean + SE)) + + geom_pointrange() + + coord_flip(ylim = log(c(1 / 1.1, 1.1))) + + scale_y_continuous(breaks = log(brk), labels = paste0("log(", brk, ")")) + + labs(x = "", y = "Pairwise difference in lpd") + + theme_bw(base_size = 15) +# NB: if lpd_diff smalls, can interpret as multiplicative change in average probability on the outcome +# (in that case, changing the scale is optional) + +plot_perf_vs_score(tmp) + + theme(legend.position = "right") +``` diff --git a/analysis/06_analyse_recommendations.Rmd b/analysis/06_analyse_recommendations.Rmd new file mode 100644 index 0000000..231d92e --- /dev/null +++ b/analysis/06_analyse_recommendations.Rmd @@ -0,0 +1,586 @@ +--- +title: "Analyse treatment recommendations" +author: "Guillem Hurault" +date: "`r format(Sys.time(), '%d %B %Y')`" +output: html_document +params: + score: "SCORAD" + min_time: 14 + exclude_patients: FALSE +--- + +```{r setup, include=FALSE} +knitr::opts_chunk$set(echo = TRUE, + message = FALSE) + +# Args: +# - score: Which score to optimise +# - min_time Only consider observations with Time >= min_time (to allow the model to be sufficiently trained) +``` + +# Introduction + +```{r init} +source(here::here("analysis", "00_init.R")) # Load libraries, variables and functions +library(latex2exp) + +## Where the predictions recommendations are saved +filename <- get_recommendation_files(outcome = "SCORAD", + model = paste0("FullModel", "_h004"), + dataset = "PFDC", + val_horizon = 1)$RecFile +## +lrec <- readRDS(here(filename)) + +score_dict <- detail_POSCORAD(params$score) +``` + +## Utility function + +We aim to maximise a utility function (or equivalently minimise a loss function), which we define in terms of reward as: +```{r utility-function} +utility_from_reward <- function(r, pw = 1) { + # Utility as a function of reward + # Utility is an increasing function of the reward. + # The reward is assumed > 0. + # + # Args: + # r: reward + # pw: power of the exponent + # + # Returns + # Utility + + return(r^pw) +} + +utility_func <- function(y, cost, M = score_dict$Maximum, ...) { + # Utility function as a function of the state (score) and cost of the action + # The pair (score, cost) is first linearly transformed into a reward, between 0 and 1, + # where y=0 corresponds to reward=1 and y=M corresponds to reward=0. + # (y + cost) is censored at 0 and M so the reward is always in [0, 1] + # NB: instead of defining utility as a function of the action, we directly input the cost of the action. + # + # Args: + # y: score value + # cost: cost of the action + # M: maximum value that the score can take + + (1 - (y + cost) / M) %>% + pmin(., 1) %>% + pmax(., 0) %>% + utility_from_reward(...) +} +``` + +```{r plot-utility, include=FALSE, eval=FALSE} +data.frame(Reward = seq(0, 1, .01)) %>% + mutate(Utility = utility_from_reward(Reward)) %>% + ggplot(aes(x = Reward, y = Utility)) + + geom_line() +``` + +Where `y` correspond the score of interest (e.g. `r params$score`) and `cost` correspond to the cost of the action, in the unit of the score. 
+We can interpret the `cost` as the minimum decrease in the score that we would expect in order to choose the action. + +We consider two treatments, "localTreatment" and "emollientCream", and we express the `cost` as a function of which treatment is used: +$\mathit{cost = localTreatment * cost_{localTreatment} + +emollientCream * cost_{emollientCream} + +localTreatment * emollientCream * cost_{bothTreatment}}$ + +Where: + +- $\mathit{localTreatment} \in \{0,1\}$ and $\mathit{emollientCream} \in \{0,1\}$ indicate whether "localTreatment" and "emollientCream" were used, respectively. +- $\mathit{cost_{localTreatment}}$ and $\mathit{cost_{emollientCream}}$ are the costs associated with using "localTreatment" and "emollientCream", respectively. +- $\mathit{cost_{bothTreatment}}$ is the additional cost of using both treatments simultaneously. + +### Possible improvement + +- The utility function could be strictly concave (like the utility of money): e.g. if we assume that the benefit of improving from SCORAD=50 to SCORAD=40 is greater than the benefit of improving from SCORAD=10 to SCORAD=0. +This would be more or less equivalent to saying that the cost is a function of `y` (lower cost when `y` is high, because patients may be less afraid of side effects when their condition is already severe?), but I find it's a bit "weird" to have the cost depend on the state... + +```{r eval=FALSE, include=FALSE} +# Using a utility function similar to the parametric function that is often used for money + +min_RSS <- function(data, par) { + # Find values for coefficients of utility function assuming we know two points (u, r) stored in data + # b = par[1], c = par[2] + with(data, sum((u - par[1] * log(1 + par[2] * r))^2)) +} +hpar_utility <- optim(par = c(0.5, 0.5), + min_RSS, + data = data.frame(u = c(0.5, 1), + r = c(0.9, 1)))$par + +utility_from_reward <- function(r, b = hpar_utility[1], c = hpar_utility[2]) { + stopifnot(r > (-1 / c)) + return(b * log(1 + r * c)) +} +``` + +## Decision analysis + +The decision analysis consists of computing the utility under the different actions, and choosing the action that maximises the expected utility: + +$a^* = \operatorname{argmax}_a \big( E(U(a)) \big)$ + +Alternatively, we can maximise a different objective function than the expected utility. +Notably, we can use a risk-sensitive objective function, where risk can be defined as the variance of the utility $V(U)$. +For instance, + +$a^* = \operatorname{argmax}_a \big( E(U(a)) - q * \sqrt{V(U(a))} \big)$ + +Where $q$ can be interpreted as a risk tolerance. +$q > 0$ corresponds to an agent that is risk averse (penalises uncertainty), whereas $q < 0$ corresponds to an agent that is risk seeking (welcoming uncertainty). +In this context, $q$ can also be interpreted as a z-score, assuming the utility is normally distributed. +For instance, if $q = 1.96$, the objective function is the lower bound of the 95% CI of the utility, i.e. the 2.5% quantile. +$q > 0$ therefore corresponds to optimising a "worst-case" (we could view the risk averse agent as pessimistic, as the utility is more likely to be greater than the maximisation objective). +On the other hand, $q < 0$ encourages exploration of new treatments with uncertain effects (cf. the exploration-exploitation trade-off).
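+To make the decision rule concrete, here is a minimal, self-contained sketch of selecting an action from posterior samples of the score (all values below are hypothetical and this is not an executed chunk; the actual computation is done in the `compute-recommendations` chunk further down, using `utility_func()` defined above):
+
+```r
+# Hypothetical posterior samples of the next-day score under each of the 4 actions
+set.seed(1)
+y_samples <- matrix(rnorm(4000 * 4, mean = c(40, 35, 33, 30), sd = 5),
+                    ncol = 4, byrow = TRUE)
+
+actions_sketch <- expand.grid(localTreatment = 0:1, emollientCream = 0:1)
+
+# Total cost of each action, following the decomposition above (made-up costs)
+cost_sketch <- with(actions_sketch,
+                    localTreatment * 0.5 +                  # cost_localTreatment
+                      emollientCream * 0.5 +                # cost_emollientCream
+                      localTreatment * emollientCream * 0)  # cost_bothTreatment
+
+# Utility samples for each action (M = 103 is the SCORAD maximum)
+U <- sapply(seq_len(nrow(actions_sketch)),
+            function(a) utility_func(y_samples[, a], cost_sketch[a], M = 103))
+
+# Risk-sensitive objective E(U(a)) - q * sqrt(V(U(a))), here with q = 1.5 (risk averse)
+q <- 1.5
+objective <- colMeans(U) - q * apply(U, 2, sd)
+actions_sketch[which.max(objective), ]  # recommended action
+```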
+ +We can generate recommendations for different risk and cost profiles, for example: + +```{r decision-parameters} +decision_parameters <- expand_grid( + CostProfile = factor(c("F", "N", "H"), levels = c("F", "N", "H"), labels = c("No Cost", "Normal Cost", "High Cost")), + RiskProfile = factor(c("A", "N", "S"), levels = c("A", "N", "S"), labels = c("Risk Averse", "Risk Neutral", "Risk Seeking")) +) %>% + mutate(DecisionProfile = 1:nrow(.)) %>% + mutate( + risk_tolerance = case_when(RiskProfile == "Risk Averse" ~ 1.5, + RiskProfile == "Risk Neutral" ~ 0, + RiskProfile == "Risk Seeking" ~ -1.5), + cost_localTreatment = case_when(CostProfile == "No Cost" ~ 0, + CostProfile == "Normal Cost" ~ 0.5, + CostProfile == "High Cost" ~ 3), + cost_emollientCream = cost_localTreatment, + cost_bothTreatment = case_when(CostProfile == "High Cost" ~ 3, + TRUE ~ 0) + ) + +knitr::kable(decision_parameters) +``` + +## Evaluating recommendations + +While it is challenging to properly evaluate recommendations in the absence of counterfactuals, a naive approach is to compare the observed outcomes when patients' actions matched our recommendations vs. when they differed. + +We compute the change in `r params$score`, $\Delta$ `r params$score`, as the outcome measure (we cannot compute a difference in utility since there is no "baseline" utility: the utility depends on the action), and we compute patients' compliance with our treatment recommendations. + +When considering each treatment individually, we define compliance as the difference between our recommendation and what the patient actually used, with $\mathit{compliance} > 0$ meaning that we recommended more than what the patient used and $\mathit{compliance} < 0$ meaning that we recommended less than what the patient used. +In any case, if the recommendations are "good" we would expect to observe a (negative) minimum in $\Delta$ `r params$score` at $\mathit{compliance} = 0$. + +If we consider both treatments together, we compute compliance as the mean squared error (Brier score) between our recommendations and the patient's actions. +A minimal numerical sketch of this compliance computation is given at the end of this section. + +In practice, we do not observe whether the patient used treatment on a given day, but only whether they used treatment within the past two days. +A limitation of the above method is therefore that we have to rely on the model's inference of daily treatment usage to evaluate the model's recommendations. + +### Questions + +- Only consider observations with high confidence in the treatment usage estimates? +- Need to control for confounders? Maybe keep it simple, don't try to overinterpret these results.
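+As the minimal numerical sketch of the compliance measure announced above (the numbers are made up; the actual computation appears in the `compute-recommendations` chunk of the Results section):
+
+```r
+# Recommended daily usage (0/1) for the two treatments, and the posterior
+# probability that the patient actually used them that day (both hypothetical)
+rec  <- c(localTreatment = 1, emollientCream = 0)
+used <- c(localTreatment = 0.8, emollientCream = 0.1)
+
+# Per-treatment compliance: > 0 if we recommended more than the patient used,
+# < 0 if we recommended less
+rec - used
+
+# Overall compliance as a Brier-score-type distance (0 means full agreement)
+mean((rec - used)^2)
+```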
+ +# Results + +```{r processing} +# Load results +actions <- lrec$Actions %>% + mutate(ActionLabel = glue::glue("({localTreatment},{emollientCream})")) +actions <- tibble(ActionLabel = c("(0,0)", "(0,1)", "(1,0)", "(1,1)"), + ActionExpression = list(TeX(r'(($\bar{C}$,$\bar{E}$))'), + TeX(r'(($\bar{C}$,$E$))'), + TeX(r'(($C$,$\bar{E}$))'), + TeX(r'(($C$,$E$))'))) %>% + left_join(actions, ., by = "ActionLabel") %>% + arrange(localTreatment, emollientCream) %>% + mutate(ActionLabel = factor(ActionLabel, levels = ActionLabel)) + +pred_rec <- lrec$Predictions %>% + rename(Prediction = all_of(score_dict$Label)) %>% + select(Patient, Time, Prediction, localTreatment_post, emollientCream_post) + +POSCORAD <- load_PFDC()$POSCORAD %>% + rename(Time = Day) + +# Compute observed change in score (outcome) +df_rec <- POSCORAD %>% + rename(Score = all_of(score_dict$Label)) %>% + group_by(Patient) %>% + mutate(Change = lead(Score) - Score) %>% + ungroup() %>% + select(Patient, Time, Score, Change) +pred_rec <- left_join(pred_rec, df_rec, by = c("Patient", "Time")) + +# Process posterior probability of using treatment +pred_rec <- pred_rec %>% + mutate(localTreatment_post = vapply(localTreatment_post, mean, numeric(1)), + emollientCream_post = vapply(emollientCream_post, mean, numeric(1))) +``` + +## Treatment usage + +During data exploration, we noticed that patients mostly answered "no" to the question "did you use treatment within the past two days?", for both "emollientCream" and "localTreatment". +However, "emollientCream" was the preferred treatment "used within the past two days", rather than "localTreatment". + +We confirmed this from the inference of daily treatment usage. + +```{r plot-treatment, message=FALSE} +treat_summary <- pred_rec %>% + select(Patient, Time, localTreatment_post, emollientCream_post) %>% + pivot_longer(cols = all_of(c("localTreatment_post", "emollientCream_post")), + names_to = "Treatment", + values_to = "MeanDailyUsageProb") %>% + mutate(Treatment = gsub("_post", "", Treatment)) %>% + group_by(Patient, Treatment) %>% + summarise(MeanUsage = mean(MeanDailyUsageProb), + PropDeterministic = mean(MeanDailyUsageProb == 0 | MeanDailyUsageProb == 1), + Prop0 = mean(MeanDailyUsageProb == 0), + Prop1 = mean(MeanDailyUsageProb == 1), + PropQuasi0 = mean(MeanDailyUsageProb < 0.1), + PropQuasi1 = mean(MeanDailyUsageProb > 0.9)) %>% + ungroup() + +lapply(c("MeanUsage", "PropDeterministic", "Prop0", "Prop1"), + function(x) { + treat_summary %>% + select(all_of(c("Patient", "Treatment", x))) %>% + pivot_wider(names_from = "Treatment", values_from = x) %>% + ggplot(aes(x = localTreatment, y = emollientCream)) + + geom_point() + + geom_abline(intercept = 0, slope = 1, linetype = "dashed") + + coord_cartesian(xlim = c(0, 1), ylim = c(0, 1)) + + labs(title = x) + }) %>% + plot_grid(plotlist = ., ncol = 2) +``` + +Most patients rarely use one or both of the treatments. +In the heatmap below, a cross indicates that the average frequency of daily treatment usage is less than 5%.
+ +- 6/16 patients almost never use "emollientCream" +- 7/16 patients almost never use "localTreatment" +- 6/16 patients almost never use "emollientCream" and "localTreatment" + +```{r avg-treatment-usage} +treat_summary %>% + mutate(Patient = factor(Patient), + Label = ifelse(MeanUsage < 0.05, "X", "")) %>% + ggplot(aes(x = Treatment, y = Patient, fill = MeanUsage, label = Label)) + + geom_raster() + + geom_text() + + scale_fill_distiller(limits = c(0, 1), direction = 1) + + coord_cartesian(expand = FALSE) + + labs(x = "") + + theme_classic(base_size = 15) +``` + +To assess our treatment recommendations, it is advisable not to consider patients who always/never use treatments, patients who only use one type of treatment, or patients for whom many "daily treatment usage" values cannot be inferred accurately (so that the results are less sensitive to the imputation). + +```{r eligible-patient} +# Patients who always/never use treatment +excl_patients <- lapply(c("localTreatment", "emollientCream"), + function(x) { + treat_summary %>% + filter(Treatment == x, + Prop0 > 0.9 | PropQuasi0 > 0.9 | Prop1 > 0.9 | PropQuasi1 > 0.9) %>% + pull(Patient) + }) %>% + unlist() %>% + unique() %>% + sort() +``` + +NB: in the current setup, patients will `r ifelse(!params$exclude_patients, "not", "")` be excluded. + +## Treatment recommendation + +```{r compute-recommendations} +# Utility of actions for the different profiles +df_utility <- lapply(1:nrow(decision_parameters), + function(i) { + + # Compute total cost of actions + tmp_costs <- bind_cols(decision_parameters[i, ], + actions) %>% + mutate(Cost = localTreatment * cost_localTreatment + + emollientCream * cost_emollientCream + + localTreatment * emollientCream * cost_bothTreatment) + + # Compute utility of actions + tmp_utility <- lapply(1:nrow(tmp_costs), + function(a) { + pred_rec %>% + mutate(Utility = map(Prediction, function(x) {utility_func(x[, a], tmp_costs$Cost[a])}), + Mean = map(Utility, mean) %>% as.numeric(), + SD = map(Utility, sd) %>% as.numeric(), + MeanPrediction = map(Prediction, mean) %>% as.numeric(), + Action = a) %>% + select(-Prediction, -Utility) + }) %>% + bind_rows() + + return(left_join(tmp_utility, tmp_costs, by = "Action")) + + }) %>% + bind_rows() + +# Optimal actions +opt_action <- df_utility %>% + mutate(MaxObjective = Mean - risk_tolerance * SD) %>% + group_by(DecisionProfile, Patient, Time) %>% + filter(MaxObjective == max(MaxObjective)) %>% + ungroup() + +# Compute compliance ("distance" between suggested action and actual action, as measured by the Brier score) +opt_action <- opt_action %>% + mutate( + Compliance_localTreatment = localTreatment - localTreatment_post, + Compliance_emollientCream = emollientCream - emollientCream_post, + Compliance = 0.5 * (Compliance_localTreatment^2 + Compliance_emollientCream^2) + ) + +# Subset recommendation results +perf_rec <- opt_action %>% + filter(Time >= params$min_time) %>% + # filter(!(Patient %in% excl_patients)) %>% + drop_na() +if (params$exclude_patients) { + perf_rec <- filter(perf_rec, !(Patient %in% excl_patients)) +} + +# Risk Neutral, Normal Cost +perf_rec1 <- perf_rec %>% + filter(RiskProfile == "Risk Neutral", CostProfile == "Normal Cost") +``` + +### Which actions are most often recommended? + +Unsurprisingly, when the cost of treatment is high, the optimal decision is to not use any treatment.
+On the other hand, when using treatment is "free", since the treatment effects are negative ($\operatorname{Prob}(\mathit{TreatmentEffect} < 0) \approx 1$), the optimal decision is to use both treatments. + +For a "normal" cost of treatment (similar in magnitude to the treatment effects) and a risk neutral agent, because the posterior mean treatment effect of localTreatment is always lower than that of emollientCream, using localTreatment will always be preferred. +In fact, emollientCream will only be recommended as a supplementary treatment, in addition to localTreatment. + +The recommendation pattern does not change much when the patient is risk averse or risk seeking, even though we can observe that a risk averse patient would be less likely to use the "no treatment" option than the risk neutral patient, and the risk seeking patient would be more likely to use "no treatment" than the risk neutral patient. + +```{r plot-frequency-actions} +freq_action <- perf_rec %>% + group_by(RiskProfile, CostProfile, Action) %>% + summarise(Freq = n()) %>% + ungroup() %>% + group_by(RiskProfile, CostProfile) %>% + mutate(Freq = Freq / sum(Freq)) +freq_action <- decision_parameters %>% + select(CostProfile, RiskProfile) %>% + expand_grid(Action = 1:4) %>% + full_join(freq_action, by = c("RiskProfile", "CostProfile", "Action")) %>% + mutate(Freq = replace_na(Freq, 0)) +pfa <- freq_action %>% + full_join(actions, by = "Action") %>% + ggplot(aes(x = ActionLabel, y = Freq)) + + facet_grid(rows = vars(RiskProfile), cols = vars(CostProfile)) + # labeller = label_both + geom_col() + + scale_x_discrete(labels = parse(text = actions[["ActionExpression"]] %>% unlist())) + + coord_cartesian(ylim = c(0, 1), expand = FALSE) + + labs(x = "Action", y = "Frequency of action") +pfa +# ggsave(here("results", "freq_recommendations.jpg"), width = 13, height = 8, units = "cm", scale = 2, dpi = 300) +# saveRDS(pfa, file = here("results", "subplot_recommendation.rds")) +``` + +### Are recommendations changing as more data comes in? + +While the recommendations are influenced by the decision profiles, it is also possible that they change as more data comes in. +Here we investigate the frequency of recommended actions as a function of time (training day), for a "normal" treatment cost and a risk neutral patient. + +We can see that the frequency of the "using both treatments" action is decreasing with training data. +On the other hand, "using localTreatment" and "using no treatment" become more recommended as more data comes in. + +This evolution of recommendations may be explained by the fact that the treatment effects become less uncertain with more training data, thus yielding different recommendations. +However, it is also possible that the change in recommendations could be explained by a change in the data distribution (average severity decreases in the new training data).
+
+```{r plot-evolution-recommendations, message=FALSE, warning=FALSE}
+evol_rec <- opt_action %>%
+  filter(CostProfile == "Normal Cost", RiskProfile == "Risk Neutral") %>%
+  group_by(Time, ActionLabel) %>%
+  summarise(N = n())
+# Add zero counts for the (day, action) combinations that never occur
+diff <- actions %>%
+  select(ActionLabel) %>%
+  expand_grid(Time = 1:max(evol_rec[["Time"]])) %>%
+  setdiff(., evol_rec %>% select(Time, ActionLabel)) %>%
+  mutate(N = 0)
+evol_rec <- bind_rows(evol_rec, diff) %>%
+  group_by(Time) %>%
+  mutate(N_tot = sum(N)) %>%
+  ungroup() %>%
+  mutate(Prop = N / N_tot)
+
+p_rec1 <- evol_rec %>%
+  ggplot(aes(x = Time, y = Prop, colour = ActionLabel)) +
+  geom_smooth(method = "loess", se = FALSE) +
+  scale_y_continuous(limits = c(0, 1), expand = expansion(.01)) +
+  scale_colour_manual(values = cbbPalette, labels = actions[["ActionExpression"]]) +
+  labs(x = "Training day", y = "Frequency of the recommendation", colour = "Action")
+p_rec1
+# ggsave(here("results", "evolution_recommendations.jpg"), width = 13, height = 8, units = "cm", dpi = 300, scale = 1.5)
+```
+
+### How are recommendations influenced by the current score?
+
+When multiple actions can be recommended, as in the "Normal" cost profile, the recommendations can differ even when the current score is the same, highlighting that the recommendations are "personalised", even if the treatment parameters are not.
+This is likely due to differences in the breakdown of PO-SCORAD and to measurement error (the same measurements can be associated with different latent scores, and a latent score close to a cutpoint is more likely to lead to a measurable improvement).
+
+Also, we can observe that, for this decision profile:
+
+- when the score is low, "no treatment" is more likely to be recommended
+- when the score is high, "both treatment" is more likely to be recommended
+
+We are therefore more likely to recommend treatment when the score is higher than when the score is lower, even though the utility functions are linear rather than concave (with linear treatment effects, the benefit is the same regardless of the score).
+This may be a side effect of estimating different treatment effects for each severity item: when the score is higher, more signs are likely to be present, making the condition more treatable on average (if a newly present sign is more treatable than the existing ones, the expected benefit is higher).
+It is also possible that "using both treatment" when the score is high is confounded by the fact that this action is more recommended when there is little training data, and that earlier iterations correspond to more severe eczema.
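+
+To illustrate the cutpoint argument above, here is a toy calculation (all values are made up, and normal measurement noise stands in for the actual measurement model): the same latent improvement translates into a much larger increase in the probability of measuring a lower severity category when the latent score sits just above a cutpoint than when it sits in the middle of a category.
+
+```{r cutpoint-toy, eval=FALSE}
+# Toy calculation (made-up values; normal measurement noise as a stand-in for the
+# actual measurement model): the increase in the probability of measuring a lower
+# severity category, for the same latent improvement, is much larger when the
+# latent score sits just above a cutpoint than in the middle of a category.
+cutpoint <- 2       # hypothetical cutpoint between two severity categories
+meas_sd <- 0.2      # hypothetical measurement noise
+improvement <- 0.1  # same latent improvement in both scenarios
+p_below <- function(latent) pnorm(cutpoint, mean = latent, sd = meas_sd)
+
+p_below(2.05 - improvement) - p_below(2.05)  # close to the cutpoint: ~0.20
+p_below(2.50 - improvement) - p_below(2.50)  # middle of the category: ~0.02
+```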
+
+```{r distribution-action-score}
+p_rec2 <- perf_rec1 %>%
+  ggplot(aes(x = Score, colour = ActionLabel)) +
+  geom_density() +
+  scale_colour_manual(values = cbbPalette, drop = FALSE, labels = actions[["ActionExpression"]]) +
+  scale_y_continuous(expand = expansion(c(0, .05))) +
+  scale_x_continuous(limits = c(0, NA)) +
+  labs(x = params$score, colour = "Action") +
+  theme_classic(base_size = 15)
+p_rec2
+# ggsave(here("results", "recommendations_vs_score.jpg"), width = 13, height = 8, units = "cm", dpi = 300, scale = 1.5)
+```
+
+```{r include=FALSE, eval=FALSE}
+# Figure for supplementary
+plot_grid(p_rec1 + theme(legend.position = "none"),
+          p_rec2 + theme(legend.position = "none"),
+          get_legend(p_rec1),
+          nrow = 1, labels = c("A", "B", ""), rel_widths = c(5, 5, 1))
+# ggsave(here("results", "recommendations_confounding.jpg"), width = 10, height = 5, units = "cm", dpi = 300, scale = 2.5, bg = "white")
+```
+
+### Compliance
+
+Because most patients do not use treatment, they appear more compliant with the "not using treatment" recommendation, which is often the recommendation when the cost of treatment is high.
+
+Note that not using treatment could be interpreted as:
+
+- a high (personal) cost of using treatment
+- an unfavourable SCORAD breakdown (treatment effects are heterogeneous across AD severity items)
+- heterogeneous treatment effects not captured by the model (e.g. between patients)
+
+```{r plot-distribution-compliance}
+perf_rec %>%
+  drop_na() %>%
+  ggplot(aes(x = Compliance)) +
+  facet_grid(rows = vars(RiskProfile), cols = vars(CostProfile)) +
+  geom_density() +
+  coord_cartesian(xlim = c(0, 1), expand = FALSE)
+```
+
+If we look at the average compliance for each patient, we notice that, for the risk neutral and normal cost profile, our model tends to recommend "localTreatment" more than the patients actually used it.
+"emollientCream" is recommended more or less often depending on which patient we look at.
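+
+As a reminder of the sign conventions before looking at the per-patient averages (a worked example with made-up numbers, using the definitions from the recommendation chunk above): the signed components are the recommended usage (0/1) minus the inferred usage probability, so positive values mean the model recommends the treatment more than the patient uses it.
+
+```{r compliance-worked-example, eval=FALSE}
+# Worked example of the compliance measure (made-up numbers):
+# recommended usage (0/1) minus the inferred probability that the patient
+# actually used each treatment.
+recommended <- c(localTreatment = 1, emollientCream = 0)
+used <- c(localTreatment = 0.8, emollientCream = 0.1)
+recommended - used                 # signed components: 0.2 and -0.1
+0.5 * sum((recommended - used)^2)  # Compliance = 0.025 (0 = full agreement, 1 = opposite action)
+```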
+ +```{r avg-compliance} +perf_rec1 %>% + group_by(Patient) %>% + summarise(across(all_of(c("Compliance_localTreatment", "Compliance_emollientCream")), mean)) %>% + ungroup() %>% + ggplot(aes(x = Compliance_localTreatment, y = Compliance_emollientCream)) + + geom_point() + + coord_cartesian(xlim = c(-1, 1), ylim = c(-1, 1)) +``` + +```{r compliance-confounding, include=FALSE, eval=FALSE} +# Compliance localTreatment vs compliance emollientCream, averaged across time + +tmp <- perf_rec1 %>% + select(Patient, Time, Score, starts_with("Compliance_")) %>% + pivot_longer(cols = starts_with("Compliance_"), names_to = "Variable", values_to = "Value") %>% + mutate(Variable = gsub("Compliance_", "", Variable)) + +# Compliance vs Score +ggplot(data = tmp, aes(x = Score, y = Value, colour = Variable)) + + geom_smooth() + + coord_cartesian(ylim = c(-1, 1)) + + labs(y = "Compliance", colour = "") + +# Compliance vs time +ggplot(data = tmp, aes(x = Time, y = Value, colour = Variable)) + + geom_smooth() + + coord_cartesian(ylim = c(-1, 1)) + + labs(y = "Compliance", colour = "") + +``` + +#### Change vs Total compliance + +```{r plot-change-totcompliance} +perf_rec %>% + drop_na() %>% + # filter(!between(localTreatment_post, .1, .9), !between(emollientCream_post, .1, .9)) %>% + ggplot(aes(x = Compliance, y = Change)) + + facet_grid(rows = vars(RiskProfile), cols = vars(CostProfile)) + + geom_point() + + geom_smooth(method = "lm") + + coord_cartesian(xlim = c(0, 1)) +# ggsave(here("results", "recommendations_compliance.jpg"), width = 13, height = 8, units = "cm", dpi = 300, scale = 2) +``` + +#### Change vs Compliance for each treatment + +```{r plot-change-compliance, warning=FALSE} +lapply(paste0("Compliance_", c("localTreatment", "emollientCream")), + function(x) { + perf_rec %>% + drop_na() %>% + # filter(!between(localTreatment_post, .1, .9), !between(emollientCream_post, .1, .9)) %>% + ggplot(aes_string(x = x, y = "Change")) + + facet_grid(rows = vars(RiskProfile), cols = vars(CostProfile)) + + geom_point() + + geom_smooth(method = "loess") + + coord_cartesian(ylim = c(-.5, .5) * score_dict$Maximum) + }) +``` + +### Decision heatmap for a given (patient, time) recommendation + +The decision boundaries will change depending on the latent score, the breakdown of PO-SCORAD and the treatment estimates (so as a function of training data). + +The recommendation patient and time is cherry-picked to show the four actions (otherwise, using emollientCream but not localTreatment is rarely an option). 
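+
+Formally, each cell of the heatmap below corresponds to a pair $(c, \lambda)$ of per-treatment cost and risk tolerance and is coloured by the recommended action
+
+$$a^*(c, \lambda) = \arg\max_a \Big\{ \operatorname{mean}\big(U_a(c)\big) - \lambda \, \operatorname{sd}\big(U_a(c)\big) \Big\},$$
+
+where the mean and standard deviation are taken over posterior predictive samples of the utility $U_a(c)$ of action $a$ for the selected (patient, time) observation, and the interaction cost is set to zero in this sweep; this is the same objective as in the recommendation chunk above, only evaluated over a grid of decision parameters.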
+ +```{r decision-heatmap} +# Decision heatmap for a given observation + +tmp <- pred_rec %>% + # Select observation + filter(Patient == 14, Time == 18) %>% + # + expand_grid(actions) %>% + mutate(Prediction = map2(Prediction, Action, ~.x[, .y])) %>% + expand_grid(cost_eachTreatment = seq(0, 1.5, .01), + risk_tolerance = seq(-2, 2, .05)) %>% + mutate(cost_bothTreatment = 0, + Cost = localTreatment * cost_eachTreatment + + emollientCream * cost_eachTreatment + + localTreatment * emollientCream * cost_bothTreatment) %>% + mutate(Utility = map2(Prediction, Cost, ~utility_func(.x, .y)), + Mean = map(Utility, mean) %>% as.numeric(), + SD = map(Utility, sd) %>% as.numeric(), + MaxObjective = Mean - risk_tolerance * SD) %>% + select(-Prediction, -Utility) %>% + group_by(cost_eachTreatment, risk_tolerance) %>% + filter(MaxObjective == max(MaxObjective)) %>% + ungroup() + +ggplot(data = tmp, + aes(x = cost_eachTreatment, y = risk_tolerance, fill = ActionLabel)) + + geom_raster() + + scale_fill_manual(values = c("#999999", "#E69F00", "#56B4E9", "#009E73"), + drop = FALSE, + labels = actions[["ActionExpression"]]) + + # labels = actions[["ActionExpression"]] + coord_cartesian(expand = FALSE) + + labs(fill = "Action") +``` diff --git a/analysis/07_plot_data.R b/analysis/07_plot_data.R new file mode 100644 index 0000000..1f5eee5 --- /dev/null +++ b/analysis/07_plot_data.R @@ -0,0 +1,136 @@ +# Notes ------------------------------------------------------------------- + +# Plot PO-SCORAD, SCORAD (breakdown) and treatment + +# Initialisation ---------------------------------------------------------- + +rm(list = ls()) # Clear Workspace (better to restart the session) + +source(here::here("analysis", "00_init.R")) + +#### OPTIONS +pid <- 3 +#### + +l <- load_PFDC() + +pt <- unique(l$POSCORAD[["Patient"]]) +stopifnot(pid %in% pt) + +poscorad <- l$POSCORAD %>% filter(Patient == pid) +scorad <- l$SCORAD %>% filter(Patient == pid) + +palette <- c("PO-SCORAD" = "#000000", "SCORAD" = "#E69F00") + +# Plot breakdown -------------------------------------------------------------- + +item_dict <- detail_POSCORAD("Items") +subj_symp <- detail_POSCORAD("Subjective symptoms")$Label +intensity_signs <- detail_POSCORAD("Intensity signs")$Label + +pl <- lapply(1:nrow(item_dict), + function(i) { + score_lbl <- item_dict$Label[i] + score_lbl2 <- case_when(score_lbl == "Itching VAS" ~ "Itching", + score_lbl == "Sleep disturbance VAS" ~ "Sleep\ndisturbance", + score_lbl == "Traces of scratching" ~ "Traces\nof scratching", + TRUE ~ score_lbl) + max_score <- item_dict$Maximum[i] + + p <- ggplot() + + add_broken_pointline(poscorad, aes_x = "Day", aes_y = score_lbl, colour = "PO-SCORAD") + if (!(score_lbl %in% subj_symp)) { + p <- p + + geom_point(data = scorad %>% rename(Score = all_of(score_lbl)), + aes(x = Day, y = Score, colour = "SCORAD"), + size = 2) + } + p <- p + + scale_colour_manual(values = palette) + + scale_y_continuous(limits = c(0, max_score), expand = expansion(mult = .05)) + + scale_x_continuous(limits = c(0, NA), expand = expansion(mult = c(0, .02))) + + labs(y = score_lbl2, colour = "") + + if (score_lbl %in% intensity_signs) { + p <- p + theme(panel.grid.minor.y = element_blank()) + } + + return(p) + }) + +# Remove x axis except for the last plot (unnecessary spacing) +id <- 1:(length(pl) - 1) +pl[id] <- lapply(pl[id], + function(x) { + x + theme(axis.text.x = element_blank(), + axis.title.x = element_blank(), + axis.ticks.x = element_blank()) + }) +# Legend +legend <- get_legend(pl[[1]] + theme(legend.position 
= "top")) +# Reorient y axis label and legend +pl <- lapply(pl, + function(x) { + x + theme(axis.title.y = element_text(angle = 0, vjust = 0.5), + legend.position = "none") + }) +# Size of each plot +sz <- item_dict %>% mutate(SizePlot = case_when(Maximum / Resolution == 100 ~ 2, TRUE ~ 1)) %>% pull(SizePlot) +sz[length(sz)] <- sz[length(sz)] + .5 + +plot_grid(legend, + plot_grid(plotlist = pl, + ncol = 1, + align = "v", + rel_heights = sz), + ncol = 1, rel_heights = c(1, 8)) + +rhs <- plot_grid(plotlist = pl, + ncol = 1, + align = "v", + rel_heights = sz) + +# Plot SCORAD ------------------------------------------------ + +p_SCORAD <- ggplot() + + add_broken_pointline(poscorad, aes_x = "Day", aes_y = "SCORAD", colour = "PO-SCORAD") + + geom_point(data = scorad, + aes(x = Day, y = SCORAD, colour = "SCORAD"), + size = 2) + + scale_colour_manual(values = palette) + + scale_y_continuous(limits = c(0, 103), expand = expansion(mult = c(0.02, 0))) + + scale_x_continuous(limits = c(0, NA), expand = expansion(mult = c(0, .02))) + + labs(colour = "") + + theme(axis.title.y = element_text(angle = 0, vjust = 0.5)) + + theme(legend.position = "top") + +# Plot treatment ---------------------------------------------------------- + +treatment_names <- setNames(c("emollientCreamWithinThePast2Days", "localTreatmentWithinThePast2Days"), + c("Emollient\nCream", "Topical\nCorticosteroids")) + +poscorad <- rename(poscorad, treatment_names) + +p_treat <- lapply(names(treatment_names), + function(y) { + ggplot() + + add_broken_pointline(poscorad, aes_x = "Day", aes_y = y) + + scale_x_continuous(limits = c(0, NA), expand = expansion(mult = c(0, .02))) + + scale_y_continuous(breaks = c(0, 1), limits = c(0, 1)) + + theme(axis.title.y = element_text(angle = 0, vjust = 0.5), + panel.grid.minor.y = element_blank()) + }) + +# Combine plots ----------------------------------------------------------- + +pl_lhs <- c(list(NULL), list(p_SCORAD), p_treat, list(NULL)) + +lhs <- plot_grid(plotlist = pl_lhs, + ncol = 1, align = "v", rel_heights = c(1, 5, 1, 1, 1)) + +plot_grid(lhs, rhs, nrow = 1) + +if (FALSE) { + ggsave(here("results", paste0("example_data", pid, ".jpg")), + width = 13, height = 8, units = "cm", dpi = 300, scale = 3.3, bg = "white") +} diff --git a/analysis/07_plot_fit.R b/analysis/07_plot_fit.R new file mode 100644 index 0000000..82c4702 --- /dev/null +++ b/analysis/07_plot_fit.R @@ -0,0 +1,186 @@ +# Notes ------------------------------------------------------------------- + +# Plot calibration estimates and plot SCORAD + +# Initialisation ---------------------------------------------------------- + +rm(list = ls()) # Clear Workspace (better to restart the session) + +source(here::here("analysis", "00_init.R")) + +#### OPTIONS +pid <- 3 +#### + +model <- ScoradPred(independent_items = FALSE, + a0 = .04, + include_trend = FALSE, + include_calibration = TRUE, + include_treatment = TRUE, + treatment_names = c("localTreatment", "emollientCream"), + include_recommendations = FALSE) + +file_dict <- get_results_files(outcome = "SCORAD", + model = model$name, + dataset = "PFDC", + root_dir = here()) + +# Load data --------------------------------------------------------------- + +l <- load_PFDC() + +POSCORAD <- l$POSCORAD %>% + rename(Time = Day) +df <- POSCORAD %>% + select(one_of("Patient", "Time", model$item_spec$Label)) %>% + pivot_longer(cols = all_of(model$item_spec$Label), names_to = "Label", values_to = "Score") %>% + drop_na() %>% + left_join(model$item_spec[, c("Label", "ItemID")], by = c("Label")) + +# 
Prepare SCORAD calibration data +if (model$include_calibration) { + cal <- scorad <- l$SCORAD %>% + rename(Time = Day) %>% + select(one_of("Patient", "Time", model$item_spec$Label)) %>% + pivot_longer(cols = all_of(model$item_spec$Label), names_to = "Label", values_to = "Score") %>% + drop_na() %>% + left_join(model$item_spec[, c("Label", "ItemID")], by = c("Label")) +} else { + cal <- NULL +} + +# Prepare treatment data +treatment_lbl <- paste0(model$treatment_names, "WithinThePast2Days") +if (model$include_treatment) { + treat <- POSCORAD %>% + select(all_of(c("Patient", "Time", treatment_lbl))) %>% + pivot_longer(cols = all_of(treatment_lbl), names_to = "Treatment", values_to = "UsageWithinThePast2Days") %>% + mutate(Treatment = vapply(Treatment, function(x) {which(x == treatment_lbl)}, numeric(1)) %>% as.numeric()) %>% + drop_na() +} else { + treat <- NULL +} + +# NB: assume no recommendation (at least outside time-series) + +pt <- unique(df[["Patient"]]) + +id <- get_index(bind_rows(df, cal, treat)) +df <- left_join(df, id, by = c("Patient", "Time")) + +# Results +fit <- readRDS(file_dict$Fit) +par <- readRDS(file_dict$FitPar) + +# Correlation plot -------------------------------------------------------- + +x <- "Omega" + +omg <- rstan::extract(fit, pars = x)[[1]] + +tmp <- list(Mean = apply(omg, c(2, 3), mean), + SD = apply(omg, c(2, 3), sd), + Lower = apply(omg, c(2, 3), function(x) {quantile(x, probs = .05)}), + Upper = apply(omg, c(2, 3), function(x) {quantile(x, probs = .95)}), + pval = apply(omg, c(2, 3), function(x) {empirical_pval(x, 0)})) +tmp <- lapply(tmp, + function(x) { + colnames(x) <- model$item_spec$Name + rownames(x) <- model$item_spec$Name + return(x) + }) + +jpeg(here("results", paste0(x, "_", model$name, ".jpeg")), + width = 20, height = 20, units = "cm", res = 300, quality = 95, pointsize = 11) +corrplot::corrplot.mixed(tmp$Mean, lower = "number", upper = "ellipse") +dev.off() + +# Combine with power prior plot in `plot_powerprior.R` + +# Calibration plot -------------------------------------------------------------------- + +# Estimates +p1_cal <- par %>% + filter(Variable == "bias0") %>% + rename(ItemID = Index) %>% + left_join(model$item_spec, by = "ItemID") %>% + filter(!(Name %in% c("sleep", "itching"))) %>% + mutate(Name = factor(Name), + Name = factor(Name, levels = rev(levels(Name)))) %>% + ggplot(aes(x = Name, y = Mean, ymin = `5%`, ymax = `95%`)) + + facet_grid(rows = vars(Component), scales = "free", space = "free") + + geom_pointrange() + + geom_hline(yintercept = 0, linetype = "dashed") + + coord_flip() + + scale_y_continuous(limits = c(-.5, .5), + breaks = c(-.5, -.25, 0, .25, 0.5), + labels = c("-0.5\nPatient scores\nhigher than clinician", + -0.25, 0, 0.25, + "0.5\nClinician scores\n higher than patient")) + + labs(x = "", + y = "Initial bias (normalised)") + +# Otherwise, post-process figures to give the interpretation of the direction of the effect +# ("patient scores higher than clinician" vs "clinician scores higher than patient") + +aggcal <- rstan::extract(fit, pars = "agg_cal_rep")[[1]] +aggcal <- aggcal[, , 4] # SCORAD + +### Plot observed PO-SCORAD and inferred SCORAD as a fanchart +tmp <- POSCORAD %>% + filter(Patient == pid) +p2_cal <- plot_post_traj_fanchart(aggcal, + id = id, + patient_id = pid, + legend_fill = "discrete", + CI_level = seq(0.1, 0.9, 0.2), + max_score = 60) + + add_broken_pointline(tmp, aes_x = "Time", aes_y = "SCORAD", colour = "Observed\nPO-SCORAD") + + scale_colour_manual(values = c("Observed\nPO-SCORAD" = "black")) 
+ + labs(fill = "Inferred\nSCORAD\nprobabilities", colour = "") + + theme(legend.position = c(.9, .8), + legend.title = element_text(size = 11), + legend.spacing.y = unit(0, 'cm')) + +plot_grid(p1_cal, p2_cal, nrow = 1, labels = "AUTO") + +if (FALSE) { + ggsave(here("results", "plot_calibration.jpg"), + width = 18, height = 7, units = "cm", dpi = 300, scale = 2.5) +} + +# Treatment --------------------------------------------------------------- + +p_treat <- extract_par_indexes(par, var_name = "ATE", dim_names = c("ItemID", "Treatment")) %>% + filter(Variable == "ATE") %>% + mutate(Treatment = model$treatment_names[Treatment]) %>% + left_join(model$item_spec, by = "ItemID") %>% + mutate(Treatment = recode(Treatment, + emollientCream = "Emollient Cream", + localTreatment = "Topical Corticosteroids"), + Component = gsub(" ", "\n", Component), + Name = factor(Name), + Name = factor(Name, levels = rev(levels(Name)))) %>% + ggplot(aes(x = Name, y = Mean, ymin = `5%`, ymax = `95%`, colour = Treatment)) + + facet_grid(rows = vars(Component), scale = "free", space = "free") + + geom_pointrange(position = position_dodge(width = .5)) + + geom_hline(yintercept = 0, linetype = "dashed") + + coord_flip() + + scale_y_continuous(limits = c(-.05, .05), + breaks = c(-.05, -.025, 0, .025, 0.05), + labels = c("-0.05\nTreatment\nreduces severity", -0.025, 0, 0.025, "0.5\nTreatment\nincreases severity")) + + scale_colour_manual(values = cbbPalette[c(2, 1)]) + + labs(x = "", y = "Treatment effect (normalised)", colour = "") + + theme(legend.position = "top") +p_treat +# ggsave(here("results", "treatment_effects.jpg"), width = 13, height = 8, units = "cm", scale = 2) + +# Combine with recommendation plot +p_rec <- readRDS(here("results", "subplot_recommendation.rds")) + + labs(title = "") +plot_grid(p_treat, p_rec, nrow = 1, labels = "AUTO") + +if (FALSE) { + ggsave(here("results", "plot_treatment.jpg"), + width = 10, height = 5, units = "cm", dpi = 300, scale = 3.5) +} diff --git a/analysis/07_plot_performance.R b/analysis/07_plot_performance.R new file mode 100644 index 0000000..3a69cae --- /dev/null +++ b/analysis/07_plot_performance.R @@ -0,0 +1,151 @@ +# Notes ------------------------------------------------------------------- + +# Plot performance at a given iteration (as a pointrange) for all severity items and aggregates (x axis), models (colour) + +# Initialisation ---------------------------------------------------------- + +rm(list = ls()) # Clear Workspace (better to restart the session) + +source(here::here("analysis", "00_init.R")) + +#### OPTIONS +metric <- "lpd" # for comparing learning curves, not paired comparisons +t_horizon <- 4 # horizon that was used for forward chaining +it <- 16 # Iteration to plot the performance for +max_horizon <- 14 # restrict prediction horizon +pred_horizon <- 4 # prediction horizon +#### + +metric <- match.arg(metric, c("lpd", "RPS")) +stopifnot(t_horizon > 0, + max_horizon > 0) + +dataset <- "PFDC" + +item_dict <- detail_POSCORAD() %>% + mutate(Component = case_when(Name == "extent" ~ "Extent", + Name %in% c("itching", "sleep") ~ "Subjective symptoms", + Name %in% detail_POSCORAD("Intensity signs")$Name ~ "Intensity signs", + TRUE ~ "Aggregates")) + +list_models <- available_models() %>% + select(Model) %>% + mutate(ModelGroup = "EczemaPred") +list_models <- tibble(Model = c("uniform", "historical"), + ModelGroup = "Reference") %>% + bind_rows(list_models) +mdl_names <- list_models[["Model"]] +list_models <- list_models %>% + expand_grid(item_dict) %>% + 
rename(Item = Name) + +list_models[["File"]] <- vapply(1:nrow(list_models), + function(i) { + get_results_files(outcome = list_models$Item[i], + model = list_models$Model[i], + dataset = dataset, + val_horizon = t_horizon, + root_dir = here())$Val + }, + character(1)) +stopifnot(all(file.exists(list_models$File))) + +# Processing -------------------------------------------------------------- + +fc_it <- load_PFDC()$POSCORAD %>% + rename(Time = Day) %>% + detail_fc_training(df = ., horizon = t_horizon) + +perf <- lapply(1:nrow(list_models), + function(i) { + readRDS(list_models$File[i]) %>% + filter(Horizon <= max_horizon, + Iteration > 0 | list_models$Model[i] != "RW") %>% + estimate_performance(metric, ., fc_it, adjust_horizon = !(list_models$Model[i] %in% c("historical", "uniform"))) %>% + bind_cols(., list_models[i, ]) + }) %>% + bind_rows() + +# Performance at given iteration -------------------------------------------------------------------- + +pal <- rev(cbbPalette[1:length(unique(list_models[["Model"]]))]) + +tmp <- perf %>% + filter(Variable == "Fit", + Iteration == it, + Horizon == pred_horizon) %>% + mutate(ModelGroup = factor(ModelGroup, levels = rev(c("EczemaPred", "Reference"))), + Model = factor(Model, levels = mdl_names)) + +brk <- c(.01, .1, .25, .5, 1) + +## Performance items + +item_names <- detail_POSCORAD("Items")$Name + +p1 <- tmp %>% + filter(Item %in% item_names) %>% + ggplot(aes(x = Item, y = Mean, ymin = Mean - SE, ymax = Mean + SE, colour = Model, shape = ModelGroup)) + + facet_grid(rows = vars(Component), space = "free", scale = "free") + + geom_pointrange(position = position_dodge(width = .66), size = 1, fill = "white") + + scale_colour_manual(values = pal) + + scale_y_continuous(breaks = log(brk), labels = paste0("log(", brk, ")")) + + scale_shape_manual(values = c(16, 21)) + + coord_flip() + + labs(x = "", y = metric, colour = "", shape = "") + + theme(legend.position = "none", + panel.grid.minor = element_blank(), + axis.text.x = element_text(angle = 30, vjust = .5, hjust = .5)) +p1 + +## Performance for aggregate scores + +p2 <- tmp %>% + filter(Item %in% c("oSCORAD", "SCORAD")) %>% + ggplot(aes(x = Item, y = Mean, ymin = Mean - SE, ymax = Mean + SE, colour = Model, shape = ModelGroup)) + + facet_grid(rows = vars(Component), space = "free", scale = "free") + + geom_pointrange(position = position_dodge(width = .66), size = 1, fill = "white") + + scale_colour_manual(values = pal) + + scale_shape_manual(values = c(16, 21)) + + coord_flip() + + labs(x = "", y = metric, colour = "", shape = "") + + theme(legend.position = "none", + panel.grid.minor = element_blank(), + axis.text.x = element_text(angle = 30, vjust = .5, hjust = .5)) + +## Custom legend + +legend1 <- tmp %>% + filter(Item %in% item_names) %>% + filter(ModelGroup == "EczemaPred") %>% + ggplot(aes(x = Item, y = Mean, ymin = Mean - SE, ymax = Mean + SE, colour = Model)) + + geom_pointrange(position = position_dodge(width = .66), size = 1, fill = "white", shape = 21) + + scale_colour_manual(values = pal[-(1:2)], name = "EczemaPred models") + + labs(x = "", y = metric, colour = "") + + theme(legend.position = "top") + + guides(colour = guide_legend(title.position = "top")) +legend1 <- get_legend(legend1) + +legend2 <- tmp %>% + filter(Item %in% item_names) %>% + filter(ModelGroup == "Reference") %>% + ggplot(aes(x = Item, y = Mean, ymin = Mean - SE, ymax = Mean + SE, colour = Model)) + + geom_pointrange(position = position_dodge(width = .66), size = 1, shape = 16) + + scale_colour_manual(values = 
pal[1:2], name = "Reference models") + + labs(x = "", y = metric, colour = "") + + theme(legend.position = "top") + + guides(colour = guide_legend(title.position = "top")) +legend2 <- get_legend(legend2) + +legend <- plot_grid(NULL, legend1, NULL, legend2, NULL, nrow = 1) + +## Combine plots + +plot_grid(legend, + plot_grid(p1, p2, + labels = "AUTO", + ncol = 1, rel_heights = c(9, 2), align = "v"), + ncol = 1, + rel_heights = c(1, 11)) + +# ggsave(here("results", "performance.jpg"), width = 13, height = 8, units = "cm", dpi = 300, scale = 3.5, bg = "white") diff --git a/analysis/07_plot_powerprior.R b/analysis/07_plot_powerprior.R new file mode 100644 index 0000000..04432de --- /dev/null +++ b/analysis/07_plot_powerprior.R @@ -0,0 +1,114 @@ +# Notes ------------------------------------------------------------------- + +# - Compare prior, historical posterior, posterior with/without power prior for a given parameter +# - For a given a0, how much does power prior influences posterior as a function of the number of data included +# The second plot is probably easier to understand. + +# a0 indicates how informative the historical prior is, +# or how much information is shared between the historical posterior and the final posterior +# (very informative: weighted equally in the posterior; not informative: does not contribute to posterior) + +# Initialisation ---------------------------------------------------------- + +rm(list = ls()) # Clear Workspace (better to restart the session) + +source(here::here(here::here("analysis", "00_init.R"))) + +library(ggrepel) + +#### OPTIONS +par_name <- "rho2" # check that processing of par2 works for something else than "rho2" (Index much match ItemID) +#### + +item_dict <- ScoradPred_items() + +# Prior and posterior ----------------------------------------------------- + +# Prior and Posterior Derexyl +par1 <- readRDS(here("data", "par_POSCORAD.rds")) %>% + filter(Variable == par_name) + +# Posterior ScoradPred +par2 <- lapply(c("ScoradPred", "ScoradPred+h004"), + function(m) { + get_results_files(outcome = "SCORAD", model = m, dataset = "PFDC", root_dir = here())$FitPar %>% + readRDS() %>% + mutate(Distribution = m) + }) %>% + bind_rows() %>% + filter(Variable == par_name) %>% + left_join(item_dict %>% + select(Name, ItemID), + by = c("Index" = "ItemID")) %>% + rename(Item = Name) + +par <- bind_rows(par1, par2) %>% + mutate(Distribution = factor(Distribution, + levels = c("Prior", "Posterior - Derexyl", "ScoradPred", "ScoradPred+h004"), + labels = c("Prior", "Historical", "Without power prior", "With power prior"))) + +# Plot comparison estimate -------------------------------------------------------------------- + +par %>% + select(-Index) %>% + filter(Variable == par_name) %>% + ggplot(aes(x = Item, y = Mean, ymin = `5%`, ymax = `95%`, colour = Distribution)) + + geom_pointrange(position = position_dodge(width = .5)) + + coord_flip() + + scale_colour_manual(values = HuraultMisc::cbbPalette) + + labs(x = "", y = par_name, colour = "") + + theme(legend.position = "top") + +# Plot relative importance ------------------------------------------------ + +poscorad <- load_PFDC()$POSCORAD %>% + rename(Time = Day) + +fc_it <- detail_fc_training(poscorad, horizon = 4) +id_xbrk2 <- vapply(seq(0, 1, length.out = 10), function(x) {which.min((x - fc_it$Proportion)^2)}, numeric(1)) + +a0_val <- c(.04, + signif(rho_to_a0(.5), 2), + .01, #signif(rho_to_a0(.9), 2), + .5, + 1) + +tmp <- expand_grid(a0 = a0_val, + N = 0:nrow(poscorad)) %>% + mutate(rho = a0_to_rho(a0 = 
a0, n_new = N)) + +p <- tmp %>% + group_by(a0) %>% + mutate(Size = (a0 == .04)) %>% + mutate(Label = ifelse(N == max(N), paste0("a0 = ", a0), NA)) %>% + ungroup() %>% + ggplot(aes(x = N, y = rho, colour = factor(a0, levels = a0_val), label = Label)) + + geom_line(aes(size = Size)) + + geom_label_repel(na.rm = TRUE) + + labs(x = "Number of observations in training set", + y = "Contribution to posterior (%)") + + scale_x_continuous(expand = c(0, 0), + sec.axis = dup_axis(breaks = fc_it$N[id_xbrk2], + labels = fc_it$LastTime[id_xbrk2], + name = "Training day")) + + scale_y_continuous(expand = c(0, 0)) + + scale_colour_manual(values = HuraultMisc::cbbPalette[-c(5, 6)]) + + scale_size_discrete(range = c(.5, 2)) + + coord_cartesian(ylim = c(0, 1)) + + theme(legend.position = "none") +p +# ggsave(here("results", "powerprior.jpg"), width = 13, height = 8, units = "cm", dpi = 300, scale = 2) + +if (FALSE) { + # Combine power prior plot and correlogram for paper + + p_corr <- ggdraw() + + draw_image(here("results", "Omega_ScoradPred+h004+corr+cal+treat.jpeg"), + scale = 1) + + plot_grid(p, p_corr, nrow = 1, labels= "AUTO") + ggsave(here("results", "powerprior_correlation.jpg"), + width = 10, height = 5, units = "cm", dpi = 300, bg = "white", scale = 3.2) + # Note that scale is for the first plot (not the image) + +} diff --git a/analysis/generate_reports.R b/analysis/generate_reports.R new file mode 100644 index 0000000..b2b50b9 --- /dev/null +++ b/analysis/generate_reports.R @@ -0,0 +1,73 @@ +# Notes ------------------------------------------------------------------- + +# + +# Initialisation ---------------------------------------------------------- + +rm(list = ls()) # Clear Workspace (better to restart the session) + +source(here::here("analysis", "00_init.R")) +library(foreach) +library(doParallel) + +render_fit <- FALSE +render_perf <- FALSE + +render_parallel <- function(input, rpt, ...) { + + n_cluster <- min(nrow(rpt), parallel::detectCores() - 2) + cl <- makeCluster(n_cluster, outfile = "") + registerDoParallel(cl) + + foreach(i = 1:nrow(rpt)) %dopar% { + + tryCatch({ + tf <- tempfile() + dir.create(tf) + rmarkdown::render( + input = input, + params = rpt$Parameter[[i]], + output_file = rpt$OutputFile[i], + output_dir = here::here("docs"), + intermediates_dir = tf, + ... 
+ ) + unlink(tf) + }, error = function(e) { + cat(glue::glue("Error in rpt row {i}"), sep = "\n") + }) + + NULL + } + + stopCluster(cl) + + NULL + +} + +# Fit --------------------------------------------------------------------- + +if (render_fit) { + + rpt <- available_models() %>% + mutate(Parameters = map(Args, function(x) {x[names(x) != "treatment_names"]}), + OutputFile = glue::glue("fit_{Score}_{Model}_{Dataset}.html")) + + render_parallel(input = here::here("analysis", "03_check_fit.Rmd"), rpt = rpt, quiet = TRUE) + +} + +# Performance ------------------------------------------------------- + +if (render_perf) { + + rpt <- expand_grid(score = detail_POSCORAD()$Name, + dataset = c("PFDC"), + t_horizon = 4) %>% + mutate(Parameters = pmap(list(score = score, t_horizon = t_horizon), list), + OutputFile = glue::glue("perf{t_horizon}_{score}_{dataset}.html")) + + render_parallel(input = here::here("analysis", "05_check_performance.Rmd"), rpt = rpt, quiet = TRUE) + +} diff --git a/analysis/view_reports.Rmd b/analysis/view_reports.Rmd new file mode 100644 index 0000000..2d5dc2d --- /dev/null +++ b/analysis/view_reports.Rmd @@ -0,0 +1,36 @@ +--- +title: "Analysis reports" +author: "Guillem Hurault" +date: "`r format(Sys.time(), '%d %B %Y')`" +output: html_document +--- + +```{r setup, include=FALSE} +knitr::opts_chunk$set(echo = TRUE) +source(here::here(here::here("analysis", "00_init.R"))) +``` + +You should open this file in the browser rather than in RStudio viewer, otherwise the relative paths may not work. + +# Fit + +```{r fit-table, results='asis', message=FALSE, echo=FALSE} +available_models() %>% + mutate(Link = file.path("..", "docs", paste0("fit_", Score, "_", Model, "_", Dataset, ".html")), + Link = gsub("\\+", "%2B", Link), # cf. escape + in html address + Link = paste0("- [", Model, "](", Link, ")")) %>% + pull(Link) %>% + cat(sep = "\n") +``` + +# Validation + +```{r validation-table, results='asis', echo=FALSE} +expand_grid(Score = detail_POSCORAD()$Name, + Dataset = c("PFDC")) %>% + mutate(File = file.path("..", "docs", paste0("perf4_", Score, "_", Dataset, ".html"))) %>% + # filter(file.exists(File)) %>% + mutate(Link = paste0("- [", Score, "](", File, ")")) %>% + pull(Link) %>% + cat(sep = "\n") +``` diff --git a/data-raw/save_posterior.R b/data-raw/save_posterior.R new file mode 100644 index 0000000..2e65172 --- /dev/null +++ b/data-raw/save_posterior.R @@ -0,0 +1,89 @@ +# Notes ------------------------------------------------------------------- + +# Save posterior summary statistics of the main parameters, for all items, for the models trained with the Derexyl dataset. + +# This script assume posterior summary statistics of the OrderedRW (v2) model fitted with the Derexyl dataset +# was saved at the location given by `get_results_files`. +# The script to fit the OrderedRW model can be found in [EczemaPredPOSCORAD](https://github.com/ghurault/EczemaPredPOSCORAD). 
+ +# Initialisation ---------------------------------------------------------- + +rm(list = ls()) # Clear Workspace (better to restart the session) + +source(here::here(here::here("analysis", "00_init.R"))) + +intensity_signs <- detail_POSCORAD("Intensity signs")$Name +subjective_symptoms <- detail_POSCORAD("Subjective symptoms")$Name + +#### +datasets <- c("Derexyl") +#### + +param <- list_parameters("OrderedRW") + +list_models <- data.frame(Item = detail_POSCORAD("Items")$Name, + Model = "OrderedRW") %>% + mutate(PopulationParameter = list(param$Population), + PatientParameter = list(param$Patient)) +l_prior <- list_models %>% + mutate(Distribution = "Prior", + File = map2(Item, Model, ~get_results_files(outcome = .x, model = .y, root_dir = here())$PriorPar) %>% unlist()) + +l_post <- list_models %>% + expand_grid(., Dataset = datasets) %>% + mutate(Distribution = paste0("Posterior - ", Dataset), + File = pmap(list(Item, Model, Dataset), + function(x, y, z) { + get_results_files(outcome = x, model = y, dataset = z, root_dir = here())$FitPar + }) %>% + unlist()) %>% + select(-Dataset) + +list_models <- bind_rows(l_prior, l_post) + +stopifnot(all(file.exists(list_models$File))) + +# Combine estimates ------------------------------------------------------- + +par_POSCORAD <- lapply(unique(list_models[["Item"]]), + function(item) { + tmp <- list_models %>% filter(Item == item) + + out <- lapply(1:nrow(tmp), + function(i) { + readRDS(tmp$File[i]) %>% + mutate(Distribution = tmp$Distribution[i], + Model = tmp$Model[i]) %>% + filter(Variable %in% tmp$PopulationParameter[i][[1]] | + (Distribution == "Prior" & Variable %in% tmp$PatientParameter[i][[1]] & Index == 1)) + }) %>% + bind_rows() %>% + select(-Patient, -Time) %>% + mutate(Distribution = forcats::fct_relevel(factor(Distribution), "Prior"), + Item = item) + + return(out) + }) %>% + bind_rows() + +saveRDS(par_POSCORAD, file = here("data", "par_POSCORAD.rds")) + +# Compare fit ------------------------------------------------------------- + +# par_POSCORAD <- EczemaPredPOSCORAD::par_POSCORAD + +bind_rows(par_POSCORAD %>% + filter(!is.na(Index)) %>% + mutate(Variable = paste0(Variable, "[", Index, "]")), + par_POSCORAD %>% + filter(is.na(Index))) %>% + select(-Index) %>% + ggplot(aes(x = Variable, y = Mean, ymin = `5%`, ymax = `95%`, colour = Distribution)) + + facet_wrap(vars(Item), scales = "free") + + geom_pointrange(position = position_dodge(width = .25)) + + coord_flip() + + scale_colour_manual(values = cbbPalette) + + labs(x = "", y = "Estimate", colour = "") + + theme_bw(base_size = 15) + + theme(legend.position = "top") +# ggsave(here("results", "compare_fit.jpg"), width = 13, height = 8, units = "cm", dpi = 300, scale = 3) diff --git a/data/par_POSCORAD.rds b/data/par_POSCORAD.rds new file mode 100644 index 0000000000000000000000000000000000000000..608961ce6d0ce6ef598801e612a64f558046ba05 GIT binary patch literal 68571 zcmV(uKUu!d>(8s(669;LYsn2(r zPohXHkcL$np=k76iK*T_6pJ2xHRpB~#p4(U(-b&RBK~zzA3+!;6Q-S~_CLqd@r9(p zLnlxwp5`OZRo91C&d+Kumcx59JfJ9^8_2M?zvB zO}~a9LXraivCCHxl^b|M5|1N3_r1O@QxIuL*=kEiz5FgT*B z0#UhZODd|iPp~r7Poo;lyBiVrpjyX8{faCzR98@+O}$Bs>K&gY(*M$+hN9Pc*b9Bs zcq+Iubi@%gl`uJ{Tpu-`X?8KdUer>mIWu`D6Sbaw7;LcMMs1Z6t5!V?)PDArgm>m0 z>L_Pr3sBLZ&Xc*ai+^TNS81-VD$W#jpY}96>>;3@5}BtM12yVB*%NTZRvPuWXQw}Q zeM5cwP9#YQNB!blsyT}m)c>~Ua;s7(8gPB~dUJUN4eS)$-+PCkK~Zq5^ebC57`YS5 zamxS=xxSpxG5m#w_MF$K^?cBW_ zz3W7D{ckiWx_0?liWr)V5Kn3`44^6Ji+pan1~j$#B&4v^f~Ex{dT}=&qUl$o!-LGP z(2TRN|JIFSG_zAOT5BdpvtrGy3)eT$?AtMssnJ7d&ZXl`dA1YH?dtok-+YDUMI)E0 
zQx2l}zY+sA*j6SklNZA`o+O53l8Y#Ch~#%lGaCv7*0Q9!bEDYBWB%93#gIo~8Gh+s zM-5e@m81%n|H~_6UcYvK%0Uo$?tQ)e*?bWNjBbnNK0Sg$d(1{;XAnhl6bBS1-=g^T zq+RP16_h-j(flwo8l^bozxLRyZTnd8w2 z2QR9#iIOAoW{q9_cpY&-F5r>y7AkN*EBa9%fr{jc>co4!Q87y0#n{CUm8dSgGqh+)_`XdNFm(87);*AHG%f z9~=Bn?0zV zexRdz_daSb7f!rv-9{Zw|9cFn=TWEhvSKdZ3hKQ7%)c}`hPvW8t~poRP}jAt+MCxN zb?@evygNyRy3^jS_0Kp^@5sRS?_v*7?;7JXs=ZH8Z<*ZLzhO5|*bhq%ef)^}vahBz zzAvD@wb{b$NFUUX^z1xmUxoSwV=E0|$*A9UPk3*|MbsbhAoh=$Lj9d5Z?j2e(SZAP z;iz1@<$TF~UQIm-)Oe>4f4r(8>5LzCOI^(Mu;dclr@%%2QR zMaU0lpE-l3z7Y~X{F%|TL2Bo!_XwI!PgMOOi$XKOZiS>BCNy)Gc%^#eBbwc)o0|Dk zgl4~P*K|zmpgE6A<7}e_nmaj0n@kO$d5M9-EsC4}msfD}W30^n?_qFL;A-Q5(JjdC zZMTpwybagS{^{&oQGne1{cVbm6`|y6o=)gJCrFI=#^l4K1(~5YL_XhOgbOLkZ71Sx z!WB8`tm86bkXGdPGh@XKGIu|ZZ`4_czPj+eMkE=MI`yxn$G?S3BYp<8HK!oNuh-Fw zOBd3EpIo<$CWW{wl2qCnyl}y+BKq~b7bL`deJf$q3@KC6{Hx!JA#_&oYv__P#0S?5 zWtg0V^xW!+PO?sj>wKGS=i30$#Lv2e)8`B9*E`HOEY>c5<=vkrXDw!hJd^aI&`Q9?i05i`P6$LjC3l9(pMi`A1=s8Y(+r~ z+OSOx6GNa_*UZwVlMrp?bL(2(M+gyZ{bPAN8GN-Le?{30@RNRE!6*C*g5NLAFbc;) zfP8Yw533Rgq}a%_?+}Ba#4?TH1EmnD+Fs~!!VjVk5QimMO@fE>ea};G`yj!hYyFjz z5%{FnztE`ihVxU-Zj0p9;2T?$dFgEqgvF)!9`CyXerH&Z#}FR^zihUj)ycgOTVxXd zQJx5*8XMMGN{JxofPT9BLJ0(Minu*p=7b;`>$|iIeGufxr>SBP1%W$ZdtpS-~{-y#eIb!YlHq~?E;8qI;FSAcL9R=bvS$ZydaxdO(Mj)6hc25 zODR131xYPSkLS40KV4#2U2t;T1&xT0FhvI|(UovzMf{pF=!pAg6ig zD#T4temH(19^x67x$8}oAU>4x#;rf@5RX(7_2!}wAD7wXd66CB)oe-?MovTA+^sKa z%aRbc^EK1TOdsM;dLO4+orZXk6UTpcX+u1n*iG8AMxgL5LzGt-GCVhP$8BAghK9||R{ZdOKfdmF*m3gY5r(B6g zRT?NXE)@Tp-;OVwuaq7|GGbAQD+4}A$Ly^e6S9nqRHvI)%R})1Z8xtZix;vTEL)rL zpG8g%`+y(G0eG0Er}YU*1s-LPY`s0)ioDdGcG0T3$VV4LR%9B50<>RTp8KA{y)UQXy4pq8#HXBq@*yFJ1SBOfFH92a|`J%Eyy$y+V z94bGK@74%cLKOulJA0NHRXQ9(eTX@Q5`79Qe+8;Nt4Sy6 zR6}*;TUHNihfw`-9)G^fQPhBKUv~=;)M$Nth(UYU7;^o>I!N@xV9YQ0&2IPtiAa5E$YZO5evmlqfV=?OU~{Np({%!lYPMubz9^( zr+!VMo}BcEo#QUYwFI1Vr{AqFhwhYx9IoE2L&G&oZaKU4CtY?ai7klAj%aaP?C8{Fl$tm{|jcI(clOf;e3I~sHMSUlsshsO2>Enm32 z(YTn;z3=;DG#=@gCnior6RvIbcz3N`ej^TFqP~kJ#e>$bzQv-+w+my+CC|{5`)%8V zNE4dcJ%9Dx$`wtEn=3V~3(@r3`+bUvHE72DnUibp4w~6lbvjz?>JkyMTS?b<`R(1n zMX?k#=a%dL?CFl?_M7F;L+E$;ZOmMRvGN~&Q~xx^bW9)szLBlksd&hufc+fZ{eUbK zh--*dJO38NDvRuNNtf~HpR`{ru9s2$D*&HCF#895yb$CQTXql?LLDNdn#mue$n7*O1Z=Acim8G5?n_IX;B8!h6bNTqa4yQ z7m1Fbe6(ls#wB_r+$eJKFxN)7*Qt0e!WYrbKc}ztEaEqtEcHtzs9;$7^pUC>Dm*{? z>cN61Dyp3AB*=xL;w}2#lQe^<6p;6UReuhZ#^ueL1KUtp&x*rIKNXeRH-{z^CQ*fp zhxylH6RKP}_Vc_@0jex^esZzAimFN_jV~7~P_^v25{+;Zs!j;>WctLSTDA(E>%JgV zTYLI2^5H&I*AH;+aw|pk4C22gygjHs@VKGF_z-HSxm`H9=Lc$JMeZ~!a_;h*efkxm zZPa9G^l~lrKuwkK5SVKatvsH^spQ*=-Wb<3O{ zUJvp{J^F+E@K6x-2oLxAibUaW)7+kDw+}MdEDWEe zK*JQJzX@Tj5TO2l=B2cc-hexh>2Yd5ddN~_pO zzo3z`Z(xgS85*^_hm$5UqtVhHno`>9Xw2ZG!JOZY#&QXCeU+SO?5}F#(P@Cj>1e2w z$cV;Yc;lkC?9hZ0w~h-xMH6dB-(r{jXp(tn&yQgOnu-qQk>AxuQ`<+!_P<<3)3(xD zjfxmFW6xaJI=_Hs9tX+{ldhuKhe5*>=2SE%-ZMn>tpLr{h6iVPR?xhTYPhsz;~##j zv9fuso%s>Ui$nESUG70i-G^KWmjh5FO6KoI_7MtL`o6_&nnUJ>-U;#Ebjb8h+_Dn? 
z4H@wy!?axqkdpcO*VXhZklFlVXoH^|F8;`FF17jyiG%N%S1M#7VVf>L@YWMZP+}Ob z<30rOz3=Y*@Fs`YkZ%9e!*?MzwT)hT@jb*h^p5pbGeOK{SsoIjLvTgyvEPY#XNYZN z&hC8B2hktvNn9RJK&)8brR&{?A?gXKj2p>Gh-K58b*2=AXeCFGk~;ulZ$oV}+7Qm~ zSFrP*_yEB^j5(Tl58=|$vjRO&)ZlzvK-*MoK17(k%_qAP1wnCMUFDg3Avon?@4m~K z5c7Ti(OSY^2$LtIu2LL^&_u0?0=rc3yL{rZH$yLY=O&qcK79jX4eH%#v&rDx??dRj z%RA@3{}62E4hHWKzXz`2FCb8^YU+9PehBYcO%D(_4d-^e*u@WJLf8w|_+k5Ei0w<< zR}||D5l5e7*@kUF>Lw3aWMdRW=G7iJ|F{)m+>W{BP4oItyRey`TNbheO@L_h*XT8R1Mg zm*paIMO+g66I>N|CKMpB$9LV`W)GY%iT`b5J^+EY$~(jBH6h}dCN`?ZL2!2KLG6RO z5Wz*0JyyQ}$+EBJ9X;hC$k~BL+h*5)++eEnkzM{%y7{H=X(0H|lKYIm6#!qVK<6J@ zn&5HV^}ayaH)|qB36;-G<1o zOq*M8P2j>0*?`xLq>yGY%vgVk0MTIzk46{5QGu zS(h%3T2{V6o`@5IQ$6N*l5?)@K-0a))UA-bKLU2ZUqYc zait*S%A?2|7d?oFn)=66^ zoBg4=p`8fjp8c*`slA2r>ysCb@P{H{B^)DHh!Lur0%P z)Om6yxDypF@xG=tzk-THV}&BDzfrL%Admf)5-PE?Uu(XUgGxDOgN1&csI-~O6X9Ti z%BuacuXgiY`Edx^_54N^?w&(W%qdaDC(Q7%$jt8gemfnb?1!rR1O_{jXi&92e1Cte z1gg%qiZtDrMm1f1vr}FXyIjY{$6+UdYBQPdX0Gi;^%Dsf6Q&bTJ#5xRhtNF`(u_iEgopQ>f`MmD3W@kD70+LMMB6xsLAG z)Da3F)H0ZqyV$LVS{Y;2GI>0x_3Ctuop&E<^GtF19{PgXN$v`gP8qv-aiC1t?I7wr zixzAknD6FAe9jq`e$>q}R_K<#hq_O@^aPAZQIDNB=qckT)U)u^`J6I`4T4?m!sBkBLFO|PF_zP4&`-C4pTvT zvoH5kM`yS+DL^i2%mWG8)x`=u*C6v8tFWQ@K}a%Ai1*d@f}}O)!fK<_5bxG4f4}h} zL~~2;9Py}RE_%K02tWA_f>@STES@1mUHvG#KSKgS66wZ0#u_1f z@lQltOa=s+AKRGy%L!p*-Snfss37#{+Q~0uI}p{Dxialm0RagIT%_)`LFA`QCypR~ zh^$?Ilo;j&Asi-2e*$4iwSE08LtrAHM>4hi35W0d7rAWrVow4zWLL{e51F*bh&AIg{4 zXobcgLjGv`6#j%j?kO{`Sw#p>d5!BrBoHk!Fp%YS7NU1D0uFy@hnRYx_3D>`(426} z%B^?^d&+jxgm(m@#!v5wH1CAqTE!p!vERUd`Oq9ZdjSQAg(Evmb?%m>Npxs5%EL7J$h?Hk{L*5ix3h1s|wLuuTr)bG$HZLX}S(I zI!F_YZsv+uhifN<=_n&(;rwOZ^cy^hkQ8orL9>%#_k2h3`!8zYqU(K!TQ5l=+0rIp zD<~FX+E>JCJ;fnIyVc0{r60tv{%j`ii-Y(ZCmX_h)9^ICpubcZ0s61`yMI3)j@)-A zitcgCAZud5r=+e6$h)`1`i7_;f@8u|o=wg_{IZG;lZ^+Wtmc zFEs8e9}L`0)J-Z{hu)c5v){g<@ZRZsE6KDN4D)?GJ3AB(OKa4eE%LGdF`uhUDMH*5 zb^na_Rp5X%dRoG_slfLmFOAy2m;9O>biEJ5SHIaGktu~A4hpzvCFPg=a$~hx29(CrJTZX}Z%;#FRU+VP8&40%G_Ad=z-*fyt zhIQ@VZqy89cecX!Ll>mf)qLQ+Wg4@;hbesg9VIXS<~j_9aOiq0i^E!AHog6=0;2!@ z-o(z@|Be@X{ch^Xy`zwn{o>W&!!k&H^yjr*!V+AiE%@WBqy_0eUw6H^-wc`GXb&Eo z$%JcHj_w^7Fof$T7glb6dS4G&hB^x6qfwzVfc6+isquOaeoqp;u|W~hupHDctJ_(iSH#S(QdgW@sbTn zM$cXM{__P&23g#Sl;7>@?~hkce1%f-dAl>O%Al09nu<#-0!m#yDSn1cLg`$otSmck|g z)S=whJF|S_3X~TO9BQCD1m$mUrnYZ+LIw3Z<(!M@P!ZiokstXHD!%MBb^9Fy6=jnT z?uBvDjJ~{H_5ig<5#Z#A~6F=gr%qv&Wz^@VDz#z?hKWZ zx8I$+)B+VN-D#os7@=ZFe`i~C11fI$hyIo{Z-Y9VDPQV6~ixI0B9H)ZvQ@?4{ za+sjp_HpU@rwu3@^{tXFKL=&W-^OXYd7$jP9-r!v0F>r%9w%(bK&eg zqcD9FN*^#AP&`V8QorzvP61?4YQ>PPG;|tDQV$uH&C_~5=P0v9Im7R?^YcLes zT_*%=>Oe8$>&F5@G*I;6Xq~*#7bqOs*U&_n3k7S(v%dUXfr7a9#;nAJ?D5_4eh#;oJ_$uK)Vhh^rN@iMLJ|J*bCFk=YxdEd}Y%%iVh< zq9C1oX6R-7H%NG0DF-VA!7ahl-3%@=aO>ukXWum?;kK!i@L@?-Xy)#4 zvQ>13=J*rtUS=+ECq+N)^5!MD%Q_cA-$ey?yHw{p#Sg)~VB5VE;w^Arn}6%~tEX_k zs9KKgO%*(l8@myIRSh1BEsY%}Plbn1E$`0cDZwLY)k?lbOL&y>!uQ1&6L|DOtL}NB zAhgJc40UQwK}*KQG^55Kw9=?;+MVa zP)cZHq^sh{9EG-XE|-ropM$osplO!-9MCT8`%qda2ikqqCcR6$p#7H5&xddf+BbeC zDLGt&4%X8W&l|^}136=L=08D)zWdw+hYfVd`Ey(loQ4kB*fnm#Gw2Xe$?}w-f(})M z%CH3^=&(J^@+2+?I%0mvgh?kr$En0YY0*OH;F`1-IZy;0icPWVu5Y2k&SK7KTL3!j zTXcsQrl2F({8xYWx83WMoBKcWgAUVF-Nter=n&>@pkn_59i+`nW_SNWd%j`O%ZcC6 zK6I!n{OYdX@O+x&Y!zrX_dCJdWe@G4z zpiQ!njg~G7S|2JunhI=y*5G@CdJay|s=)7A;Pnbx=^D-3uM9y;WXorH>51KWxKB;; zUJo8leDKMuqK1dH=e~C<=fH!l2bGHRL-4?IQ8K%K40AgjlUvE6wahVgWpnX1{qWe2-F>F;yVE}5YDUP7Df*fByc#m&@7|;@ zxCm(?*3k!J=OAGwvSMXJQ|^UXB(L&(FK`JA-~Kys5I7aTexH9n0JPt@FYY7Z2Ksn; zRpIXxuy0?&`e8CQU}i;$ovIPSmZ+!9Z}|^EWo@Kq%3(p+bhCZdbVnY@T7z@qHID(A zM`ix4jYOcmocb_+){C%2PfmL<^%ddoR>{w@+w?%d$U1B8Y8`w!mN`vWDVj$uGf 
z3t{cM1DR-#J>jp^*|S!k1POm-Bhv1E90201+=v+J8N$M|l%lsgyoAk$y11gIA3!;o ztl-1SPT008qUPFXLD;ZOJJND{ny_+(e!rh+Ct)ridi9j05n*GogvRen8evUM{;L;z zCt=&=Z>?}z7h$>Vbd*OTfv^%ULK{94W zc}*Y^sSP~l`4xyxN~Q{iJOm=jtJ|II7l6d(Oe(u;Cy+-eyq0?*L-@<>TEA@R0oC&Y3f-ICK)h6C@bX|NP^*5@HBU_d5|P|w0rf*b?st)WkEt%u znmIRr488@_wF#+Vii|)hZmIFYz!)g1PWT=w=#%i^Ausp&aFC2DH|Inxu#wg}iM-wi2W8ujYuw;~ z1NZMF$b54Hwv+p_8PWuR#b}+@hQbV3{yb;dV&DMQwbv(0)Fpu3>bI5P5gy>U{CP%* zxE0vS2V#{!?g0*qfyY1JcS94wRI^t#28;>?_L(u>fCTCpt49AQTOofmHKhK0^g!-QI$ZtxD~GIT4KijDmrCh5 zAj{8?Ga=?MTuU%Iq)zu&WloamoZ^rzb(=NuKXbpYhG`3dF^Qb4}I>S>!B zXQ05)doA(dE66Y2dvPG`Amsl}re@*02n8EwNMvgbp|FsjnEW(76!9ICNGrbwMcHKM zCB;3USo#9Ty{SwnZp%F0vN;VU%)L#EOYfoNNEm6qwiuMS{#~WXAMUyIf~} zU1}fO43q|OxPAa}D5H`tl;vZEGA{%2=+iq;7ISEg?s?oU$JJbKVPJ%^jy$E>Dl#Z* z7u^09rwC=Qo(K`IeT1@^tA-cadZ651$ymu{7|Mxgi(-z}LAgu&!1$s!ls}zuJ;=7Z zkHrY|&cBb?t;3dWXD?iW3d(C=<*h29LWn$%%6khcPMxV5qYr`#i)hlKj2}?ps@+%| z@DeKIl7svD2cd$H`{mu~U5;Y9_@VXhE;qdzxcw<>4$52F_5_CtK>3g5u2V!BP(Cd~ z&Pb4k3O4rZPn{;A;>fJk3C@F1K|6T{@?Jst&bXU4%TuUu(&rIpV1tTq38Pkn6{z^* z8?bhG_qgAoYcprLp<=3gk!f$)E;sU6Q2yG6iq`meCdu9N-6DLmXZ4^WA^MnhlkKjr znI;;xWT;3=&*!FQfC}a^;yo7`p!~D;i^UjUC_nHn*_ZYZlwIiBV=(atJlNjiFW5{(%b7zRu+_e-VoxqNQ9EQUrCz9W>Dg^!1{#h z7!;Q-$%sW8Lb1rwy=cBlD9U-|DPVgD3VRmSD!+(A!Hzs>!O$uc1pA8)(`!LKFRhv| z_lwrad0ic!_(Pv05;H+A3~B%l15;7%BYgodihKu&787d?}TXFgEDyGft#0XLZ8 z?CY$WHRVuv-s+m+;Xnt^Ogt&>dAP#M&glnl*dIcRj#8mQum^k`?-={xrS(r<1&*+@ zhRbL|%rjL!c@{lL%91Nf)is67dmgc8n&`sS+qy{Rcmguoqa@8A#6wodqgQ3eRv=rc zp!mJoU$`FFwIL->3%NEd=8ueDLSFV+()Q=3kYAFxD#>sW;1>1C{U2#;zM5fE(Pb6P|nN;D&`f+5BxKxRIQ; za`?Il-1t2GqK?)Us`$9iv3I?LD%mIDuL!%^{p^g?-6E)J=6y@NR17y+^3U?!WQCio zE2rca%;2WN4JX$-g>cjJFq=fSF5J9xd%vjgez>{hOfmeZ52_{dHoqb*R2Na5)|aM& z>MpGo;kkLJG1BDDKVl9w2`;moF*Q*0exi@nVhn1g`G+!uGN6{_p_|a@r%(OcSoN)G%{uk|KfTM4bSUNE70pega0zu$ml3Ea4D^>CNe++ z-y{A={t~GF_oBNmVo-BLI;*QbD-`LRkYvpdZ;ZB9;wW!+r2+$$4g5? 
z;pQ`<(RYbyP&IM9ir7OMDt~jAs=eKWveiCPFT;8$@rgcCFq#8-M_Z)#y`F{a`~vdF z?FMl5mHsR9(_Ii_MYsQXL8ROZ^|hzUPlthKOL3o%$Q2;ZxaLVtT??e2>&kDqS_2WA zSQTZ$3=pxOtIJ_{NLZJ-6?OW?Pr_=R6AkJ42*T?9f+kueM#B7P_me&H%!GAoq2+;E zVZz)PpThS(Uc&6RhaG>ecM+yX;>V*Ie-fr!&I)^ntr2FtUkN3L-zH4yw%uWE8zD?3 ze+J)1PQuKc{gTS@q}(!YP_#1q2As3W7G&=EaV@d?`u=Kv1E^qp{IeC^1gqZ^lO{q1^gvEn@ zI$m`(66Wm2uF%LmCCtmI8=0q15tj9=6XQij2=j}9(Ht8!gr#*!Gw%=Xge8_Wzqa;r z!p5=J2|^CsKy=AoB08KvSnU4O^O7}$u%0Nn@BK_XVNG;%>Qki_VbSMN%L}o2!cyL| zlc$u_35($uk989J5!RH~lYYIDCTz7;$8*#~5mq0s4@yaG0;%dpKECtHK*am|FkMICH+mT$lT@8oA;D@f3E_$n$aG8 z(~30QtA5`WHd2j)?Z&hXqwIK-~r zI~v6cOgvfFE9-rMHS&s=ii9s5=GW$O>h^;Jz9}tX)Sl4bQZ5-KM-I;0B(cntt8jT) zn&iyLw<+uOx+kcSXVtSsHooe-fOD-&` zsee1+>ZR`O^x6@~awO01c5L$PhE)Yq{#C}B9blX%1iO0t!DV}>q4DZTu%?X}lX+SadBYjy(4 z1Zb>|9tq!F_gnIrS81S}Sb)nn<`R_0ikvDr=L;3GU184|oT1{ucKC1S=TM2`Srwb~ zP+9(w|4mc?RQ~n3V4fWUH?Gq}d@F2#8*^nf@%BwnC8O7qa&{7`;$ju<-Is)_EM=JHySN$JXZJUqZE%ZDMqQG*qXb?@R5_hUy=_8s+y}phog5 zdGx}=U7i{)%zpU=YWiFy=!*NHc5mA4b^%wYb>{C1;|qn_CWfHogS}8Y^&(~$#<#biKH^jZFKHaqx6$iZ8ni=$ zu065kTZD%3ZP$Axf1&Z{cHo~vPiWLj{JogF%SUQIcqna_pwWI^AcW}{G@I$lW`5uSRvs9-@nqfltN(ZWQgiZ}5H$9kzHs9K z5i~AYiw%seK@&w>Vah=VXd-`-f6!_7ytzB_Poznqacq)R^T-!y{PSGtKyolNesi== z9ASsXAMr2JG^C+%xbTC9TNyMCd^&8grvMtC-b*+xsIlu~BcdSkJ2a-8Se$rY4UJv` zca}a6@A|z~D#YspjfB-%mG<4pcTXzpW-v5Ns4U3wX+eW!vEOD;4m411$=dn&L;ab9 zp%Yy+P`5)TB5oT5bsPx?PWgvJt!p)XH@_^@oIQ2j&8ix1cDC_9@%aIjTeWQAvw~33 zb*rbOY#YiZNmzWz3!(JdPzK4K2T&Y$VM@~34)QWi+Wm4*gX?cN?kn!@<7t*>(oU^~ zK|FP;-Mf@kx#u0%WsfVp0v6KD)BRbdus3=9jleV=kfyQ^zxBKgM0^_0o*cSD*ipUR zKTz#WSgqVM`grj!Ve6CB56cH_gcZHO@a0FB3G?@fj&Smj5dMBVo-puUny^B2TH@@` z6~gkAT9E1~dcypKvIwP29bs0nAphb%Ho|hEg6+GTErj_C3EoV`>4fpG`{_eI#Ss>_ znjH^p?2eZxct@ydfw0c8C7s|PL|9NU|5y?wOIYX&8Vvo&PneT;SKdi}K=@;m{b2F> zNy6fxI{D}wCBnS0!D3M6dBQB0c2eJ~AA~taTe}l3LWDne&K*@L+9a%*73#Urq!DIn z>#UX!IT02D#v*xk6;^-GpUVVUC}wOoZhMwjXYSML;Ad%5Qw< z6Jh(OZOlRItAs7C6Crc^J`*;$9@W0!9U#ojF%29$a-OhJ*}-nfrbbwnR-n60K0#PN z_v595xd7pBTHc>(Jxju04)REw7E8j;{mFc>dOpHmvmx>U=MRLxcMZo<@9%P0b|;D7 zjVpw|&I~f^hkosHRila+-8sVMd%EwQ6|02Rp9+?j6p08+6!wL;_RJ8rXs=X-aFqb% z$q2dCi<5-4D$)FKeHEa~@+^Bv zLdu&JqV@Jjxb5Upy{b3)Pn`(}muOP?nE#JHukXC+;2sDb^X)>lUPItVWu{Bbq6dCE z*-i(t_dXNpQD)#HUag3$e~OUJiYU{OA40?^dtxOced2+5exORR33;5(R~y zgi7BkQZRVL?|9hd1aNr`Saf^LgGicFp^w)pa4!|GJi2=cWJw<%KT31=Kju@W^~`4q zr{sUM;Qwg+-{=O0E^ohA_S^-Rb&5}t`OH9g(!pCw8v_P%87(u%zkoE5CwB_H0;8d7 zeoDch|CmpmpHoXq?M?q^`Tx<+{x?1W@0`B8XoXu4A3*cx#)dlB5HYNH7<~tS(~Pr` z(d1y{X2<`Fs0sY+R9~fXm;T3mYNro{{*?0mN6Y<>mjB;)2z)8t4wq0IhYP)h&BqRL zfSpYRT3ru)QoJWS2oBFjdpEasbRO5vMZG-2}zz(}HI`w7@r? 
zi>jJs{Qtf;LG-^b7<)M&8$6{2vua0Iv&r*e;r$oMoZv^Wyn1USy~`5T*t)j4=K5j7 zMN~h4iv+gFsBK@h3d7%f*5}^rr-Gd;^1mKL(IHVzLR@=8GZLG)gx+g1LlR{#Sz&W8 zBv}n?Iu_1@q?Li?qynZ$y3_RJc%d(nT{0q&d_IrlLT_i(p4B0FtH!U1<@-p1tFki= z-bhhhBNVW1iWJ+EE>1aK(69q#rcm0GK=^Qi|&bqHn*?L?l;={ktiw2%2|_WVX_=91S7{GLdyC!O)+z&WJO-jkYfyBMjze7t#WA{%L# z8DFn1bRdo0KHi6f3rO>bLxGmV6KT4wjDPd)L0ZZY_a3F&NQ;`X=OdYrR)fUVL3+^%Yx0{gmO|R)~wF0CaQL1YCPK0#(YZB?Y+L4ZrPeV9j73m1XRO*Yz zknSLn!k*zZq?4#Q7TRHfbas0>+8^9PI#<@1cF`Q93pp&wv3~{WOs%>5eIk)g<_P@{ zy5mS^J-@QCeH-b#?kI5W`HFOm(*gwnJxDtf5}72yfV4lm4dn=yNITqS6#b_KX&=Xy zkFFg;+RmED0*f0+YcINkXVy+0&8x(7ca^?d4&wF{X@eL-*Q<>iM+ZEhj2=H7$UC!}@x$IX!H z3mY@rU=C8{C#Q8RwzQ@Mo=Hq^SJt?|Skl zQXGq=zx2Zt$#3_w+qTyuxnQ`1hJ!ScwReTP+t?!60dWtZ1uGD(H(1oHI?d( z++03buCrT;CLDl8nQY}!MpKxrc%AS>Ss2D*rsqATF3R&vcY8=Px1qFTS9wV+6?~#y za9iV!!lUO${G;@q;n78V75OB7ty!vp3lJQ=D zIedFgH#Mx81V4=e#4|=YVSL$9`bRdxRA1q~>ie`XcWKpYJY5Cmz1!Y+tjNH^d|SG6 z@_tzCJm#WX%nM5^pL!!-*}#gNoYz?39a!y>rupHL0c&m!{E9t)Vcq(|oK?9lYy_TC zQ4TYQjfru$Z_j&S%RsQv+Uh3!rGBt*0`9}#fx5E+0*SC)=f+$XxCJ}+PGugRw??9a zKW15S-y>1jNnX2CVn}?VwBthTY-;)oK zB#y&U`L_X*wAyM?``RKYb@?fZg57zNll7*M*4Uk|w1n%|7?CvmyQV@w5t81%vff{5 zfuwI#ye9YeA{i@HeaMOjk|`BmGjEYVvWw1lB#hmVtjYI7l>9W34OBQtu{#r-My{)c`g|tY?@?t4KNEazda@G$m z?;ypv%K|Oc_ucC~JTTfDxf^G$JxUZsidG5rdV3Y5sHz$Nc`OAft_5yH{7gj(#kb;G zP5Y2y--GV=H>;8STTjmi*#k&k%^~1Ibri{?&tHpm9zgPw?Zw}IMI+fa^Zjc3$B<01 zPH4!%3`rkWjjE_QBB{`nNy;oglK72_dmkP_;(?=*Cx-4I@&01_ibz1Bpoe#kMHRr# z2|~H~&L8;O9@t$mq6?eDp#u&_Kf{KZ?bhe)bXXny{dT5=8kUm}j$BSDgN5&fe#OuC z!Yq?v)AzHU zfLRBudAKg`GfD%8^{VQb^a;>^GdaxNs{!T~M8-j6?4aKi@M)GR9!w-^&QYEqfa&6k zV3h+7V42r9IN-(jZDC} zJ*!OM0t=XK91hW@eRLMLKOeu!uhTTHWgqIJU{{fBWzq zSO@Do=2>P2%Uh<77E%PTIQEjROJEv|oJNJRmWaWYFM`#MG7_xHCoc!z@&JbpkxxR; z-+|4`6{*t>k;FMV-*|pUGPU;8v z?^>RRGgdd|Hy&LCx6bjhkD>lLbK#w684vK|l9p*BxeYFRH80bA4+LL} z>1hVTGH_*gmo2Je10UInOmY=#a4Q>O-D;i%_ngH0kAj=QUsHlZ`eO|E>28c3QPBo( z7Ms0Vr;WiU|NgzHy~E(0%Kel>vITsiczv7##KCJ|8JRF1d~D5A z&Cd|3B(B!XR0$z2+8%gS4?<=ZthAWDAo`G`dgrfsh$yHQ4CSzZ@O08irNZ|R;b&+* zLh1k!k7s{;Nl}Cd_ZWpLvYQaeW%8qaQWzp-jF-1wIYRiiW5JKPJt4easEvft8^Q|{ z-YcAY2jOq_2R`4w0ulU#1osVUh)~`!^?G_AA|j<+?(uR##OYUN;wIb>Vbo?JHp~TU zuMCgmt>{6GF@8L;uv7amEc^ zxuY^Lr<`d$RkR3;^0NU&f9zppuTI;Uedk~;_g(NX_fy!ksSy{Lc>-IDqsD%d&9MDI zGS@4u7l{-e0;%^oB-)VvJZe#a#4l5oYRDatzo%oERqx85Nc zDXW6I_H`uFW;0E83`4S!%lwZkcKOY#_Qa(34J4m_FW>W+4JlaP8uKroMv5elfj#pq zNYVRFtbAhxDXsi3@KqaikwGUnKGv8KzpQRQe*Od zn1QqgtCM|Idy!TfCp6#9BW>KnYZpmQB5lL3&q>l@NZbB4MVT!IX>Y|1?%Ux)+B(HJ zPlp|(4dhMw-0Y6DDH6*28*d?POVg`#=O0Mhw9;|sVGh#1lCD>5I)}7hq88h#Op*4h zuG#e(b)>DozkE_{6lr5jG>%`gMp|1%k6cSKq&-r?e`k&zX%!E~U1(cGTFFnxN>|2^ z_TajR;$uss`CjB8qW)u-(|8*neHBNVShBf80)j}RcvjK&$p}(UOo$$E5Ju|Wiw!i! 
z`;fZg&E4OloJg%1)i!kL3{sPBy-R+xh*Tj_25L0Bd?t4IjwiVSQhs8IYNNA4%1hjW zL1T7EX=K58SZfF=Nkh%=j=Ld6X2>U|_sU4Y*h6hNON-<#6-AfQs*r5u=}1JnIFeoM zC1*BXLo&hWJelloNZRr9u|;eolG;<&b-B$U$xo(2QQaR%BC91ot8RzH8D3S2OY2BX z&$~EEat($k-`@kJLko`F5o23d+5#4{67>Z9TGROhfEhf{vj1R*K`$mtPdO~ zU$;aa#w6ky|If(xiC!p=@Bj}D+U~J&6+k%)vf;hY7XQhsH;Ma&pOUgb@0lZ}r?|Xf z@QF(n^?6VDI@_?xTm2qJ!#h)(5+A_#(Y?kejRN7P{xfM1t#scl+%riVCtB!QNP6~Ons3~ETLkA>2pF= z`3+_;!=-dsMu>X z+5yXL)A3p7|G@I!qV(iF`mn;s^*$(=luT66RIjm_-@-Lf_z}neU zJ2TOjVeR_M!9>wfSZgDBmQ5cC>wA;$4eI2v!iq_NwB`uQ}K5(88&z&E>Fli!-ndWORhgTVZ)!o_B5X?Y&;siKdEmI z8w1r7jIv9xN%rLD`Vc>Cp4OVw>-B|AGlAE})jhE3{zRJW=04c;zvCX9CIXvjsxOE{ zlVP(!-E99GQrN6(&3^C~V6!LS2)n^;*nC@Y({=71Z2qXUl37fG&EN3>ZoFi$IZOUn z{z}7c%r`NeriaZrX^R8Wd9c}QqAjX=ad*7QA9+jUyVs+eXHI&w>v!VGpE)I1Z>B7i z)$xS2jt}=P=j*~MOXIO?v5~M`y=oABb_Ny&ay_NL2*doNX*X6`8u+6;+%6pP6Grnm zZc_#(%ku_qoATVd4o)q{&r&ygfdhH|%H=C}z;6GBuD9(&uqJ3+R9T(}t5NzP6Jbd( zt67Sg^gIS;i6zG-9b>`Nd#?8Usx26mY1O$bNQ2qTUyG|kDPVZ>b^>R&Cm8OJ*lcex z1B3HbX9>pFz>uXNEA09{Ft|OeZc%p|^mPti)Elz{19^*+Gi*s<5N6YmN8Sqh_e>dD z)wRH+CHC-8q#79PyJ#yuwabn0gp%UxCK$+`WPNzuAN0jV#G|WZ!H{>nD`ZCq^cRlO zOiBC#-7smbwu@0ud;=1JS zd7LDU>@P;ZY^XoF;i@K>q`c1L?xg^G%_<6F>q#&hl~O5<{|*+5^Cg5ZWiUv{YQ3}> z0>%Z}`ppsbU~(t&Z_W37Ff3Pe`b+f?jP6^V?}DPr;I zqfTI=VS)7VuffXl6yeaQ2$sVC&k)@W4M3EVc`!#mtU_Ee{hD z+kgjHUy>neDP;zGuh$-IUjx9Ep5pck*D$cPZup@m^9P)sBxo0``GI||vHLd;cd%t; zy2-5V|u;4?bV_3x8nD zhqIFvCJIT}aQ3U=WI&M#L^!8d1#3M3U$vGFf`2oFZl}4OGH--)B^|ZzXG0-$+GD8A zVFDu03LSI&$N@o-Zrr9xr4Y9DM(&$!5ky0Md#D-_1V6RUd~i-~cR%v|r0i4-c|Xq7 z4S5g4TT<$*ZEq_0XsTeY#uy3jUKvb`g%iW?ESlUeF5>^?xBv3nfBEe{$Z!25A3RSq z*26c&&l0gspWw&->x~b}#o(8HE4`Bm4~*Bidc(v3Oc4J)f8Kf)CSRQwYty!X=@XZX z#tf2RMtrEKJ@f?3N`vTeG|sR%6i(P*s|HJHV%E>^55m$!k!N$I2Q1ru z=v0ZCg5^$&vuc{@u)^Va=Kuf_LZSj~zO$|} zY>1y76bu`Jjqut}HFTb^@oJGpLe?8LiD)cj9;U#i_IekGPT?*``1Z*i)`ZQ$=&9pl zYOp1&wqU8`4qL9u8X)`@wr<6h^ND!D)+T$+@d(1U;%EbZL)`Q~UZaPL>@cgx0jzIQKSPc&#@awlxilEzr~M4g>MtPgyoUw z!Bw-DM@o=rs93o5#_oQ&ZhqeILkJR6t(*S(EQiE2r%f}7Mv(YGbmvU%O(Yf)*nGOL z5Q(*NvnCVhkXW0f$ske^iO?!Gu#IDz{-)A>!{Lt$yeu2b_ynE=B zWRdug=u4qjrbx{4%6l$U8Hv^sr+@X;A<+*dx0QkWyPqdO#vokKPiWA3^2j^`Boj;m_Atf7ITk#G8dL%W{@>_w@>Ka z7wD%x31gO?kKg|~EYJJ*z4#9zZSd~!xYAm+3@(pb3|O+6z`>H+xl-H@Yzt{e*BdT^ zt>dRd?<-8eJisA;u~rGJQ$(gY%G|*so@y>$=<9i z0Or#If1bWg08{(JcjU4}!N_UkS<=`bSW+vrs*sGN8J@Y|>|i)0b~OZ?bU$p|${zv8Cd1bj zXGg%{8yJ1|0^!Dc;GjnIQ1LJL8bTG5{Wr?PPr1oxs=6PwaxO0C>si=4C7`K!}Nzk=ax* z1Oy$`T@Go0kT8MGJ*{yN@j%n9Mf)1WMAx;g*wRDL&$IWAx@ST7MfQysCo3S7_)nvO z|9yxso(f+RH~^7XtcjC1iy<_zc(9O86yhpm?rl$|Lgb5$$@;M%h!|_QAZz**i8L)a zt93@8NN^Zpbe_L&5s+N5rI28t7#G3Ly#daXFKaZ|-u>kx( z2AixpY(x4|)Hyb))c=^z%;LIY;|H4m==gu>e_}~8=V5}NDwKN^j>+!!chQ^R-y-w{ z(#-$=Ckhgo#IfLw10HLzw|$`Rg@veQ|cT%Omq77 zrbz}0gk!xFWn`i1PtPrRqcte_+g_Y3;|1+!$Gl@MH2lYW)=bJ(4-d8eqx1fy|A`%M zLyeXh$f1o{FFcNG74pq`HvKFvKyA*SOQWj3P^fiFkX5Z39>31JxSB}*AM;s|etdS2 zRpB3<`Y-)YtWj?)x_6uaH(KsSEyWK*^7OA34XOyZ79v0w{%amC-7mbAIOPvjH9p%< zi?9C2d{!q5+px!m{G)UKrT>W?CzsL!!^@!2MwQe=!45Jw<3AC`^`Wq6tW)v8BxGMn z7n|SQg!b=WV&jhW5&i$4)A8$gMnO2^PguAypVYa`34fhD$jPP^kxYVMeLE%)X?SDD z-f$d11_RFyxhGV(-{>0ot)mrqVC|zlZ8R&guIosriALce9`A-gSsCP(in^D5X&)XQ zQ#!qI;|m_W{+OQs`cdRXYMG%p2ILc3EvI?dg8aRO8{?hsC|KvAbvU#KPXtQrVb$zL zAs&vXefn{Dit2prrA1~u^@p;{+>#eX=3kP`T}?#M1>G+!%Z@1ax@_g)`{O8nXI(JY zp9Cc`d}1lhCs8sx^MkzLeLS5ZbyF|nK1$_$c*fQG6s6B=f7qn@hBA(=;;rhlDC-t~ zi^wMp<)o`?PG{1gJi()`_s=mTNdNA;yw?grN^qgK#|)9qpk{BG2V!U-zq*43DiGPl zkDjPUg)ptoGWXx8Fhk(KT7DlDgZVpW&pt=RWsd8QFKVEYdq^0i9VsgH8FX?ty+mcp za?{@*gHd_<_#WrSk*MN4e5kUl7*&2&UKj#WRCT7~WeT{0szYA7->A7!&8AI}{;nme zO`Wfpxq1=Rt>_;yW=x`bzmsjXS2SvvOuqOt_yRS)GVdfsJV#CY)5lW3x1#2hVS~D# 
[GIT binary patch data for data/par_POSCORAD.rds and data/par_full.rds omitted: base85-encoded literal blobs with no human-readable content.]
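The two binary blobs summarised above presumably package the historical posterior summaries written by data-raw/save_posterior.R and consumed by the power prior in FullModel.stan below; the patch itself does not show their internal structure, so the R sketch below only loads and inspects them without assuming any fields:

# Quick look at the packaged .rds objects (their structure is not documented in this patch)
par_POSCORAD <- readRDS("data/par_POSCORAD.rds")
par_full <- readRDS("data/par_full.rds")
str(par_POSCORAD, max.level = 1)
str(par_full, max.level = 1)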
zKY{tm?^F15N+A5R=d`BZ_3$qV;g9rxpa0*T?Z-<`&s_~PWk`Sar!6u80r7~{4c8l(bevVovSzDF9#-u?`w&v zmy?IrZw9}qhqsU2pByA*(gr&(yJLQ?zdhpl+efB`8w`G@&Sc|i?fqT+2h!f!#)qm- zAS@96{+!0N-pPHN!GF?VHSw~gw$Rqh{HNomc4umf7~*;AGEW#|IRUUwEn(Ov+S| N_pt; // Number of patients + + // Distributions + int M1; // Upper bound of observations + int D1; // Number of signs + int M2; // Upper bound of observations + int D2; // Number of signs + int distribution_id[D1 + D2]; // Indicating whether the d-th item use M1 or M2 + + // Training + int N_obs; // Number of non-missing observations + int k_obs[N_obs]; // Patient index + int t_obs[N_obs]; // Time of observation (from 1 to t_max) + int d_obs[N_obs]; // Sign index + int y_obs[N_obs]; // Observations + // Testing + int N_test; // Number of predictions to evaluate + int k_test[N_test]; // Patient index + int t_test[N_test]; // Time of prediction + int d_test[N_test]; // Sign index + int y_test[N_test]; // True value + + // Options + int run; // Switch to evaluate the likelihood + int independent_items; // Whether to have diagonal correlation matrices or not + int trend_known; // Whether to the trend smoothing parameter is known or not + vector[D1 + D2] beta_data[trend_known]; // Smoothing parameter + int N_agg; // Number of aggregates to compute + matrix[D1 + D2, N_agg] agg_weights; // Weights of each item in the aggregate + +#include /include/data_powerprior.stan +#include /include/data_calibration.stan +#include /include/data_dailytreat.stan + + // Priors + vector[M1 - 1] prior_delta1[D1]; + vector[M2 - 1] prior_delta2[D2]; + vector[D1 + D2] prior_sigma_meas[2]; + vector[D1 + D2] prior_sigma_lat[2]; + real prior_Omega; + real prior_Omega0; + vector[D1 + D2] prior_mu_y0[2]; + vector[D1 + D2] prior_sigma_y0[2]; + real prior_ATE[D1 + D2, 2]; + vector[D1 + D2] prior_beta[2]; + + // Recommendations + int N_rec; // Number of recommendations + int k_rec[N_rec]; // Patient for which we make the recommendation + int t_rec[N_rec]; // Time at which we make the recommendation (prediction at t + 1) + int N_actions; // Number of actions to investigate + matrix[N_actions, D_treat] actions; // Actions + +} + +transformed data { + // Dealing with two measurements distribution + int D = D1 + D2; // Total number of items + int M[D]; // Maximum for all items + int size_ct = D1 * M1 + D2 * M2; // Size of ragged ct array + int id_ct[D, 2]; // Index of the first and last values of ct + int d_sub[D]; // Index of d in sub-arrays corresponding of size D1 or D2 + // Dealing with ragged time-series + int t_max[N_pt] = get_ts_length( + append_array(append_array(append_array(append_array(k_obs, k_test), k_cal), k_treat2), k_rec), + append_array(append_array(append_array(append_array(t_obs, t_test), t_cal), t_treat2), t_rec) + ); // Length of each time series + int N = sum(t_max); // Total number of observations + int id_ts[N_pt, 2] = get_ragged_bounds(t_max); // Index of first and last observation of each patient time-series + int idx_obs[N_obs]; // index of non-missing observations + int idx_test[N_test]; // index of predictions + int idx_rec[N_rec]; // index of recommendations + int yc_obs[N_obs]; // Categorical y_obs +#include /include/tdata_decl_calibration.stan +#include /include/tdata_decl_dailytreat.stan + + // Dealing with two measurement distributions + for (d in 1:D) { + { + int i1 = 1; + int i2 = 1; + if (distribution_id[d] == 1) { + M[d] = M1; + d_sub[d] = i1; + i1 += 1; + } else { + M[d] = M2; + d_sub[d] = i2; + i2 += 1; + } + } + } + id_ct = get_ragged_bounds(M); + + // Dealing with ragged 
time-series + for (i in 1:N_obs) { + idx_obs[i] = id_ts[k_obs[i], 1] - 1 + t_obs[i]; + } + for (i in 1:N_test) { + idx_test[i] = id_ts[k_test[i], 1] - 1 + t_test[i]; + } + for (i in 1:N_rec) { + idx_rec[i] = id_ts[k_rec[i], 1] - 1 + t_rec[i]; + } + + // Categorical y + for (i in 1:N_obs) { + yc_obs[i] = y_obs[i] + 1; + } + +#include /include/tdata_state_dailytreat.stan +#include /include/tdata_state_calibration.stan + +} + +parameters { + // Latent dynamic + vector[D] eta[N]; // Error term, non-centered parametrisation + cholesky_factor_corr[D] L_param; // Cholesky decomposition of correlation matrix + vector[D] sigma_lat; // Vector of standard deviation + // Measurement distribution + vector[D] sigma_meas; // Equivalent standard deviation (not scale) of logistic distribution + simplex[M1 - 1] delta1[D1]; // Difference between relative cutpoints + simplex[M2 - 1] delta2[D2]; // Difference between relative cutpoints + // Initial condition + cholesky_factor_corr[D] L0_param; // Cholesky decomposition of initial condition correlation matrix + vector[D] mu_y0; // Population mean y_lat at t0 + vector[D] sigma_y0; // Population standard deviation of y_lat at t0 + // Treatment effect + matrix[D, D_treat] ATE; // Average treatment effect (in percentage of score) + // Trend + vector[D] beta_param[1 - trend_known]; + +#include /include/parameters_calibration.stan +#include /include/parameters_dailytreat.stan + +} + +transformed parameters { + // Latent dynamic and measurement distribution + matrix[D, D] L; + matrix[D, D] L0; + vector[D] s = sigma_meas * sqrt(3) / pi(); // scale of measurement distribution + vector[M1] ct1[D1]; // Cutpoints in [0, M] space + vector[M2] ct2[D2]; // Cutpoints in [0, M] space + vector[size_ct] ct; // ct1 and ct2 concatenated + vector[size_ct] z_ct; // Cutpoints in affinity space + vector[D] y_lat[N]; // Latent score + vector[D] z_lat[N]; // Latent score in affinity space + vector[D] y0[N_pt]; // Initial latent score + // Treatment effect + matrix[D, D_treat] ATE_abs = ATE .* rep_matrix(to_vector(M), D_treat); // Average treatment effect (in units of the score) + // Trend + vector[D] trend[N]; // Trend + vector[D] beta; // Smoothing parameter for the trend + +#include /include/tparameters_decl_calibration.stan +#include /include/tparameters_decl_dailytreat.stan + + if (independent_items == 0) { + L = L_param; + L0 = L0_param; + } else { + L = diag_matrix(rep_vector(1, D)); + L0 = diag_matrix(rep_vector(1, D)); + } + + // Cutpoints + for (d in 1:D1) { + ct1[d] = make_ct(delta1[d]); + } + for (d in 1:D2) { + ct2[d] = make_ct(delta2[d]); + } + for (d in 1:D) { + if (distribution_id[d] == 1) { + ct[id_ct[d, 1]:id_ct[d, 2]] = ct1[d_sub[d]]; + } else { + ct[id_ct[d, 1]:id_ct[d, 2]] = ct2[d_sub[d]]; + } + z_ct[id_ct[d, 1]:id_ct[d, 2]] = ct[id_ct[d, 1]:id_ct[d, 2]] / s[d]; + } + +#include /include/tparameters_state_calibration.stan +#include /include/tparameters_state_dailytreat.stan + + // Trend + if (trend_known) { + beta = beta_data[1]; + } else { + beta = beta_param[1]; + } + + // Latent dynamic + for (k in 1:N_pt) { + y0[k] = mu_y0 + sigma_y0 .* (L0 * eta[id_ts[k, 1]]); + y_lat[id_ts[k, 1]] = y0[k]; + trend[id_ts[k, 1]] = rep_vector(0, D); + for (t in (id_ts[k, 1] + 1):id_ts[k, 2]) { + y_lat[t] = y_lat[t - 1] + trend[t - 1] + ATE_abs * p_treat[t - 1] + sigma_lat .* (L * eta[t]); // Multivariate Random walk + trend[t] = beta .* (y_lat[t] - y_lat[t - 1]) + (1 - beta) .* trend[t - 1]; + } + } + for (i in 1:N) { + z_lat[i] = y_lat[i] ./ s; + } + +} + +model { +#include 
/include/model_dailytreat.stan +#include /include/model_calibration.stan + + for (i in 1:N) { + eta[i] ~ std_normal(); + } + // Priors + // NB: technically the prior should be raised to the power 1-a0 since the power prior is on likelihood and not the posterior + // But given approximations, weakly informative priors and a0<<1, I don't think it really matters + L ~ lkj_corr_cholesky(prior_Omega); // LKJ prior for correlation matrix + L0 ~ lkj_corr_cholesky(prior_Omega0); // LKJ prior for correlation matrix + for (d in 1:D1) { + delta1[d] ~ dirichlet(prior_delta1[d]); + } + for (d in 1:D2) { + delta2[d] ~ dirichlet(prior_delta2[d]); + } + sigma_meas ./ to_vector(M) ~ lognormal(prior_sigma_meas[1], prior_sigma_meas[2]); + sigma_lat ./ to_vector(M) ~ lognormal(prior_sigma_lat[1], prior_sigma_lat[2]); + mu_y0 ./ to_vector(M) ~ normal(prior_mu_y0[1], prior_mu_y0[2]); + sigma_y0 ./ to_vector(M) ~ normal(prior_sigma_y0[1], prior_sigma_y0[2]); + // Priors treatment + for (d in 1:D) { + ATE[d] ~ normal(prior_ATE[d, 1], prior_ATE[d, 2]); + } + // Prior trend + if (!trend_known) { + beta_param[1] ~ beta(prior_beta[1], prior_beta[2]); + } + + // Power prior +#include /include/model_powerprior.stan + + if (run == 1) { + // Measurement + for (i in 1:N_obs) { + yc_obs[i] ~ ordered_logistic(z_lat[idx_obs[i]][d_obs[i]], segment(z_ct, id_ct[d_obs[i], 1], M[d_obs[i]])); + } + } +} + +generated quantities { +#include /include/gq_decl_dailytreat.stan +#include /include/gq_decl_calibration.stan + + // Additional parameters + matrix[D, D] Omega = multiply_lower_tri_self_transpose(L); // Correlation matrix + matrix[D, D] Sigma_lat = quad_form_diag(Omega, sigma_lat); // Covariance matrix + matrix[D, D] Omega0 = multiply_lower_tri_self_transpose(L0); // Correlation matrix of initial condition + vector[D] sigma_tot = sqrt(square(sigma_meas) + square(sigma_lat)); // Total noise std for one-step-ahead prediction + vector[D] sigma_reltot = sigma_tot ./ to_vector(M); // Normalised sigma_tot + vector[D] rho2 = square(sigma_meas ./ sigma_tot); // Proportion of measurement noise in total noise + matrix[N_agg, D_treat] ATE_agg = agg_weights' * ATE_abs; // ATE for aggregates + // Replications of the scores + matrix[N, D] y_rep; // Replications (of the entire time-series, not just observations) + matrix[N, N_agg] agg_rep; // Replications of aggregates + // Predictions + real lpd[N_test]; // Log predictive density of predictions + real y_pred[N_test]; // Predictive sample of y_test + // Recommendations + matrix[N_rec, D] y_rec[N_actions]; + matrix[N_rec, N_agg] agg_rec[N_actions]; + +#include /include/gq_state_dailytreat.stan +#include /include/gq_state_calibration.stan + + // Replications of the scores + for (i in 1:N) { + for (d in 1:D) { + y_rep[i, d] = ordered_logistic_rng(z_lat[i][d], segment(z_ct, id_ct[d, 1], M[d])) - 1; + } + } + agg_rep = y_rep * agg_weights; + + // Predictions + for (i in 1:N_test) { + y_pred[i] = y_rep[idx_test[i], d_test[i]]; + lpd[i] = ordered_logistic_lpmf(y_test[i] + 1 | z_lat[idx_test[i]][d_test[i]], segment(z_ct, id_ct[d_test[i], 1], M[d_test[i]])); + } + + // Recommendations + for (a in 1:N_actions) { + for (i in 1:N_rec) { + y_rec[a][i] = multi_normal_cholesky_rng(y_lat[idx_rec[i]] + trend[idx_rec[i]] + ATE_abs * actions[a]', diag_matrix(sigma_lat) * L)'; // Linear predictor + y_rec[a][i] = y_rec[a][i] ./ s'; + for (d in 1:D) { + y_rec[a][i][d] = ordered_logistic_rng(y_rec[a][i][d], segment(z_ct, id_ct[d, 1], M[d])) - 1; // Measurement + } + } + agg_rec[a] = y_rec[a] * agg_weights; + } + +} 
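For readers skimming the patch, the core of the latent dynamic defined above is a multivariate random walk with an exponentially smoothed trend and an additive treatment effect. A minimal single-patient, single-item R simulation of that recursion is sketched below; every numeric value (beta, sigma_lat, ATE_abs, the initial score) is made up for illustration and is not an estimate from the model:

# Sketch of the latent recursion in FullModel.stan (illustrative values only)
set.seed(1)
t_max <- 20
beta <- 0.2       # trend smoothing parameter
sigma_lat <- 1.5  # sd of the latent random walk innovations
ATE_abs <- -3     # treatment effect in score units (negative = improvement)
p_treat <- rbinom(t_max, 1, 0.5)  # daily treatment use (0/1)

y_lat <- numeric(t_max)
trend <- numeric(t_max)
y_lat[1] <- 50  # initial latent score
trend[1] <- 0
for (t in 2:t_max) {
  y_lat[t] <- y_lat[t - 1] + trend[t - 1] + ATE_abs * p_treat[t - 1] + rnorm(1, 0, sigma_lat)
  trend[t] <- beta * (y_lat[t] - y_lat[t - 1]) + (1 - beta) * trend[t - 1]
}
round(y_lat, 1)

The measurement layer then discretises y_lat through the ordered logistic likelihood, using the cutpoints built by make_ct in the include files that follow.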
diff --git a/models/include/data_calibration.stan b/models/include/data_calibration.stan
new file mode 100644
index 0000000..9a9e981
--- /dev/null
+++ b/models/include/data_calibration.stan
@@ -0,0 +1,13 @@
+// Data: calibration
+
+int N_cal; // Number of observations for calibration
+int k_cal[N_cal]; // Patient index
+int d_cal[N_cal]; // Item index
+int t_cal[N_cal]; // Time index
+int y_cal[N_cal]; // Calibration value
+vector[D1 + D2] precision_cal; // Ratio of calibration std to measurement std
+int include_bias[D1 + D2]; // Whether to include bias or to set it to 0 (the latter for subjective symptoms)
+
+// Priors
+vector[D1 + D2] prior_bias0[2];
+vector[D1 + D2] prior_tau_bias[2];
diff --git a/models/include/data_dailytreat.stan b/models/include/data_dailytreat.stan
new file mode 100644
index 0000000..c1ff4c1
--- /dev/null
+++ b/models/include/data_dailytreat.stan
@@ -0,0 +1,14 @@
+// Data: daily treatment usage
+
+int D_treat; // Number of treatments
+int N_treat2; // Number of "treatment used within the past two days" observations
+int k_treat2[N_treat2]; // Patient index corresponding to treat2
+int t_treat2[N_treat2]; // Time index corresponding to treat2
+int d_treat2[N_treat2]; // Treatment index
+int treat2_obs[N_treat2]; // Observations of "treatment used within the past two days"
+
+// Priors
+real prior_mu_logit_p01[2];
+real prior_mu_logit_p10[2];
+real prior_sigma_logit_p01[2];
+real prior_sigma_logit_p10[2];
diff --git a/models/include/data_powerprior.stan b/models/include/data_powerprior.stan
new file mode 100644
index 0000000..0f03103
--- /dev/null
+++ b/models/include/data_powerprior.stan
@@ -0,0 +1,10 @@
+// Data: power prior
+
+// Set a0 to 0 and historical_* to arbitrary values to remove the power prior
+real a0; // Discounting factor
+real historical_delta1[D1, M1 - 1, 2]; // Historical mean and sd of delta1
+real historical_delta2[D2, M2 - 1, 2]; // Historical mean and sd of delta2
+real historical_sigma_meas[D1 + D2, 2]; // Historical mean and sd of sigma_meas
+real historical_sigma_lat[D1 + D2, 2]; // Historical mean and sd of sigma_lat
+real historical_mu_y0[D1 + D2, 2]; // Historical mean and sd of mu_y0
+real historical_sigma_y0[D1 + D2, 2]; // Historical mean and sd of sigma_y0
\ No newline at end of file
diff --git a/models/include/functions_OrderedRW.stan b/models/include/functions_OrderedRW.stan
new file mode 100644
index 0000000..35f9820
--- /dev/null
+++ b/models/include/functions_OrderedRW.stan
@@ -0,0 +1,33 @@
+vector make_ct(vector delta) {
+  // Define cutpoints from the simplex of differences between cutpoints (delta)
+  int M = num_elements(delta) + 1;
+  vector[M] ct;
+  ct = append_row(0, delta);
+  ct = cumulative_sum(ct);
+  ct = ct * (M - 1) + 0.5; // If cutpoints are equally spaced, the expected value of integer k is k
+  return(ct);
+}
+
+real[] compute_cumulative_error(int y, real[] pmf) {
+  // Return the cumulative error distribution for the distribution pmf and observation y
+  // Observations are assumed to be between 1 and M and pmf is an array of length M such that pmf[i] = prob(y = i)
+
+  int M = size(pmf);
+  real cum_err[M];
+
+  if (y < 1 || y > M) {
+    reject("y is not in 1:M");
+  }
+
+  // Compute the cdf first
+  cum_err[1] = pmf[1];
+  for (j in 2:M) {
+    cum_err[j] = cum_err[j - 1] + pmf[j];
+  }
+  // Subtract the step function to get the cumulative error
+  for (j in 1:M) {
+    cum_err[j] = cum_err[j] - step(j - y);
+  }
+
+  return(cum_err);
+}
\ No newline at end of file
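The cutpoint construction in make_ct above is straightforward to check outside Stan. A direct R translation follows; the delta values passed to it are made up for illustration:

make_ct <- function(delta) {
  # delta: simplex of gaps between consecutive cutpoints, of length M - 1
  M <- length(delta) + 1
  cumsum(c(0, delta)) * (M - 1) + 0.5
}
make_ct(rep(1 / 3, 3))  # equally spaced case: 0.5 1.5 2.5 3.5

With equally spaced cutpoints the category midpoints land on the integers, which is what the comment in the Stan function refers to.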
diff --git a/models/include/get_ragged_bounds.stan b/models/include/get_ragged_bounds.stan
new file mode 100644
index 0000000..cd7aff5
--- /dev/null
+++ b/models/include/get_ragged_bounds.stan
@@ -0,0 +1,24 @@
+int[, ] get_ragged_bounds(int[] group_size) {
+  // Get the indices corresponding to the first and last observation of each group
+  // Args: - group_size: array of length N_group indicating the size of each group
+  // Return an array of size N_group * 2: the first column corresponds to the first index, the second column to the last index
+
+  int N_group = size(group_size);
+  int id[N_group, 2];
+
+  if (N_group != size(group_size)) {
+    reject("group_size should be an array of length N_group");
+  }
+
+  for (i in 1:N_group) {
+    if (i == 1) {
+      id[i, 1] = 1;
+    } else {
+      id[i, 1] = id[i - 1, 2] + 1;
+    }
+    id[i, 2] = id[i, 1] - 1 + group_size[i];
+  }
+
+  return(id);
+
+}
diff --git a/models/include/get_ts_length.stan b/models/include/get_ts_length.stan
new file mode 100644
index 0000000..21d93fd
--- /dev/null
+++ b/models/include/get_ts_length.stan
@@ -0,0 +1,17 @@
+int[] get_ts_length(int[] k, int[] t) {
+  // From the vectors of patient IDs (k) and timepoints (t),
+  // get the length of the time-series for each patient
+
+  int N = size(k);
+  int N_pt = max(k);
+  int t_max[N_pt] = rep_array(0, N_pt);
+  if (size(t) != N) {
+    reject("k and t should have the same length");
+  }
+
+  for (i in 1:N) {
+    t_max[k[i]] = max(t[i], t_max[k[i]]);
+  }
+
+  return(t_max);
+}
\ No newline at end of file
diff --git a/models/include/gq_decl_calibration.stan b/models/include/gq_decl_calibration.stan
new file mode 100644
index 0000000..7e879a7
--- /dev/null
+++ b/models/include/gq_decl_calibration.stan
@@ -0,0 +1,5 @@
+// Generated quantities declaration: calibration
+
+// Replications of the scores as calibrated measurements
+matrix[N, D] y_cal_rep; // Replications (of the entire time-series, not just observations)
+matrix[N, N_agg] agg_cal_rep; // Replications of aggregates
diff --git a/models/include/gq_decl_dailytreat.stan b/models/include/gq_decl_dailytreat.stan
new file mode 100644
index 0000000..3841343
--- /dev/null
+++ b/models/include/gq_decl_dailytreat.stan
@@ -0,0 +1,5 @@
+// Generated quantities declaration: daily treatment usage
+
+// Replications of the treatments
+int treat_rep[N, D_treat];
+int treat2_rep[N, D_treat];
diff --git a/models/include/gq_state_calibration.stan b/models/include/gq_state_calibration.stan
new file mode 100644
index 0000000..33d5d10
--- /dev/null
+++ b/models/include/gq_state_calibration.stan
@@ -0,0 +1,10 @@
+// Generated quantities statement: calibration
+
+// Replications of the scores as calibrated measurements
+for (i in 1:N) {
+  for (d in 1:D) {
+    y_cal_rep[i, d] = (y_lat[i][d] + bias[i][d]) / s_cal[d]; // Latent score for calibrated measurement
+    y_cal_rep[i, d] = ordered_logistic_rng(y_cal_rep[i, d], segment(z_cal_ct, id_ct[d, 1], M[d])) - 1;
+  }
+}
+agg_cal_rep = y_cal_rep * agg_weights;
diff --git a/models/include/gq_state_dailytreat.stan b/models/include/gq_state_dailytreat.stan
new file mode 100644
index 0000000..651a628
--- /dev/null
+++ b/models/include/gq_state_dailytreat.stan
@@ -0,0 +1,21 @@
+// Generated quantities statement: daily treatment usage
+
+// Replications of the treatments
+for (d in 1:D_treat) {
+  for (i in 1:N) {
+    treat_rep[i, d] = bernoulli_rng(p_treat[i][d]);
+  }
+
+  for (k in 1:N_pt) {
+    for (t in id_ts[k, 1]:(id_ts[k, 1] + 1)) {
+      if (treat2[t, d] > -1) {
+        treat2_rep[t, d] = treat2[t, d];
+      } else {
+        treat2_rep[t, d] = bernoulli_rng(1 - (1 - ss1[k][d])^3);
+      }
+    }
+    for (t in (2 + id_ts[k, 1]):id_ts[k, 2]) {
+      treat2_rep[t, d] = 1 - (1 - treat_rep[t, d]) * (1 - treat_rep[t -
1, d]) * (1 - treat_rep[t - 2, d]); + } + } +} \ No newline at end of file diff --git a/models/include/model_calibration.stan b/models/include/model_calibration.stan new file mode 100644 index 0000000..241fc3a --- /dev/null +++ b/models/include/model_calibration.stan @@ -0,0 +1,13 @@ +// Model: calibration + +// Priors +bias0 ~ normal(prior_bias0[1], prior_bias0[2]); +tau_bias ~ lognormal(prior_tau_bias[1], prior_tau_bias[2]); + +// Likelihood +if (run == 1) { + for (i in 1:N_cal) { + yc_cal[i] ~ ordered_logistic((y_lat[idx_cal[i]][d_cal[i]] + bias[idx_cal[i]][d_cal[i]]) / s_cal[d_cal[i]], + segment(z_cal_ct, id_ct[d_cal[i], 1], M[d_cal[i]])); + } +} diff --git a/models/include/model_dailytreat.stan b/models/include/model_dailytreat.stan new file mode 100644 index 0000000..db66acc --- /dev/null +++ b/models/include/model_dailytreat.stan @@ -0,0 +1,34 @@ +// Model: daily treatment usage + +// Priors +for (k in 1:N_pt) { + eta_p01[k] ~ std_normal(); + eta_p10[k] ~ std_normal(); +} +mu_logit_p01 ~ normal(prior_mu_logit_p01[1], prior_mu_logit_p01[2]); +sigma_logit_p01 ~ normal(prior_sigma_logit_p01[1], prior_sigma_logit_p01[2]); +mu_logit_p10 ~ normal(prior_mu_logit_p10[1], prior_mu_logit_p10[2]); +sigma_logit_p10 ~ normal(prior_sigma_logit_p10[1], prior_sigma_logit_p10[2]); + +// Likelihood +if (run == 1) { + for (d in 1:D_treat) { + for (k in 1:N_pt) { + // Likelihood for initial condition + if (treat[id_ts[k, 1], d] != -1) { + treat[id_ts[k, 1], d] ~ bernoulli(ss1[k][d]); + } + for (t in (1 + id_ts[k, 1]):id_ts[k, 2]) { + if (treat[t, d] == -1) { + // If treat is missing and treat2[t] == 1, probability that treatment was used at least once during the past two days + if (treat2[t, d] == 1 && t > 1 + id_ts[k, 1]) { + 1 ~ bernoulli(1 - (1 - p_treat[t][d]) * (1 - p_treat[t - 1][d]) * (1 - p_treat[t - 2][d])); + } + } else { + // When treatment is observed, Markov Chain likelihood + treat[t, d] ~ bernoulli(p11[k][d] * p_treat[t - 1][d] + p01[k][d] * (1 - p_treat[t - 1][d])); + } + } + } + } +} \ No newline at end of file diff --git a/models/include/model_powerprior.stan b/models/include/model_powerprior.stan new file mode 100644 index 0000000..8c839bd --- /dev/null +++ b/models/include/model_powerprior.stan @@ -0,0 +1,20 @@ +// Model: power prior + +if (a0 > 0) { + for (d in 1:D) { + target += a0 * normal_lpdf(sigma_meas[d] | historical_sigma_meas[d, 1], historical_sigma_meas[d, 2]); + target += a0 * normal_lpdf(sigma_lat[d] | historical_sigma_lat[d, 1], historical_sigma_lat[d, 2]); + target += a0 * normal_lpdf(mu_y0[d] | historical_mu_y0[d, 1], historical_mu_y0[d, 2]); + target += a0 * normal_lpdf(sigma_y0[d] | historical_sigma_y0[d, 1], historical_sigma_y0[d, 2]); + } + for (d in 1:D1) { + for (i in 1:(M1 - 1)) { + target += a0 * normal_lpdf(delta1[d, i] | historical_delta1[d, i, 1], historical_delta1[d, i, 2]); + } + } + for (d in 1:D2) { + for (i in 1:(M2 - 1)) { + target += a0 * normal_lpdf(delta2[d, i] | historical_delta2[d, i, 1], historical_delta2[d, i, 2]); + } + } +} diff --git a/models/include/parameters_calibration.stan b/models/include/parameters_calibration.stan new file mode 100644 index 0000000..82fc430 --- /dev/null +++ b/models/include/parameters_calibration.stan @@ -0,0 +1,4 @@ +// Parameters: calibration + +vector[D] bias0; // Initial bias (normalised) +vector[D] tau_bias; // Time constant \ No newline at end of file diff --git a/models/include/parameters_dailytreat.stan b/models/include/parameters_dailytreat.stan new file mode 100644 index 0000000..14ff494 --- /dev/null +++ 
b/models/include/parameters_dailytreat.stan @@ -0,0 +1,8 @@ +// Parameters declaration: daily treatment usage + +vector[D_treat] eta_p01[N_pt]; +vector[D_treat] mu_logit_p01; +vector[D_treat] sigma_logit_p01; +vector[D_treat] eta_p10[N_pt]; +vector[D_treat] mu_logit_p10; +vector[D_treat] sigma_logit_p10; diff --git a/models/include/tdata_decl_calibration.stan b/models/include/tdata_decl_calibration.stan new file mode 100644 index 0000000..b4582b3 --- /dev/null +++ b/models/include/tdata_decl_calibration.stan @@ -0,0 +1,4 @@ +// Transformed data declaration: calibration + +int idx_cal[N_cal]; // index of calibrations +int yc_cal[N_cal]; // Categorical y_cal diff --git a/models/include/tdata_decl_dailytreat.stan b/models/include/tdata_decl_dailytreat.stan new file mode 100644 index 0000000..f3669b1 --- /dev/null +++ b/models/include/tdata_decl_dailytreat.stan @@ -0,0 +1,5 @@ +// Transformed data declaration: daily treatment usage + +int idx_treat2[N_treat2]; // index of non-missing observation for treat2 +int treat2[N, D_treat]; // Whether treatment was used within the past two days +int treat[N, D_treat]; // Daily treatment usage diff --git a/models/include/tdata_state_calibration.stan b/models/include/tdata_state_calibration.stan new file mode 100644 index 0000000..f547896 --- /dev/null +++ b/models/include/tdata_state_calibration.stan @@ -0,0 +1,9 @@ +// Transformed data statement: calibration + +for (i in 1:N_cal) { + idx_cal[i] = id_ts[k_cal[i], 1] - 1 + t_cal[i]; +} + +for (i in 1:N_cal) { + yc_cal[i] = y_cal[i] + 1; +} diff --git a/models/include/tdata_state_dailytreat.stan b/models/include/tdata_state_dailytreat.stan new file mode 100644 index 0000000..2ac4b08 --- /dev/null +++ b/models/include/tdata_state_dailytreat.stan @@ -0,0 +1,31 @@ +// Transformed data statement: daily treatment usage + +// Fill in treat (-1 means missing) +treat2 = rep_array(-1, N, D_treat); +treat = rep_array(-1, N, D_treat); +// When treat2=0 +for (i in 1:N_treat2) { + idx_treat2[i] = id_ts[k_treat2[i], 1] - 1 + t_treat2[i]; + treat2[idx_treat2[i], d_treat2[i]] = treat2_obs[i]; + if (treat2_obs[i] == 0) { + for (dt in 0:min(2, idx_treat2[i] - id_ts[k_treat2[i], 1])) { + treat[idx_treat2[i] - dt, d_treat2[i]] = 0; + } + } +} +for (d in 1:D_treat) { + for (k in 1:N_pt) { + // Transition 0 to 1 + for (t in (1 + id_ts[k, 1]):id_ts[k, 2]) { + if (treat2[t - 1, d] == 0 && treat2[t, d] == 1) { + treat[t, d] = 1; + } + } + // Transition 1 to 0 + for (t in (3 + id_ts[k, 1]):id_ts[k, 2]) { + if (treat2[t - 1, d] == 1 && treat2[t, d] == 0) { + treat[t - 3, d] = 1; + } + } + } +} diff --git a/models/include/tparameters_decl_calibration.stan b/models/include/tparameters_decl_calibration.stan new file mode 100644 index 0000000..beed4aa --- /dev/null +++ b/models/include/tparameters_decl_calibration.stan @@ -0,0 +1,6 @@ +// Transformed parameters declaration: calibration + +vector[D] s_cal = s .* precision_cal; // scale of measurement distribution +vector[D] bias0_abs = bias0 .* to_vector(M); // Initial bias in original scale +vector[D] bias[N]; // Calibration bias +vector[size_ct] z_cal_ct; // Cutpoints for calibration in affinity space diff --git a/models/include/tparameters_decl_dailytreat.stan b/models/include/tparameters_decl_dailytreat.stan new file mode 100644 index 0000000..14256c0 --- /dev/null +++ b/models/include/tparameters_decl_dailytreat.stan @@ -0,0 +1,8 @@ +// Transformed parameters declaration: daily treatment usage + +vector[D_treat] p_treat[N]; // Probability of using treatment +vector[D_treat] 
p01[N_pt]; +vector[D_treat] p10[N_pt]; +vector[D_treat] ss1[N_pt]; +vector[D_treat] p00[N_pt]; +vector[D_treat] p11[N_pt]; diff --git a/models/include/tparameters_state_calibration.stan b/models/include/tparameters_state_calibration.stan new file mode 100644 index 0000000..de70649 --- /dev/null +++ b/models/include/tparameters_state_calibration.stan @@ -0,0 +1,12 @@ +// Transformed parameters statement: calibration + +for (d in 1:D) { + z_cal_ct[id_ct[d, 1]:id_ct[d, 2]] = ct[id_ct[d, 1]:id_ct[d, 2]] / s_cal[d]; +} + +for (k in 1:N_pt) { + for (t in id_ts[k, 1]:id_ts[k, 2]) { + bias[t] = to_vector(include_bias) .* bias0_abs .* exp(-(t - id_ts[k, 1]) ./ tau_bias); + } +} + diff --git a/models/include/tparameters_state_dailytreat.stan b/models/include/tparameters_state_dailytreat.stan new file mode 100644 index 0000000..eb77908 --- /dev/null +++ b/models/include/tparameters_state_dailytreat.stan @@ -0,0 +1,24 @@ +// Transformed parameters statement: daily treatment usage + +for (k in 1:N_pt) { + p01[k] = inv_logit(mu_logit_p01 + sigma_logit_p01 .* eta_p01[k]); + p10[k] = inv_logit(mu_logit_p10 + sigma_logit_p10 .* eta_p10[k]); + ss1[k] = p01[k] ./ (p01[k] + p10[k]); + p00[k] = 1 - p01[k]; + p11[k] = 1 - p10[k]; +} +for (d in 1:D_treat) { + for (k in 1:N_pt) { + for (t in id_ts[k, 1]:id_ts[k, 2]) { + if (treat[t, d] == -1) { + if (t == id_ts[k, 1]) { + p_treat[t][d] = ss1[k][d]; // Prior for initial condition + } else { + p_treat[t][d] = p11[k][d] * p_treat[t - 1][d] + p01[k][d] * (1 - p_treat[t - 1][d]); // Markov Chain prior + } + } else { + p_treat[t][d] = treat[t, d]; + } + } + } +} \ No newline at end of file diff --git a/renv.lock b/renv.lock new file mode 100644 index 0000000..932727c --- /dev/null +++ b/renv.lock @@ -0,0 +1,1680 @@ +{ + "R": { + "Version": "4.1.3", + "Repositories": [ + { + "Name": "CRAN", + "URL": "https://cloud.r-project.org" + } + ] + }, + "Packages": { + "BH": { + "Package": "BH", + "Version": "1.78.0-0", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "4e348572ffcaa2fb1e610e7a941f6f3a", + "Requirements": [] + }, + "EczemaPred": { + "Package": "EczemaPred", + "Version": "0.3.0", + "Source": "GitHub", + "RemoteType": "github", + "RemoteHost": "api.github.com", + "RemoteRepo": "EczemaPred", + "RemoteUsername": "ghurault", + "RemoteRef": "v0.3.0", + "RemoteSha": "4a500e1c612eea7c6de1ffc3be3a4f47b1b823bf", + "Hash": "2d7ef4d1481f60b89e5553b32c0caa6e", + "Requirements": [ + "BH", + "HuraultMisc", + "Rcpp", + "RcppEigen", + "RcppParallel", + "StanHeaders", + "dplyr", + "ggplot2", + "magrittr", + "markovchain", + "rstan", + "rstantools", + "scoringRules", + "tidyr" + ] + }, + "EczemaPredPOSCORAD": { + "Package": "EczemaPredPOSCORAD", + "Version": "0.1.0", + "Source": "GitHub", + "RemoteType": "github", + "Remotes": "github::ghurault/HuraultMisc, github::ghurault/EczemaPred", + "RemoteHost": "api.github.com", + "RemoteRepo": "EczemaPredPOSCORAD", + "RemoteUsername": "ghurault", + "RemoteRef": "HEAD", + "RemoteSha": "509464ef54be59eb0b40589beeed36724ce19251", + "Hash": "2f25a4f81e5ba0cd3eaedee927686e56", + "Requirements": [ + "EczemaPred", + "HuraultMisc", + "dplyr", + "gamm4", + "ggplot2", + "rlang", + "tidyr" + ] + }, + "Formula": { + "Package": "Formula", + "Version": "1.2-4", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "cc8c8c4d61346cde1ca60030ff9c241f", + "Requirements": [] + }, + "HDInterval": { + "Package": "HDInterval", + "Version": "0.2.2", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "3b986a53325a1d95610b8d86a284dac0", + 
"Requirements": [] + }, + "Hmisc": { + "Package": "Hmisc", + "Version": "4.6-0", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "642812f46e5fb638c01150f2a2b5db44", + "Requirements": [ + "Formula", + "base64enc", + "cluster", + "data.table", + "foreign", + "ggplot2", + "gridExtra", + "gtable", + "htmlTable", + "htmltools", + "lattice", + "latticeExtra", + "nnet", + "rpart", + "survival", + "viridis" + ] + }, + "HuraultMisc": { + "Package": "HuraultMisc", + "Version": "1.1.1", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "5efe31546a49901d3b37846290cfcec1", + "Requirements": [ + "HDInterval", + "Hmisc", + "cowplot", + "dplyr", + "ggplot2", + "magrittr", + "reshape2", + "rstan", + "tidyr" + ] + }, + "MASS": { + "Package": "MASS", + "Version": "7.3-55", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "c5232ffb549f6d7a04a152c34ca1353d", + "Requirements": [] + }, + "Matrix": { + "Package": "Matrix", + "Version": "1.4-0", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "130c0caba175739d98f2963c6a407cf6", + "Requirements": [ + "lattice" + ] + }, + "R6": { + "Package": "R6", + "Version": "2.5.1", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "470851b6d5d0ac559e9d01bb352b4021", + "Requirements": [] + }, + "RColorBrewer": { + "Package": "RColorBrewer", + "Version": "1.1-2", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "e031418365a7f7a766181ab5a41a5716", + "Requirements": [] + }, + "Rcpp": { + "Package": "Rcpp", + "Version": "1.0.8.3", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "32e79b908fda56ee57fe518a8d37b864", + "Requirements": [] + }, + "RcppArmadillo": { + "Package": "RcppArmadillo", + "Version": "0.10.8.1.0", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "303dcd8a3fca86087e341ed5cc360abf", + "Requirements": [ + "Rcpp" + ] + }, + "RcppEigen": { + "Package": "RcppEigen", + "Version": "0.3.3.9.1", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "ddfa72a87fdf4c80466a20818be91d00", + "Requirements": [ + "Matrix", + "Rcpp" + ] + }, + "RcppParallel": { + "Package": "RcppParallel", + "Version": "5.1.5", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "f3e94e34ff656a7c8336ce01207bc2b8", + "Requirements": [] + }, + "StanHeaders": { + "Package": "StanHeaders", + "Version": "2.21.0-7", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "0459d4dd7a8c239be18469a30c23dd4b", + "Requirements": [ + "RcppEigen", + "RcppParallel" + ] + }, + "TanakaData": { + "Package": "TanakaData", + "Version": "2.0.0.9000", + "Source": "GitHub", + "RemoteType": "github", + "RemoteHost": "github.ic.ac.uk/api/v3", + "RemoteRepo": "TanakaData", + "RemoteUsername": "tanaka-group", + "RemoteRef": "HEAD", + "RemoteSha": "8e7c1430e77b9395ba62191a287647b4acc0ae13", + "Hash": "a854f2accd149bb0c01d73f419d3d774", + "Requirements": [ + "HuraultMisc", + "cowplot", + "data.table", + "dplyr", + "ggplot2", + "magrittr", + "readr", + "readxl", + "reshape2", + "scales", + "tidyr" + ] + }, + "abind": { + "Package": "abind", + "Version": "1.4-5", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "4f57884290cc75ab22f4af9e9d4ca862", + "Requirements": [] + }, + "backports": { + "Package": "backports", + "Version": "1.4.1", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "c39fbec8a30d23e721980b8afb31984c", + "Requirements": [] + }, + "base64enc": { + "Package": "base64enc", + "Version": "0.1-3", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "543776ae6848fde2f48ff3816d0628bc", 
+ "Requirements": [] + }, + "bit": { + "Package": "bit", + "Version": "4.0.4", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "f36715f14d94678eea9933af927bc15d", + "Requirements": [] + }, + "bit64": { + "Package": "bit64", + "Version": "4.0.5", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "9fe98599ca456d6552421db0d6772d8f", + "Requirements": [ + "bit" + ] + }, + "boot": { + "Package": "boot", + "Version": "1.3-28", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "0baa960e3b49c6176a4f42addcbacc59", + "Requirements": [] + }, + "brio": { + "Package": "brio", + "Version": "1.1.3", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "976cf154dfb043c012d87cddd8bca363", + "Requirements": [] + }, + "bslib": { + "Package": "bslib", + "Version": "0.3.1", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "56ae7e1987b340186a8a5a157c2ec358", + "Requirements": [ + "htmltools", + "jquerylib", + "jsonlite", + "rlang", + "sass" + ] + }, + "cachem": { + "Package": "cachem", + "Version": "1.0.6", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "648c5b3d71e6a37e3043617489a0a0e9", + "Requirements": [ + "fastmap", + "rlang" + ] + }, + "callr": { + "Package": "callr", + "Version": "3.7.0", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "461aa75a11ce2400245190ef5d3995df", + "Requirements": [ + "R6", + "processx" + ] + }, + "cellranger": { + "Package": "cellranger", + "Version": "1.1.0", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "f61dbaec772ccd2e17705c1e872e9e7c", + "Requirements": [ + "rematch", + "tibble" + ] + }, + "checkmate": { + "Package": "checkmate", + "Version": "2.0.0", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "a667800d5f0350371bedeb8b8b950289", + "Requirements": [ + "backports" + ] + }, + "cli": { + "Package": "cli", + "Version": "3.2.0", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "1bdb126893e9ce6aae50ad1d6fc32faf", + "Requirements": [ + "glue" + ] + }, + "clipr": { + "Package": "clipr", + "Version": "0.7.1", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "ebaa97ac99cc2daf04e77eecc7b781d7", + "Requirements": [] + }, + "cluster": { + "Package": "cluster", + "Version": "2.1.2", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "ce49bfe5bc0b3ecd43a01fe1b01c2243", + "Requirements": [] + }, + "codetools": { + "Package": "codetools", + "Version": "0.2-18", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "019388fc48e48b3da0d3a76ff94608a8", + "Requirements": [] + }, + "colorspace": { + "Package": "colorspace", + "Version": "2.0-3", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "bb4341986bc8b914f0f0acf2e4a3f2f7", + "Requirements": [] + }, + "commonmark": { + "Package": "commonmark", + "Version": "1.7", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "0f22be39ec1d141fd03683c06f3a6e67", + "Requirements": [] + }, + "corrplot": { + "Package": "corrplot", + "Version": "0.92", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "fcf11a91936fd5047b2ee9bc00595e36", + "Requirements": [] + }, + "cowplot": { + "Package": "cowplot", + "Version": "1.1.1", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "b418e8423699d11c7f2087c2bfd07da2", + "Requirements": [ + "ggplot2", + "gtable", + "rlang", + "scales" + ] + }, + "cpp11": { + "Package": "cpp11", + "Version": "0.4.2", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "fa53ce256cd280f468c080a58ea5ba8c", + "Requirements": [] + }, + "crayon": { + 
"Package": "crayon", + "Version": "1.5.1", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "8dc45fd8a1ee067a92b85ef274e66d6a", + "Requirements": [] + }, + "data.table": { + "Package": "data.table", + "Version": "1.14.2", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "36b67b5adf57b292923f5659f5f0c853", + "Requirements": [] + }, + "desc": { + "Package": "desc", + "Version": "1.4.1", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "eebd27ee58fcc58714eedb7aa07d8ad1", + "Requirements": [ + "R6", + "cli", + "rprojroot" + ] + }, + "diffobj": { + "Package": "diffobj", + "Version": "0.3.5", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "bcaa8b95f8d7d01a5dedfd959ce88ab8", + "Requirements": [ + "crayon" + ] + }, + "digest": { + "Package": "digest", + "Version": "0.6.29", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "cf6b206a045a684728c3267ef7596190", + "Requirements": [] + }, + "doParallel": { + "Package": "doParallel", + "Version": "1.0.17", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "451e5edf411987991ab6a5410c45011f", + "Requirements": [ + "foreach", + "iterators" + ] + }, + "dplyr": { + "Package": "dplyr", + "Version": "1.0.8", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "ef47665e64228a17609d6df877bf86f2", + "Requirements": [ + "R6", + "generics", + "glue", + "lifecycle", + "magrittr", + "pillar", + "rlang", + "tibble", + "tidyselect", + "vctrs" + ] + }, + "ellipsis": { + "Package": "ellipsis", + "Version": "0.3.2", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "bb0eec2fe32e88d9e2836c2f73ea2077", + "Requirements": [ + "rlang" + ] + }, + "evaluate": { + "Package": "evaluate", + "Version": "0.15", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "699a7a93d08c962d9f8950b2d7a227f1", + "Requirements": [] + }, + "expm": { + "Package": "expm", + "Version": "0.999-6", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "1bb0f92b6125ea03dd7142f8eaf36bf5", + "Requirements": [ + "Matrix" + ] + }, + "fansi": { + "Package": "fansi", + "Version": "1.0.3", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "83a8afdbe71839506baa9f90eebad7ec", + "Requirements": [] + }, + "farver": { + "Package": "farver", + "Version": "2.1.0", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "c98eb5133d9cb9e1622b8691487f11bb", + "Requirements": [] + }, + "fastmap": { + "Package": "fastmap", + "Version": "1.1.0", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "77bd60a6157420d4ffa93b27cf6a58b8", + "Requirements": [] + }, + "fontawesome": { + "Package": "fontawesome", + "Version": "0.2.2", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "55624ed409e46c5f358b2c060be87f67", + "Requirements": [ + "htmltools", + "rlang" + ] + }, + "forcats": { + "Package": "forcats", + "Version": "0.5.1", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "81c3244cab67468aac4c60550832655d", + "Requirements": [ + "ellipsis", + "magrittr", + "rlang", + "tibble" + ] + }, + "foreach": { + "Package": "foreach", + "Version": "1.5.2", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "618609b42c9406731ead03adf5379850", + "Requirements": [ + "codetools", + "iterators" + ] + }, + "foreign": { + "Package": "foreign", + "Version": "0.8-82", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "32b25c97ce306a760c4d9f787991b5d9", + "Requirements": [] + }, + "fs": { + "Package": "fs", + "Version": "1.5.2", + "Source": "Repository", + "Repository": "CRAN", + "Hash": 
"7c89603d81793f0d5486d91ab1fc6f1d", + "Requirements": [] + }, + "gamm4": { + "Package": "gamm4", + "Version": "0.2-6", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "07fd04f71ff5fbf2a69c74e4ec128d65", + "Requirements": [ + "Matrix", + "lme4", + "mgcv" + ] + }, + "generics": { + "Package": "generics", + "Version": "0.1.2", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "177475892cf4a55865868527654a7741", + "Requirements": [] + }, + "ggplot2": { + "Package": "ggplot2", + "Version": "3.3.5", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "d7566c471c7b17e095dd023b9ef155ad", + "Requirements": [ + "MASS", + "digest", + "glue", + "gtable", + "isoband", + "mgcv", + "rlang", + "scales", + "tibble", + "withr" + ] + }, + "ggrepel": { + "Package": "ggrepel", + "Version": "0.9.1", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "08ab869f37e6a7741a64ab9069bcb67d", + "Requirements": [ + "Rcpp", + "ggplot2", + "rlang", + "scales" + ] + }, + "glue": { + "Package": "glue", + "Version": "1.6.2", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "4f2596dfb05dac67b9dc558e5c6fba2e", + "Requirements": [] + }, + "gridExtra": { + "Package": "gridExtra", + "Version": "2.3", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "7d7f283939f563670a697165b2cf5560", + "Requirements": [ + "gtable" + ] + }, + "gtable": { + "Package": "gtable", + "Version": "0.3.0", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "ac5c6baf7822ce8732b343f14c072c4d", + "Requirements": [] + }, + "here": { + "Package": "here", + "Version": "1.0.1", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "24b224366f9c2e7534d2344d10d59211", + "Requirements": [ + "rprojroot" + ] + }, + "highr": { + "Package": "highr", + "Version": "0.9", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "8eb36c8125038e648e5d111c0d7b2ed4", + "Requirements": [ + "xfun" + ] + }, + "hms": { + "Package": "hms", + "Version": "1.1.1", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "5b8a2dd0fdbe2ab4f6081e6c7be6dfca", + "Requirements": [ + "ellipsis", + "lifecycle", + "pkgconfig", + "rlang", + "vctrs" + ] + }, + "htmlTable": { + "Package": "htmlTable", + "Version": "2.4.0", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "1acb74d3f9e0ade3b13282195baab090", + "Requirements": [ + "checkmate", + "htmltools", + "htmlwidgets", + "knitr", + "magrittr", + "rstudioapi", + "stringr" + ] + }, + "htmltools": { + "Package": "htmltools", + "Version": "0.5.2", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "526c484233f42522278ab06fb185cb26", + "Requirements": [ + "base64enc", + "digest", + "fastmap", + "rlang" + ] + }, + "htmlwidgets": { + "Package": "htmlwidgets", + "Version": "1.5.4", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "76147821cd3fcd8c4b04e1ef0498e7fb", + "Requirements": [ + "htmltools", + "jsonlite", + "yaml" + ] + }, + "httpuv": { + "Package": "httpuv", + "Version": "1.6.5", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "97fe71f0a4a1c9890e6c2128afa04bc0", + "Requirements": [ + "R6", + "Rcpp", + "later", + "promises" + ] + }, + "igraph": { + "Package": "igraph", + "Version": "1.2.11", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "1d10cd31c2979f9c819ffe4d16b9dc2b", + "Requirements": [ + "Matrix", + "magrittr", + "pkgconfig" + ] + }, + "inline": { + "Package": "inline", + "Version": "0.3.19", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "1deaf1de3eac7e1d3377954b3a283652", + 
"Requirements": [] + }, + "isoband": { + "Package": "isoband", + "Version": "0.2.5", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "7ab57a6de7f48a8dc84910d1eca42883", + "Requirements": [] + }, + "iterators": { + "Package": "iterators", + "Version": "1.0.14", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "8954069286b4b2b0d023d1b288dce978", + "Requirements": [] + }, + "jpeg": { + "Package": "jpeg", + "Version": "0.1-9", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "441ee36360a57b363f4fa3df0c364630", + "Requirements": [] + }, + "jquerylib": { + "Package": "jquerylib", + "Version": "0.1.4", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "5aab57a3bd297eee1c1d862735972182", + "Requirements": [ + "htmltools" + ] + }, + "jsonlite": { + "Package": "jsonlite", + "Version": "1.8.0", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "d07e729b27b372429d42d24d503613a0", + "Requirements": [] + }, + "knitr": { + "Package": "knitr", + "Version": "1.38", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "10b3dc3c6acb925910edda5d0543b3a2", + "Requirements": [ + "evaluate", + "highr", + "stringr", + "xfun", + "yaml" + ] + }, + "labeling": { + "Package": "labeling", + "Version": "0.4.2", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "3d5108641f47470611a32d0bdf357a72", + "Requirements": [] + }, + "later": { + "Package": "later", + "Version": "1.3.0", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "7e7b457d7766bc47f2a5f21cc2984f8e", + "Requirements": [ + "Rcpp", + "rlang" + ] + }, + "latex2exp": { + "Package": "latex2exp", + "Version": "0.9.3", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "51e7b9701693f88f7998fe00f35cea3f", + "Requirements": [ + "magrittr", + "stringr" + ] + }, + "lattice": { + "Package": "lattice", + "Version": "0.20-45", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "b64cdbb2b340437c4ee047a1f4c4377b", + "Requirements": [] + }, + "latticeExtra": { + "Package": "latticeExtra", + "Version": "0.6-29", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "590829599d6182cf7461787af34666ee", + "Requirements": [ + "RColorBrewer", + "jpeg", + "lattice", + "png" + ] + }, + "lifecycle": { + "Package": "lifecycle", + "Version": "1.0.1", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "a6b6d352e3ed897373ab19d8395c98d0", + "Requirements": [ + "glue", + "rlang" + ] + }, + "lme4": { + "Package": "lme4", + "Version": "1.1-28", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "15b3e9b19ecd9c2ea0d6a3d32248063c", + "Requirements": [ + "MASS", + "Matrix", + "Rcpp", + "RcppEigen", + "boot", + "lattice", + "minqa", + "nlme", + "nloptr" + ] + }, + "loo": { + "Package": "loo", + "Version": "2.5.1", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "31b53df2593afd03a4969ae028c437b1", + "Requirements": [ + "checkmate", + "matrixStats" + ] + }, + "magrittr": { + "Package": "magrittr", + "Version": "2.0.3", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "7ce2733a9826b3aeb1775d56fd305472", + "Requirements": [] + }, + "markovchain": { + "Package": "markovchain", + "Version": "0.8.6", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "69cfd3234ae7b62f711b142edba11df2", + "Requirements": [ + "Matrix", + "Rcpp", + "RcppArmadillo", + "RcppParallel", + "expm", + "igraph", + "matlab" + ] + }, + "matlab": { + "Package": "matlab", + "Version": "1.0.2", + "Source": "Repository", + "Repository": "CRAN", + "Hash": 
"7e17d4b237b82050ed00dedd121f439c", + "Requirements": [] + }, + "matrixStats": { + "Package": "matrixStats", + "Version": "0.61.0", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "b8e6221fc11247b12ab1b055a6f66c27", + "Requirements": [] + }, + "mgcv": { + "Package": "mgcv", + "Version": "1.8-39", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "055265005c238024e306fe0b600c89ff", + "Requirements": [ + "Matrix", + "nlme" + ] + }, + "mime": { + "Package": "mime", + "Version": "0.12", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "18e9c28c1d3ca1560ce30658b22ce104", + "Requirements": [] + }, + "minqa": { + "Package": "minqa", + "Version": "1.2.4", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "eaee7d2a6f3ed4491df868611cb064cc", + "Requirements": [ + "Rcpp" + ] + }, + "munsell": { + "Package": "munsell", + "Version": "0.5.0", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "6dfe8bf774944bd5595785e3229d8771", + "Requirements": [ + "colorspace" + ] + }, + "nlme": { + "Package": "nlme", + "Version": "3.1-155", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "74ad940dccc9e977189a5afe5fcdb7ba", + "Requirements": [ + "lattice" + ] + }, + "nloptr": { + "Package": "nloptr", + "Version": "2.0.0", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "6e45c045fea34a9d0d1ceaa6fb7c4e91", + "Requirements": [ + "testthat" + ] + }, + "nnet": { + "Package": "nnet", + "Version": "7.3-17", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "cb1d8d9f300a7e536b89c8a88c53f610", + "Requirements": [] + }, + "pillar": { + "Package": "pillar", + "Version": "1.7.0", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "51dfc97e1b7069e9f7e6f83f3589c22e", + "Requirements": [ + "cli", + "crayon", + "ellipsis", + "fansi", + "glue", + "lifecycle", + "rlang", + "utf8", + "vctrs" + ] + }, + "pkgbuild": { + "Package": "pkgbuild", + "Version": "1.3.1", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "66d2adfed274daf81ccfe77d974c3b9b", + "Requirements": [ + "R6", + "callr", + "cli", + "crayon", + "desc", + "prettyunits", + "rprojroot", + "withr" + ] + }, + "pkgconfig": { + "Package": "pkgconfig", + "Version": "2.0.3", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "01f28d4278f15c76cddbea05899c5d6f", + "Requirements": [] + }, + "pkgload": { + "Package": "pkgload", + "Version": "1.2.4", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "7533cd805940821bf23eaf3c8d4c1735", + "Requirements": [ + "cli", + "crayon", + "desc", + "rlang", + "rprojroot", + "rstudioapi", + "withr" + ] + }, + "plyr": { + "Package": "plyr", + "Version": "1.8.7", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "9c17c6ee41639ebdc1d7266546d3b627", + "Requirements": [ + "Rcpp" + ] + }, + "png": { + "Package": "png", + "Version": "0.1-7", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "03b7076c234cb3331288919983326c55", + "Requirements": [] + }, + "praise": { + "Package": "praise", + "Version": "1.0.0", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "a555924add98c99d2f411e37e7d25e9f", + "Requirements": [] + }, + "prettyunits": { + "Package": "prettyunits", + "Version": "1.1.1", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "95ef9167b75dde9d2ccc3c7528393e7e", + "Requirements": [] + }, + "processx": { + "Package": "processx", + "Version": "3.5.3", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "8bbae1a548d0d3fdf6647bdd9d35bf6d", + "Requirements": [ + "R6", + "ps" + ] + }, 
+ "progress": { + "Package": "progress", + "Version": "1.2.2", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "14dc9f7a3c91ebb14ec5bb9208a07061", + "Requirements": [ + "R6", + "crayon", + "hms", + "prettyunits" + ] + }, + "promises": { + "Package": "promises", + "Version": "1.2.0.1", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "4ab2c43adb4d4699cf3690acd378d75d", + "Requirements": [ + "R6", + "Rcpp", + "later", + "magrittr", + "rlang" + ] + }, + "ps": { + "Package": "ps", + "Version": "1.6.0", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "32620e2001c1dce1af49c49dccbb9420", + "Requirements": [] + }, + "purrr": { + "Package": "purrr", + "Version": "0.3.4", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "97def703420c8ab10d8f0e6c72101e02", + "Requirements": [ + "magrittr", + "rlang" + ] + }, + "rappdirs": { + "Package": "rappdirs", + "Version": "0.3.3", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "5e3c5dc0b071b21fa128676560dbe94d", + "Requirements": [] + }, + "readr": { + "Package": "readr", + "Version": "2.1.2", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "9c59de1357dc209868b5feb5c9f0fe2f", + "Requirements": [ + "R6", + "cli", + "clipr", + "cpp11", + "crayon", + "hms", + "lifecycle", + "rlang", + "tibble", + "tzdb", + "vroom" + ] + }, + "readxl": { + "Package": "readxl", + "Version": "1.3.1", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "63537c483c2dbec8d9e3183b3735254a", + "Requirements": [ + "Rcpp", + "cellranger", + "progress", + "tibble" + ] + }, + "rematch": { + "Package": "rematch", + "Version": "1.0.1", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "c66b930d20bb6d858cd18e1cebcfae5c", + "Requirements": [] + }, + "rematch2": { + "Package": "rematch2", + "Version": "2.1.2", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "76c9e04c712a05848ae7a23d2f170a40", + "Requirements": [ + "tibble" + ] + }, + "renv": { + "Package": "renv", + "Version": "0.15.2", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "206c4ef8b7ad6fb1060d69aa7b9dfe69", + "Requirements": [] + }, + "reshape2": { + "Package": "reshape2", + "Version": "1.4.4", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "bb5996d0bd962d214a11140d77589917", + "Requirements": [ + "Rcpp", + "plyr", + "stringr" + ] + }, + "rlang": { + "Package": "rlang", + "Version": "1.0.2", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "04884d9a75d778aca22c7154b8333ec9", + "Requirements": [] + }, + "rmarkdown": { + "Package": "rmarkdown", + "Version": "2.11", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "320017b52d05a943981272b295750388", + "Requirements": [ + "evaluate", + "htmltools", + "jquerylib", + "jsonlite", + "knitr", + "stringr", + "tinytex", + "xfun", + "yaml" + ] + }, + "rpart": { + "Package": "rpart", + "Version": "4.1.16", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "ea3ca1d9473daabb3cd0f1b4f974c1ed", + "Requirements": [] + }, + "rprojroot": { + "Package": "rprojroot", + "Version": "2.0.2", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "249d8cd1e74a8f6a26194a91b47f21d1", + "Requirements": [] + }, + "rstan": { + "Package": "rstan", + "Version": "2.21.3", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "8fa400c2cf6409067a4515581ae4d99b", + "Requirements": [ + "BH", + "Rcpp", + "RcppEigen", + "RcppParallel", + "StanHeaders", + "ggplot2", + "gridExtra", + "inline", + "loo", + "pkgbuild" + ] + }, + "rstantools": { + "Package": 
"rstantools", + "Version": "2.1.1", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "c91e0f20c967e246cb6f2efe8c60e15b", + "Requirements": [ + "Rcpp", + "RcppParallel", + "desc" + ] + }, + "rstudioapi": { + "Package": "rstudioapi", + "Version": "0.13", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "06c85365a03fdaf699966cc1d3cf53ea", + "Requirements": [] + }, + "sass": { + "Package": "sass", + "Version": "0.4.0", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "50cf822feb64bb3977bda0b7091be623", + "Requirements": [ + "R6", + "fs", + "htmltools", + "rappdirs", + "rlang" + ] + }, + "scales": { + "Package": "scales", + "Version": "1.1.1", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "6f76f71042411426ec8df6c54f34e6dd", + "Requirements": [ + "R6", + "RColorBrewer", + "farver", + "labeling", + "lifecycle", + "munsell", + "viridisLite" + ] + }, + "scoringRules": { + "Package": "scoringRules", + "Version": "1.0.1", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "443fb7653ed5fbcf766e934850490696", + "Requirements": [ + "MASS", + "Rcpp", + "RcppArmadillo", + "knitr" + ] + }, + "shiny": { + "Package": "shiny", + "Version": "1.7.1", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "00344c227c7bd0ab5d78052c5d736c44", + "Requirements": [ + "R6", + "bslib", + "cachem", + "commonmark", + "crayon", + "ellipsis", + "fastmap", + "fontawesome", + "glue", + "htmltools", + "httpuv", + "jsonlite", + "later", + "lifecycle", + "mime", + "promises", + "rlang", + "sourcetools", + "withr", + "xtable" + ] + }, + "sourcetools": { + "Package": "sourcetools", + "Version": "0.1.7", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "947e4e02a79effa5d512473e10f41797", + "Requirements": [] + }, + "stringi": { + "Package": "stringi", + "Version": "1.7.6", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "bba431031d30789535745a9627ac9271", + "Requirements": [] + }, + "stringr": { + "Package": "stringr", + "Version": "1.4.0", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "0759e6b6c0957edb1311028a49a35e76", + "Requirements": [ + "glue", + "magrittr", + "stringi" + ] + }, + "survival": { + "Package": "survival", + "Version": "3.2-13", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "6f0a0fadc63bc6570fe172770f15bbc4", + "Requirements": [ + "Matrix" + ] + }, + "testthat": { + "Package": "testthat", + "Version": "3.1.2", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "32454e5780e8dbe31e4b61b13d8918fe", + "Requirements": [ + "R6", + "brio", + "callr", + "cli", + "crayon", + "desc", + "digest", + "ellipsis", + "evaluate", + "jsonlite", + "lifecycle", + "magrittr", + "pkgload", + "praise", + "processx", + "ps", + "rlang", + "waldo", + "withr" + ] + }, + "tibble": { + "Package": "tibble", + "Version": "3.1.6", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "8a8f02d1934dfd6431c671361510dd0b", + "Requirements": [ + "ellipsis", + "fansi", + "lifecycle", + "magrittr", + "pillar", + "pkgconfig", + "rlang", + "vctrs" + ] + }, + "tidyr": { + "Package": "tidyr", + "Version": "1.2.0", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "d8b95b7fee945d7da6888cf7eb71a49c", + "Requirements": [ + "cpp11", + "dplyr", + "ellipsis", + "glue", + "lifecycle", + "magrittr", + "purrr", + "rlang", + "tibble", + "tidyselect", + "vctrs" + ] + }, + "tidyselect": { + "Package": "tidyselect", + "Version": "1.1.2", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "17f6da8cfd7002760a859915ce7eef8f", 
+ "Requirements": [ + "ellipsis", + "glue", + "purrr", + "rlang", + "vctrs" + ] + }, + "tinytex": { + "Package": "tinytex", + "Version": "0.36", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "130fe4c61e55b271a2655b3a284a205f", + "Requirements": [ + "xfun" + ] + }, + "tzdb": { + "Package": "tzdb", + "Version": "0.2.0", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "5e069fb033daf2317bd628d3100b75c5", + "Requirements": [ + "cpp11" + ] + }, + "utf8": { + "Package": "utf8", + "Version": "1.2.2", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "c9c462b759a5cc844ae25b5942654d13", + "Requirements": [] + }, + "vctrs": { + "Package": "vctrs", + "Version": "0.4.0", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "95c2573b232eac82df562f9e300f9790", + "Requirements": [ + "cli", + "glue", + "rlang" + ] + }, + "viridis": { + "Package": "viridis", + "Version": "0.6.2", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "ee96aee95a7a563e5496f8991e9fde4b", + "Requirements": [ + "ggplot2", + "gridExtra", + "viridisLite" + ] + }, + "viridisLite": { + "Package": "viridisLite", + "Version": "0.4.0", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "55e157e2aa88161bdb0754218470d204", + "Requirements": [] + }, + "vroom": { + "Package": "vroom", + "Version": "1.5.7", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "976507b5a105bc3bdf6a5a5f29e0684f", + "Requirements": [ + "bit64", + "cli", + "cpp11", + "crayon", + "glue", + "hms", + "lifecycle", + "progress", + "rlang", + "tibble", + "tidyselect", + "tzdb", + "vctrs", + "withr" + ] + }, + "waldo": { + "Package": "waldo", + "Version": "0.4.0", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "035fba89d0c86e2113120f93301b98ad", + "Requirements": [ + "cli", + "diffobj", + "fansi", + "glue", + "rematch2", + "rlang", + "tibble" + ] + }, + "withr": { + "Package": "withr", + "Version": "2.5.0", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "c0e49a9760983e81e55cdd9be92e7182", + "Requirements": [] + }, + "xfun": { + "Package": "xfun", + "Version": "0.30", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "e83f48136b041845e50a6658feffb197", + "Requirements": [] + }, + "xtable": { + "Package": "xtable", + "Version": "1.8-4", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "b8acdf8af494d9ec19ccb2481a9b11c2", + "Requirements": [] + }, + "yaml": { + "Package": "yaml", + "Version": "2.3.5", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "458bb38374d73bf83b1bb85e353da200", + "Requirements": [] + } + } +} diff --git a/renv/.gitignore b/renv/.gitignore new file mode 100644 index 0000000..275e4ca --- /dev/null +++ b/renv/.gitignore @@ -0,0 +1,6 @@ +library/ +local/ +cellar/ +lock/ +python/ +staging/ diff --git a/renv/activate.R b/renv/activate.R new file mode 100644 index 0000000..33833ee --- /dev/null +++ b/renv/activate.R @@ -0,0 +1,902 @@ + +local({ + + # the requested version of renv + version <- "0.15.2" + + # the project directory + project <- getwd() + + # figure out whether the autoloader is enabled + enabled <- local({ + + # first, check config option + override <- getOption("renv.config.autoloader.enabled") + if (!is.null(override)) + return(override) + + # next, check environment variables + # TODO: prefer using the configuration one in the future + envvars <- c( + "RENV_CONFIG_AUTOLOADER_ENABLED", + "RENV_AUTOLOADER_ENABLED", + "RENV_ACTIVATE_PROJECT" + ) + + for (envvar in envvars) { + envval <- Sys.getenv(envvar, unset = NA) + 
if (!is.na(envval)) + return(tolower(envval) %in% c("true", "t", "1")) + } + + # enable by default + TRUE + + }) + + if (!enabled) + return(FALSE) + + # avoid recursion + if (identical(getOption("renv.autoloader.running"), TRUE)) { + warning("ignoring recursive attempt to run renv autoloader") + return(invisible(TRUE)) + } + + # signal that we're loading renv during R startup + options(renv.autoloader.running = TRUE) + on.exit(options(renv.autoloader.running = NULL), add = TRUE) + + # signal that we've consented to use renv + options(renv.consent = TRUE) + + # load the 'utils' package eagerly -- this ensures that renv shims, which + # mask 'utils' packages, will come first on the search path + library(utils, lib.loc = .Library) + + # check to see if renv has already been loaded + if ("renv" %in% loadedNamespaces()) { + + # if renv has already been loaded, and it's the requested version of renv, + # nothing to do + spec <- .getNamespaceInfo(.getNamespace("renv"), "spec") + if (identical(spec[["version"]], version)) + return(invisible(TRUE)) + + # otherwise, unload and attempt to load the correct version of renv + unloadNamespace("renv") + + } + + # load bootstrap tools + `%||%` <- function(x, y) { + if (is.environment(x) || length(x)) x else y + } + + bootstrap <- function(version, library) { + + # attempt to download renv + tarball <- tryCatch(renv_bootstrap_download(version), error = identity) + if (inherits(tarball, "error")) + stop("failed to download renv ", version) + + # now attempt to install + status <- tryCatch(renv_bootstrap_install(version, tarball, library), error = identity) + if (inherits(status, "error")) + stop("failed to install renv ", version) + + } + + renv_bootstrap_tests_running <- function() { + getOption("renv.tests.running", default = FALSE) + } + + renv_bootstrap_repos <- function() { + + # check for repos override + repos <- Sys.getenv("RENV_CONFIG_REPOS_OVERRIDE", unset = NA) + if (!is.na(repos)) + return(repos) + + # check for lockfile repositories + repos <- tryCatch(renv_bootstrap_repos_lockfile(), error = identity) + if (!inherits(repos, "error") && length(repos)) + return(repos) + + # if we're testing, re-use the test repositories + if (renv_bootstrap_tests_running()) + return(getOption("renv.tests.repos")) + + # retrieve current repos + repos <- getOption("repos") + + # ensure @CRAN@ entries are resolved + repos[repos == "@CRAN@"] <- getOption( + "renv.repos.cran", + "https://cloud.r-project.org" + ) + + # add in renv.bootstrap.repos if set + default <- c(FALLBACK = "https://cloud.r-project.org") + extra <- getOption("renv.bootstrap.repos", default = default) + repos <- c(repos, extra) + + # remove duplicates that might've snuck in + dupes <- duplicated(repos) | duplicated(names(repos)) + repos[!dupes] + + } + + renv_bootstrap_repos_lockfile <- function() { + + lockpath <- Sys.getenv("RENV_PATHS_LOCKFILE", unset = "renv.lock") + if (!file.exists(lockpath)) + return(NULL) + + lockfile <- tryCatch(renv_json_read(lockpath), error = identity) + if (inherits(lockfile, "error")) { + warning(lockfile) + return(NULL) + } + + repos <- lockfile$R$Repositories + if (length(repos) == 0) + return(NULL) + + keys <- vapply(repos, `[[`, "Name", FUN.VALUE = character(1)) + vals <- vapply(repos, `[[`, "URL", FUN.VALUE = character(1)) + names(vals) <- keys + + return(vals) + + } + + renv_bootstrap_download <- function(version) { + + # if the renv version number has 4 components, assume it must + # be retrieved via github + nv <- numeric_version(version) + components <- 
unclass(nv)[[1]] + + methods <- if (length(components) == 4L) { + list( + renv_bootstrap_download_github + ) + } else { + list( + renv_bootstrap_download_cran_latest, + renv_bootstrap_download_cran_archive + ) + } + + for (method in methods) { + path <- tryCatch(method(version), error = identity) + if (is.character(path) && file.exists(path)) + return(path) + } + + stop("failed to download renv ", version) + + } + + renv_bootstrap_download_impl <- function(url, destfile) { + + mode <- "wb" + + # https://bugs.r-project.org/bugzilla/show_bug.cgi?id=17715 + fixup <- + Sys.info()[["sysname"]] == "Windows" && + substring(url, 1L, 5L) == "file:" + + if (fixup) + mode <- "w+b" + + utils::download.file( + url = url, + destfile = destfile, + mode = mode, + quiet = TRUE + ) + + } + + renv_bootstrap_download_cran_latest <- function(version) { + + spec <- renv_bootstrap_download_cran_latest_find(version) + + message("* Downloading renv ", version, " ... ", appendLF = FALSE) + + type <- spec$type + repos <- spec$repos + + info <- tryCatch( + utils::download.packages( + pkgs = "renv", + destdir = tempdir(), + repos = repos, + type = type, + quiet = TRUE + ), + condition = identity + ) + + if (inherits(info, "condition")) { + message("FAILED") + return(FALSE) + } + + # report success and return + message("OK (downloaded ", type, ")") + info[1, 2] + + } + + renv_bootstrap_download_cran_latest_find <- function(version) { + + # check whether binaries are supported on this system + binary <- + getOption("renv.bootstrap.binary", default = TRUE) && + !identical(.Platform$pkgType, "source") && + !identical(getOption("pkgType"), "source") && + Sys.info()[["sysname"]] %in% c("Darwin", "Windows") + + types <- c(if (binary) "binary", "source") + + # iterate over types + repositories + for (type in types) { + for (repos in renv_bootstrap_repos()) { + + # retrieve package database + db <- tryCatch( + as.data.frame( + utils::available.packages(type = type, repos = repos), + stringsAsFactors = FALSE + ), + error = identity + ) + + if (inherits(db, "error")) + next + + # check for compatible entry + entry <- db[db$Package %in% "renv" & db$Version %in% version, ] + if (nrow(entry) == 0) + next + + # found it; return spec to caller + spec <- list(entry = entry, type = type, repos = repos) + return(spec) + + } + } + + # if we got here, we failed to find renv + fmt <- "renv %s is not available from your declared package repositories" + stop(sprintf(fmt, version)) + + } + + renv_bootstrap_download_cran_archive <- function(version) { + + name <- sprintf("renv_%s.tar.gz", version) + repos <- renv_bootstrap_repos() + urls <- file.path(repos, "src/contrib/Archive/renv", name) + destfile <- file.path(tempdir(), name) + + message("* Downloading renv ", version, " ... 
", appendLF = FALSE) + + for (url in urls) { + + status <- tryCatch( + renv_bootstrap_download_impl(url, destfile), + condition = identity + ) + + if (identical(status, 0L)) { + message("OK") + return(destfile) + } + + } + + message("FAILED") + return(FALSE) + + } + + renv_bootstrap_download_github <- function(version) { + + enabled <- Sys.getenv("RENV_BOOTSTRAP_FROM_GITHUB", unset = "TRUE") + if (!identical(enabled, "TRUE")) + return(FALSE) + + # prepare download options + pat <- Sys.getenv("GITHUB_PAT") + if (nzchar(Sys.which("curl")) && nzchar(pat)) { + fmt <- "--location --fail --header \"Authorization: token %s\"" + extra <- sprintf(fmt, pat) + saved <- options("download.file.method", "download.file.extra") + options(download.file.method = "curl", download.file.extra = extra) + on.exit(do.call(base::options, saved), add = TRUE) + } else if (nzchar(Sys.which("wget")) && nzchar(pat)) { + fmt <- "--header=\"Authorization: token %s\"" + extra <- sprintf(fmt, pat) + saved <- options("download.file.method", "download.file.extra") + options(download.file.method = "wget", download.file.extra = extra) + on.exit(do.call(base::options, saved), add = TRUE) + } + + message("* Downloading renv ", version, " from GitHub ... ", appendLF = FALSE) + + url <- file.path("https://api.github.com/repos/rstudio/renv/tarball", version) + name <- sprintf("renv_%s.tar.gz", version) + destfile <- file.path(tempdir(), name) + + status <- tryCatch( + renv_bootstrap_download_impl(url, destfile), + condition = identity + ) + + if (!identical(status, 0L)) { + message("FAILED") + return(FALSE) + } + + message("OK") + return(destfile) + + } + + renv_bootstrap_install <- function(version, tarball, library) { + + # attempt to install it into project library + message("* Installing renv ", version, " ... 
", appendLF = FALSE) + dir.create(library, showWarnings = FALSE, recursive = TRUE) + + # invoke using system2 so we can capture and report output + bin <- R.home("bin") + exe <- if (Sys.info()[["sysname"]] == "Windows") "R.exe" else "R" + r <- file.path(bin, exe) + args <- c("--vanilla", "CMD", "INSTALL", "--no-multiarch", "-l", shQuote(library), shQuote(tarball)) + output <- system2(r, args, stdout = TRUE, stderr = TRUE) + message("Done!") + + # check for successful install + status <- attr(output, "status") + if (is.numeric(status) && !identical(status, 0L)) { + header <- "Error installing renv:" + lines <- paste(rep.int("=", nchar(header)), collapse = "") + text <- c(header, lines, output) + writeLines(text, con = stderr()) + } + + status + + } + + renv_bootstrap_platform_prefix <- function() { + + # construct version prefix + version <- paste(R.version$major, R.version$minor, sep = ".") + prefix <- paste("R", numeric_version(version)[1, 1:2], sep = "-") + + # include SVN revision for development versions of R + # (to avoid sharing platform-specific artefacts with released versions of R) + devel <- + identical(R.version[["status"]], "Under development (unstable)") || + identical(R.version[["nickname"]], "Unsuffered Consequences") + + if (devel) + prefix <- paste(prefix, R.version[["svn rev"]], sep = "-r") + + # build list of path components + components <- c(prefix, R.version$platform) + + # include prefix if provided by user + prefix <- renv_bootstrap_platform_prefix_impl() + if (!is.na(prefix) && nzchar(prefix)) + components <- c(prefix, components) + + # build prefix + paste(components, collapse = "/") + + } + + renv_bootstrap_platform_prefix_impl <- function() { + + # if an explicit prefix has been supplied, use it + prefix <- Sys.getenv("RENV_PATHS_PREFIX", unset = NA) + if (!is.na(prefix)) + return(prefix) + + # if the user has requested an automatic prefix, generate it + auto <- Sys.getenv("RENV_PATHS_PREFIX_AUTO", unset = NA) + if (auto %in% c("TRUE", "True", "true", "1")) + return(renv_bootstrap_platform_prefix_auto()) + + # empty string on failure + "" + + } + + renv_bootstrap_platform_prefix_auto <- function() { + + prefix <- tryCatch(renv_bootstrap_platform_os(), error = identity) + if (inherits(prefix, "error") || prefix %in% "unknown") { + + msg <- paste( + "failed to infer current operating system", + "please file a bug report at https://github.com/rstudio/renv/issues", + sep = "; " + ) + + warning(msg) + + } + + prefix + + } + + renv_bootstrap_platform_os <- function() { + + sysinfo <- Sys.info() + sysname <- sysinfo[["sysname"]] + + # handle Windows + macOS up front + if (sysname == "Windows") + return("windows") + else if (sysname == "Darwin") + return("macos") + + # check for os-release files + for (file in c("/etc/os-release", "/usr/lib/os-release")) + if (file.exists(file)) + return(renv_bootstrap_platform_os_via_os_release(file, sysinfo)) + + # check for redhat-release files + if (file.exists("/etc/redhat-release")) + return(renv_bootstrap_platform_os_via_redhat_release()) + + "unknown" + + } + + renv_bootstrap_platform_os_via_os_release <- function(file, sysinfo) { + + # read /etc/os-release + release <- utils::read.table( + file = file, + sep = "=", + quote = c("\"", "'"), + col.names = c("Key", "Value"), + comment.char = "#", + stringsAsFactors = FALSE + ) + + vars <- as.list(release$Value) + names(vars) <- release$Key + + # get os name + os <- tolower(sysinfo[["sysname"]]) + + # read id + id <- "unknown" + for (field in c("ID", "ID_LIKE")) { + if (field %in% 
names(vars) && nzchar(vars[[field]])) { + id <- vars[[field]] + break + } + } + + # read version + version <- "unknown" + for (field in c("UBUNTU_CODENAME", "VERSION_CODENAME", "VERSION_ID", "BUILD_ID")) { + if (field %in% names(vars) && nzchar(vars[[field]])) { + version <- vars[[field]] + break + } + } + + # join together + paste(c(os, id, version), collapse = "-") + + } + + renv_bootstrap_platform_os_via_redhat_release <- function() { + + # read /etc/redhat-release + contents <- readLines("/etc/redhat-release", warn = FALSE) + + # infer id + id <- if (grepl("centos", contents, ignore.case = TRUE)) + "centos" + else if (grepl("redhat", contents, ignore.case = TRUE)) + "redhat" + else + "unknown" + + # try to find a version component (very hacky) + version <- "unknown" + + parts <- strsplit(contents, "[[:space:]]")[[1L]] + for (part in parts) { + + nv <- tryCatch(numeric_version(part), error = identity) + if (inherits(nv, "error")) + next + + version <- nv[1, 1] + break + + } + + paste(c("linux", id, version), collapse = "-") + + } + + renv_bootstrap_library_root_name <- function(project) { + + # use project name as-is if requested + asis <- Sys.getenv("RENV_PATHS_LIBRARY_ROOT_ASIS", unset = "FALSE") + if (asis) + return(basename(project)) + + # otherwise, disambiguate based on project's path + id <- substring(renv_bootstrap_hash_text(project), 1L, 8L) + paste(basename(project), id, sep = "-") + + } + + renv_bootstrap_library_root <- function(project) { + + prefix <- renv_bootstrap_profile_prefix() + + path <- Sys.getenv("RENV_PATHS_LIBRARY", unset = NA) + if (!is.na(path)) + return(paste(c(path, prefix), collapse = "/")) + + path <- renv_bootstrap_library_root_impl(project) + if (!is.null(path)) { + name <- renv_bootstrap_library_root_name(project) + return(paste(c(path, prefix, name), collapse = "/")) + } + + renv_bootstrap_paths_renv("library", project = project) + + } + + renv_bootstrap_library_root_impl <- function(project) { + + root <- Sys.getenv("RENV_PATHS_LIBRARY_ROOT", unset = NA) + if (!is.na(root)) + return(root) + + type <- renv_bootstrap_project_type(project) + if (identical(type, "package")) { + userdir <- renv_bootstrap_user_dir() + return(file.path(userdir, "library")) + } + + } + + renv_bootstrap_validate_version <- function(version) { + + loadedversion <- utils::packageDescription("renv", fields = "Version") + if (version == loadedversion) + return(TRUE) + + # assume four-component versions are from GitHub; three-component + # versions are from CRAN + components <- strsplit(loadedversion, "[.-]")[[1]] + remote <- if (length(components) == 4L) + paste("rstudio/renv", loadedversion, sep = "@") + else + paste("renv", loadedversion, sep = "@") + + fmt <- paste( + "renv %1$s was loaded from project library, but this project is configured to use renv %2$s.", + "Use `renv::record(\"%3$s\")` to record renv %1$s in the lockfile.", + "Use `renv::restore(packages = \"renv\")` to install renv %2$s into the project library.", + sep = "\n" + ) + + msg <- sprintf(fmt, loadedversion, version, remote) + warning(msg, call. 
= FALSE) + + FALSE + + } + + renv_bootstrap_hash_text <- function(text) { + + hashfile <- tempfile("renv-hash-") + on.exit(unlink(hashfile), add = TRUE) + + writeLines(text, con = hashfile) + tools::md5sum(hashfile) + + } + + renv_bootstrap_load <- function(project, libpath, version) { + + # try to load renv from the project library + if (!requireNamespace("renv", lib.loc = libpath, quietly = TRUE)) + return(FALSE) + + # warn if the version of renv loaded does not match + renv_bootstrap_validate_version(version) + + # load the project + renv::load(project) + + TRUE + + } + + renv_bootstrap_profile_load <- function(project) { + + # if RENV_PROFILE is already set, just use that + profile <- Sys.getenv("RENV_PROFILE", unset = NA) + if (!is.na(profile) && nzchar(profile)) + return(profile) + + # check for a profile file (nothing to do if it doesn't exist) + path <- renv_bootstrap_paths_renv("profile", profile = FALSE) + if (!file.exists(path)) + return(NULL) + + # read the profile, and set it if it exists + contents <- readLines(path, warn = FALSE) + if (length(contents) == 0L) + return(NULL) + + # set RENV_PROFILE + profile <- contents[[1L]] + if (!profile %in% c("", "default")) + Sys.setenv(RENV_PROFILE = profile) + + profile + + } + + renv_bootstrap_profile_prefix <- function() { + profile <- renv_bootstrap_profile_get() + if (!is.null(profile)) + return(file.path("profiles", profile, "renv")) + } + + renv_bootstrap_profile_get <- function() { + profile <- Sys.getenv("RENV_PROFILE", unset = "") + renv_bootstrap_profile_normalize(profile) + } + + renv_bootstrap_profile_set <- function(profile) { + profile <- renv_bootstrap_profile_normalize(profile) + if (is.null(profile)) + Sys.unsetenv("RENV_PROFILE") + else + Sys.setenv(RENV_PROFILE = profile) + } + + renv_bootstrap_profile_normalize <- function(profile) { + + if (is.null(profile) || profile %in% c("", "default")) + return(NULL) + + profile + + } + + renv_bootstrap_path_absolute <- function(path) { + + substr(path, 1L, 1L) %in% c("~", "/", "\\") || ( + substr(path, 1L, 1L) %in% c(letters, LETTERS) && + substr(path, 2L, 3L) %in% c(":/", ":\\") + ) + + } + + renv_bootstrap_paths_renv <- function(..., profile = TRUE, project = NULL) { + renv <- Sys.getenv("RENV_PATHS_RENV", unset = "renv") + root <- if (renv_bootstrap_path_absolute(renv)) NULL else project + prefix <- if (profile) renv_bootstrap_profile_prefix() + components <- c(root, renv, prefix, ...) 
+ paste(components, collapse = "/") + } + + renv_bootstrap_project_type <- function(path) { + + descpath <- file.path(path, "DESCRIPTION") + if (!file.exists(descpath)) + return("unknown") + + desc <- tryCatch( + read.dcf(descpath, all = TRUE), + error = identity + ) + + if (inherits(desc, "error")) + return("unknown") + + type <- desc$Type + if (!is.null(type)) + return(tolower(type)) + + package <- desc$Package + if (!is.null(package)) + return("package") + + "unknown" + + } + + renv_bootstrap_user_dir <- function(path) { + dir <- renv_bootstrap_user_dir_impl(path) + chartr("\\", "/", dir) + } + + renv_bootstrap_user_dir_impl <- function(path) { + + # use R_user_dir if available + tools <- asNamespace("tools") + if (is.function(tools$R_user_dir)) + return(tools$R_user_dir("renv", "cache")) + + # try using our own backfill for older versions of R + envvars <- c("R_USER_CACHE_DIR", "XDG_CACHE_HOME") + for (envvar in envvars) { + root <- Sys.getenv(envvar, unset = NA) + if (!is.na(root)) { + path <- file.path(root, "R/renv") + return(path) + } + } + + # use platform-specific default fallbacks + if (Sys.info()[["sysname"]] == "Windows") + file.path(Sys.getenv("LOCALAPPDATA"), "R/cache/R/renv") + else if (Sys.info()[["sysname"]] == "Darwin") + "~/Library/Caches/org.R-project.R/R/renv" + else + "~/.cache/R/renv" + + } + + renv_json_read <- function(file = NULL, text = NULL) { + + text <- paste(text %||% read(file), collapse = "\n") + + # find strings in the JSON + pattern <- '["](?:(?:\\\\.)|(?:[^"\\\\]))*?["]' + locs <- gregexpr(pattern, text)[[1]] + + # if any are found, replace them with placeholders + replaced <- text + strings <- character() + replacements <- character() + + if (!identical(c(locs), -1L)) { + + # get the string values + starts <- locs + ends <- locs + attr(locs, "match.length") - 1L + strings <- substring(text, starts, ends) + + # only keep those requiring escaping + strings <- grep("[[\\]{}:]", strings, perl = TRUE, value = TRUE) + + # compute replacements + replacements <- sprintf('"\032%i\032"', seq_along(strings)) + + # replace the strings + mapply(function(string, replacement) { + replaced <<- sub(string, replacement, replaced, fixed = TRUE) + }, strings, replacements) + + } + + # transform the JSON into something the R parser understands + transformed <- replaced + transformed <- gsub("[[{]", "list(", transformed) + transformed <- gsub("[]}]", ")", transformed) + transformed <- gsub(":", "=", transformed, fixed = TRUE) + text <- paste(transformed, collapse = "\n") + + # parse it + json <- parse(text = text, keep.source = FALSE, srcfile = NULL)[[1L]] + + # construct map between source strings, replaced strings + map <- as.character(parse(text = strings)) + names(map) <- as.character(parse(text = replacements)) + + # convert to list + map <- as.list(map) + + # remap strings in object + remapped <- renv_json_remap(json, map) + + # evaluate + eval(remapped, envir = baseenv()) + + } + + renv_json_remap <- function(json, map) { + + # fix names + if (!is.null(names(json))) { + lhs <- match(names(json), names(map), nomatch = 0L) + rhs <- match(names(map), names(json), nomatch = 0L) + names(json)[rhs] <- map[lhs] + } + + # fix values + if (is.character(json)) + return(map[[json]] %||% json) + + # handle true, false, null + if (is.name(json)) { + text <- as.character(json) + if (text == "true") + return(TRUE) + else if (text == "false") + return(FALSE) + else if (text == "null") + return(NULL) + } + + # recurse + if (is.recursive(json)) { + for (i in seq_along(json)) { + 
json[i] <- list(renv_json_remap(json[[i]], map)) + } + } + + json + + } + + # load the renv profile, if any + renv_bootstrap_profile_load(project) + + # construct path to library root + root <- renv_bootstrap_library_root(project) + + # construct library prefix for platform + prefix <- renv_bootstrap_platform_prefix() + + # construct full libpath + libpath <- file.path(root, prefix) + + # attempt to load + if (renv_bootstrap_load(project, libpath, version)) + return(TRUE) + + # load failed; inform user we're about to bootstrap + prefix <- paste("# Bootstrapping renv", version) + postfix <- paste(rep.int("-", 77L - nchar(prefix)), collapse = "") + header <- paste(prefix, postfix) + message(header) + + # perform bootstrap + bootstrap(version, libpath) + + # exit early if we're just testing bootstrap + if (!is.na(Sys.getenv("RENV_BOOTSTRAP_INSTALL_ONLY", unset = NA))) + return(TRUE) + + # try again to load + if (requireNamespace("renv", lib.loc = libpath, quietly = TRUE)) { + message("* Successfully installed and loaded renv ", version, ".") + return(renv::load()) + } + + # failed to download or load renv; warn the user + msg <- c( + "Failed to find an renv installation: the project will not be loaded.", + "Use `renv::activate()` to re-initialize the project." + ) + + warning(paste(msg, collapse = "\n"), call. = FALSE) + +}) diff --git a/renv/settings.dcf b/renv/settings.dcf new file mode 100644 index 0000000..169d82f --- /dev/null +++ b/renv/settings.dcf @@ -0,0 +1,10 @@ +bioconductor.version: +external.libraries: +ignored.packages: +package.dependency.fields: Imports, Depends, LinkingTo +r.version: +snapshot.type: implicit +use.cache: TRUE +vcs.ignore.cellar: TRUE +vcs.ignore.library: TRUE +vcs.ignore.local: TRUE
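The renv/activate.R script above bootstraps renv itself at R startup, downloading and installing the pinned renv version (0.15.2) into the project library if it is not already available, while renv.lock records R 4.1.3 together with the exact package versions used, including GitHub sources for EczemaPred and EczemaPredPOSCORAD and a private GitHub Enterprise source for TanakaData, which requires access to that host. Assuming network access to the recorded repositories, the library can then be restored from the lockfile with the standard renv call:

# Run once from the project root, after the autoloader has activated renv,
# to install the package versions recorded in renv.lock
renv::restore()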