library(tidyverse)
library(patchwork)
library(glmmTMB)
library(brms)
> Loading required package: Rcpp
> Loading 'brms' package (version 2.17.4). Useful instructions
> can be found by typing help('brms'). A more detailed introduction
> to the package is available through vignette('brms_overview').
>
> Attaching package: 'brms'
> The following object is masked from 'package:stats':
>
> ar
library(easystats)
show_parameters <- function(model) {
p <- parameters::parameters(model, effects = "fixed", component="conditional", test=c("pd")) |>
data_relocate("pd", after=-1)
display(p, stars=TRUE)
}
summary(report::report(sessionInfo()))
The analysis was done using the R Statistical language (v4.2.0; R
Core Team, 2022) on Windows 10 x64, using the packages effectsize
(v0.7.9.2999), glmmTMB (v1.1.3), brms (v2.17.4), Rcpp (v1.0.9), purrr
(v0.3.4), parameters (v0.19.0.5), performance (v0.9.2.5), see
(v0.7.3.1), insight (v0.18.4.5), bayestestR (v0.13.0), easystats
(v0.4.3), modelbased (v0.8.5.1), correlation (v0.8.2.4), report
(v0.5.5.2), tibble (v3.1.8), datawizard (v0.6.1.2), patchwork (v1.1.1),
ggplot2 (v3.3.6), stringr (v1.4.0), forcats (v0.5.2), tidyverse
(v1.3.1), dplyr (v1.0.10), tidyr (v1.2.1) and readr (v2.1.2).
df <- read.csv("data/data_combined.csv") %>%
mutate(Participant = factor(paste0("S", Participant), levels = paste0("S", 1:30)),
Condition = as.factor(Condition),
Item = as.factor(Item),
Phrasing = as.factor(Phrasing),
Answer = as.factor(Answer)) |>
dplyr::filter(!Participant %in% c("S3", "S15", "S19", "S23")) # No data
# Outliers
df$HeartRate[df$Participant == "S30"] <- NA # Extreme values
# df$Confidence[df$Participant %in% c("S9", "S29")] <- NA # Extreme responses
df$RT[df$Participant == "S13"] <- NA # Slower than the others
# Remove outlier trials
df <- df |>
group_by(Participant) |>
mutate(Outliers_RT = as.logical(performance::check_outliers(RT, method = "zscore", threshold = qnorm(0.99999))),
Outliers_Physio = as.logical(performance::check_outliers(HeartRate, method = "zscore", threshold = qnorm(0.99999)))) |>
ungroup()
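For reference, the z-score cutoff used above is very conservative; a quick check (not part of the original pipeline) of what it corresponds to:
# qnorm(0.99999) is roughly 4.26, so only trials more than ~4.26 SDs
# away from a participant's own mean are flagged as outliers
qnorm(0.99999)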
# Adjustments for beta models
df$Confidence[df$Confidence == 1] <- 0.99999
df$Confidence[df$Confidence == 0] <- 0.00001
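Beta models require responses strictly inside the open interval (0, 1), which is why exact 0s and 1s are nudged above. The same adjustment as a small helper, sketched here for clarity (the function name and epsilon are arbitrary choices mirroring the values used above):
# Clamp proportions into the open interval (0, 1) for beta-family models
clamp_open_unit <- function(x, eps = 1e-5) {
  pmin(pmax(x, eps), 1 - eps)
}
# df$Confidence <- clamp_open_unit(df$Confidence)  # same effect as the two lines above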
cat(paste("The data consists of",
report::report_participants(df,
participants = "Participant",
sex = "Gender",
age = "Age")))
The data consists of 26 participants (Mean age = 20.9, SD = 2.0,
range: [18, 25]; Sex: 65.4% females, 34.6% males, 0.0% other; Gender:
65.4% women, 34.6% men, 0.00% non-binary)
Percentage of Confidence data removed: 0.481 %
Percentage of RT data removed: 3.846 %
Percentage of Heart Rate data removed: 7.692 %
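The code producing the removal percentages above is not shown; a minimal sketch of how such figures could be computed, assuming removed observations are encoded as NA:
# Share of observations set to NA for each outcome (sketch; assumes exclusions are NAs)
removed_pct <- function(x) insight::format_value(100 * mean(is.na(x)))
cat("Percentage of Confidence data removed:", removed_pct(df$Confidence), "%\n")
cat("Percentage of RT data removed:", removed_pct(df$RT), "%\n")
cat("Percentage of Heart Rate data removed:", removed_pct(df$HeartRate), "%\n")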
Measures
Outcomes
p1 <- estimate_density(df$Confidence, at=df$Participant, method = "kernSmooth") |>
ggplot(aes(x=x, y=y)) +
geom_line(aes(color = Group)) +
scale_x_continuous(labels = scales::percent, expand = c(0, 0)) +
scale_y_continuous(expand = c(0, 0)) +
labs(x = "Confidence", y = "") +
scale_colour_flat_d(guide="none")
p2 <- estimate_density(df$RT, at=df$Participant, method = "kernSmooth") |>
ggplot(aes(x=x, y=y)) +
geom_line(aes(color = Group)) +
scale_x_continuous(expand = c(0, 0)) +
scale_y_continuous(expand = c(0, 0)) +
labs(x = "RT (s)", y = "") +
scale_colour_viridis_d(guide="none")
> Warning: 'V1', or one of its 'at' groups, is empty and has no density
> information.
p3 <- estimate_density(filter(df, !is.na(HeartRate)), select="HeartRate", at="Participant", method = "kernSmooth") |>
ggplot(aes(x=x, y=y)) +
geom_line(aes(color = Participant)) +
scale_x_continuous(expand = c(0, 0)) +
scale_y_continuous(expand = c(0, 0)) +
labs(x = "Heart Rate (bpm)", y = "") +
scale_colour_pizza_d(guide="none")
p1 / p2 / p3 + plot_annotation(title = "Distribution of each participant")
## Theory of Mind / Empathy
Yoni Task
df %>%
group_by(Participant) %>%
select(starts_with("YONI_")) |>
summarise_all(mean, na.rm=TRUE) |>
tidyr::pivot_longer(-Participant, values_to = "Scores") |>
mutate(name = paste0(str_replace(name, "_", " ("), ")")) |>
ggplot(aes(x = Scores, fill = name)) +
geom_density() +
scale_fill_manual(values = c("YONI (Affective)" = "Purple",
"YONI (Cognitive)" = "Blue",
"YONI (Physical)" = "Green",
"YONI (Total)"= "DarkBlue"),
guide = "none") +
facet_wrap(~name, scales = "free")
BES Questionnaire
df %>%
group_by(Participant) %>%
select(starts_with("BES_")) |>
summarise_all(mean, na.rm=TRUE) |>
tidyr::pivot_longer(-Participant, values_to = "Scores") |>
mutate(name = paste0(str_replace(name, "_", " ("), ")")) |>
ggplot(aes(x = Scores, fill = name)) +
geom_density() +
scale_fill_manual(values = c("BES (Affective)" = "Purple",
"BES (Cognitive)" = "Blue",
"BES (Total)"= "DarkBlue"),
guide = "none") +
facet_wrap(~name, scales = "free")
Interoception
Heartbeat Counting Task (HCT)
df %>%
group_by(Participant) %>%
select(starts_with("HCT_")) |>
summarise_all(mean, na.rm=TRUE) |>
tidyr::pivot_longer(-Participant, values_to = "Scores") |>
mutate(name = paste0(str_replace(name, "_", " ("), ")")) |>
ggplot(aes(x = Scores, fill = name)) +
geom_density() +
scale_fill_manual(values = c("HCT (Accuracy)" = "Red",
"HCT (Awareness)" = "Orange",
"HCT (Confidence)"= "DarkOrange"),
guide = "none") +
facet_wrap(~name, scales = "free")
Heartbeat Tracking Task (HTT)
htt_data <- read.csv("preprocessing/HTT_extracted.csv") |>
mutate(Condition = fct_recode(Condition, "NoGuessPerturbed" = "NoGuess_Perturbed")) |>
rename(Distance = Time_to_Rpeak)
htt_data |>
ggplot(aes(x = Distance, fill = Condition)) +
geom_histogram(binwidth = 0.05, alpha = 0.5, position="identity") +
geom_vline(xintercept = 0, linetype = "dashed") +
facet_wrap(~ID) +
see::theme_modern()
MAIA
df %>%
group_by(Participant) %>%
select(starts_with("MAIA_")) |>
summarise_all(mean, na.rm=TRUE) |>
tidyr::pivot_longer(-Participant, values_to = "Scores") |>
mutate(name = paste0(str_replace(name, "_", " ("), ")")) |>
ggplot(aes(x = Scores, fill = name)) +
geom_density() +
scale_fill_brewer(palette = "Reds", guide = "none") +
facet_wrap(~name, scales = "free")
Deception
LIE Scale
df %>%
group_by(Participant) %>%
select(starts_with("LIE_")) |>
summarise_all(mean, na.rm=TRUE) |>
tidyr::pivot_longer(-Participant, values_to = "Scores") |>
mutate(name = paste0(str_replace(name, "_", " ("), ")")) |>
ggplot(aes(x = Scores, fill = name)) +
geom_density() +
scale_fill_manual(values = c("LIE (Ability)" = "#2196F3",
"LIE (Frequency)" = "#4CAF50",
"LIE (Contextuality)"= "#FF9800",
"LIE (Negativity)"= "#E91E63"),
guide = "none") +
facet_wrap(~name, scales = "free")
Deception Task
Physiological Processing
df_physio <- read.csv("preprocessing/deceptiontask_physio_extracted.csv") |>
dplyr::filter(ID != 30) |>
tidyr::pivot_longer(starts_with("X"), names_to = "Time", values_to = "HeartRate") |>
mutate(Time = as.numeric(str_remove(Time, "X"))-500,
group = paste0(ID, condition, Answer),
instruction = fct_relevel(Answer, "TRUTH")) |>
group_by(condition, Answer, Time) |>
summarise(HeartRate = mean(HeartRate)) |>
ungroup()
df_physio |>
ggplot(aes(x = Time, y = HeartRate)) +
annotate("rect", xmin=1500, xmax=2500, ymin=-Inf, ymax=Inf, alpha=0.1, fill = "green") +
geom_line(aes(color = condition, linetype = Answer), size=1) +
geom_vline(aes(xintercept=0), linetype = "dotted") +
labs(y = "Average Heart Rate (bpm)", x = "Time (ms)") +
scale_color_manual(values = c("Polygraph" = "#FF5722", "Interrogation" = "#2196F3")) +
theme(legend.position = "top", legend.title=element_blank())
Summary Table
df |>
group_by(Participant, Answer) |>
summarise(Confidence = paste(insight::format_value(mean(Confidence, na.rm = TRUE)),
" +- ",
insight::format_value(sd(Confidence, na.rm = TRUE))),
RT = paste(insight::format_value(mean(RT, na.rm = TRUE)),
" +- ",
insight::format_value(sd(RT, na.rm = TRUE)))) |>
arrange(Participant) |>
knitr::kable()
| Participant | Answer | Confidence | RT |
|---|---|---|---|
| S1 | Lie | 0.40 +- 0.10 | 3.69 +- 0.69 |
| S1 | Truth | 0.52 +- 0.15 | 3.75 +- 0.79 |
| S2 | Lie | 0.10 +- 0.17 | 6.46 +- 1.95 |
| S2 | Truth | 0.94 +- 0.08 | 6.83 +- 1.71 |
| S4 | Lie | 0.67 +- 0.18 | 3.86 +- 0.89 |
| S4 | Truth | 0.77 +- 0.13 | 3.81 +- 0.80 |
| S5 | Lie | 0.53 +- 0.23 | 3.41 +- 0.63 |
| S5 | Truth | 0.72 +- 0.21 | 3.45 +- 0.69 |
| S6 | Lie | 0.11 +- 0.15 | 3.10 +- 0.52 |
| S6 | Truth | 0.90 +- 0.09 | 3.24 +- 0.64 |
| S7 | Lie | 0.62 +- 0.33 | 3.84 +- 0.58 |
| S7 | Truth | 0.69 +- 0.29 | 3.69 +- 0.55 |
| S8 | Lie | 0.44 +- 0.17 | 4.48 +- 0.95 |
| S8 | Truth | 0.71 +- 0.16 | 4.59 +- 1.12 |
| S9 | Lie | 5.85e-04 +- 9.02e-04 | 4.75 +- 1.42 |
| S9 | Truth | 1.00 +- 1.17e-03 | 5.15 +- 1.88 |
| S10 | Lie | 0.59 +- 0.28 | 3.20 +- 0.71 |
| S10 | Truth | 0.84 +- 0.13 | 3.22 +- 0.62 |
| S11 | Lie | 0.38 +- 0.31 | 3.60 +- 0.69 |
| S11 | Truth | 0.72 +- 0.26 | 3.48 +- 0.62 |
| S12 | Lie | 0.58 +- 0.13 | 3.77 +- 1.42 |
| S12 | Truth | 0.64 +- 0.16 | 4.40 +- 1.72 |
| S13 | Lie | 0.28 +- 0.25 | +- |
| S13 | Truth | 0.85 +- 0.17 | +- |
| S14 | Lie | 0.52 +- 0.29 | 4.13 +- 0.86 |
| S14 | Truth | 0.62 +- 0.26 | 5.29 +- 1.54 |
| S16 | Lie | 0.41 +- 0.18 | 4.94 +- 1.13 |
| S16 | Truth | 0.66 +- 0.11 | 5.07 +- 1.01 |
| S17 | Lie | 0.63 +- 0.31 | 2.72 +- 0.63 |
| S17 | Truth | 0.77 +- 0.19 | 2.72 +- 0.51 |
| S18 | Lie | 0.21 +- 0.33 | 5.88 +- 2.54 |
| S18 | Truth | 0.79 +- 0.36 | 5.13 +- 1.98 |
| S20 | Lie | 0.37 +- 0.32 | 3.79 +- 0.74 |
| S20 | Truth | 0.80 +- 0.17 | 4.04 +- 1.02 |
| S21 | Lie | 0.55 +- 0.12 | 4.97 +- 1.01 |
| S21 | Truth | 0.74 +- 0.18 | 5.09 +- 1.04 |
| S22 | Lie | 0.13 +- 0.29 | 4.78 +- 2.94 |
| S22 | Truth | 0.81 +- 0.36 | 5.12 +- 2.99 |
| S24 | Lie | 0.27 +- 0.22 | 2.81 +- 0.68 |
| S24 | Truth | 0.72 +- 0.17 | 2.69 +- 0.64 |
| S25 | Lie | 0.63 +- 0.33 | 3.71 +- 1.11 |
| S25 | Truth | 0.85 +- 0.22 | 3.71 +- 0.92 |
| S26 | Lie | 0.46 +- 0.21 | 3.32 +- 0.61 |
| S26 | Truth | 0.70 +- 0.16 | 3.23 +- 0.53 |
| S27 | Lie | 0.32 +- 0.09 | 5.19 +- 1.88 |
| S27 | Truth | 0.68 +- 0.07 | 4.75 +- 1.78 |
| S28 | Lie | 0.49 +- 0.36 | 3.78 +- 0.83 |
| S28 | Truth | 0.59 +- 0.32 | 3.69 +- 0.50 |
| S29 | Lie | 0.58 +- 0.50 | 3.22 +- 0.56 |
| S29 | Truth | 0.90 +- 0.30 | 3.31 +- 0.68 |
| S30 | Lie | 0.80 +- 0.28 | 4.09 +- 1.10 |
| S30 | Truth | 0.88 +- 0.18 | 3.80 +- 0.99 |
Outliers
Participants
df |>
select(Participant, Confidence, RT, HeartRate) |>
tidyr::pivot_longer(-Participant) |>
ggplot(aes(x=Participant, y=value)) +
geom_violin(aes(fill=name), color="white", alpha=0.5) +
geom_jitter(aes(color=name), width = 0.25, height=0, shape="+", size=2) +
facet_wrap(~name, scales="free", nrow=3) +
guides(fill="none", color="none")
Observations
df |>
group_by(Participant) |>
summarise(Physio = sum(Outliers_Physio) / n(),
RT = sum(Outliers_RT) / n()) |>
tidyr::pivot_longer(-Participant, names_to = "Outlier_Type") |>
ggplot(aes(x=Participant, y = value, fill = Outlier_Type)) +
geom_bar(stat = "identity") +
scale_y_continuous(labels = scales::percent) +
labs(y = "Percentage of trials") +
ggtitle("Percentage of Trials Dropped per Participant")
df$RT[df$Outliers_RT] <- NA
df$HeartRate[df$Outliers_Physio] <- NA
Distributions
p1 <- df |>
ggplot(aes(x = Confidence, fill = Participant)) +
geom_density(alpha = 0.1) +
see::scale_fill_material_d(palette = "rainbow", guide = "none") +
scale_x_continuous(labels = scales::percent, expand=expansion(c(0, .05))) +
scale_y_continuous(expand=expansion(c(0, .05))) +
facet_wrap(~Answer)
p2 <- df |>
ggplot(aes(x = RT, fill = Participant)) +
geom_density(alpha = 0.1) +
see::scale_fill_material_d(palette = "rainbow", guide = "none") +
scale_x_continuous(expand=expansion(c(0, .05))) +
scale_y_continuous(expand=expansion(c(0, .05))) +
facet_wrap(~Answer)
p3 <- df |>
ggplot(aes(x = HeartRate, fill = Participant)) +
geom_density(alpha = 0.1) +
see::scale_fill_material_d(palette = "rainbow", guide = "none") +
scale_x_continuous(expand=expansion(c(0, .05))) +
scale_y_continuous(expand=expansion(c(0, .05))) +
facet_wrap(~Answer)
p1 / p2 / p3
Inter-Measures Correlation
dfsub <- df |>
select(Participant,
starts_with("YONI_"),
starts_with("BES_"),
starts_with("HCT_"),
starts_with("HTT_"),
starts_with("MAIA_"),
starts_with("LIE_")) |>
group_by(Participant) |>
summarise_all(mean)
Theory of Mind / Empathy
r <- correlation(select(dfsub, starts_with("YONI_")),
select(dfsub, starts_with("BES_")),
p_adjust = "none")
summary(r) |>
plot()
Interoception
# Leave out HTT
# r <- correlation(select(dfsub, starts_with("HTT_")),
# select(dfsub, starts_with("HCT_")),
# p_adjust = "none")
#
# summary(r) |>
# plot()
r <- correlation(select(dfsub, starts_with("MAIA_")),
select(dfsub, starts_with("HCT_")),
#starts_with("HTT_")),
p_adjust = "none")
summary(r) |>
plot()
ToM and Interoception
r <- correlation(select(dfsub, starts_with(c("MAIA_", "HCT_"))),
select(dfsub, starts_with(c("YONI_", "BES_"))),
p_adjust = "none")
summary(r) |>
plot() +
theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))
Deception Task and LIE Scale
r <- df |>
select(Participant, Answer, Confidence, RT, HeartRate) |>
group_by(Participant, Answer) |>
summarise_all(mean, na.rm=TRUE) |>
tidyr::pivot_wider(names_from = "Answer", values_from = c("Confidence", "RT", "HeartRate")) |>
ungroup() |>
select(-Participant) |>
correlation(select(dfsub, starts_with("LIE_")), p_adjust = "none")
summary(r) |>
plot()
Confidence and RT
- When lying, the faster participants answer, the more confident they are.
model <- glmmTMB(Confidence ~ RT * Answer + (1|Participant) + (1|Item),
data = df, family = beta_family())
parameters::parameters(model, effects = "fixed")
> # Fixed Effects
>
> Parameter | Coefficient | SE | 95% CI | z | p
> --------------------------------------------------------------------------
> (Intercept) | 0.59 | 0.13 | [ 0.34, 0.85] | 4.55 | < .001
> RT | -0.23 | 0.03 | [-0.29, -0.18] | -8.71 | < .001
> Answer [Truth] | 0.14 | 0.15 | [-0.16, 0.44] | 0.91 | 0.365
> RT * Answer [Truth] | 0.29 | 0.03 | [ 0.22, 0.35] | 8.22 | < .001
modelbased::estimate_slopes(model, trend = "RT", at = "Answer")
> Estimated Marginal Effects
>
> Answer | Coefficient | SE | 95% CI | t(1982) | p
> ---------------------------------------------------------------
> Lie | -0.23 | 0.03 | [-0.29, -0.18] | -8.71 | < .001
> Truth | 0.05 | 0.03 | [ 0.00, 0.11] | 1.90 | 0.057
> Marginal effects estimated for RT
estimate_relation(model, at = c("RT", "Answer")) |>
plot(length = 50, point = list(alpha = 0.3, size = 3.5))
Confidence and Heart Rate
- More confidence in truths tends to go with a higher heart rate (positive but non-significant slope), whereas when lying, a higher heart rate is associated with lower confidence.
model <- glmmTMB(Confidence ~ HeartRate * Answer + (1|Participant) + (1|Item),
data = df, family = beta_family()) # warning can be ignored
parameters::parameters(model, effects = "fixed")
> # Fixed Effects
>
> Parameter | Coefficient | SE | 95% CI | z | p
> -------------------------------------------------------------------------------------
> (Intercept) | 0.53 | 0.36 | [-0.17, 1.24] | 1.49 | 0.137
> HeartRate | -0.01 | 4.14e-03 | [-0.02, 0.00] | -2.69 | 0.007
> Answer [Truth] | -0.17 | 0.41 | [-0.96, 0.63] | -0.41 | 0.682
> HeartRate * Answer [Truth] | 0.02 | 4.77e-03 | [ 0.01, 0.03] | 3.79 | < .001
modelbased::estimate_slopes(model, trend = "HeartRate", at = "Answer")
> Estimated Marginal Effects
>
> Answer | Coefficient | SE | 95% CI | t(1902) | p
> ------------------------------------------------------------------
> Lie | -0.01 | 4.14e-03 | [-0.02, 0.00] | -2.69 | 0.007
> Truth | 6.91e-03 | 4.28e-03 | [ 0.00, 0.02] | 1.61 | 0.107
> Marginal effects estimated for HeartRate
estimate_relation(model, at = c("HeartRate", "Answer")) |>
plot(length = 50, point = list(alpha = 0.3, size = 3.5))
RT and Heart Rate
model <- glmmTMB(RT ~ HeartRate * Answer + (1|Participant) + (1|Item),
data = df)
parameters::parameters(model, effects = "fixed")
> # Fixed Effects
>
> Parameter | Coefficient | SE | 95% CI | z | p
> -------------------------------------------------------------------------------------
> (Intercept) | 4.83 | 0.41 | [ 4.02, 5.64] | 11.73 | < .001
> HeartRate | -9.19e-03 | 4.12e-03 | [-0.02, 0.00] | -2.23 | 0.026
> Answer [Truth] | -0.05 | 0.36 | [-0.76, 0.66] | -0.14 | 0.886
> HeartRate * Answer [Truth] | 1.35e-03 | 4.25e-03 | [-0.01, 0.01] | 0.32 | 0.751
estimate_relation(model, at = c("HeartRate", "Answer")) |>
plot(length = 50, point = list(alpha = 0.3, size = 3.5))
Dimension Reduction
Theory of Mind
dfsub <- df |>
select(Participant,
starts_with("YONI_"),
starts_with("BES_")) |>
select(-ends_with("Total")) |>
group_by(Participant) |>
summarise_all(mean) |>
select(-Participant)
parameters::n_factors(dfsub)
> # Method Agreement Procedure:
>
> The choice of 1 dimensions is supported by 7 (46.67%) methods out of 15 (Bartlett, Anderson, Lawley, Acceleration factor, Scree (SE), Velicer's MAP, BIC).
efa <- parameters::factor_analysis(dfsub, n=1, sort=TRUE, rotation = "oblimin")
efa
> # Rotated loadings from Factor Analysis (oblimin-rotation)
>
> Variable | MR1 | Complexity | Uniqueness
> -----------------------------------------------
> YONI_Cognitive | 0.89 | 1 | 0.21
> YONI_Affective | 0.77 | 1 | 0.41
> YONI_Physical | 0.45 | 1 | 0.79
> BES_Affective | 0.41 | 1 | 0.83
> BES_Cognitive | 0.17 | 1 | 0.97
>
> The unique latent factor (oblimin rotation) accounted for 35.76% of the total variance of the original data.
df <- cbind(df, predict(efa, newdata=df, names="ToM"))
Interoception
dfsub <- df |>
select(Participant,
starts_with("HCT_"),
starts_with("MAIA_")) |>
select(-ends_with("Total")) |>
group_by(Participant) |>
summarise_all(mean) |>
select(-Participant)
parameters::n_components(dfsub)
> # Method Agreement Procedure:
>
> The choice of 4 dimensions is supported by 7 (41.18%) methods out of 17 (Anderson, Bentler, beta, Optimal coordinates, Parallel analysis, Kaiser criterion, VSS complexity 2).
efa <- parameters::factor_analysis(dfsub, n=4, sort=TRUE, rotation = "oblimin")
efa
> # Rotated loadings from Factor Analysis (oblimin-rotation)
>
> Variable | MR1 | MR3 | MR2 | MR4 | Complexity | Uniqueness
> --------------------------------------------------------------------------------------
> MAIA_AttentionRegulation | 0.97 | 0.03 | 0.15 | -7.19e-03 | 1.05 | 0.05
> MAIA_SelfRegulation | 0.63 | 0.18 | -0.06 | 0.22 | 1.45 | 0.34
> MAIA_EmotionalAwareness | 0.60 | 0.10 | -0.40 | 0.03 | 1.83 | 0.35
> MAIA_Noticing | 0.49 | 0.19 | -0.21 | -5.12e-04 | 1.70 | 0.58
> MAIA_BodyListening | 0.09 | 0.92 | -0.06 | -0.10 | 1.05 | 0.09
> HCT_Awareness | 0.11 | -0.60 | -0.14 | 0.06 | 1.21 | 0.66
> MAIA_Trusting | 0.10 | 0.53 | -0.26 | 0.40 | 2.45 | 0.37
> HCT_Confidence | 0.40 | 0.46 | 0.32 | 0.15 | 3.05 | 0.34
> MAIA_NotDistracting | 0.11 | -0.04 | 0.87 | 3.28e-03 | 1.03 | 0.25
> MAIA_NotWorrying | 0.29 | -0.24 | -0.10 | 0.71 | 1.62 | 0.32
> HCT_Accuracy | -0.33 | 0.13 | 0.33 | 0.61 | 2.26 | 0.49
>
> The 4 latent factors (oblimin rotation) accounted for 65.17% of the total variance of the original data (MR1 = 23.59%, MR3 = 18.54%, MR2 = 12.07%, MR4 = 10.97%).
df <- cbind(df, predict(efa, newdata=df, names=c("Intero_Meta", "Intero_Listening", "Intero_Focus", "Intero_Regulation")))
Manipulation Checks
Effect of Condition
Confidence
- Interaction between condition and answer (pd ≈ 96%): the effect of answer (being more confident in truths than in lies) is smaller in the interrogation condition.
- This is mostly driven by lower confidence in lies in the polygraph condition: less obvious feedback cues?
# Prior: plot(seq(-5, 5, length.out=100), dstudent_t(seq(-5, 5, length.out=100), 1, 0, 1), "l")
model <- brms::brm(Confidence ~ Answer * Condition + (1|Participant) + (1|Item),
data = df, family = "beta", seed=3, refresh=0,
prior = set_prior("student_t(1, 0, 1)", class = "b"))
> Running MCMC with 4 parallel chains...
>
> Chain 4 finished in 25.2 seconds.
> Chain 3 finished in 25.8 seconds.
> Chain 1 finished in 27.9 seconds.
> Chain 2 finished in 28.8 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 26.9 seconds.
> Total execution time: 28.8 seconds.
show_parameters(model)
Fixed effects

| Parameter | Median | 95% CI | Rhat | ESS | pd |
|---|---|---|---|---|---|
| (Intercept) | -0.34 | (-0.50, -0.17) | 1.001 | 1535.00 | 99.98%*** |
| AnswerTruth | 1.26 | (1.10, 1.42) | 1.000 | 3028.00 | 100%*** |
| ConditionPolygraph | -0.11 | (-0.26, 0.04) | 1.000 | 3073.00 | 92.97% |
| AnswerTruth:ConditionPolygraph | 0.18 | (-0.03, 0.39) | 1.001 | 2576.00 | 95.58% |
modelbased::estimate_contrasts(model, contrast = "Answer", at = "Condition")
> Marginal Contrasts Analysis
>
> Level1 | Level2 | Condition | Difference | 95% CI | pd | % in ROPE
> --------------------------------------------------------------------------------
> Lie | Truth | Interrogation | -1.26 | [-1.42, -1.10] | 100% | 0%
> Lie | Truth | Polygraph | -1.44 | [-1.60, -1.29] | 100% | 0%
>
> Marginal contrasts estimated at Answer
modelbased::estimate_contrasts(model, contrast = "Condition", at = "Answer")
> Marginal Contrasts Analysis
>
> Level1 | Level2 | Answer | Difference | 95% CI | pd | % in ROPE
> ------------------------------------------------------------------------------------
> Interrogation | Polygraph | Lie | 0.11 | [-0.04, 0.26] | 92.97% | 43.11%
> Interrogation | Polygraph | Truth | -0.07 | [-0.22, 0.07] | 83.33% | 65.05%
>
> Marginal contrasts estimated at Condition
estimate_means(model, at = c("Condition", "Answer")) |>
plot(show_data = "none")
RT
- Significant main effect of condition: people are slower in the polygraph condition.
- Consistent with the "less intuitive cues" interpretation above.
# Prior: plot(seq(-10, 10, length.out=100), dstudent_t(seq(-10, 10, length.out=100), 1, 0, 3), "l")
model <- brms::brm(RT ~ Answer * Condition + (1|Participant) + (1|Item),
data = df, seed=3, refresh=0,
prior = set_prior("student_t(1, 0, 3)", class = "b"))
> Running MCMC with 4 parallel chains...
>
> Chain 1 finished in 5.4 seconds.
> Chain 4 finished in 5.4 seconds.
> Chain 3 finished in 5.8 seconds.
> Chain 2 finished in 5.9 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 5.6 seconds.
> Total execution time: 6.0 seconds.
show_parameters(model)
Fixed effects

| Parameter | Median | 95% CI | Rhat | ESS | pd |
|---|---|---|---|---|---|
| (Intercept) | 3.81 | (3.39, 4.26) | 1.006 | 422.00 | 100%*** |
| AnswerTruth | 0.07 | (-0.08, 0.21) | 1.000 | 2675.00 | 82.42% |
| ConditionPolygraph | 0.52 | (0.38, 0.66) | 1.000 | 2602.00 | 100%*** |
| AnswerTruth:ConditionPolygraph | 4.97e-03 | (-0.20, 0.20) | 1.000 | 2171.00 | 51.68% |
modelbased::estimate_contrasts(model, contrast = "Answer", at = "Condition")
> Marginal Contrasts Analysis
>
> Level1 | Level2 | Condition | Difference | 95% CI | pd | % in ROPE
> ---------------------------------------------------------------------------------
> Lie | Truth | Interrogation | -0.07 | [-0.21, 0.08] | 82.42% | 68.66%
> Lie | Truth | Polygraph | -0.07 | [-0.22, 0.07] | 82.00% | 66.16%
>
> Marginal contrasts estimated at Answer
modelbased::estimate_contrasts(model, contrast = "Condition", at = "Answer")
> Marginal Contrasts Analysis
>
> Level1 | Level2 | Answer | Difference | 95% CI | pd | % in ROPE
> -----------------------------------------------------------------------------------
> Interrogation | Polygraph | Lie | -0.52 | [-0.66, -0.38] | 100% | 0%
> Interrogation | Polygraph | Truth | -0.52 | [-0.68, -0.38] | 100% | 0%
>
> Marginal contrasts estimated at Condition
estimate_means(model, at = c("Condition", "Answer")) |>
plot(show_data = "none")
Heart Rate
- Higher heart rate in lies vs. truths.
- Higher heart rate in the interrogation condition vs. polygraph: the presence of another person may cause heightened physiological arousal.
- No interaction.
# Prior: plot(seq(-30, 30, length.out=100), dstudent_t(seq(-30,30, length.out=100), 1, 0, 8), "l")
model <- brms::brm(HeartRate ~ Answer * Condition + (1|Participant) + (1|Item),
data = df, seed=3, refresh=0,
prior = set_prior("student_t(1, 0, 8)", class = "b"))
> Running MCMC with 4 parallel chains...
>
> Chain 4 finished in 10.1 seconds.
> Chain 2 finished in 13.0 seconds.
> Chain 3 finished in 14.2 seconds.
> Chain 1 finished in 14.5 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 13.0 seconds.
> Total execution time: 14.6 seconds.
show_parameters(model)
Fixed effects

| Parameter | Median | 95% CI | Rhat | ESS | pd |
|---|---|---|---|---|---|
| (Intercept) | 87.49 | (83.27, 91.37) | 1.005 | 296.00 | 100%*** |
| AnswerTruth | -1.48 | (-2.28, -0.65) | 1.000 | 3528.00 | 99.98%*** |
| ConditionPolygraph | -5.15 | (-5.98, -4.35) | 1.000 | 3800.00 | 100%*** |
| AnswerTruth:ConditionPolygraph | 0.50 | (-0.63, 1.60) | 1.000 | 3355.00 | 80.88% |
modelbased::estimate_contrasts(model, contrast = "Answer", at = "Condition")
> Marginal Contrasts Analysis
>
> Level1 | Level2 | Condition | Difference | 95% CI | pd | % in ROPE
> --------------------------------------------------------------------------------
> Lie | Truth | Interrogation | 1.48 | [0.65, 2.28] | 99.98% | 0%
> Lie | Truth | Polygraph | 0.97 | [0.19, 1.80] | 99.38% | 0%
>
> Marginal contrasts estimated at Answer
modelbased::estimate_contrasts(model, contrast = "Condition", at = "Answer")
> Marginal Contrasts Analysis
>
> Level1 | Level2 | Answer | Difference | 95% CI | pd | % in ROPE
> ---------------------------------------------------------------------------------
> Interrogation | Polygraph | Lie | 5.15 | [4.35, 5.98] | 100% | 0%
> Interrogation | Polygraph | Truth | 4.65 | [3.81, 5.51] | 100% | 0%
>
> Marginal contrasts estimated at Condition
estimate_means(model, at = c("Condition", "Answer")) |>
plot(show_data = "none")
Effect of Phrasing
- Main effect of phrasing on RT only. Indirect questions lead to
slower answers.
Confidence
# Prior: plot(seq(-2, 2, length.out=100), dstudent_t(seq(-2, 2, length.out=100), 1, 0, 1), "l")
model <- brms::brm(Confidence ~ Answer * Phrasing + (1|Participant) + (1|Item),
data = df, family = "beta", seed=3, refresh=0,
prior = set_prior("student_t(1, 0, 1)", class = "b"))
> Running MCMC with 4 parallel chains...
>
> Chain 2 finished in 24.1 seconds.
> Chain 4 finished in 25.5 seconds.
> Chain 1 finished in 25.9 seconds.
> Chain 3 finished in 27.3 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 25.7 seconds.
> Total execution time: 27.4 seconds.
show_parameters(model)
Fixed effects

| Parameter | Median | 95% CI | Rhat | ESS | pd |
|---|---|---|---|---|---|
| (Intercept) | -0.40 | (-0.57, -0.24) | 1.002 | 1546.00 | 100%*** |
| AnswerTruth | 1.40 | (1.24, 1.56) | 1.000 | 3191.00 | 100%*** |
| PhrasingIndirect | 9.91e-03 | (-0.13, 0.16) | 1.000 | 3142.00 | 55.45% |
| AnswerTruth:PhrasingIndirect | -0.09 | (-0.29, 0.13) | 1.000 | 2701.00 | 80.08% |
estimate_means(model, at = c("Answer", "Phrasing")) |>
plot(show_data = "none")
RT
# Prior: plot(seq(-10, 10, length.out=100), dstudent_t(seq(-10, 10, length.out=100), 1, 0, 3), "l")
model <- brms::brm(RT ~ Answer * Phrasing + (1|Participant) + (1|Item),
data = df, seed=3, refresh=0,
prior = set_prior("student_t(1, 0, 3)", class = "b"))
> Running MCMC with 4 parallel chains...
>
> Chain 1 finished in 5.1 seconds.
> Chain 4 finished in 5.1 seconds.
> Chain 2 finished in 5.3 seconds.
> Chain 3 finished in 5.7 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 5.3 seconds.
> Total execution time: 5.9 seconds.
show_parameters(model)
Fixed effects

| Parameter | Median | 95% CI | Rhat | ESS | pd |
|---|---|---|---|---|---|
| (Intercept) | 3.90 | (3.48, 4.34) | 1.004 | 472.00 | 100%*** |
| AnswerTruth | 0.04 | (-0.11, 0.19) | 1.000 | 2748.00 | 68.20% |
| PhrasingIndirect | 0.36 | (0.21, 0.51) | 1.000 | 2673.00 | 100%*** |
| AnswerTruth:PhrasingIndirect | 0.06 | (-0.15, 0.27) | 1.001 | 2308.00 | 72.52% |
estimate_means(model, at = c("Answer", "Phrasing")) |>
plot(show_data = "none")
Heart Rate
# Prior: plot(seq(-50, 50, length.out=100), dstudent_t(seq(-50, 50, length.out=100), 1, 0, 8), "l")
model <- brms::brm(HeartRate ~ Answer * Phrasing + (1|Participant) + (1|Item),
data = df, seed=3, refresh=0,
prior = set_prior("student_t(1, 0, 8)", class = "b"))
> Running MCMC with 4 parallel chains...
>
> Chain 4 finished in 9.7 seconds.
> Chain 1 finished in 10.9 seconds.
> Chain 2 finished in 12.4 seconds.
> Chain 3 finished in 14.3 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 11.8 seconds.
> Total execution time: 14.4 seconds.
show_parameters(model)
Fixed effects

| Parameter | Median | 95% CI | Rhat | ESS | pd |
|---|---|---|---|---|---|
| (Intercept) | 84.79 | (80.87, 89.20) | 1.013 | 329.00 | 100%*** |
| AnswerTruth | -0.85 | (-1.72, 0.02) | 1.001 | 3942.00 | 97.10%* |
| PhrasingIndirect | 0.34 | (-0.54, 1.25) | 1.001 | 3949.00 | 77.75% |
| AnswerTruth:PhrasingIndirect | -0.74 | (-2.03, 0.49) | 1.001 | 3325.00 | 89.45% |
estimate_means(model, at = c("Answer", "Phrasing")) |>
plot(show_data = "none")
Theory of Mind / Empathy
Theory of Mind Score
Confidence
results <- list()
for (var in c("ToM")) {
model <- brms::brm(as.formula(paste0("Confidence ~ Answer / (Condition / ",
var,
") + (1|Participant) + (1|Item)")),
data = df, family = "beta", refresh=0, seed=3, iter=4000,
prior = set_prior("student_t(1, 0, 1)", class = "b"))
results[[var]] <- parameters::parameters(model, effects = "fixed", component = "conditional", test = c("pd"), keep = var) |>
as.data.frame() |>
separate(Parameter, sep = ":", into = c("Answer", "Condition", "Variable")) |>
mutate(
Answer = str_remove(Answer, "b_Answer"),
Condition = str_remove(Condition, "Condition")
)
}
> Running MCMC with 4 parallel chains...
>
> Chain 2 finished in 43.4 seconds.
> Chain 3 finished in 45.2 seconds.
> Chain 1 finished in 45.9 seconds.
> Chain 4 finished in 51.8 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 46.6 seconds.
> Total execution time: 52.0 seconds.
display(format_table(results$ToM, stars = TRUE))
| Answer | Condition | Variable | Median | 95% CI | pd | Rhat | ESS |
|---|---|---|---|---|---|---|---|
| Lie | Interrogation | ToM | 0.12 | [-0.05, 0.29] | 90.55% | 1.000 | 2476.00 |
| Truth | Interrogation | ToM | -0.12 | [-0.30, 0.04] | 92.03% | 1.001 | 2436.00 |
| Lie | Polygraph | ToM | -0.19 | [-0.36, -0.02] | 98.47%* | 1.001 | 2384.00 |
| Truth | Polygraph | ToM | 0.04 | [-0.14, 0.21] | 65.60% | 1.000 | 2485.00 |
RT
results <- list()
for (var in c("ToM")) {
model <- brms::brm(as.formula(paste0("RT ~ Answer / (Condition / ",
var,
") + (1|Participant) + (1|Item)")),
data = df, refresh=0, seed=3, iter=4000,
prior = set_prior("student_t(1, 0, 3)", class = "b"))
results[[var]] <- parameters::parameters(model, effects = "fixed", component = "conditional", test = c("pd"), keep = var) |>
as.data.frame() |>
separate(Parameter, sep = ":", into = c("Answer", "Condition", "Variable")) |>
mutate(
Answer = str_remove(Answer, "b_Answer"),
Condition = str_remove(Condition, "Condition")
)
}
> Running MCMC with 4 parallel chains...
>
> Chain 4 finished in 11.3 seconds.
> Chain 3 finished in 12.2 seconds.
> Chain 2 finished in 12.5 seconds.
> Chain 1 finished in 12.9 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 12.2 seconds.
> Total execution time: 13.0 seconds.
display(format_table(results$ToM, stars = TRUE))
| Answer | Condition | Variable | Median | 95% CI | pd | Rhat | ESS |
|---|---|---|---|---|---|---|---|
| Lie | Interrogation | ToM | 0.21 | [-0.20, 0.62] | 85.40% | 1.002 | 1309.00 |
| Truth | Interrogation | ToM | 0.22 | [-0.19, 0.62] | 85.52% | 1.002 | 1366.00 |
| Lie | Polygraph | ToM | 0.42 | [ 0.01, 0.83] | 97.67%* | 1.002 | 1355.00 |
| Truth | Polygraph | ToM | 0.15 | [-0.26, 0.57] | 76.56% | 1.003 | 1340.00 |
Heart Rate
results <- list()
for (var in c("ToM")) {
model <- brms::brm(as.formula(paste0("HeartRate ~ Answer / (Condition / ",
var,
") + (1|Participant) + (1|Item)")),
data = df, refresh=0, seed=3, iter=4000,
prior = set_prior("student_t(1, 0, 8)", class = "b"))
results[[var]] <- parameters::parameters(model, effects = "fixed", component = "conditional", test = c("pd"), keep = var) |>
as.data.frame() |>
separate(Parameter, sep = ":", into = c("Answer", "Condition", "Variable")) |>
mutate(
Answer = str_remove(Answer, "b_Answer"),
Condition = str_remove(Condition, "Condition")
)
}
> Running MCMC with 4 parallel chains...
>
> Chain 2 finished in 22.8 seconds.
> Chain 3 finished in 23.0 seconds.
> Chain 1 finished in 23.6 seconds.
> Chain 4 finished in 24.1 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 23.4 seconds.
> Total execution time: 24.2 seconds.
display(format_table(results$ToM, stars = TRUE))
| Answer | Condition | Variable | Median | 95% CI | pd | Rhat | ESS |
|---|---|---|---|---|---|---|---|
| Lie | Interrogation | ToM | 1.14 | [-2.37, 4.65] | 74.94% | 1.004 | 1145.00 |
| Truth | Interrogation | ToM | 0.71 | [-2.81, 4.15] | 66.41% | 1.003 | 1140.00 |
| Lie | Polygraph | ToM | 0.77 | [-2.78, 4.22] | 66.80% | 1.004 | 1143.00 |
| Truth | Polygraph | ToM | 0.42 | [-3.13, 3.88] | 59.51% | 1.004 | 1150.00 |
Correlation with LIE Scale
dfsub <- df |>
select(Participant,
starts_with("LIE_"),
starts_with("ToM")) |>
group_by(Participant) |>
summarise_all(mean)
correlation(select(dfsub, starts_with("LIE_")), select(dfsub, starts_with("ToM")), bayesian=TRUE)
> # Correlation Matrix (pearson-method)
>
> Parameter1 | Parameter2 | rho | 95% CI | pd | % in ROPE | Prior | BF
> ---------------------------------------------------------------------------------------------------
> LIE_Ability | ToM | 0.05 | [-0.28, 0.40] | 60.12% | 40.77% | Beta (3 +- 3) | 0.442
> LIE_Frequency | ToM | -0.06 | [-0.39, 0.27] | 63.42% | 40.35% | Beta (3 +- 3) | 0.452
> LIE_Negativity | ToM | -0.06 | [-0.41, 0.28] | 64.25% | 39.45% | Beta (3 +- 3) | 0.453
> LIE_Contextuality | ToM | -0.06 | [-0.40, 0.27] | 64.10% | 39.65% | Beta (3 +- 3) | 0.454
>
> Observations: 26
Yoni Task
- Confidence: effect in total, cognitive and physical.
- RT: effect in cognitive only.
- Heart rate: no effect.
Confidence
results <- data.frame()
for (var in c("YONI_Total", "YONI_Cognitive", "YONI_Affective", "YONI_Physical")) {
model <- glmmTMB(as.formula(paste0("Confidence ~ Answer / (Condition / ",
var,
") + (1|Participant) + (1|Item)")),
data = df,
family = beta_family())
results <- parameters::parameters(model, effects = "fixed", keep=var) |>
mutate(Parameter = str_remove(Parameter, "Answer|Condition")) |>
separate(Parameter, sep=":", into = c("Answer", "Condition", "Variable")) |>
data_relocate(select="Variable", before=1) |>
rbind(results)
}
display(results, stars=TRUE)
Fixed Effects

| Variable | Answer | Condition | Coefficient | SE | 95% CI | z | p |
|---|---|---|---|---|---|---|---|
| YONI_Physical | Lie | ConditionInterrogation | -0.09 | 0.03 | (-0.15, -0.03) | -2.97 | 0.003** |
| YONI_Physical | Truth | ConditionInterrogation | 0.04 | 0.03 | (-0.02, 0.10) | 1.23 | 0.220 |
| YONI_Physical | Lie | ConditionPolygraph | -0.13 | 0.03 | (-0.19, -0.07) | -4.17 | < .001*** |
| YONI_Physical | Truth | ConditionPolygraph | 0.12 | 0.03 | (0.06, 0.18) | 3.77 | < .001*** |
| YONI_Affective | Lie | ConditionInterrogation | 0.09 | 0.02 | (0.05, 0.14) | 4.26 | < .001*** |
| YONI_Affective | Truth | ConditionInterrogation | -0.07 | 0.02 | (-0.12, -0.03) | -3.42 | < .001*** |
| YONI_Affective | Lie | ConditionPolygraph | 0.06 | 0.02 | (0.02, 0.11) | 2.99 | 0.003** |
| YONI_Affective | Truth | ConditionPolygraph | -0.05 | 0.02 | (-0.09, -8.53e-03) | -2.35 | 0.019* |
| YONI_Cognitive | Lie | ConditionInterrogation | 3.52e-03 | 0.03 | (-0.05, 0.05) | 0.14 | 0.893 |
| YONI_Cognitive | Truth | ConditionInterrogation | -0.01 | 0.03 | (-0.06, 0.04) | -0.48 | 0.632 |
| YONI_Cognitive | Lie | ConditionPolygraph | -0.10 | 0.03 | (-0.15, -0.05) | -3.78 | < .001*** |
| YONI_Cognitive | Truth | ConditionPolygraph | 0.03 | 0.03 | (-0.02, 0.08) | 1.09 | 0.278 |
| YONI_Total | Lie | ConditionInterrogation | 0.01 | 0.01 | (-6.26e-03, 0.04) | 1.38 | 0.169 |
| YONI_Total | Truth | ConditionInterrogation | -0.02 | 0.01 | (-0.04, 3.24e-03) | -1.65 | 0.099 |
| YONI_Total | Lie | ConditionPolygraph | -0.01 | 0.01 | (-0.03, 6.22e-03) | -1.36 | 0.173 |
| YONI_Total | Truth | ConditionPolygraph | 4.92e-03 | 0.01 | (-0.02, 0.03) | 0.46 | 0.642 |
RT
results <- data.frame()
for (var in c("YONI_Total", "YONI_Cognitive", "YONI_Affective", "YONI_Physical")) {
model <- glmmTMB(as.formula(paste0("RT ~ Answer / (Condition / ",
var,
") + (1|Participant) + (1|Item)")),
data = df)
results <- parameters::parameters(model, effects = "fixed", keep=var) |>
mutate(Parameter = str_remove(Parameter, "Answer|Condition")) |>
separate(Parameter, sep=":", into = c("Answer", "Condition", "Variable")) |>
data_relocate(select="Variable", before=1) |>
rbind(results)
}
display(results, stars=TRUE)
Fixed Effects

| Variable | Answer | Condition | Coefficient | SE | 95% CI | z | p |
|---|---|---|---|---|---|---|---|
| YONI_Physical | Lie | ConditionInterrogation | -0.02 | 0.07 | (-0.16, 0.12) | -0.25 | 0.804 |
| YONI_Physical | Truth | ConditionInterrogation | -0.02 | 0.07 | (-0.16, 0.12) | -0.29 | 0.773 |
| YONI_Physical | Lie | ConditionPolygraph | 0.05 | 0.07 | (-0.10, 0.19) | 0.62 | 0.537 |
| YONI_Physical | Truth | ConditionPolygraph | -0.05 | 0.07 | (-0.19, 0.10) | -0.64 | 0.519 |
| YONI_Affective | Lie | ConditionInterrogation | 0.04 | 0.05 | (-0.06, 0.14) | 0.78 | 0.435 |
| YONI_Affective | Truth | ConditionInterrogation | 0.03 | 0.05 | (-0.07, 0.13) | 0.60 | 0.546 |
| YONI_Affective | Lie | ConditionPolygraph | 0.06 | 0.05 | (-0.04, 0.16) | 1.24 | 0.216 |
| YONI_Affective | Truth | ConditionPolygraph | 0.02 | 0.05 | (-0.08, 0.12) | 0.35 | 0.726 |
| YONI_Cognitive | Lie | ConditionInterrogation | 0.09 | 0.06 | (-0.02, 0.20) | 1.54 | 0.124 |
| YONI_Cognitive | Truth | ConditionInterrogation | 0.09 | 0.06 | (-0.02, 0.21) | 1.62 | 0.106 |
| YONI_Cognitive | Lie | ConditionPolygraph | 0.15 | 0.06 | (0.04, 0.27) | 2.64 | 0.008** |
| YONI_Cognitive | Truth | ConditionPolygraph | 0.08 | 0.06 | (-0.03, 0.19) | 1.37 | 0.172 |
| YONI_Total | Lie | ConditionInterrogation | 0.02 | 0.02 | (-0.03, 0.07) | 0.88 | 0.380 |
| YONI_Total | Truth | ConditionInterrogation | 0.02 | 0.02 | (-0.03, 0.07) | 0.81 | 0.418 |
| YONI_Total | Lie | ConditionPolygraph | 0.04 | 0.02 | (-3.48e-03, 0.09) | 1.82 | 0.069 |
| YONI_Total | Truth | ConditionPolygraph | 0.01 | 0.02 | (-0.04, 0.06) | 0.47 | 0.638 |
Heart Rate
results <- data.frame()
for (var in c("YONI_Total", "YONI_Cognitive", "YONI_Affective", "YONI_Physical")) {
model <- glmmTMB(as.formula(paste0("HeartRate ~ Answer / (Condition / ",
var,
") + (1|Participant) + (1|Item)")),
data = df)
results <- parameters::parameters(model, effects = "fixed", keep=var) |>
mutate(Parameter = str_remove(Parameter, "Answer|Condition")) |>
separate(Parameter, sep=":", into = c("Answer", "Condition", "Variable")) |>
data_relocate(select="Variable", before=1) |>
rbind(results)
}
display(results, stars=TRUE)
Fixed Effects

| Variable | Answer | Condition | Coefficient | SE | 95% CI | z | p |
|---|---|---|---|---|---|---|---|
| YONI_Physical | Lie | ConditionInterrogation | 0.71 | 0.76 | (-0.78, 2.21) | 0.93 | 0.350 |
| YONI_Physical | Truth | ConditionInterrogation | 0.38 | 0.76 | (-1.11, 1.88) | 0.50 | 0.616 |
| YONI_Physical | Lie | ConditionPolygraph | 0.35 | 0.76 | (-1.14, 1.85) | 0.46 | 0.644 |
| YONI_Physical | Truth | ConditionPolygraph | 0.38 | 0.76 | (-1.12, 1.88) | 0.50 | 0.619 |
| YONI_Affective | Lie | ConditionInterrogation | -0.03 | 0.51 | (-1.04, 0.98) | -0.06 | 0.952 |
| YONI_Affective | Truth | ConditionInterrogation | -0.09 | 0.51 | (-1.10, 0.92) | -0.18 | 0.861 |
| YONI_Affective | Lie | ConditionPolygraph | -0.11 | 0.51 | (-1.12, 0.90) | -0.21 | 0.830 |
| YONI_Affective | Truth | ConditionPolygraph | -0.11 | 0.51 | (-1.12, 0.90) | -0.21 | 0.831 |
| YONI_Cognitive | Lie | ConditionInterrogation | 0.30 | 0.61 | (-0.89, 1.50) | 0.50 | 0.619 |
| YONI_Cognitive | Truth | ConditionInterrogation | 0.19 | 0.61 | (-1.01, 1.38) | 0.31 | 0.757 |
| YONI_Cognitive | Lie | ConditionPolygraph | 0.30 | 0.61 | (-0.89, 1.50) | 0.49 | 0.621 |
| YONI_Cognitive | Truth | ConditionPolygraph | 0.14 | 0.61 | (-1.05, 1.34) | 0.23 | 0.816 |
| YONI_Total | Lie | ConditionInterrogation | 0.11 | 0.25 | (-0.37, 0.60) | 0.47 | 0.640 |
| YONI_Total | Truth | ConditionInterrogation | 0.05 | 0.25 | (-0.43, 0.53) | 0.20 | 0.842 |
| YONI_Total | Lie | ConditionPolygraph | 0.06 | 0.25 | (-0.42, 0.54) | 0.24 | 0.809 |
| YONI_Total | Truth | ConditionPolygraph | 0.04 | 0.25 | (-0.44, 0.52) | 0.15 | 0.882 |
Correlation with LIE Scale
dfsub <- df |>
select(Participant,
starts_with("LIE_"),
starts_with("YONI_")) |>
group_by(Participant) |>
summarise_all(mean)
correlation(select(dfsub, starts_with("LIE_")), select(dfsub, starts_with("YONI_")), p_adjust = "none")
> # Correlation Matrix (pearson-method)
>
> Parameter1 | Parameter2 | r | 95% CI | t(24) | p
> ------------------------------------------------------------------------------
> LIE_Ability | YONI_Affective | -6.05e-03 | [-0.39, 0.38] | -0.03 | 0.977
> LIE_Ability | YONI_Cognitive | 0.12 | [-0.28, 0.49] | 0.62 | 0.544
> LIE_Ability | YONI_Physical | -0.23 | [-0.57, 0.17] | -1.16 | 0.259
> LIE_Ability | YONI_Total | -0.03 | [-0.41, 0.36] | -0.15 | 0.881
> LIE_Frequency | YONI_Affective | -0.15 | [-0.51, 0.25] | -0.73 | 0.471
> LIE_Frequency | YONI_Cognitive | 3.86e-03 | [-0.38, 0.39] | 0.02 | 0.985
> LIE_Frequency | YONI_Physical | -0.10 | [-0.47, 0.30] | -0.49 | 0.626
> LIE_Frequency | YONI_Total | -0.10 | [-0.47, 0.30] | -0.51 | 0.616
> LIE_Negativity | YONI_Affective | -0.07 | [-0.45, 0.33] | -0.35 | 0.732
> LIE_Negativity | YONI_Cognitive | -0.12 | [-0.48, 0.28] | -0.58 | 0.569
> LIE_Negativity | YONI_Physical | -0.08 | [-0.45, 0.32] | -0.39 | 0.703
> LIE_Negativity | YONI_Total | -0.11 | [-0.48, 0.29] | -0.53 | 0.601
> LIE_Contextuality | YONI_Affective | -0.19 | [-0.53, 0.22] | -0.92 | 0.365
> LIE_Contextuality | YONI_Cognitive | -0.09 | [-0.46, 0.31] | -0.43 | 0.670
> LIE_Contextuality | YONI_Physical | 0.05 | [-0.34, 0.43] | 0.24 | 0.812
> LIE_Contextuality | YONI_Total | -0.11 | [-0.47, 0.29] | -0.53 | 0.603
>
> p-value adjustment method: none
> Observations: 26
BES
- Confidence: effect in all.
- RT: no effect.
- Heart rate: effect in all.
Confidence
results <- data.frame()
for (var in c("BES_Total", "BES_Cognitive", "BES_Affective")) {
model <- glmmTMB(as.formula(paste0("Confidence ~ Answer / (Condition / ",
var,
") + (1|Participant) + (1|Item)")),
data = df,
family = beta_family())
results <- parameters::parameters(model, effects = "fixed", keep=var) |>
mutate(Parameter = str_remove(Parameter, "Answer|Condition")) |>
separate(Parameter, sep=":", into = c("Answer", "Condition", "Variable")) |>
data_relocate(select="Variable", before=1) |>
rbind(results)
}
display(results, stars=TRUE)
Fixed Effects

| Variable | Answer | Condition | Coefficient | SE | 95% CI | z | p |
|---|---|---|---|---|---|---|---|
| BES_Affective | Lie | ConditionInterrogation | 0.03 | 0.01 | (3.77e-03, 0.05) | 2.25 | 0.024* |
| BES_Affective | Truth | ConditionInterrogation | -0.03 | 0.01 | (-0.05, -5.27e-03) | -2.38 | 0.017* |
| BES_Affective | Lie | ConditionPolygraph | -6.98e-03 | 0.01 | (-0.03, 0.02) | -0.56 | 0.573 |
| BES_Affective | Truth | ConditionPolygraph | -2.41e-03 | 0.01 | (-0.03, 0.02) | -0.19 | 0.847 |
| BES_Cognitive | Lie | ConditionInterrogation | -0.02 | 0.02 | (-0.05, 0.02) | -1.01 | 0.314 |
| BES_Cognitive | Truth | ConditionInterrogation | 0.03 | 0.02 | (-9.20e-03, 0.06) | 1.44 | 0.150 |
| BES_Cognitive | Lie | ConditionPolygraph | -0.03 | 0.02 | (-0.07, 2.74e-03) | -1.80 | 0.071 |
| BES_Cognitive | Truth | ConditionPolygraph | 0.02 | 0.02 | (-0.02, 0.05) | 1.09 | 0.275 |
| BES_Total | Lie | ConditionInterrogation | 0.01 | 9.31e-03 | (-6.74e-03, 0.03) | 1.24 | 0.217 |
| BES_Total | Truth | ConditionInterrogation | -9.97e-03 | 9.16e-03 | (-0.03, 7.97e-03) | -1.09 | 0.276 |
| BES_Total | Lie | ConditionPolygraph | -0.01 | 8.92e-03 | (-0.03, 6.32e-03) | -1.25 | 0.211 |
| BES_Total | Truth | ConditionPolygraph | 3.57e-03 | 9.06e-03 | (-0.01, 0.02) | 0.39 | 0.694 |
RT
results <- data.frame()
for (var in c("BES_Total", "BES_Cognitive", "BES_Affective")) {
model <- glmmTMB(as.formula(paste0("RT ~ Answer / (Condition / ",
var,
") + (1|Participant) + (1|Item)")),
data = df)
results <- parameters::parameters(model, effects = "fixed", keep=var) |>
mutate(Parameter = str_remove(Parameter, "Answer|Condition")) |>
separate(Parameter, sep=":", into = c("Answer", "Condition", "Variable")) |>
data_relocate(select="Variable", before=1) |>
rbind(results)
}
display(results, stars=TRUE)
Fixed Effects

| Variable | Answer | Condition | Coefficient | SE | 95% CI | z | p |
|---|---|---|---|---|---|---|---|
| BES_Affective | Lie | ConditionInterrogation | -0.02 | 0.03 | (-0.08, 0.05) | -0.51 | 0.611 |
| BES_Affective | Truth | ConditionInterrogation | -0.02 | 0.03 | (-0.08, 0.04) | -0.57 | 0.570 |
| BES_Affective | Lie | ConditionPolygraph | -4.22e-03 | 0.03 | (-0.06, 0.06) | -0.14 | 0.892 |
| BES_Affective | Truth | ConditionPolygraph | -0.04 | 0.03 | (-0.10, 0.02) | -1.26 | 0.209 |
| BES_Cognitive | Lie | ConditionInterrogation | -0.05 | 0.05 | (-0.14, 0.04) | -1.03 | 0.304 |
| BES_Cognitive | Truth | ConditionInterrogation | -0.02 | 0.05 | (-0.11, 0.07) | -0.51 | 0.612 |
| BES_Cognitive | Lie | ConditionPolygraph | 0.02 | 0.05 | (-0.07, 0.11) | 0.50 | 0.618 |
| BES_Cognitive | Truth | ConditionPolygraph | -3.71e-03 | 0.05 | (-0.09, 0.09) | -0.08 | 0.935 |
| BES_Total | Lie | ConditionInterrogation | -0.02 | 0.02 | (-0.06, 0.02) | -0.88 | 0.380 |
| BES_Total | Truth | ConditionInterrogation | -0.01 | 0.02 | (-0.06, 0.03) | -0.66 | 0.508 |
| BES_Total | Lie | ConditionPolygraph | 3.36e-03 | 0.02 | (-0.04, 0.05) | 0.15 | 0.881 |
| BES_Total | Truth | ConditionPolygraph | -0.02 | 0.02 | (-0.07, 0.02) | -0.95 | 0.342 |
Heart Rate
results <- data.frame()
for (var in c("BES_Total", "BES_Cognitive", "BES_Affective")) {
model <- glmmTMB(as.formula(paste0("HeartRate ~ Answer / (Condition / ",
var,
") + (1|Participant) + (1|Item)")),
data = df)
results <- parameters::parameters(model, effects = "fixed", keep=var) |>
mutate(Parameter = str_remove(Parameter, "Answer|Condition")) |>
separate(Parameter, sep=":", into = c("Answer", "Condition", "Variable")) |>
data_relocate(select="Variable", before=1) |>
rbind(results)
}
display(results, stars=TRUE)
Fixed Effects

| Variable | Answer | Condition | Coefficient | SE | 95% CI | z | p |
|---|---|---|---|---|---|---|---|
| BES_Affective | Lie | ConditionInterrogation | 0.77 | 0.30 | (0.19, 1.35) | 2.60 | 0.009** |
| BES_Affective | Truth | ConditionInterrogation | 0.72 | 0.30 | (0.14, 1.30) | 2.43 | 0.015* |
| BES_Affective | Lie | ConditionPolygraph | 0.42 | 0.30 | (-0.16, 1.00) | 1.42 | 0.155 |
| BES_Affective | Truth | ConditionPolygraph | 0.47 | 0.30 | (-0.11, 1.05) | 1.58 | 0.115 |
| BES_Cognitive | Lie | ConditionInterrogation | 1.16 | 0.36 | (0.46, 1.86) | 3.27 | 0.001** |
| BES_Cognitive | Truth | ConditionInterrogation | 1.20 | 0.36 | (0.50, 1.90) | 3.37 | < .001*** |
| BES_Cognitive | Lie | ConditionPolygraph | 1.13 | 0.36 | (0.43, 1.83) | 3.18 | 0.001** |
| BES_Cognitive | Truth | ConditionPolygraph | 1.11 | 0.36 | (0.42, 1.81) | 3.13 | 0.002** |
| BES_Total | Lie | ConditionInterrogation | 0.72 | 0.19 | (0.35, 1.08) | 3.81 | < .001*** |
| BES_Total | Truth | ConditionInterrogation | 0.70 | 0.19 | (0.33, 1.07) | 3.74 | < .001*** |
| BES_Total | Lie | ConditionPolygraph | 0.53 | 0.19 | (0.16, 0.90) | 2.83 | 0.005** |
| BES_Total | Truth | ConditionPolygraph | 0.55 | 0.19 | (0.18, 0.92) | 2.93 | 0.003** |
Correlation with LIE Scale
dfsub <- df |>
select(Participant,
starts_with("LIE_"),
starts_with("BES_")) |>
group_by(Participant) |>
summarise_all(mean)
correlation(select(dfsub, starts_with("LIE_")), select(dfsub, starts_with("BES_")), p_adjust = "none")
> # Correlation Matrix (pearson-method)
>
> Parameter1 | Parameter2 | r | 95% CI | t(24) | p
> -------------------------------------------------------------------------
> LIE_Ability | BES_Cognitive | 0.07 | [-0.33, 0.44] | 0.32 | 0.752
> LIE_Ability | BES_Affective | -0.02 | [-0.40, 0.37] | -0.10 | 0.922
> LIE_Ability | BES_Total | 0.02 | [-0.37, 0.40] | 0.10 | 0.923
> LIE_Frequency | BES_Cognitive | -0.26 | [-0.59, 0.14] | -1.31 | 0.204
> LIE_Frequency | BES_Affective | -0.19 | [-0.54, 0.21] | -0.96 | 0.347
> LIE_Frequency | BES_Total | -0.27 | [-0.60, 0.13] | -1.39 | 0.176
> LIE_Negativity | BES_Cognitive | 0.11 | [-0.29, 0.48] | 0.56 | 0.581
> LIE_Negativity | BES_Affective | 0.23 | [-0.17, 0.57] | 1.15 | 0.260
> LIE_Negativity | BES_Total | 0.22 | [-0.18, 0.56] | 1.13 | 0.270
> LIE_Contextuality | BES_Cognitive | 0.14 | [-0.26, 0.50] | 0.71 | 0.483
> LIE_Contextuality | BES_Affective | 0.20 | [-0.20, 0.54] | 1.00 | 0.329
> LIE_Contextuality | BES_Total | 0.22 | [-0.18, 0.56] | 1.10 | 0.283
>
> p-value adjustment method: none
> Observations: 26
Interoception
Interoception Scores
Confidence
results <- list()
for (var in c("Intero_Meta", "Intero_Listening", "Intero_Focus", "Intero_Regulation")) {
model <- brms::brm(as.formula(paste0(
"Confidence ~ Answer / (Condition / ",
var,
") + (1|Participant) + (1|Item)"
)),
data = df, family = "beta", refresh = 0, seed=3, iter = 4000,
prior = set_prior("student_t(1, 0, 1)", class = "b")
)
results[[var]] <- parameters::parameters(model, effects = "fixed", component = "conditional", test = c("pd"), keep = var) |>
as.data.frame() |>
separate(Parameter, sep = ":", into = c("Answer", "Condition", "Variable")) |>
mutate(
Answer = str_remove(Answer, "b_Answer"),
Condition = str_remove(Condition, "Condition")
)
}
> Running MCMC with 4 parallel chains...
>
> Chain 2 finished in 42.0 seconds.
> Chain 4 finished in 43.0 seconds.
> Chain 1 finished in 43.3 seconds.
> Chain 3 finished in 46.3 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 43.7 seconds.
> Total execution time: 46.4 seconds.
>
> Running MCMC with 4 parallel chains...
>
> Chain 1 finished in 42.4 seconds.
> Chain 4 finished in 42.8 seconds.
> Chain 2 finished in 43.2 seconds.
> Chain 3 finished in 45.7 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 43.5 seconds.
> Total execution time: 45.8 seconds.
>
> Running MCMC with 4 parallel chains...
>
> Chain 2 finished in 43.9 seconds.
> Chain 1 finished in 44.1 seconds.
> Chain 4 finished in 44.6 seconds.
> Chain 3 finished in 45.0 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 44.4 seconds.
> Total execution time: 45.1 seconds.
>
> Running MCMC with 4 parallel chains...
>
> Chain 1 finished in 43.0 seconds.
> Chain 2 finished in 43.1 seconds.
> Chain 3 finished in 46.0 seconds.
> Chain 4 finished in 54.1 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 46.6 seconds.
> Total execution time: 54.2 seconds.
display(format_table(results$Intero_Meta, stars = TRUE))
| Answer | Condition | Variable | Median | 95% CI | pd | Rhat | ESS |
|---|---|---|---|---|---|---|---|
| Lie | Interrogation | Intero_Meta | 0.02 | [-0.15, 0.18] | 58.49% | 1.001 | 2450.00 |
| Truth | Interrogation | Intero_Meta | 0.10 | [-0.07, 0.26] | 88.80% | 1.002 | 2516.00 |
| Lie | Polygraph | Intero_Meta | 0.20 | [ 0.03, 0.35] | 98.98%* | 1.001 | 2550.00 |
| Truth | Polygraph | Intero_Meta | -0.06 | [-0.23, 0.09] | 78.96% | 1.001 | 2424.00 |
display(format_table(results$Intero_Listening, stars = TRUE))
| Answer | Condition | Variable | Median | 95% CI | pd | Rhat | ESS |
|---|---|---|---|---|---|---|---|
| Lie | Interrogation | Intero_Listening | 0.16 | [ 0.01, 0.32] | 98.04%* | 1.002 | 2872.00 |
| Truth | Interrogation | Intero_Listening | -0.04 | [-0.19, 0.12] | 66.91% | 1.003 | 2740.00 |
| Lie | Polygraph | Intero_Listening | 0.43 | [ 0.27, 0.59] | 100%*** | 1.002 | 2927.00 |
| Truth | Polygraph | Intero_Listening | -0.07 | [-0.23, 0.08] | 82.93% | 1.002 | 2666.00 |
display(format_table(results$Intero_Focus, stars = TRUE))
| Answer | Condition | Variable | Median | 95% CI | pd | Rhat | ESS |
|---|---|---|---|---|---|---|---|
| Lie | Interrogation | Intero_Focus | -0.09 | [-0.26, 0.09] | 83.45% | 1.000 | 1868.00 |
| Truth | Interrogation | Intero_Focus | 0.15 | [-0.02, 0.32] | 95.76% | 1.000 | 1961.00 |
| Lie | Polygraph | Intero_Focus | 0.02 | [-0.16, 0.19] | 57.91% | 1.000 | 1933.00 |
| Truth | Polygraph | Intero_Focus | 0.17 | [-0.01, 0.34] | 97.16%* | 1.000 | 1987.00 |
display(format_table(results$Intero_Regulation, stars = TRUE))
| Answer | Condition | Variable | Median | 95% CI | pd | Rhat | ESS |
|---|---|---|---|---|---|---|---|
| Lie | Interrogation | Intero_Regulation | 0.18 | [ 0.00, 0.36] | 97.42%* | 1.000 | 2621.00 |
| Truth | Interrogation | Intero_Regulation | 0.08 | [-0.10, 0.26] | 80.76% | 1.000 | 2719.00 |
| Lie | Polygraph | Intero_Regulation | 0.32 | [ 0.14, 0.51] | 99.99%*** | 1.001 | 2643.00 |
| Truth | Polygraph | Intero_Regulation | -0.17 | [-0.36, 0.01] | 97.16%* | 1.000 | 2712.00 |
RT
results <- list()
for (var in c("Intero_Meta", "Intero_Listening", "Intero_Focus", "Intero_Regulation")) {
model <- brms::brm(as.formula(paste0(
"RT ~ Answer / (Condition / ",
var,
") + (1|Participant) + (1|Item)"
)),
data = df, refresh = 0, seed=3, iter = 4000,
prior = set_prior("student_t(1, 0, 3)", class = "b")
)
results[[var]] <- parameters::parameters(model, effects = "fixed", component = "conditional", test = c("pd"), keep = var) |>
as.data.frame() |>
separate(Parameter, sep = ":", into = c("Answer", "Condition", "Variable")) |>
mutate(
Answer = str_remove(Answer, "b_Answer"),
Condition = str_remove(Condition, "Condition")
)
}
> Running MCMC with 4 parallel chains...
>
> Chain 1 finished in 11.0 seconds.
> Chain 2 finished in 11.8 seconds.
> Chain 3 finished in 11.7 seconds.
> Chain 4 finished in 12.0 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 11.6 seconds.
> Total execution time: 12.1 seconds.
>
> Running MCMC with 4 parallel chains...
>
> Chain 2 finished in 10.9 seconds.
> Chain 4 finished in 11.0 seconds.
> Chain 3 finished in 11.3 seconds.
> Chain 1 finished in 11.8 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 11.3 seconds.
> Total execution time: 11.9 seconds.
>
> Running MCMC with 4 parallel chains...
>
> Chain 1 finished in 11.7 seconds.
> Chain 3 finished in 11.7 seconds.
> Chain 4 finished in 11.9 seconds.
> Chain 2 finished in 12.8 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 12.0 seconds.
> Total execution time: 12.9 seconds.
>
> Running MCMC with 4 parallel chains...
>
> Chain 4 finished in 11.7 seconds.
> Chain 3 finished in 11.8 seconds.
> Chain 1 finished in 11.9 seconds.
> Chain 2 finished in 13.0 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 12.1 seconds.
> Total execution time: 13.1 seconds.
display(format_table(results$Intero_Meta, stars = TRUE))
| Answer | Condition | Variable | Median | 95% CI | pd | Rhat | ESS |
|---|---|---|---|---|---|---|---|
| Lie | Interrogation | Intero_Meta | -0.08 | [-0.45, 0.30] | 66.85% | 1.005 | 1131.00 |
| Truth | Interrogation | Intero_Meta | -0.14 | [-0.52, 0.24] | 77.51% | 1.006 | 984.00 |
| Lie | Polygraph | Intero_Meta | -0.56 | [-0.92, -0.16] | 99.62%** | 1.005 | 1034.00 |
| Truth | Polygraph | Intero_Meta | -0.51 | [-0.88, -0.11] | 99.29%** | 1.005 | 1080.00 |
display(format_table(results$Intero_Listening, stars = TRUE))
| Answer | Condition | Variable | Median | 95% CI | pd | Rhat | ESS |
|---|---|---|---|---|---|---|---|
| Lie | Interrogation | Intero_Listening | -0.21 | [-0.61, 0.18] | 84.82% | 1.002 | 1138.00 |
| Truth | Interrogation | Intero_Listening | -0.20 | [-0.61, 0.18] | 84.21% | 1.002 | 1166.00 |
| Lie | Polygraph | Intero_Listening | -0.42 | [-0.82, -0.03] | 98.19%* | 1.002 | 1198.00 |
| Truth | Polygraph | Intero_Listening | -0.36 | [-0.76, 0.03] | 96.49% | 1.002 | 1147.00 |
display(format_table(results$Intero_Focus, stars = TRUE))
| Answer | Condition | Variable | Median | 95% CI | pd | Rhat | ESS |
|---|---|---|---|---|---|---|---|
| Lie | Interrogation | Intero_Focus | -0.02 | [-0.46, 0.42] | 54.00% | 1.002 | 1069.00 |
| Truth | Interrogation | Intero_Focus | -0.08 | [-0.52, 0.35] | 63.95% | 1.002 | 1081.00 |
| Lie | Polygraph | Intero_Focus | -0.18 | [-0.62, 0.26] | 78.99% | 1.001 | 1090.00 |
| Truth | Polygraph | Intero_Focus | -0.05 | [-0.49, 0.39] | 59.98% | 1.002 | 1073.00 |
display(format_table(results$Intero_Regulation, stars = TRUE))
| Answer | Condition | Variable | Median | 95% CI | pd | Rhat | ESS |
|---|---|---|---|---|---|---|---|
| Lie | Interrogation | Intero_Regulation | 0.17 | [-0.35, 0.66] | 74.65% | 1.001 | 1365.00 |
| Truth | Interrogation | Intero_Regulation | 0.12 | [-0.39, 0.61] | 68.51% | 1.001 | 1381.00 |
| Lie | Polygraph | Intero_Regulation | 0.04 | [-0.46, 0.54] | 57.03% | 1.001 | 1378.00 |
| Truth | Polygraph | Intero_Regulation | 0.06 | [-0.45, 0.56] | 59.69% | 1.001 | 1370.00 |
Heart Rate
results <- list()
for (var in c("Intero_Meta", "Intero_Listening", "Intero_Focus", "Intero_Regulation")) {
model <- brms::brm(as.formula(paste0(
"HeartRate ~ Answer / (Condition / ",
var,
") + (1|Participant) + (1|Item)"
)),
data = df, refresh = 0, seed=3, iter = 4000,
prior = set_prior("student_t(1, 0, 8)", class = "b")
)
results[[var]] <- parameters::parameters(model, effects = "fixed", component = "conditional", test = c("pd"), keep = var) |>
as.data.frame() |>
separate(Parameter, sep = ":", into = c("Answer", "Condition", "Variable")) |>
mutate(
Answer = str_remove(Answer, "b_Answer"),
Condition = str_remove(Condition, "Condition")
)
}
> Running MCMC with 4 parallel chains...
>
> Chain 4 finished in 21.4 seconds.
> Chain 3 finished in 22.4 seconds.
> Chain 2 finished in 22.9 seconds.
> Chain 1 finished in 23.2 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 22.5 seconds.
> Total execution time: 23.3 seconds.
>
> Running MCMC with 4 parallel chains...
>
> Chain 3 finished in 21.0 seconds.
> Chain 4 finished in 22.4 seconds.
> Chain 1 finished in 22.5 seconds.
> Chain 2 finished in 22.5 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 22.1 seconds.
> Total execution time: 22.6 seconds.
>
> Running MCMC with 4 parallel chains...
>
> Chain 3 finished in 21.9 seconds.
> Chain 2 finished in 22.4 seconds.
> Chain 4 finished in 24.3 seconds.
> Chain 1 finished in 25.2 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 23.5 seconds.
> Total execution time: 25.4 seconds.
>
> Running MCMC with 4 parallel chains...
>
> Chain 2 finished in 21.4 seconds.
> Chain 4 finished in 22.2 seconds.
> Chain 3 finished in 22.6 seconds.
> Chain 1 finished in 23.6 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 22.4 seconds.
> Total execution time: 23.7 seconds.
display(format_table(results$Intero_Meta, stars = TRUE))
| Answer | Condition | Variable | Median | 95% CI | pd | Rhat | ESS |
|---|---|---|---|---|---|---|---|
| Lie | Interrogation | Intero_Meta | 0.24 | [-3.23, 3.63] | 55.43% | 1.004 | 1083.00 |
| Truth | Interrogation | Intero_Meta | 0.63 | [-2.84, 4.00] | 64.42% | 1.004 | 1065.00 |
| Lie | Polygraph | Intero_Meta | 1.26 | [-2.13, 4.64] | 77.36% | 1.004 | 1060.00 |
| Truth | Polygraph | Intero_Meta | 1.22 | [-2.26, 4.63] | 75.90% | 1.004 | 1087.00 |
display(format_table(results$Intero_Listening, stars = TRUE))
| Answer | Condition | Variable | Median | 95% CI | pd | Rhat | ESS |
|---|---|---|---|---|---|---|---|
| Lie | Interrogation | Intero_Listening | 1.79 | [-1.83, 5.30] | 83.95% | 1.001 | 1000.00 |
| Truth | Interrogation | Intero_Listening | 2.00 | [-1.64, 5.47] | 86.48% | 1.001 | 1020.00 |
| Lie | Polygraph | Intero_Listening | 1.37 | [-2.25, 4.84] | 78.27% | 1.001 | 1016.00 |
| Truth | Polygraph | Intero_Listening | 1.45 | [-2.15, 4.99] | 79.94% | 1.001 | 1032.00 |
display(format_table(results$Intero_Focus, stars = TRUE))
Answer | Condition | Variable | Median | 95% CI | pd | Rhat | ESS
------ | --------- | -------- | ------ | ------ | --- | ---- | ----
Lie | Interrogation | Intero_Focus | -2.92 | [-6.58, 0.89] | 94.03% | 1.000 | 1615.00
Truth | Interrogation | Intero_Focus | -2.77 | [-6.43, 1.11] | 92.76% | 1.000 | 1596.00
Lie | Polygraph | Intero_Focus | -1.43 | [-5.10, 2.50] | 77.92% | 1.000 | 1607.00
Truth | Polygraph | Intero_Focus | -1.53 | [-5.19, 2.31] | 79.36% | 1.000 | 1613.00
display(format_table(results$Intero_Regulation, stars = TRUE))
Answer | Condition | Variable | Median | 95% CI | pd | Rhat | ESS
------ | --------- | -------- | ------ | ------ | --- | ---- | ----
Lie | Interrogation | Intero_Regulation | -1.54 | [-5.20, 2.26] | 78.84% | 1.002 | 1408.00
Truth | Interrogation | Intero_Regulation | -1.41 | [-5.05, 2.40] | 77.00% | 1.002 | 1378.00
Lie | Polygraph | Intero_Regulation | 0.46 | [-3.14, 4.28] | 59.52% | 1.003 | 1367.00
Truth | Polygraph | Intero_Regulation | -0.10 | [-3.65, 3.71] | 52.36% | 1.002 | 1409.00
Correlation with LIE Scale
dfsub <- df |>
select(
Participant,
starts_with("LIE_"),
starts_with("Intero_")
) |>
group_by(Participant) |>
summarise_all(mean)
correlation(select(dfsub, starts_with("LIE_")), select(dfsub, starts_with("Intero_")), bayesian = TRUE)
> # Correlation Matrix (pearson-method)
>
> Parameter1 | Parameter2 | rho | 95% CI | pd | % in ROPE | Prior | BF
> --------------------------------------------------------------------------------------------------------------
> LIE_Ability | Intero_Meta | -0.15 | [-0.47, 0.19] | 79.38% | 31.65% | Beta (3 +- 3) | 0.613
> LIE_Ability | Intero_Listening | 0.06 | [-0.29, 0.39] | 61.12% | 39.45% | Beta (3 +- 3) | 0.445
> LIE_Ability | Intero_Focus | 0.50 | [ 0.21, 0.75] | 100%*** | 0% | Beta (3 +- 3) | 34.37***
> LIE_Ability | Intero_Regulation | 0.01 | [-0.32, 0.35] | 52.78% | 41.90% | Beta (3 +- 3) | 0.428
> LIE_Frequency | Intero_Meta | -0.22 | [-0.55, 0.13] | 87.95% | 21.57% | Beta (3 +- 3) | 0.902
> LIE_Frequency | Intero_Listening | 0.02 | [-0.32, 0.36] | 53.80% | 39.75% | Beta (3 +- 3) | 0.429
> LIE_Frequency | Intero_Focus | 0.24 | [-0.12, 0.54] | 90.12% | 19.10% | Beta (3 +- 3) | 1.01
> LIE_Frequency | Intero_Regulation | -0.10 | [-0.45, 0.22] | 69.90% | 36.93% | Beta (3 +- 3) | 0.504
> LIE_Negativity | Intero_Meta | 0.18 | [-0.15, 0.51] | 84.75% | 26.52% | Beta (3 +- 3) | 0.723
> LIE_Negativity | Intero_Listening | 0.12 | [-0.22, 0.46] | 75.62% | 33.17% | Beta (3 +- 3) | 0.536
> LIE_Negativity | Intero_Focus | -0.28 | [-0.60, 0.03] | 94.50% | 12.40% | Beta (3 +- 3) | 1.56
> LIE_Negativity | Intero_Regulation | 0.06 | [-0.31, 0.39] | 61.40% | 38.85% | Beta (3 +- 3) | 0.445
> LIE_Contextuality | Intero_Meta | -0.05 | [-0.40, 0.28] | 61.45% | 40.83% | Beta (3 +- 3) | 0.446
> LIE_Contextuality | Intero_Listening | 0.35 | [ 0.03, 0.64] | 97.52%* | 5.70% | Beta (3 +- 3) | 3.48*
> LIE_Contextuality | Intero_Focus | -0.02 | [-0.37, 0.32] | 53.33% | 42.25% | Beta (3 +- 3) | 0.428
> LIE_Contextuality | Intero_Regulation | -0.08 | [-0.41, 0.26] | 66.90% | 38.60% | Beta (3 +- 3) | 0.471
>
> Observations: 26
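As an optional drill-down (not part of the original analysis), any single cell of the matrix above can be re-estimated on its own with `correlation::cor_test()`, using the same Bayesian Pearson correlation; the pair names below are just one example taken from the matrix.

```r
# Hypothetical follow-up (not in the original analysis): re-estimate one pair
# from the matrix above to inspect its full Bayesian summary on its own.
correlation::cor_test(dfsub, "LIE_Ability", "Intero_Focus", bayesian = TRUE)
```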
Heartbeat Counting
- Confidence: effects of all three HCT indices (accuracy, confidence, awareness).
- RT: effect of HCT confidence only.
- Heart rate: effects of HCT accuracy and awareness (see the formula sketch below).
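All models in this and the following sections use R's nested-formula shorthand `Outcome ~ Answer / (Condition / Predictor)`, which expands into `Answer + Answer:Condition + Answer:Condition:Predictor`, i.e. one simple slope of the predictor within each Answer by Condition cell. That is why every table reports four slopes per index. A minimal sketch on toy data (variable names are purely illustrative, not the study data) makes the expansion visible:

```r
# Minimal sketch on toy data: how the nested formula used throughout
# expands into per-cell simple slopes.
toy <- expand.grid(Answer = c("Lie", "Truth"),
                   Condition = c("Interrogation", "Polygraph"))
toy$x <- rnorm(nrow(toy))  # stand-in for an HCT / MAIA / interoception score

# Term labels show the expansion: "Answer", "Answer:Condition", "Answer:Condition:x"
attr(terms(~ Answer / (Condition / x), data = toy), "term.labels")
```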
Confidence
results <- data.frame()
for (var in c("HCT_Accuracy", "HCT_Confidence", "HCT_Awareness")) {
model <- glmmTMB(as.formula(paste0(
"Confidence ~ Answer / (Condition / ",
var,
") + (1|Participant) + (1|Item)"
)),
data = df,
family = beta_family()
)
results <- parameters::parameters(model, effects = "fixed", keep = var) |>
mutate(Parameter = str_remove(Parameter, "Answer|Condition")) |>
separate(Parameter, sep = ":", into = c("Answer", "Condition", "Variable")) |>
data_relocate(select = "Variable", before = 1) |>
rbind(results)
}
display(results, stars = TRUE)
Fixed Effects
Variable | Answer | Condition | Coefficient | SE | 95% CI | z | p
-------- | ------ | --------- | ----------- | --- | ------ | --- | ---
HCT_Awareness | Lie | ConditionInterrogation | 0.18 | 0.13 | (-0.08, 0.44) | 1.35 | 0.178
HCT_Awareness | Truth | ConditionInterrogation | -1.64e-03 | 0.13 | (-0.26, 0.25) | -0.01 | 0.990
HCT_Awareness | Lie | ConditionPolygraph | -0.34 | 0.13 | (-0.60, -0.09) | -2.64 | 0.008**
HCT_Awareness | Truth | ConditionPolygraph | 0.02 | 0.13 | (-0.24, 0.27) | 0.13 | 0.893
HCT_Confidence | Lie | ConditionInterrogation | 0.34 | 0.29 | (-0.23, 0.91) | 1.17 | 0.240
HCT_Confidence | Truth | ConditionInterrogation | 0.62 | 0.29 | (0.05, 1.18) | 2.15 | 0.032*
HCT_Confidence | Lie | ConditionPolygraph | 0.95 | 0.29 | (0.39, 1.51) | 3.33 | < .001***
HCT_Confidence | Truth | ConditionPolygraph | 0.01 | 0.28 | (-0.54, 0.57) | 0.04 | 0.965
HCT_Accuracy | Lie | ConditionInterrogation | 0.38 | 0.39 | (-0.37, 1.14) | 0.99 | 0.322
HCT_Accuracy | Truth | ConditionInterrogation | -0.08 | 0.38 | (-0.83, 0.67) | -0.21 | 0.834
HCT_Accuracy | Lie | ConditionPolygraph | 1.16 | 0.39 | (0.41, 1.92) | 3.01 | 0.003**
HCT_Accuracy | Truth | ConditionPolygraph | -0.89 | 0.38 | (-1.64, -0.14) | -2.33 | 0.020*
RT
results <- data.frame()
for (var in c("HCT_Accuracy", "HCT_Confidence", "HCT_Awareness")) {
model <- glmmTMB(as.formula(paste0(
"RT ~ Answer / (Condition / ",
var,
") + (1|Participant) + (1|Item)"
)),
data = df
)
results <- parameters::parameters(model, effects = "fixed", keep = var) |>
mutate(Parameter = str_remove(Parameter, "Answer|Condition")) |>
separate(Parameter, sep = ":", into = c("Answer", "Condition", "Variable")) |>
data_relocate(select = "Variable", before = 1) |>
rbind(results)
}
display(results, stars = TRUE)
Fixed Effects
Variable | Answer | Condition | Coefficient | SE | 95% CI | z | p
-------- | ------ | --------- | ----------- | --- | ------ | --- | ---
HCT_Awareness | Lie | ConditionInterrogation | 0.21 | 0.32 | (-0.43, 0.84) | 0.64 | 0.521
HCT_Awareness | Truth | ConditionInterrogation | 0.18 | 0.32 | (-0.46, 0.81) | 0.55 | 0.583
HCT_Awareness | Lie | ConditionPolygraph | 0.64 | 0.32 | (0.01, 1.28) | 2.00 | 0.046*
HCT_Awareness | Truth | ConditionPolygraph | 0.35 | 0.32 | (-0.28, 0.98) | 1.08 | 0.280
HCT_Confidence | Lie | ConditionInterrogation | -0.77 | 0.72 | (-2.18, 0.63) | -1.08 | 0.280
HCT_Confidence | Truth | ConditionInterrogation | -1.17 | 0.72 | (-2.58, 0.23) | -1.64 | 0.102
HCT_Confidence | Lie | ConditionPolygraph | -1.43 | 0.72 | (-2.84, -0.03) | -2.00 | 0.046*
HCT_Confidence | Truth | ConditionPolygraph | -1.22 | 0.72 | (-2.62, 0.19) | -1.70 | 0.090
HCT_Accuracy | Lie | ConditionInterrogation | 0.85 | 0.99 | (-1.08, 2.78) | 0.86 | 0.388
HCT_Accuracy | Truth | ConditionInterrogation | 1.22 | 0.99 | (-0.71, 3.15) | 1.24 | 0.216
HCT_Accuracy | Lie | ConditionPolygraph | 0.49 | 0.99 | (-1.44, 2.43) | 0.50 | 0.617
HCT_Accuracy | Truth | ConditionPolygraph | 1.46 | 0.99 | (-0.47, 3.39) | 1.48 | 0.139
Heart Rate
results <- data.frame()
for (var in c("HCT_Accuracy", "HCT_Confidence", "HCT_Awareness")) {
model <- glmmTMB(as.formula(paste0(
"HeartRate ~ Answer / (Condition / ",
var,
") + (1|Participant) + (1|Item)"
)),
data = df
)
results <- parameters::parameters(model, effects = "fixed", keep = var) |>
mutate(Parameter = str_remove(Parameter, "Answer|Condition")) |>
separate(Parameter, sep = ":", into = c("Answer", "Condition", "Variable")) |>
data_relocate(select = "Variable", before = 1) |>
rbind(results)
}
display(results, stars = TRUE)
Fixed Effects
Variable | Answer | Condition | Coefficient | SE | 95% CI | z | p
-------- | ------ | --------- | ----------- | --- | ------ | --- | ---
HCT_Awareness | Lie | ConditionInterrogation | -6.07 | 3.06 | (-12.06, -0.08) | -1.99 | 0.047*
HCT_Awareness | Truth | ConditionInterrogation | -5.84 | 3.06 | (-11.83, 0.15) | -1.91 | 0.056
HCT_Awareness | Lie | ConditionPolygraph | -4.54 | 3.06 | (-10.53, 1.45) | -1.49 | 0.137
HCT_Awareness | Truth | ConditionPolygraph | -4.32 | 3.06 | (-10.31, 1.67) | -1.41 | 0.157
HCT_Confidence | Lie | ConditionInterrogation | 1.49 | 7.57 | (-13.35, 16.32) | 0.20 | 0.844
HCT_Confidence | Truth | ConditionInterrogation | 2.95 | 7.57 | (-11.89, 17.78) | 0.39 | 0.697
HCT_Confidence | Lie | ConditionPolygraph | 2.86 | 7.57 | (-11.97, 17.69) | 0.38 | 0.706
HCT_Confidence | Truth | ConditionPolygraph | 2.88 | 7.57 | (-11.95, 17.72) | 0.38 | 0.703
HCT_Accuracy | Lie | ConditionInterrogation | -18.70 | 8.86 | (-36.05, -1.34) | -2.11 | 0.035*
HCT_Accuracy | Truth | ConditionInterrogation | -15.72 | 8.86 | (-33.08, 1.64) | -1.78 | 0.076
HCT_Accuracy | Lie | ConditionPolygraph | -6.09 | 8.86 | (-23.45, 11.26) | -0.69 | 0.491
HCT_Accuracy | Truth | ConditionPolygraph | -8.66 | 8.86 | (-26.02, 8.70) | -0.98 | 0.328
Correlation with LIE Scale
dfsub <- df |>
select(
Participant,
starts_with("LIE_"),
starts_with("HCT_")
) |>
group_by(Participant) |>
summarise_all(mean)
correlation(select(dfsub, starts_with("LIE_")), select(dfsub, starts_with("HCT_")), p_adjust = "none")
> # Correlation Matrix (pearson-method)
>
> Parameter1 | Parameter2 | r | 95% CI | t(24) | p
> --------------------------------------------------------------------------
> LIE_Ability | HCT_Confidence | 0.22 | [-0.19, 0.56] | 1.09 | 0.286
> LIE_Ability | HCT_Accuracy | 0.35 | [-0.04, 0.65] | 1.84 | 0.078
> LIE_Ability | HCT_Awareness | -0.09 | [-0.46, 0.31] | -0.43 | 0.674
> LIE_Frequency | HCT_Confidence | -0.07 | [-0.45, 0.33] | -0.35 | 0.732
> LIE_Frequency | HCT_Accuracy | 0.21 | [-0.19, 0.55] | 1.07 | 0.295
> LIE_Frequency | HCT_Awareness | -0.02 | [-0.40, 0.37] | -0.08 | 0.940
> LIE_Negativity | HCT_Confidence | 0.07 | [-0.33, 0.44] | 0.32 | 0.749
> LIE_Negativity | HCT_Accuracy | -0.30 | [-0.61, 0.10] | -1.53 | 0.140
> LIE_Negativity | HCT_Awareness | 0.02 | [-0.37, 0.40] | 0.09 | 0.928
> LIE_Contextuality | HCT_Confidence | 0.16 | [-0.25, 0.51] | 0.78 | 0.443
> LIE_Contextuality | HCT_Accuracy | 0.05 | [-0.34, 0.43] | 0.27 | 0.792
> LIE_Contextuality | HCT_Awareness | -0.25 | [-0.58, 0.15] | -1.29 | 0.211
>
> p-value adjustment method: none
> Observations: 26
MAIA
- Confidence: effects of the MAIA total score, noticing, not-distracting, body listening, not-worrying, attention regulation and self-regulation.
- RT: effects of the total score, noticing, body listening, attention regulation and self-regulation.
- Heart rate: effects of trusting and emotional awareness only (see the filtering sketch below).
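A small convenience sketch (not part of the original analysis, and assuming the column names returned by `parameters::parameters()`): once the loops below have filled the stacked `results` table, the bullet points above can be cross-checked by filtering it, for example keeping only the subscale slopes reaching p < .05.

```r
# Hypothetical check, to be run after a loop below has built `results`:
# list the MAIA subscale slopes with p < .05, per Answer x Condition cell.
results |>
  dplyr::filter(p < 0.05) |>
  dplyr::arrange(Variable, Answer, Condition) |>
  dplyr::select(Variable, Answer, Condition, Coefficient, p)
```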
Confidence
results <- data.frame()
for (var in c(
"MAIA_Noticing", "MAIA_NotDistracting", "MAIA_NotWorrying",
"MAIA_AttentionRegulation", "MAIA_EmotionalAwareness", "MAIA_SelfRegulation",
"MAIA_BodyListening", "MAIA_Trusting", "MAIA_Total"
)) {
model <- glmmTMB(as.formula(paste0(
"Confidence ~ Answer / (Condition / ",
var,
") + (1|Participant) + (1|Item)"
)),
data = df,
family = beta_family()
)
results <- parameters::parameters(model, effects = "fixed", keep = var) |>
mutate(Parameter = str_remove(Parameter, "Answer|Condition")) |>
separate(Parameter, sep = ":", into = c("Answer", "Condition", "Variable")) |>
data_relocate(select = "Variable", before = 1) |>
rbind(results)
}
display(results, stars = TRUE)
Fixed Effects
Variable | Answer | Condition | Coefficient | SE | 95% CI | z | p
-------- | ------ | --------- | ----------- | --- | ------ | --- | ---
MAIA_Total | Lie | ConditionInterrogation | 0.16 | 0.14 | (-0.12, 0.44) | 1.15 | 0.252
MAIA_Total | Truth | ConditionInterrogation | 0.15 | 0.14 | (-0.13, 0.43) | 1.06 | 0.287
MAIA_Total | Lie | ConditionPolygraph | 0.63 | 0.14 | (0.36, 0.91) | 4.56 | < .001***
MAIA_Total | Truth | ConditionPolygraph | -0.15 | 0.14 | (-0.42, 0.12) | -1.09 | 0.276
MAIA_Trusting | Lie | ConditionInterrogation | 5.30e-03 | 0.09 | (-0.17, 0.18) | 0.06 | 0.952
MAIA_Trusting | Truth | ConditionInterrogation | 0.16 | 0.09 | (-0.01, 0.34) | 1.81 | 0.070
MAIA_Trusting | Lie | ConditionPolygraph | 0.18 | 0.09 | (6.27e-03, 0.35) | 2.03 | 0.042*
MAIA_Trusting | Truth | ConditionPolygraph | 4.60e-03 | 0.09 | (-0.17, 0.18) | 0.05 | 0.958
MAIA_BodyListening | Lie | ConditionInterrogation | 0.22 | 0.07 | (0.09, 0.35) | 3.31 | < .001***
MAIA_BodyListening | Truth | ConditionInterrogation | -0.10 | 0.07 | (-0.23, 0.03) | -1.52 | 0.128
MAIA_BodyListening | Lie | ConditionPolygraph | 0.43 | 0.07 | (0.30, 0.56) | 6.53 | < .001***
MAIA_BodyListening | Truth | ConditionPolygraph | -0.10 | 0.07 | (-0.23, 0.03) | -1.57 | 0.116
MAIA_SelfRegulation | Lie | ConditionInterrogation | 0.30 | 0.10 | (0.11, 0.49) | 3.13 | 0.002**
MAIA_SelfRegulation | Truth | ConditionInterrogation | -0.01 | 0.10 | (-0.20, 0.18) | -0.14 | 0.887
MAIA_SelfRegulation | Lie | ConditionPolygraph | 0.31 | 0.10 | (0.12, 0.49) | 3.19 | 0.001**
MAIA_SelfRegulation | Truth | ConditionPolygraph | -0.25 | 0.10 | (-0.44, -0.06) | -2.63 | 0.009**
MAIA_EmotionalAwareness | Lie | ConditionInterrogation | -0.24 | 0.10 | (-0.44, -0.04) | -2.40 | 0.016*
MAIA_EmotionalAwareness | Truth | ConditionInterrogation | 0.28 | 0.10 | (0.08, 0.48) | 2.78 | 0.005**
MAIA_EmotionalAwareness | Lie | ConditionPolygraph | -0.21 | 0.10 | (-0.40, -7.58e-03) | -2.04 | 0.042*
MAIA_EmotionalAwareness | Truth | ConditionPolygraph | 0.08 | 0.10 | (-0.12, 0.28) | 0.81 | 0.421
MAIA_AttentionRegulation | Lie | ConditionInterrogation | 2.35e-03 | 0.08 | (-0.16, 0.17) | 0.03 | 0.977
MAIA_AttentionRegulation | Truth | ConditionInterrogation | 0.10 | 0.08 | (-0.06, 0.27) | 1.25 | 0.210
MAIA_AttentionRegulation | Lie | ConditionPolygraph | 0.22 | 0.08 | (0.06, 0.38) | 2.64 | 0.008**
MAIA_AttentionRegulation | Truth | ConditionPolygraph | -0.05 | 0.08 | (-0.21, 0.11) | -0.65 | 0.519
MAIA_NotWorrying | Lie | ConditionInterrogation | 0.25 | 0.10 | (0.06, 0.44) | 2.62 | 0.009**
MAIA_NotWorrying | Truth | ConditionInterrogation | 0.03 | 0.10 | (-0.15, 0.22) | 0.35 | 0.726
MAIA_NotWorrying | Lie | ConditionPolygraph | 0.43 | 0.10 | (0.25, 0.62) | 4.55 | < .001***
MAIA_NotWorrying | Truth | ConditionPolygraph | -0.17 | 0.09 | (-0.36, 0.02) | -1.79 | 0.074
MAIA_NotDistracting | Lie | ConditionInterrogation | -0.15 | 0.08 | (-0.31, 6.05e-03) | -1.88 | 0.059
MAIA_NotDistracting | Truth | ConditionInterrogation | 0.23 | 0.08 | (0.08, 0.39) | 2.90 | 0.004**
MAIA_NotDistracting | Lie | ConditionPolygraph | -0.05 | 0.08 | (-0.21, 0.10) | -0.65 | 0.514
MAIA_NotDistracting | Truth | ConditionPolygraph | 0.25 | 0.08 | (0.10, 0.41) | 3.15 | 0.002**
MAIA_Noticing | Lie | ConditionInterrogation | 0.09 | 0.11 | (-0.12, 0.30) | 0.86 | 0.391
MAIA_Noticing | Truth | ConditionInterrogation | -0.22 | 0.10 | (-0.42, -0.01) | -2.06 | 0.040*
MAIA_Noticing | Lie | ConditionPolygraph | 0.47 | 0.10 | (0.27, 0.67) | 4.61 | < .001***
MAIA_Noticing | Truth | ConditionPolygraph | -0.34 | 0.10 | (-0.54, -0.13) | -3.23 | 0.001**
RT
results <- data.frame()
for (var in c(
"MAIA_Noticing", "MAIA_NotDistracting", "MAIA_NotWorrying",
"MAIA_AttentionRegulation", "MAIA_EmotionalAwareness", "MAIA_SelfRegulation",
"MAIA_BodyListening", "MAIA_Trusting", "MAIA_Total"
)) {
model <- glmmTMB(as.formula(paste0(
"RT ~ Answer / (Condition / ",
var,
") + (1|Participant) + (1|Item)"
)),
data = df
)
results <- parameters::parameters(model, effects = "fixed", keep = var) |>
mutate(Parameter = str_remove(Parameter, "Answer|Condition")) |>
separate(Parameter, sep = ":", into = c("Answer", "Condition", "Variable")) |>
data_relocate(select = "Variable", before = 1) |>
rbind(results)
}
display(results, stars = TRUE)
Fixed Effects
Variable | Answer | Condition | Coefficient | SE | 95% CI | z | p
-------- | ------ | --------- | ----------- | --- | ------ | --- | ---
MAIA_Total | Lie | ConditionInterrogation | -0.16 | 0.34 | (-0.83, 0.51) | -0.46 | 0.648
MAIA_Total | Truth | ConditionInterrogation | -0.26 | 0.34 | (-0.93, 0.41) | -0.75 | 0.452
MAIA_Total | Lie | ConditionPolygraph | -0.91 | 0.34 | (-1.58, -0.24) | -2.66 | 0.008**
MAIA_Total | Truth | ConditionPolygraph | -0.76 | 0.34 | (-1.43, -0.09) | -2.22 | 0.026*
MAIA_Trusting | Lie | ConditionInterrogation | -0.10 | 0.22 | (-0.53, 0.34) | -0.43 | 0.667
MAIA_Trusting | Truth | ConditionInterrogation | -0.08 | 0.22 | (-0.52, 0.36) | -0.37 | 0.713
MAIA_Trusting | Lie | ConditionPolygraph | 0.11 | 0.22 | (-0.33, 0.54) | 0.47 | 0.635
MAIA_Trusting | Truth | ConditionPolygraph | 0.04 | 0.22 | (-0.40, 0.48) | 0.18 | 0.857
MAIA_BodyListening | Lie | ConditionInterrogation | -0.19 | 0.15 | (-0.48, 0.11) | -1.24 | 0.216
MAIA_BodyListening | Truth | ConditionInterrogation | -0.21 | 0.15 | (-0.50, 0.09) | -1.39 | 0.163
MAIA_BodyListening | Lie | ConditionPolygraph | -0.50 | 0.15 | (-0.80, -0.21) | -3.33 | < .001***
MAIA_BodyListening | Truth | ConditionPolygraph | -0.47 | 0.15 | (-0.76, -0.17) | -3.11 | 0.002**
MAIA_SelfRegulation | Lie | ConditionInterrogation | 0.15 | 0.22 | (-0.29, 0.58) | 0.65 | 0.516
MAIA_SelfRegulation | Truth | ConditionInterrogation | 0.04 | 0.22 | (-0.40, 0.48) | 0.17 | 0.864
MAIA_SelfRegulation | Lie | ConditionPolygraph | -0.73 | 0.22 | (-1.17, -0.30) | -3.29 | 0.001**
MAIA_SelfRegulation | Truth | ConditionPolygraph | -0.72 | 0.22 | (-1.16, -0.28) | -3.22 | 0.001**
MAIA_EmotionalAwareness | Lie | ConditionInterrogation | -5.22e-03 | 0.24 | (-0.48, 0.47) | -0.02 | 0.983
MAIA_EmotionalAwareness | Truth | ConditionInterrogation | 0.06 | 0.24 | (-0.42, 0.53) | 0.23 | 0.816
MAIA_EmotionalAwareness | Lie | ConditionPolygraph | 0.09 | 0.24 | (-0.39, 0.57) | 0.37 | 0.711
MAIA_EmotionalAwareness | Truth | ConditionPolygraph | 0.11 | 0.24 | (-0.37, 0.58) | 0.44 | 0.658
MAIA_AttentionRegulation | Lie | ConditionInterrogation | -0.07 | 0.19 | (-0.44, 0.30) | -0.38 | 0.706
MAIA_AttentionRegulation | Truth | ConditionInterrogation | -0.14 | 0.19 | (-0.51, 0.23) | -0.72 | 0.471
MAIA_AttentionRegulation | Lie | ConditionPolygraph | -0.68 | 0.19 | (-1.05, -0.30) | -3.57 | < .001***
MAIA_AttentionRegulation | Truth | ConditionPolygraph | -0.58 | 0.19 | (-0.96, -0.21) | -3.09 | 0.002**
MAIA_NotWorrying | Lie | ConditionInterrogation | 0.07 | 0.24 | (-0.40, 0.55) | 0.31 | 0.757
MAIA_NotWorrying | Truth | ConditionInterrogation | 1.96e-03 | 0.24 | (-0.47, 0.47) | 8.16e-03 | 0.993
MAIA_NotWorrying | Lie | ConditionPolygraph | -0.09 | 0.24 | (-0.56, 0.38) | -0.39 | 0.700
MAIA_NotWorrying | Truth | ConditionPolygraph | -0.13 | 0.24 | (-0.60, 0.34) | -0.55 | 0.581
MAIA_NotDistracting | Lie | ConditionInterrogation | -0.05 | 0.20 | (-0.43, 0.34) | -0.23 | 0.820
MAIA_NotDistracting | Truth | ConditionInterrogation | -0.10 | 0.20 | (-0.49, 0.29) | -0.48 | 0.630
MAIA_NotDistracting | Lie | ConditionPolygraph | -0.07 | 0.20 | (-0.46, 0.32) | -0.37 | 0.710
MAIA_NotDistracting | Truth | ConditionPolygraph | 0.03 | 0.20 | (-0.36, 0.42) | 0.13 | 0.893
MAIA_Noticing | Lie | ConditionInterrogation | -0.13 | 0.25 | (-0.62, 0.35) | -0.53 | 0.594
MAIA_Noticing | Truth | ConditionInterrogation | -0.18 | 0.25 | (-0.67, 0.31) | -0.71 | 0.475
MAIA_Noticing | Lie | ConditionPolygraph | -0.57 | 0.25 | (-1.05, -0.08) | -2.28 | 0.023*
MAIA_Noticing | Truth | ConditionPolygraph | -0.25 | 0.25 | (-0.74, 0.23) | -1.02 | 0.309
Heart Rate
results <- data.frame()
for (var in c(
"MAIA_Noticing", "MAIA_NotDistracting", "MAIA_NotWorrying",
"MAIA_AttentionRegulation", "MAIA_EmotionalAwareness", "MAIA_SelfRegulation",
"MAIA_BodyListening", "MAIA_Trusting", "MAIA_Total"
)) {
model <- glmmTMB(as.formula(paste0(
"HeartRate ~ Answer / (Condition / ",
var,
") + (1|Participant) + (1|Item)"
)),
data = df
)
results <- parameters::parameters(model, effects = "fixed", keep = var) |>
mutate(Parameter = str_remove(Parameter, "Answer|Condition")) |>
separate(Parameter, sep = ":", into = c("Answer", "Condition", "Variable")) |>
data_relocate(select = "Variable", before = 1) |>
rbind(results)
}
display(results, stars = TRUE)
Fixed Effects
Variable | Answer | Condition | Coefficient | SE | 95% CI | z | p
-------- | ------ | --------- | ----------- | --- | ------ | --- | ---
MAIA_Total | Lie | ConditionInterrogation | 2.94 | 3.78 | (-4.47, 10.36) | 0.78 | 0.436
MAIA_Total | Truth | ConditionInterrogation | 3.46 | 3.78 | (-3.96, 10.87) | 0.91 | 0.361
MAIA_Total | Lie | ConditionPolygraph | 4.53 | 3.78 | (-2.89, 11.94) | 1.20 | 0.231
MAIA_Total | Truth | ConditionPolygraph | 4.25 | 3.78 | (-3.16, 11.66) | 1.12 | 0.261
MAIA_Trusting | Lie | ConditionInterrogation | 3.95 | 2.07 | (-0.11, 8.00) | 1.91 | 0.057
MAIA_Trusting | Truth | ConditionInterrogation | 3.68 | 2.07 | (-0.38, 7.74) | 1.78 | 0.075
MAIA_Trusting | Lie | ConditionPolygraph | 3.77 | 2.07 | (-0.29, 7.83) | 1.82 | 0.069
MAIA_Trusting | Truth | ConditionPolygraph | 3.57 | 2.07 | (-0.49, 7.63) | 1.72 | 0.085
MAIA_BodyListening | Lie | ConditionInterrogation | 1.95 | 1.69 | (-1.36, 5.25) | 1.15 | 0.249
MAIA_BodyListening | Truth | ConditionInterrogation | 2.06 | 1.69 | (-1.24, 5.37) | 1.22 | 0.222
MAIA_BodyListening | Lie | ConditionPolygraph | 1.41 | 1.69 | (-1.89, 4.72) | 0.84 | 0.403
MAIA_BodyListening | Truth | ConditionPolygraph | 1.56 | 1.69 | (-1.75, 4.86) | 0.92 | 0.356
MAIA_SelfRegulation | Lie | ConditionInterrogation | -8.55e-03 | 2.53 | (-4.96, 4.94) | -3.39e-03 | 0.997
MAIA_SelfRegulation | Truth | ConditionInterrogation | 0.73 | 2.53 | (-4.22, 5.68) | 0.29 | 0.771
MAIA_SelfRegulation | Lie | ConditionPolygraph | 1.50 | 2.53 | (-3.45, 6.45) | 0.59 | 0.553
MAIA_SelfRegulation | Truth | ConditionPolygraph | 1.09 | 2.53 | (-3.86, 6.04) | 0.43 | 0.666
MAIA_EmotionalAwareness | Lie | ConditionInterrogation | 4.88 | 2.26 | (0.45, 9.31) | 2.16 | 0.031*
MAIA_EmotionalAwareness | Truth | ConditionInterrogation | 5.04 | 2.26 | (0.61, 9.46) | 2.23 | 0.026*
MAIA_EmotionalAwareness | Lie | ConditionPolygraph | 4.42 | 2.26 | (-0.01, 8.84) | 1.95 | 0.051
MAIA_EmotionalAwareness | Truth | ConditionPolygraph | 4.35 | 2.26 | (-0.08, 8.78) | 1.93 | 0.054
MAIA_AttentionRegulation | Lie | ConditionInterrogation | 0.12 | 2.03 | (-3.85, 4.09) | 0.06 | 0.954
MAIA_AttentionRegulation | Truth | ConditionInterrogation | 0.56 | 2.03 | (-3.41, 4.53) | 0.28 | 0.782
MAIA_AttentionRegulation | Lie | ConditionPolygraph | 1.56 | 2.03 | (-2.42, 5.53) | 0.77 | 0.443
MAIA_AttentionRegulation | Truth | ConditionPolygraph | 1.44 | 2.03 | (-2.53, 5.41) | 0.71 | 0.478
MAIA_NotWorrying | Lie | ConditionInterrogation | -1.77 | 2.46 | (-6.58, 3.05) | -0.72 | 0.472
MAIA_NotWorrying | Truth | ConditionInterrogation | -1.97 | 2.46 | (-6.78, 2.85) | -0.80 | 0.424
MAIA_NotWorrying | Lie | ConditionPolygraph | -0.32 | 2.46 | (-5.14, 4.50) | -0.13 | 0.896
MAIA_NotWorrying | Truth | ConditionPolygraph | -0.62 | 2.46 | (-5.44, 4.20) | -0.25 | 0.801
MAIA_NotDistracting | Lie | ConditionInterrogation | -2.94 | 2.20 | (-7.26, 1.38) | -1.33 | 0.182
MAIA_NotDistracting | Truth | ConditionInterrogation | -3.03 | 2.20 | (-7.36, 1.29) | -1.38 | 0.169
MAIA_NotDistracting | Lie | ConditionPolygraph | -1.80 | 2.20 | (-6.12, 2.52) | -0.82 | 0.413
MAIA_NotDistracting | Truth | ConditionPolygraph | -1.85 | 2.20 | (-6.17, 2.47) | -0.84 | 0.401
MAIA_Noticing | Lie | ConditionInterrogation | 0.98 | 2.62 | (-4.15, 6.11) | 0.37 | 0.708
MAIA_Noticing | Truth | ConditionInterrogation | 1.63 | 2.62 | (-3.49, 6.76) | 0.62 | 0.533
MAIA_Noticing | Lie | ConditionPolygraph | 1.67 | 2.62 | (-3.45, 6.80) | 0.64 | 0.522
MAIA_Noticing | Truth | ConditionPolygraph | 1.71 | 2.62 | (-3.42, 6.84) | 0.65 | 0.513
Correlation with LIE Scale
dfsub <- df |>
select(
Participant,
starts_with("LIE_"),
starts_with("MAIA_")
) |>
group_by(Participant) |>
summarise_all(mean)
correlation(select(dfsub, starts_with("LIE_")), select(dfsub, starts_with("MAIA_")), p_adjust = "none")
> # Correlation Matrix (pearson-method)
>
> Parameter1 | Parameter2 | r | 95% CI | t(24) | p
> ------------------------------------------------------------------------------------------
> LIE_Ability | MAIA_Noticing | -0.34 | [-0.64, 0.05] | -1.79 | 0.087
> LIE_Ability | MAIA_NotDistracting | 0.56 | [ 0.23, 0.78] | 3.35 | 0.003**
> LIE_Ability | MAIA_NotWorrying | -0.18 | [-0.53, 0.23] | -0.88 | 0.386
> LIE_Ability | MAIA_AttentionRegulation | -0.11 | [-0.48, 0.29] | -0.54 | 0.597
> LIE_Ability | MAIA_EmotionalAwareness | -0.29 | [-0.61, 0.11] | -1.47 | 0.153
> LIE_Ability | MAIA_SelfRegulation | 9.09e-03 | [-0.38, 0.40] | 0.04 | 0.965
> LIE_Ability | MAIA_BodyListening | -3.81e-03 | [-0.39, 0.38] | -0.02 | 0.985
> LIE_Ability | MAIA_Trusting | -0.17 | [-0.52, 0.23] | -0.84 | 0.407
> LIE_Ability | MAIA_Total | -0.08 | [-0.45, 0.32] | -0.38 | 0.704
> LIE_Frequency | MAIA_Noticing | -0.12 | [-0.49, 0.28] | -0.60 | 0.552
> LIE_Frequency | MAIA_NotDistracting | 0.29 | [-0.11, 0.61] | 1.50 | 0.147
> LIE_Frequency | MAIA_NotWorrying | -0.12 | [-0.48, 0.28] | -0.58 | 0.570
> LIE_Frequency | MAIA_AttentionRegulation | -0.24 | [-0.57, 0.16] | -1.22 | 0.236
> LIE_Frequency | MAIA_EmotionalAwareness | -0.22 | [-0.56, 0.19] | -1.08 | 0.289
> LIE_Frequency | MAIA_SelfRegulation | -0.35 | [-0.65, 0.05] | -1.80 | 0.084
> LIE_Frequency | MAIA_BodyListening | -0.03 | [-0.41, 0.36] | -0.16 | 0.877
> LIE_Frequency | MAIA_Trusting | -0.26 | [-0.59, 0.14] | -1.32 | 0.201
> LIE_Frequency | MAIA_Total | -0.20 | [-0.54, 0.21] | -0.98 | 0.335
> LIE_Negativity | MAIA_Noticing | 0.48 | [ 0.12, 0.73] | 2.69 | 0.013*
> LIE_Negativity | MAIA_NotDistracting | -0.32 | [-0.63, 0.08] | -1.65 | 0.112
> LIE_Negativity | MAIA_NotWorrying | 0.17 | [-0.23, 0.52] | 0.85 | 0.402
> LIE_Negativity | MAIA_AttentionRegulation | 0.19 | [-0.21, 0.54] | 0.96 | 0.345
> LIE_Negativity | MAIA_EmotionalAwareness | 0.17 | [-0.23, 0.52] | 0.84 | 0.410
> LIE_Negativity | MAIA_SelfRegulation | 0.36 | [-0.03, 0.66] | 1.91 | 0.069
> LIE_Negativity | MAIA_BodyListening | 0.28 | [-0.12, 0.60] | 1.41 | 0.173
> LIE_Negativity | MAIA_Trusting | 0.18 | [-0.22, 0.53] | 0.90 | 0.380
> LIE_Negativity | MAIA_Total | 0.30 | [-0.10, 0.61] | 1.51 | 0.143
> LIE_Contextuality | MAIA_Noticing | -0.06 | [-0.44, 0.33] | -0.30 | 0.766
> LIE_Contextuality | MAIA_NotDistracting | -0.01 | [-0.40, 0.38] | -0.05 | 0.957
> LIE_Contextuality | MAIA_NotWorrying | -0.21 | [-0.55, 0.19] | -1.05 | 0.303
> LIE_Contextuality | MAIA_AttentionRegulation | -0.08 | [-0.46, 0.31] | -0.42 | 0.681
> LIE_Contextuality | MAIA_EmotionalAwareness | 0.08 | [-0.31, 0.46] | 0.41 | 0.686
> LIE_Contextuality | MAIA_SelfRegulation | -0.07 | [-0.45, 0.32] | -0.36 | 0.721
> LIE_Contextuality | MAIA_BodyListening | 0.38 | [-0.01, 0.67] | 2.01 | 0.055
> LIE_Contextuality | MAIA_Trusting | 0.26 | [-0.14, 0.59] | 1.34 | 0.193
> LIE_Contextuality | MAIA_Total | 0.08 | [-0.31, 0.46] | 0.42 | 0.680
>
> p-value adjustment method: none
> Observations: 26
Deception Trait
Confidence
results <- list()
for (var in c("LIE_Ability", "LIE_Frequency", "LIE_Contextuality", "LIE_Negativity")) {
model <- brms::brm(as.formula(paste0(
"Confidence ~ Answer / (Condition / ",
var,
") + (1|Participant) + (1|Item)"
)),
data = df, family = "beta", refresh = 0, seed=3, iter = 4000,
prior = set_prior("student_t(1, 0, 1)", class = "b")
)
results[[var]] <- parameters::parameters(model, effects = "fixed", component = "conditional", test = c("pd"), keep = var) |>
as.data.frame() |>
separate(Parameter, sep = ":", into = c("Answer", "Condition", "Variable")) |>
mutate(
Answer = str_remove(Answer, "b_Answer"),
Condition = str_remove(Condition, "Condition")
)
}
> Running MCMC with 4 parallel chains...
>
> Chain 2 finished in 54.0 seconds.
> Chain 4 finished in 55.1 seconds.
> Chain 1 finished in 56.2 seconds.
> Chain 3 finished in 62.7 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 57.0 seconds.
> Total execution time: 62.8 seconds.
>
> Running MCMC with 4 parallel chains...
>
> Chain 4 finished in 55.2 seconds.
> Chain 2 finished in 55.7 seconds.
> Chain 1 finished in 56.8 seconds.
> Chain 3 finished in 57.2 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 56.2 seconds.
> Total execution time: 57.3 seconds.
>
> Running MCMC with 4 parallel chains...
>
> Chain 3 finished in 60.0 seconds.
> Chain 1 finished in 66.5 seconds.
> Chain 4 finished in 66.9 seconds.
> Chain 2 finished in 71.9 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 66.3 seconds.
> Total execution time: 72.0 seconds.
>
> Running MCMC with 4 parallel chains...
>
> Chain 4 finished in 52.6 seconds.
> Chain 2 finished in 55.8 seconds.
> Chain 3 finished in 59.0 seconds.
> Chain 1 finished in 63.4 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 57.7 seconds.
> Total execution time: 63.4 seconds.
display(format_table(results$LIE_Ability, stars = TRUE))
Answer | Condition | Variable | Median | 95% CI | pd | Rhat | ESS
------ | --------- | -------- | ------ | ------ | --- | ---- | ----
Lie | Interrogation | LIE_Ability | 0.02 | [-0.01, 0.05] | 85.32% | 1.001 | 2547.00
Truth | Interrogation | LIE_Ability | 0.04 | [ 0.01, 0.07] | 99.46%** | 1.001 | 2473.00
Lie | Polygraph | LIE_Ability | 3.29e-03 | [-0.03, 0.03] | 58.45% | 1.001 | 2489.00
Truth | Polygraph | LIE_Ability | 0.04 | [ 0.01, 0.07] | 99.16%** | 1.001 | 2567.00
display(format_table(results$LIE_Frequency, stars = TRUE))
Answer | Condition | Variable | Median | 95% CI | pd | Rhat | ESS
------ | --------- | -------- | ------ | ------ | --- | ---- | ----
Lie | Interrogation | LIE_Frequency | -0.03 | [-0.07, 0.00] | 95.81% | 1.001 | 2112.00
Truth | Interrogation | LIE_Frequency | 0.04 | [ 0.00, 0.07] | 97.34%* | 1.000 | 2088.00
Lie | Polygraph | LIE_Frequency | -0.03 | [-0.07, 0.01] | 93.75% | 1.000 | 2128.00
Truth | Polygraph | LIE_Frequency | 0.05 | [ 0.01, 0.08] | 99.39%** | 1.001 | 2182.00
display(format_table(results$LIE_Contextuality, stars = TRUE))
Answer | Condition | Variable | Median | 95% CI | pd | Rhat | ESS
------ | --------- | -------- | ------ | ------ | --- | ---- | ----
Lie | Interrogation | LIE_Contextuality | -0.01 | [-0.07, 0.05] | 64.98% | 1.005 | 2722.00
Truth | Interrogation | LIE_Contextuality | -0.02 | [-0.07, 0.04] | 70.43% | 1.003 | 2435.00
Lie | Polygraph | LIE_Contextuality | -0.03 | [-0.09, 0.02] | 87.80% | 1.004 | 2474.00
Truth | Polygraph | LIE_Contextuality | 0.01 | [-0.05, 0.07] | 64.03% | 1.004 | 2513.00
display(format_table(results$LIE_Negativity, stars = TRUE))
Answer | Condition | Variable | Median | 95% CI | pd | Rhat | ESS
------ | --------- | -------- | ------ | ------ | --- | ---- | ----
Lie | Interrogation | LIE_Negativity | 0.06 | [ 0.01, 0.11] | 98.32%* | 1.001 | 1837.00
Truth | Interrogation | LIE_Negativity | -0.09 | [-0.14, -0.04] | 99.89%** | 1.002 | 1879.00
Lie | Polygraph | LIE_Negativity | 0.07 | [ 0.02, 0.12] | 99.55%** | 1.001 | 1894.00
Truth | Polygraph | LIE_Negativity | -0.09 | [-0.14, -0.03] | 99.92%*** | 1.002 | 1887.00
RT
results <- list()
for (var in c("LIE_Ability", "LIE_Frequency", "LIE_Contextuality", "LIE_Negativity")) {
model <- brms::brm(as.formula(paste0(
"RT ~ Answer / (Condition / ",
var,
") + (1|Participant) + (1|Item)"
)),
data = df, refresh = 0, seed=3, iter = 4000,
prior = set_prior("student_t(1, 0, 3)", class = "b")
)
results[[var]] <- parameters::parameters(model, effects = "fixed", component = "conditional", test = c("pd"), keep = var) |>
as.data.frame() |>
separate(Parameter, sep = ":", into = c("Answer", "Condition", "Variable")) |>
mutate(
Answer = str_remove(Answer, "b_Answer"),
Condition = str_remove(Condition, "Condition")
)
}
> Running MCMC with 4 parallel chains...
>
> Chain 2 finished in 11.9 seconds.
> Chain 3 finished in 12.8 seconds.
> Chain 1 finished in 12.9 seconds.
> Chain 4 finished in 13.2 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 12.7 seconds.
> Total execution time: 13.3 seconds.
>
> Running MCMC with 4 parallel chains...
>
> Chain 1 finished in 11.7 seconds.
> Chain 4 finished in 12.1 seconds.
> Chain 2 finished in 12.3 seconds.
> Chain 3 finished in 12.9 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 12.2 seconds.
> Total execution time: 13.0 seconds.
>
> Running MCMC with 4 parallel chains...
>
> Chain 2 finished in 14.5 seconds.
> Chain 1 finished in 15.6 seconds.
> Chain 3 finished in 16.1 seconds.
> Chain 4 finished in 16.1 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 15.6 seconds.
> Total execution time: 16.2 seconds.
>
> Running MCMC with 4 parallel chains...
>
> Chain 2 finished in 11.5 seconds.
> Chain 1 finished in 11.7 seconds.
> Chain 4 finished in 11.7 seconds.
> Chain 3 finished in 12.7 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 11.9 seconds.
> Total execution time: 12.8 seconds.
display(format_table(results$LIE_Ability, stars = TRUE))
Answer | Condition | Variable | Median | 95% CI | pd | Rhat | ESS
------ | --------- | -------- | ------ | ------ | --- | ---- | ----
Lie | Interrogation | LIE_Ability | 0.02 | [-0.06, 0.10] | 71.46% | 1.002 | 973.00
Truth | Interrogation | LIE_Ability | 0.03 | [-0.05, 0.11] | 76.88% | 1.003 | 964.00
Lie | Polygraph | LIE_Ability | -8.32e-03 | [-0.09, 0.07] | 58.21% | 1.002 | 1005.00
Truth | Polygraph | LIE_Ability | -2.99e-03 | [-0.09, 0.07] | 52.90% | 1.003 | 934.00
display(format_table(results$LIE_Frequency, stars = TRUE))
Answer | Condition | Variable | Median | 95% CI | pd | Rhat | ESS
------ | --------- | -------- | ------ | ------ | --- | ---- | ----
Lie | Interrogation | LIE_Frequency | -7.08e-03 | [-0.09, 0.08] | 56.66% | 1.003 | 1037.00
Truth | Interrogation | LIE_Frequency | 0.02 | [-0.06, 0.10] | 68.26% | 1.003 | 1038.00
Lie | Polygraph | LIE_Frequency | 0.13 | [ 0.04, 0.21] | 99.75%** | 1.003 | 1026.00
Truth | Polygraph | LIE_Frequency | 0.16 | [ 0.08, 0.25] | 99.99%*** | 1.003 | 1039.00
display(format_table(results$LIE_Contextuality, stars = TRUE))
Answer | Condition | Variable | Median | 95% CI | pd | Rhat | ESS
------ | --------- | -------- | ------ | ------ | --- | ---- | ----
Lie | Interrogation | LIE_Contextuality | -0.09 | [-0.23, 0.05] | 88.80% | 1.001 | 1426.00
Truth | Interrogation | LIE_Contextuality | -0.10 | [-0.25, 0.04] | 91.81% | 1.002 | 1431.00
Lie | Polygraph | LIE_Contextuality | 0.11 | [-0.04, 0.25] | 92.84% | 1.001 | 1448.00
Truth | Polygraph | LIE_Contextuality | 0.06 | [-0.08, 0.20] | 80.06% | 1.001 | 1428.00
display(format_table(results$LIE_Negativity, stars = TRUE))
Answer | Condition | Variable | Median | 95% CI | pd | Rhat | ESS
------ | --------- | -------- | ------ | ------ | --- | ---- | ----
Lie | Interrogation | LIE_Negativity | -0.03 | [-0.15, 0.08] | 72.69% | 1.003 | 833.00
Truth | Interrogation | LIE_Negativity | -0.07 | [-0.18, 0.04] | 90.21% | 1.003 | 846.00
Lie | Polygraph | LIE_Negativity | -0.22 | [-0.33, -0.11] | 99.86%** | 1.004 | 813.00
Truth | Polygraph | LIE_Negativity | -0.23 | [-0.34, -0.12] | 99.90%** | 1.003 | 844.00
Heart Rate
results <- list()
for (var in c("LIE_Ability", "LIE_Frequency", "LIE_Contextuality", "LIE_Negativity")) {
model <- brms::brm(as.formula(paste0(
"HeartRate ~ Answer / (Condition / ",
var,
") + (1|Participant) + (1|Item)"
)),
data = df, refresh = 0, seed=3, iter = 4000,
prior = set_prior("student_t(1, 0, 8)", class = "b")
)
results[[var]] <- parameters::parameters(model, effects = "fixed", component = "conditional", test = c("pd"), keep = var) |>
as.data.frame() |>
separate(Parameter, sep = ":", into = c("Answer", "Condition", "Variable")) |>
mutate(
Answer = str_remove(Answer, "b_Answer"),
Condition = str_remove(Condition, "Condition")
)
}
> Running MCMC with 4 parallel chains...
>
> Chain 4 finished in 21.2 seconds.
> Chain 3 finished in 22.5 seconds.
> Chain 2 finished in 23.1 seconds.
> Chain 1 finished in 23.3 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 22.5 seconds.
> Total execution time: 23.4 seconds.
>
> Running MCMC with 4 parallel chains...
>
> Chain 1 finished in 21.3 seconds.
> Chain 4 finished in 22.1 seconds.
> Chain 2 finished in 22.2 seconds.
> Chain 3 finished in 23.0 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 22.1 seconds.
> Total execution time: 23.1 seconds.
>
> Running MCMC with 4 parallel chains...
>
> Chain 2 finished in 22.1 seconds.
> Chain 3 finished in 22.3 seconds.
> Chain 1 finished in 23.3 seconds.
> Chain 4 finished in 23.4 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 22.8 seconds.
> Total execution time: 23.5 seconds.
>
> Running MCMC with 4 parallel chains...
>
> Chain 1 finished in 23.7 seconds.
> Chain 2 finished in 23.8 seconds.
> Chain 3 finished in 24.5 seconds.
> Chain 4 finished in 24.6 seconds.
>
> All 4 chains finished successfully.
> Mean chain execution time: 24.1 seconds.
> Total execution time: 24.7 seconds.
display(format_table(results$LIE_Ability, stars = TRUE))
Answer | Condition | Variable | Median | 95% CI | pd | Rhat | ESS
------ | --------- | -------- | ------ | ------ | --- | ---- | ----
Lie | Interrogation | LIE_Ability | -0.76 | [-1.56, 0.00] | 97.50%* | 1.002 | 993.00
Truth | Interrogation | LIE_Ability | -0.62 | [-1.43, 0.14] | 94.74% | 1.002 | 1008.00
Lie | Polygraph | LIE_Ability | -0.37 | [-1.19, 0.38] | 83.31% | 1.002 | 1002.00
Truth | Polygraph | LIE_Ability | -0.49 | [-1.30, 0.27] | 89.66% | 1.002 | 1006.00
display(format_table(results$LIE_Frequency, stars = TRUE))
Answer | Condition | Variable | Median | 95% CI | pd | Rhat | ESS
------ | --------- | -------- | ------ | ------ | --- | ---- | ----
Lie | Interrogation | LIE_Frequency | -1.46 | [-2.35, -0.56] | 99.92%*** | 1.004 | 1207.00
Truth | Interrogation | LIE_Frequency | -1.37 | [-2.25, -0.47] | 99.78%** | 1.004 | 1187.00
Lie | Polygraph | LIE_Frequency | -1.04 | [-1.94, -0.16] | 98.86%* | 1.003 | 1217.00
Truth | Polygraph | LIE_Frequency | -1.10 | [-2.00, -0.21] | 99.22%** | 1.004 | 1196.00
display(format_table(results$LIE_Contextuality, stars = TRUE))
Answer | Condition | Variable | Median | 95% CI | pd | Rhat | ESS
------ | --------- | -------- | ------ | ------ | --- | ---- | ----
Lie | Interrogation | LIE_Contextuality | 1.07 | [-0.33, 2.52] | 93.39% | 1.002 | 836.00
Truth | Interrogation | LIE_Contextuality | 1.04 | [-0.36, 2.50] | 92.86% | 1.002 | 834.00
Lie | Polygraph | LIE_Contextuality | 0.66 | [-0.74, 2.12] | 81.92% | 1.002 | 832.00
Truth | Polygraph | LIE_Contextuality | 0.66 | [-0.74, 2.11] | 82.38% | 1.002 | 847.00
display(format_table(results$LIE_Negativity, stars = TRUE))
Answer | Condition | Variable | Median | 95% CI | pd | Rhat | ESS
------ | --------- | -------- | ------ | ------ | --- | ---- | ----
Lie | Interrogation | LIE_Negativity | 1.34 | [ 0.14, 2.54] | 98.52%* | 1.007 | 961.00
Truth | Interrogation | LIE_Negativity | 1.23 | [ 0.04, 2.41] | 97.91%* | 1.007 | 963.00
Lie | Polygraph | LIE_Negativity | 0.80 | [-0.39, 1.98] | 90.70% | 1.007 | 965.00
Truth | Polygraph | LIE_Negativity | 0.86 | [-0.34, 2.03] | 92.15% | 1.007 | 959.00
Figure
data <- df |>
  group_by(Participant, Answer, Condition) |>
  select(Confidence, RT, HeartRate, ToM, Intero_Regulation, Intero_Focus, Intero_Meta, Intero_Listening) |>
  # Participant-level means and Mean +/- SD/2 bounds (used for the vertical segments)
  summarise_all(.funs = list(Mean = function(x) mean(x, na.rm = TRUE),
                             low = function(x) (mean(x, na.rm=TRUE) - sd(x, na.rm=TRUE) / 2),
                             high = function(x) (mean(x, na.rm=TRUE) + sd(x, na.rm=TRUE) / 2))) |>
  # Nudge the x-position of the two conditions in opposite directions to limit overplotting
  mutate(ToM_Mean = ifelse(Condition == "Polygraph", ToM_Mean-0.01, ToM_Mean+0.01),
         Intero_Regulation_Mean = ifelse(Condition == "Polygraph", Intero_Regulation_Mean-0.01, Intero_Regulation_Mean+0.01),
         Intero_Focus_Mean = ifelse(Condition == "Polygraph", Intero_Focus_Mean-0.01, Intero_Focus_Mean+0.01),
         Intero_Meta_Mean = ifelse(Condition == "Polygraph", Intero_Meta_Mean-0.01, Intero_Meta_Mean+0.01),
         Intero_Listening_Mean = ifelse(Condition == "Polygraph", Intero_Listening_Mean-0.01, Intero_Listening_Mean+0.01))
model <- glmmTMB(Confidence ~ Answer / Condition / ToM + (1|Participant) + (1|Item),
data = df,
family = beta_family())
model <- brms::brm(Confidence ~ Answer / Condition / ToM + (1|Participant) + (1|Item),
data = df, family = "beta", seed=3, refresh=0,
prior = set_prior("student_t(1, 0, 1)", class = "b"))
link_data <- estimate_relation(model, at = c("Condition", "Answer", "ToM"), length = 30)
p1 <- ggplot(link_data, aes(x = ToM, y = Predicted)) +
geom_segment(data = data,
aes(x = ToM_Mean,
xend = ToM_Mean,
y = Confidence_low,
yend = Confidence_high,
color = Condition),
alpha=1/3) +
geom_point2(data = data,
aes(x = ToM_Mean,
y = Confidence_Mean,
color = Condition),
size=2) +
geom_ribbon(aes(ymin = CI_low, ymax = CI_high, fill = Condition), alpha = 1/3) +
geom_line(aes(color = Condition), size=1) +
geom_text(data=data.frame(ToM = -0.3, Predicted = 0.45, Condition = "Polygraph", Answer = "Lie"),
label = "***",
color = "#FF5722",
size = 10, show.legend = FALSE, fontface = "bold") +
labs(y = "Confidence",
x = "Theory of Mind") +
scale_y_continuous(labels = scales::percent) +
scale_color_manual(values = c("Polygraph" = "#FF5722", "Interrogation" = "#2196F3")) +
scale_fill_manual(values = c("Polygraph" = "#FF5722", "Interrogation" = "#2196F3")) +
facet_wrap(~Answer) +
theme_modern(axis.title.space = 10) +
theme(strip.text = element_text(size = 10),
strip.background = element_rect(fill = c("grey"), color = "white"))
p1
model <- brms::brm(Confidence ~ Answer / Condition / Intero_Meta + (1|Participant) + (1|Item),
data = df, family = "beta", seed=3, refresh=0,
prior = set_prior("student_t(1, 0, 1)", class = "b"))
link_data <- estimate_relation(model, at = c("Condition", "Answer", "Intero_Meta"), length = 30)
p2 <- ggplot(link_data, aes(x = Intero_Meta, y = Predicted)) +
geom_segment(data = data,
aes(x = Intero_Meta_Mean,
xend = Intero_Meta_Mean,
y = Confidence_low,
yend = Confidence_high,
color = Condition),
alpha=1/3) +
geom_point2(data = data,
aes(x = Intero_Meta_Mean,
y = Confidence_Mean,
color = Condition),
size=2) +
geom_ribbon(aes(ymin = CI_low, ymax = CI_high, fill = Condition), alpha = 1/3) +
geom_line(aes(color = Condition), size=1) +
geom_text(data=data.frame(Intero_Meta = -0.15, Predicted = 0.25, Condition = "Polygraph", Answer = "Lie"),
label = "***",
color = "#FF5722",
size = 10, show.legend = FALSE, fontface = "bold") +
labs(y = "Confidence",
x = expression(Interoception[" (.97 MAIA Attention regulation, .63 MAIA Self-regulation, .60 MAIA Emotional Awareness)"])) +
scale_y_continuous(labels = scales::percent) +
scale_color_manual(values = c("Polygraph" = "#FF5722", "Interrogation" = "#2196F3")) +
scale_fill_manual(values = c("Polygraph" = "#FF5722", "Interrogation" = "#2196F3")) +
facet_wrap(~Answer) +
theme_modern(axis.title.space = 10) +
theme(strip.text = element_text(size = 10),
strip.background = element_rect(fill = c("grey"), color = "white"))
p2
model <- brms::brm(Confidence ~ Answer / Condition / Intero_Listening + (1|Participant) + (1|Item),
data = df, family = "beta", seed=3, refresh=0,
prior = set_prior("student_t(1, 0, 1)", class = "b"))
link_data <- estimate_relation(model, at = c("Condition", "Answer", "Intero_Listening"), length = 30)
p3 <- ggplot(link_data, aes(x = Intero_Listening, y = Predicted)) +
geom_segment(data = data,
aes(x = Intero_Listening_Mean,
xend = Intero_Listening_Mean,
y = Confidence_low,
yend = Confidence_high,
color = Condition),
alpha=1/3) +
geom_point2(data = data,
aes(x = Intero_Listening_Mean,
y = Confidence_Mean,
color = Condition),
size=2) +
geom_ribbon(aes(ymin = CI_low, ymax = CI_high, fill = Condition), alpha = 1/3) +
geom_line(aes(color = Condition), size=1) +
geom_text(data=data.frame(Intero_Listening = 0.2, Predicted = 0.45, Condition = "Polygraph", Answer = "Lie"),
label = "***",
color = "#FF5722",
size = 10, show.legend = FALSE, fontface = "bold") +
labs(y = "Confidence",
x = expression(Interoception[" (.92 MAIA Body listening, -.60 HCT Awareness, .53 HCT Trusting)"])) +
scale_y_continuous(labels = scales::percent) +
scale_color_manual(values = c("Polygraph" = "#FF5722", "Interrogation" = "#2196F3")) +
scale_fill_manual(values = c("Polygraph" = "#FF5722", "Interrogation" = "#2196F3")) +
facet_wrap(~Answer) +
theme_modern(axis.title.space = 10) +
theme(strip.text = element_text(size = 10),
strip.background = element_rect(fill = c("grey"), color = "white"))
p3
model <- brms::brm(Confidence ~ Answer / Condition / Intero_Focus + (1|Participant) + (1|Item),
data = df, family = "beta", seed=3, refresh=0,
prior = set_prior("student_t(1, 0, 1)", class = "b"))
link_data <- estimate_relation(model, at = c("Condition", "Answer", "Intero_Focus"), length = 30)
p4 <- ggplot(link_data, aes(x = Intero_Focus, y = Predicted)) +
geom_segment(data = data,
aes(x = Intero_Focus_Mean,
xend = Intero_Focus_Mean,
y = Confidence_low,
yend = Confidence_high,
color = Condition),
alpha=1/3) +
geom_point2(data = data,
aes(x = Intero_Focus_Mean,
y = Confidence_Mean,
color = Condition),
size=2) +
geom_ribbon(aes(ymin = CI_low, ymax = CI_high, fill = Condition), alpha = 1/3) +
geom_line(aes(color = Condition), size=1) +
geom_text(data=data.frame(Intero_Focus = -0.2,
Predicted = c(0.55, 0.75),
Condition = c("Polygraph", "Interrogation"),
Answer = c("Truth", "Truth")),
label = c("*", "*"),
color = c("#2196F3", "#FF5722"),
size = 10, show.legend = FALSE, fontface = "bold") +
labs(y = "Confidence",
x = expression(Interoception[" (.87 MAIA Not-distracting, -.40 MAIA Emotional Awareness, .33 HCT Accuracy)"])) +
scale_y_continuous(labels = scales::percent) +
scale_color_manual(values = c("Polygraph" = "#FF5722", "Interrogation" = "#2196F3")) +
scale_fill_manual(values = c("Polygraph" = "#FF5722", "Interrogation" = "#2196F3")) +
facet_wrap(~Answer) +
theme_modern(axis.title.space = 10) +
theme(strip.text = element_text(size = 10),
strip.background = element_rect(fill = c("grey"), color = "white"))
p4
model <- brms::brm(Confidence ~ Answer / Condition / Intero_Regulation + (1|Participant) + (1|Item),
data = df, family = "beta", seed=3, refresh=0,
prior = set_prior("student_t(1, 0, 1)", class = "b"))
link_data <- estimate_relation(model, at = c("Condition", "Answer", "Intero_Regulation"), length = 30)
p5 <- ggplot(link_data, aes(x = Intero_Regulation, y = Predicted)) +
geom_segment(data = data,
aes(x = Intero_Regulation_Mean,
xend = Intero_Regulation_Mean,
y = Confidence_low,
yend = Confidence_high,
color = Condition),
alpha=1/3) +
geom_point2(data = data,
aes(x = Intero_Regulation_Mean,
y = Confidence_Mean,
color = Condition),
size=2) +
geom_ribbon(aes(ymin = CI_low, ymax = CI_high, fill = Condition), alpha = 1/3) +
geom_line(aes(color = Condition), size=1) +
geom_text(data=data.frame(Intero_Regulation = -0.15,
Predicted = c(0.25, 0.45, 0.75),
Condition = c("Polygraph", "Interrogation", "Polygraph"),
Answer = c("Lie", "Lie", "Truth")),
label = c("***", "*", "*"),
color = c("#FF5722", "#2196F3", "#FF5722"),
size = 10, show.legend = FALSE, fontface = "bold") +
labs(y = "Confidence",
x = expression(Interoception[" (.71 MAIA Not-worrying, .61 HCT Accuracy, .40 MAIA Trusting)"])) +
scale_y_continuous(labels = scales::percent) +
scale_color_manual(values = c("Polygraph" = "#FF5722", "Interrogation" = "#2196F3")) +
scale_fill_manual(values = c("Polygraph" = "#FF5722", "Interrogation" = "#2196F3")) +
facet_wrap(~Answer) +
theme_modern(axis.title.space = 10) +
theme(strip.text = element_text(size = 10),
strip.background = element_rect(fill = c("grey"), color = "white"))
p5
p <- (p1 / p2 / p3 / p4 / p5) + plot_layout(guides = "collect") + plot_annotation(title = "Interindividual correlates of the confidence\nthat one tells a convincing lie", theme = list(plot.title = element_text(face = "bold", hjust = 0.5)))
p
ggsave("figures/Figure1.png", width=12, height=15)
References
report::cite_packages(sessionInfo())
- Ben-Shachar MS, Lüdecke D, Makowski D (2020). “effectsize: Estimation of Effect Size Indices and Standardized Parameters.” Journal of Open Source Software, 5(56), 2815. doi:10.21105/joss.02815.
- Brooks ME, Kristensen K, van Benthem KJ, Magnusson A, Berg CW, Nielsen A, Skaug HJ, Maechler M, Bolker BM (2017). “glmmTMB Balances Speed and Flexibility Among Packages for Zero-inflated Generalized Linear Mixed Modeling.” The R Journal, 9(2), 378-400. https://journal.r-project.org/archive/2017/RJ-2017-066/index.html.
- Bürkner P (2017). “brms: An R Package for Bayesian Multilevel Models Using Stan.” Journal of Statistical Software, 80(1), 1-28. doi:10.18637/jss.v080.i01. Bürkner P (2018). “Advanced Bayesian Multilevel Modeling with the R Package brms.” The R Journal, 10(1), 395-411. doi:10.32614/RJ-2018-017. Bürkner P (2021). “Bayesian Item Response Modeling in R with brms and Stan.” Journal of Statistical Software, 100(5), 1-54. doi:10.18637/jss.v100.i05.
- Eddelbuettel D, François R (2011). “Rcpp: Seamless R and C++ Integration.” Journal of Statistical Software, 40(8), 1-18. doi:10.18637/jss.v040.i08. Eddelbuettel D (2013). Seamless R and C++ Integration with Rcpp. Springer, New York. doi:10.1007/978-1-4614-6868-4, ISBN 978-1-4614-6867-7. Eddelbuettel D, Balamuta JJ (2018). “Extending R with C++: A Brief Introduction to Rcpp.” The American Statistician, 72(1), 28-36. doi:10.1080/00031305.2017.1375990.
- Henry L, Wickham H (2020). purrr: Functional Programming Tools. R package version 0.3.4, https://CRAN.R-project.org/package=purrr.
- Lüdecke D, Ben-Shachar M, Patil I, Makowski D (2020). “Extracting, Computing and Exploring the Parameters of Statistical Models using R.” Journal of Open Source Software, 5(53), 2445. doi:10.21105/joss.02445.
- Lüdecke D, Ben-Shachar M, Patil I, Waggoner P, Makowski D (2021). “performance: An R Package for Assessment, Comparison and Testing of Statistical Models.” Journal of Open Source Software, 6(60), 3139. doi:10.21105/joss.03139.
- Lüdecke D, Patil I, Ben-Shachar M, Wiernik B, Waggoner P, Makowski D (2021). “see: An R Package for Visualizing Statistical Models.” Journal of Open Source Software, 6(64), 3393. doi:10.21105/joss.03393.
- Lüdecke D, Waggoner P, Makowski D (2019). “insight: A Unified Interface to Access Information from Model Objects in R.” Journal of Open Source Software, 4(38), 1412. doi:10.21105/joss.01412.
- Makowski D, Ben-Shachar M, Lüdecke D (2019). “bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework.” Journal of Open Source Software, 4(40), 1541. doi:10.21105/joss.01541.
- Makowski D, Ben-Shachar M, Lüdecke D (2020). “The easystats collection of R packages.” GitHub. https://github.com/easystats/easystats.
- Makowski D, Ben-Shachar M, Patil I, Lüdecke D (2020). “Estimation of Model-Based Predictions, Contrasts and Means.” CRAN. https://github.com/easystats/modelbased.
- Makowski D, Ben-Shachar M, Patil I, Lüdecke D (2020). “Methods and Algorithms for Correlation Analysis in R.” Journal of Open Source Software, 5(51), 2306. doi:10.21105/joss.02306.
- Makowski D, Ben-Shachar M, Patil I, Lüdecke D (2021). “Automated Results Reporting as a Practical Tool to Improve Reproducibility and Methodological Best Practices Adoption.” CRAN. https://github.com/easystats/report.
- Müller K, Wickham H (2022). tibble: Simple Data Frames. R package version 3.1.8, https://CRAN.R-project.org/package=tibble.
- Patil I, Makowski D, Ben-Shachar M, Wiernik B, Bacher E, Lüdecke D (2022). “datawizard: An R Package for Easy Data Preparation and Statistical Transformations.” CRAN. R package, https://easystats.github.io/datawizard/.
- Pedersen T (2020). patchwork: The Composer of Plots. R package version 1.1.1, https://CRAN.R-project.org/package=patchwork.
- R Core Team (2022). R: A Language and Environment for Statistical Computing. R Foundation for Statistical Computing, Vienna, Austria. https://www.R-project.org/.
- Wickham H (2016). ggplot2: Elegant Graphics for Data Analysis. Springer-Verlag New York. ISBN 978-3-319-24277-4, https://ggplot2.tidyverse.org.
- Wickham H (2019). stringr: Simple, Consistent Wrappers for Common String Operations. R package version 1.4.0, https://CRAN.R-project.org/package=stringr.
- Wickham H (2022). forcats: Tools for Working with Categorical Variables (Factors). R package version 0.5.2, https://CRAN.R-project.org/package=forcats.
- Wickham H, Averick M, Bryan J, Chang W, McGowan LD, François R, Grolemund G, Hayes A, Henry L, Hester J, Kuhn M, Pedersen TL, Miller E, Bache SM, Müller K, Ooms J, Robinson D, Seidel DP, Spinu V, Takahashi K, Vaughan D, Wilke C, Woo K, Yutani H (2019). “Welcome to the tidyverse.” Journal of Open Source Software, 4(43), 1686. doi:10.21105/joss.01686.
- Wickham H, François R, Henry L, Müller K (2022). dplyr: A Grammar of Data Manipulation. R package version 1.0.10, https://CRAN.R-project.org/package=dplyr.
- Wickham H, Girlich M (2022). tidyr: Tidy Messy Data. R package version 1.2.1, https://CRAN.R-project.org/package=tidyr.
- Wickham H, Hester J, Bryan J (2022). readr: Read Rectangular Text Data. R package version 2.1.2, https://CRAN.R-project.org/package=readr.
---
title: '**The Role of Interoception and Theory of Mind in Deception**'
author: "Dominique Makowski et al."
date: "`r Sys.Date()`"
output:
  html_document:
    theme: paper
    toc: yes
    toc_depth: 3
    df_print: default
    code_folding: hide
    code_download: yes
editor_options:
  chunk_output_type: console
---


<!-- 
!!!! IMPORTANT: run `source("utils/render.R")` to publish instead of clicking on 'Knit'
-->

```{r setup, warning=FALSE, message=TRUE, include=FALSE}
# Set up the environment (or use local alternative `source("utils/config.R")`)
source("https://raw.githubusercontent.com/RealityBending/TemplateResults/main/utils/config.R")

options(
  digits = 3,
  mc.cores = 4,
  brms.algorithm = "sampling",
  brms.backend = "cmdstanr",
  dplyr.summarise.inform = FALSE
)

knitr::opts_chunk$set(echo = TRUE)

library(ggplot2)
theme_set(see::theme_modern())
```


```{r warning=FALSE, message=TRUE, results='asis'}
library(tidyverse)
library(patchwork)
library(glmmTMB)
library(brms)
library(easystats)

show_parameters <- function(model) {
  p <- parameters::parameters(model, effects = "fixed", component="conditional", test=c("pd")) |>
    data_relocate("pd", after=-1)
  display(p, stars=TRUE)
}
```

```{r warning=FALSE, message=TRUE, results='asis'}
summary(report::report(sessionInfo()))
```


```{r warning=FALSE, message=TRUE, results='asis'}
df <- read.csv("data/data_combined.csv") %>% 
  mutate(Participant = factor(paste0("S", Participant), levels = paste0("S", 1:30)),
         Condition = as.factor(Condition),
         Item = as.factor(Item),
         Phrasing = as.factor(Phrasing),
         Answer = as.factor(Answer)) |> 
  dplyr::filter(!Participant %in% c("S3", "S15", "S19", "S23"))  # No data

# Outliers
df$HeartRate[df$Participant == "S30"] <- NA # Extreme values
# df$Confidence[df$Participant %in% c("S9", "S29")] <- NA # Extreme responses
df$RT[df$Participant == "S13"] <- NA # Slower than the others


# Remove outlier trials
df <- df |>
  group_by(Participant) |>
  mutate(Outliers_RT = as.logical(performance::check_outliers(RT, method = "zscore", threshold = qnorm(0.99999))),
         Outliers_Physio = as.logical(performance::check_outliers(HeartRate, method = "zscore", threshold = qnorm(0.99999)))) |>
  ungroup()

# Adjustments for beta models
df$Confidence[df$Confidence == 1] <- 0.99999
df$Confidence[df$Confidence == 0] <- 0.00001

cat(paste("The data consists of",
          report::report_participants(df,
                                      participants = "Participant",
                                      sex = "Gender",
                                      age = "Age")))
```

Percentage of Confidence data removed: `r sum(is.na(df$Confidence)) / nrow(df) * 100` %  
Percentage of RT data removed: `r sum(is.na(df$RT)) / nrow(df) * 100` %  
Percentage of Heart Rate data removed: `r sum(is.na(df$HeartRate)) / nrow(df) * 100` %


# Measures {.tabset}

```{r child='1_Measures.Rmd'}
```


# Dimension Reduction 

## Theory of Mind

```{r message=FALSE, warning=FALSE}
dfsub <- df |> 
  select(Participant, 
         starts_with("YONI_"), 
         starts_with("BES_")) |> 
  select(-ends_with("Total")) |> 
  group_by(Participant) |> 
  summarise_all(mean) |> 
  select(-Participant)

parameters::n_factors(dfsub)
efa <- parameters::factor_analysis(dfsub, n=1, sort=TRUE, rotation = "oblimin")
efa

df <- cbind(df, predict(efa, newdata=df, names="ToM"))
```


## Interoception

```{r message=FALSE, warning=FALSE}
dfsub <- df |> 
  select(Participant, 
         starts_with("HCT_"), 
         starts_with("MAIA_")) |> 
  select(-ends_with("Total")) |> 
  group_by(Participant) |> 
  summarise_all(mean) |> 
  select(-Participant)

parameters::n_components(dfsub)
efa <- parameters::factor_analysis(dfsub, n=4, sort=TRUE, rotation = "oblimin")
efa
df <- cbind(df, predict(efa, newdata=df, names=c("Intero_Meta", "Intero_Listening", "Intero_Focus", "Intero_Regulation")))
```


# Manipulation Checks

```{r child='2_ManipulationChecks.Rmd'}
```

# Theory of Mind / Empathy



## Theory of Mind Score {.tabset}


### Confidence

```{r message=FALSE, warning=FALSE}
results <- list()
for (var in c("ToM")) {
  model <- brms::brm(as.formula(paste0("Confidence ~ Answer / (Condition / ",
                                     var,
                                     ") + (1|Participant) + (1|Item)")),
                   data = df, family = "beta", refresh=0, seed=3, iter=4000,
                   prior = set_prior("student_t(1, 0, 1)", class = "b"))

  results[[var]] <- parameters::parameters(model, effects = "fixed", component = "conditional", test = c("pd"), keep = var) |>
      as.data.frame() |> 
      separate(Parameter, sep = ":", into = c("Answer", "Condition", "Variable")) |>
      mutate(
        Answer = str_remove(Answer, "b_Answer"),
        Condition = str_remove(Condition, "Condition")
      )
}

display(format_table(results$ToM, stars = TRUE))
```

### RT


```{r message=FALSE, warning=FALSE}
results <- list()
for (var in c("ToM")) {
  model <- brms::brm(as.formula(paste0("RT ~ Answer / (Condition / ",
                                     var,
                                     ") + (1|Participant) + (1|Item)")),
                   data = df, refresh=0, seed=3, iter=4000,
                   prior = set_prior("student_t(1, 0, 3)", class = "b"))

  results[[var]] <- parameters::parameters(model, effects = "fixed", component = "conditional", test = c("pd"), keep = var) |>
      as.data.frame() |> 
      separate(Parameter, sep = ":", into = c("Answer", "Condition", "Variable")) |>
      mutate(
        Answer = str_remove(Answer, "b_Answer"),
        Condition = str_remove(Condition, "Condition")
      )
}

display(format_table(results$ToM, stars = TRUE))
```


### Heart Rate

```{r message=FALSE, warning=FALSE}
results <- list()
for (var in c("ToM")) {
  model <- brms::brm(as.formula(paste0("HeartRate ~ Answer / (Condition / ",
                                     var,
                                     ") + (1|Participant) + (1|Item)")),
                   data = df, refresh=0, seed=3, iter=4000,
                   prior = set_prior("student_t(1, 0, 8)", class = "b"))

  results[[var]] <- parameters::parameters(model, effects = "fixed", component = "conditional", test = c("pd"), keep = var) |>
      as.data.frame() |> 
      separate(Parameter, sep = ":", into = c("Answer", "Condition", "Variable")) |>
      mutate(
        Answer = str_remove(Answer, "b_Answer"),
        Condition = str_remove(Condition, "Condition")
      )
}

display(format_table(results$ToM, stars = TRUE))
```
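
The tables in this section report the probability of direction (pd, requested via `test = c("pd")`) as the index of effect existence. If a more familiar reading is needed, pd can be converted into an approximate two-sided *p*-value with bayestestR (loaded via easystats):

```{r eval=FALSE}
# Approximate two-sided p-value for a given pd: p ~= 2 * (1 - pd)
bayestestR::pd_to_p(0.975)                # ~ 0.05
bayestestR::pd_to_p(c(0.95, 0.99, 0.999))
```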




### Correlation with LIE Scale

```{r message=FALSE, warning=FALSE}
dfsub <- df |>
  select(Participant,
         starts_with("LIE_"),
         starts_with("ToM")) |>
  group_by(Participant) |>
  summarise_all(mean)

correlation(select(dfsub, starts_with("LIE_")), select(dfsub, starts_with("ToM")), bayesian=TRUE)
```
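
The same participant-level correlations can also be displayed as a compact matrix (a sketch using the `summary()` method of the correlation object, assuming the aggregation above):

```{r eval=FALSE}
# Matrix-style view of the Bayesian correlations (optional)
correlation(select(dfsub, starts_with("LIE_")),
            select(dfsub, starts_with("ToM")),
            bayesian = TRUE) |>
  summary()
```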




```{r child='3_ToM.Rmd'}
```

# Interoception


## Interoception Scores {.tabset}


### Confidence

```{r message=FALSE, warning=FALSE}
results <- list()
for (var in c("Intero_Meta", "Intero_Listening", "Intero_Focus", "Intero_Regulation")) {
  model <- brms::brm(as.formula(paste0(
    "Confidence ~ Answer / (Condition / ",
    var,
    ") + (1|Participant) + (1|Item)"
  )),
  data = df, family = "beta", refresh = 0, seed=3, iter = 4000,
  prior = set_prior("student_t(1, 0, 1)", class = "b")
  )

  results[[var]] <- parameters::parameters(model, effects = "fixed", component = "conditional", test = c("pd"), keep = var) |>
      as.data.frame() |> 
      separate(Parameter, sep = ":", into = c("Answer", "Condition", "Variable")) |>
      mutate(
        Answer = str_remove(Answer, "b_Answer"),
        Condition = str_remove(Condition, "Condition")
      )
}

display(format_table(results$Intero_Meta, stars = TRUE))
display(format_table(results$Intero_Listening, stars = TRUE))
display(format_table(results$Intero_Focus, stars = TRUE))
display(format_table(results$Intero_Regulation, stars = TRUE))
```


### RT


```{r message=FALSE, warning=FALSE}
results <- list()
for (var in c("Intero_Meta", "Intero_Listening", "Intero_Focus", "Intero_Regulation")) {
  model <- brms::brm(as.formula(paste0(
    "RT ~ Answer / (Condition / ",
    var,
    ") + (1|Participant) + (1|Item)"
  )),
  data = df, refresh = 0, seed=3, iter = 4000,
  prior = set_prior("student_t(1, 0, 3)", class = "b")
  )

  results[[var]] <- parameters::parameters(model, effects = "fixed", component = "conditional", test = c("pd"), keep = var) |>
      as.data.frame() |> 
      separate(Parameter, sep = ":", into = c("Answer", "Condition", "Variable")) |>
      mutate(
        Answer = str_remove(Answer, "b_Answer"),
        Condition = str_remove(Condition, "Condition")
      )
}

display(format_table(results$Intero_Meta, stars = TRUE))
display(format_table(results$Intero_Listening, stars = TRUE))
display(format_table(results$Intero_Focus, stars = TRUE))
display(format_table(results$Intero_Regulation, stars = TRUE))
```


### Heart Rate

```{r message=FALSE, warning=FALSE}
results <- list()
for (var in c("Intero_Meta", "Intero_Listening", "Intero_Focus", "Intero_Regulation")) {
  model <- brms::brm(as.formula(paste0(
    "HeartRate ~ Answer / (Condition / ",
    var,
    ") + (1|Participant) + (1|Item)"
  )),
  data = df, refresh = 0, seed=3, iter = 4000,
  prior = set_prior("student_t(1, 0, 8)", class = "b")
  )

  results[[var]] <- parameters::parameters(model, effects = "fixed", component = "conditional", test = c("pd"), keep = var) |>
      as.data.frame() |> 
      separate(Parameter, sep = ":", into = c("Answer", "Condition", "Variable")) |>
      mutate(
        Answer = str_remove(Answer, "b_Answer"),
        Condition = str_remove(Condition, "Condition")
      )
}

display(format_table(results$Intero_Meta, stars = TRUE))
display(format_table(results$Intero_Listening, stars = TRUE))
display(format_table(results$Intero_Focus, stars = TRUE))
display(format_table(results$Intero_Regulation, stars = TRUE))
```




### Correlation with LIE Scale

```{r message=FALSE, warning=FALSE}
dfsub <- df |>
  select(
    Participant,
    starts_with("LIE_"),
    starts_with("Intero_")
  ) |>
  group_by(Participant) |>
  summarise_all(mean)

correlation(select(dfsub, starts_with("LIE_")), select(dfsub, starts_with("Intero_")), bayesian = TRUE)
```




```{r child='4_Intero.Rmd'}
```



# Deception Trait {.tabset}

## Confidence

```{r message=FALSE, warning=FALSE}
results <- list()
for (var in c("LIE_Ability", "LIE_Frequency", "LIE_Contextuality", "LIE_Negativity")) {
  model <- brms::brm(as.formula(paste0(
    "Confidence ~ Answer / (Condition / ",
    var,
    ") + (1|Participant) + (1|Item)"
  )),
  data = df, family = "beta", refresh = 0, seed=3, iter = 4000,
  prior = set_prior("student_t(1, 0, 1)", class = "b")
  )

  results[[var]] <- parameters::parameters(model, effects = "fixed", component = "conditional", test = c("pd"), keep = var) |>
      as.data.frame() |> 
      separate(Parameter, sep = ":", into = c("Answer", "Condition", "Variable")) |>
      mutate(
        Answer = str_remove(Answer, "b_Answer"),
        Condition = str_remove(Condition, "Condition")
      )
}

display(format_table(results$LIE_Ability, stars = TRUE))
display(format_table(results$LIE_Frequency, stars = TRUE))
display(format_table(results$LIE_Contextuality, stars = TRUE))
display(format_table(results$LIE_Negativity, stars = TRUE))
```


## RT


```{r message=FALSE, warning=FALSE}
results <- list()
for (var in c("LIE_Ability", "LIE_Frequency", "LIE_Contextuality", "LIE_Negativity")) {
  model <- brms::brm(as.formula(paste0(
    "RT ~ Answer / (Condition / ",
    var,
    ") + (1|Participant) + (1|Item)"
  )),
  data = df, refresh = 0, seed=3, iter = 4000,
  prior = set_prior("student_t(1, 0, 3)", class = "b")
  )

  results[[var]] <- parameters::parameters(model, effects = "fixed", component = "conditional", test = c("pd"), keep = var) |>
      as.data.frame() |> 
      separate(Parameter, sep = ":", into = c("Answer", "Condition", "Variable")) |>
      mutate(
        Answer = str_remove(Answer, "b_Answer"),
        Condition = str_remove(Condition, "Condition")
      )
}

display(format_table(results$LIE_Ability, stars = TRUE))
display(format_table(results$LIE_Frequency, stars = TRUE))
display(format_table(results$LIE_Contextuality, stars = TRUE))
display(format_table(results$LIE_Negativity, stars = TRUE))
```


## Heart Rate

```{r message=FALSE, warning=FALSE}
results <- list()
for (var in c("LIE_Ability", "LIE_Frequency", "LIE_Contextuality", "LIE_Negativity")) {
  model <- brms::brm(as.formula(paste0(
    "HeartRate ~ Answer / (Condition / ",
    var,
    ") + (1|Participant) + (1|Item)"
  )),
  data = df, refresh = 0, seed=3, iter = 4000,
  prior = set_prior("student_t(1, 0, 8)", class = "b")
  )

  results[[var]] <- parameters::parameters(model, effects = "fixed", component = "conditional", test = c("pd"), keep = var) |>
      as.data.frame() |> 
      separate(Parameter, sep = ":", into = c("Answer", "Condition", "Variable")) |>
      mutate(
        Answer = str_remove(Answer, "b_Answer"),
        Condition = str_remove(Condition, "Condition")
      )
}

display(format_table(results$LIE_Ability, stars = TRUE))
display(format_table(results$LIE_Frequency, stars = TRUE))
display(format_table(results$LIE_Contextuality, stars = TRUE))
display(format_table(results$LIE_Negativity, stars = TRUE))
```



# Figure

This chunk is not evaluated when knitting (`eval=FALSE`); run it interactively to refit the models and regenerate the figure.

```{r warning=FALSE, message=FALSE, eval=FALSE}
data <- df |>
  group_by(Participant, Answer, Condition) |> 
  select(Confidence, RT, HeartRate, ToM, Intero_Regulation, Intero_Focus, Intero_Meta, Intero_Listening) |> 
  # Mean and mean +/- SD/2 per Participant x Answer x Condition (for the point-range segments)
  summarise_all(.funs = list(Mean = function(x) mean(x, na.rm = TRUE), 
                             low = function(x) (mean(x, na.rm=TRUE) - sd(x, na.rm=TRUE) / 2),
                             high = function(x) (mean(x, na.rm=TRUE) + sd(x, na.rm=TRUE) / 2))) |> 
  # Slightly offset the x-position of the participant means by Condition to reduce overplotting
  mutate(ToM_Mean = ifelse(Condition == "Polygraph", ToM_Mean-0.01, ToM_Mean+0.01),
         Intero_Regulation_Mean = ifelse(Condition == "Polygraph", Intero_Regulation_Mean-0.01, Intero_Regulation_Mean+0.01),
         Intero_Focus_Mean = ifelse(Condition == "Polygraph", Intero_Focus_Mean-0.01, Intero_Focus_Mean+0.01),
         Intero_Meta_Mean = ifelse(Condition == "Polygraph", Intero_Meta_Mean-0.01, Intero_Meta_Mean+0.01),
         Intero_Listening_Mean = ifelse(Condition == "Polygraph", Intero_Listening_Mean-0.01, Intero_Listening_Mean+0.01))

# Frequentist counterpart (glmmTMB); note that `model` is immediately
# overwritten by the Bayesian fit below
model <- glmmTMB(Confidence ~ Answer / Condition / ToM + (1|Participant) + (1|Item), 
                 data = df, 
                 family = beta_family())

model <- brms::brm(Confidence ~ Answer / Condition / ToM + (1|Participant) + (1|Item), 
                   data = df, family = "beta", seed=3, refresh=0,
                   prior = set_prior("student_t(1, 0, 1)", class = "b"))

link_data <- estimate_relation(model, at = c("Condition", "Answer", "ToM"), length = 30)

p1 <- ggplot(link_data, aes(x = ToM, y = Predicted)) +
  geom_segment(data = data,
                  aes(x = ToM_Mean,
                      xend = ToM_Mean,
                      y = Confidence_low,
                      yend = Confidence_high,
                      color = Condition),
                  alpha=1/3) +
  geom_point2(data = data,
                  aes(x = ToM_Mean,
                      y = Confidence_Mean,
                      color = Condition),
              size=2) +
  geom_ribbon(aes(ymin = CI_low, ymax = CI_high, fill = Condition), alpha = 1/3) +
  geom_line(aes(color = Condition), size=1) +
  geom_text(data=data.frame(ToM = -0.3, Predicted = 0.45, Condition = "Polygraph", Answer = "Lie"),
            label = "***",
            color = "#FF5722",
            size = 10, show.legend = FALSE, fontface = "bold") +
  labs(y = "Confidence",
       x = "Theory of Mind") +
  scale_y_continuous(labels = scales::percent) + 
  scale_color_manual(values = c("Polygraph" = "#FF5722", "Interrogation" = "#2196F3")) +
  scale_fill_manual(values = c("Polygraph" = "#FF5722", "Interrogation" = "#2196F3")) +
  facet_wrap(~Answer) +
  theme_modern(axis.title.space = 10) +
  theme(strip.text = element_text(size = 10),
        strip.background = element_rect(fill = c("grey"), color = "white"))
p1
  

model <- brms::brm(Confidence ~ Answer / Condition / Intero_Meta + (1|Participant) + (1|Item), 
                   data = df, family = "beta", seed=3, refresh=0,
                   prior = set_prior("student_t(1, 0, 1)", class = "b"))

link_data <- estimate_relation(model, at = c("Condition", "Answer", "Intero_Meta"), length = 30)

p2 <- ggplot(link_data, aes(x = Intero_Meta, y = Predicted)) +
  geom_segment(data = data,
                  aes(x = Intero_Meta_Mean,
                      xend = Intero_Meta_Mean,
                      y = Confidence_low,
                      yend = Confidence_high,
                      color = Condition),
                  alpha=1/3) +
  geom_point2(data = data,
                  aes(x = Intero_Meta_Mean,
                      y = Confidence_Mean,
                      color = Condition),
              size=2) +
  geom_ribbon(aes(ymin = CI_low, ymax = CI_high, fill = Condition), alpha = 1/3) +
  geom_line(aes(color = Condition), size=1) +
  geom_text(data=data.frame(Intero_Meta = -0.15, Predicted = 0.25, Condition = "Polygraph", Answer = "Lie"),
            label = "***",
            color = "#FF5722",
            size = 10, show.legend = FALSE, fontface = "bold") +
  labs(y = "Confidence",
       x = expression(Interoception["  (.97 MAIA Attention regulation, .63 MAIA Self-regulation, .60 MAIA Emotional Awareness)"])) +
  scale_y_continuous(labels = scales::percent) + 
  scale_color_manual(values = c("Polygraph" = "#FF5722", "Interrogation" = "#2196F3")) +
  scale_fill_manual(values = c("Polygraph" = "#FF5722", "Interrogation" = "#2196F3")) +
  facet_wrap(~Answer) +
  theme_modern(axis.title.space = 10) +
  theme(strip.text = element_text(size = 10),
        strip.background = element_rect(fill = c("grey"), color = "white"))
p2



model <- brms::brm(Confidence ~ Answer / Condition / Intero_Listening + (1|Participant) + (1|Item), 
                   data = df, family = "beta", seed=3, refresh=0,
                   prior = set_prior("student_t(1, 0, 1)", class = "b"))
link_data <- estimate_relation(model, at = c("Condition", "Answer", "Intero_Listening"), length = 30)

p3 <- ggplot(link_data, aes(x = Intero_Listening, y = Predicted)) +
  geom_segment(data = data,
                  aes(x = Intero_Listening_Mean,
                      xend = Intero_Listening_Mean,
                      y = Confidence_low,
                      yend = Confidence_high,
                      color = Condition),
                  alpha=1/3) +
  geom_point2(data = data,
                  aes(x = Intero_Listening_Mean,
                      y = Confidence_Mean,
                      color = Condition),
              size=2) +
  geom_ribbon(aes(ymin = CI_low, ymax = CI_high, fill = Condition), alpha = 1/3) +
  geom_line(aes(color = Condition), size=1) +
  geom_text(data=data.frame(Intero_Listening = 0.2, Predicted = 0.45, Condition = "Polygraph", Answer = "Lie"),
            label = "***",
            color = "#FF5722",
            size = 10, show.legend = FALSE, fontface = "bold") +
  labs(y = "Confidence",
       x = expression(Interoception["  (.92 MAIA Body listening, -.60 HCT Awareness, .53 HCT Trusting)"])) +
  scale_y_continuous(labels = scales::percent) + 
  scale_color_manual(values = c("Polygraph" = "#FF5722", "Interrogation" = "#2196F3")) +
  scale_fill_manual(values = c("Polygraph" = "#FF5722", "Interrogation" = "#2196F3")) +
  facet_wrap(~Answer) +
  theme_modern(axis.title.space = 10) +
  theme(strip.text = element_text(size = 10),
        strip.background = element_rect(fill = c("grey"), color = "white")) 
  
p3


model <- brms::brm(Confidence ~ Answer / Condition / Intero_Focus + (1|Participant) + (1|Item), 
                   data = df, family = "beta", seed=3, refresh=0,
                   prior = set_prior("student_t(1, 0, 1)", class = "b"))
link_data <- estimate_relation(model, at = c("Condition", "Answer", "Intero_Focus"), length = 30)

p4 <- ggplot(link_data, aes(x = Intero_Focus, y = Predicted)) +
  geom_segment(data = data,
                  aes(x = Intero_Focus_Mean,
                      xend = Intero_Focus_Mean,
                      y = Confidence_low,
                      yend = Confidence_high,
                      color = Condition),
                  alpha=1/3) +
  geom_point2(data = data,
                  aes(x = Intero_Focus_Mean,
                      y = Confidence_Mean,
                      color = Condition),
              size=2) +
  geom_ribbon(aes(ymin = CI_low, ymax = CI_high, fill = Condition), alpha = 1/3) +
  geom_line(aes(color = Condition), size=1) +
  geom_text(data=data.frame(Intero_Focus = -0.2,
                            Predicted = c(0.55, 0.75),
                            Condition = c("Polygraph", "Interrogation"),
                            Answer = c("Truth", "Truth")),
            label = c("*", "*"),
            # Colours follow the Condition order (Polygraph = orange, Interrogation = blue),
            # matching the manual scales and the other panels
            color = c("#FF5722", "#2196F3"),
            size = 10, show.legend = FALSE, fontface = "bold") +
  labs(y = "Confidence",
       x = expression(Interoception["  (.87 MAIA Not-distracting, -.40 MAIA Emotional Awareness, .33 HCT Accuracy)"])) +
  scale_y_continuous(labels = scales::percent) + 
  scale_color_manual(values = c("Polygraph" = "#FF5722", "Interrogation" = "#2196F3")) +
  scale_fill_manual(values = c("Polygraph" = "#FF5722", "Interrogation" = "#2196F3")) +
  facet_wrap(~Answer) +
  theme_modern(axis.title.space = 10) +
  theme(strip.text = element_text(size = 10),
        strip.background = element_rect(fill = c("grey"), color = "white"))
p4



model <- brms::brm(Confidence ~ Answer / Condition / Intero_Regulation + (1|Participant) + (1|Item), 
                   data = df, family = "beta", seed=3, refresh=0,
                   prior = set_prior("student_t(1, 0, 1)", class = "b"))

link_data <- estimate_relation(model, at = c("Condition", "Answer", "Intero_Regulation"), length = 30)

p5 <- ggplot(link_data, aes(x = Intero_Regulation, y = Predicted)) +
  geom_segment(data = data,
                  aes(x = Intero_Regulation_Mean,
                      xend = Intero_Regulation_Mean,
                      y = Confidence_low,
                      yend = Confidence_high,
                      color = Condition),
                  alpha=1/3) +
  geom_point2(data = data,
                  aes(x = Intero_Regulation_Mean,
                      y = Confidence_Mean,
                      color = Condition),
              size=2) +
  geom_ribbon(aes(ymin = CI_low, ymax = CI_high, fill = Condition), alpha = 1/3) +
  geom_line(aes(color = Condition), size=1) +
  geom_text(data=data.frame(Intero_Regulation = -0.15, 
                            Predicted = c(0.25, 0.45, 0.75),
                            Condition = c("Polygraph", "Interrogation", "Polygraph"), 
                            Answer = c("Lie", "Lie", "Truth")),
            label = c("***", "*", "*"),
            color = c("#FF5722", "#2196F3", "#FF5722"),
            size = 10, show.legend = FALSE, fontface = "bold") +
  labs(y = "Confidence",
       x = expression(Interoception["  (.71 MAIA Not-worrying, .61 HCT Accuracy, .40 MAIA Trusting)"])) +
  scale_y_continuous(labels = scales::percent) + 
  scale_color_manual(values = c("Polygraph" = "#FF5722", "Interrogation" = "#2196F3")) +
  scale_fill_manual(values = c("Polygraph" = "#FF5722", "Interrogation" = "#2196F3")) +
  facet_wrap(~Answer) +
  theme_modern(axis.title.space = 10) +
  theme(strip.text = element_text(size = 10),
        strip.background = element_rect(fill = c("grey"), color = "white")) 
p5


p <- (p1 / p2 / p3 / p4 / p5) +
  plot_layout(guides = "collect") +
  plot_annotation(title = "Interindividual correlates of the confidence\nthat one tells a convincing lie",
                  theme = theme(plot.title = element_text(face = "bold", hjust = 0.5)))
p
ggsave("figures/Figure1.png", width=12, height=15)
```
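
The five panels above are built with nearly identical code. A small helper (hypothetical, not part of the original script) could fit the model and draw the core of a panel for any moderator, leaving only the significance annotations and axis labels to be added per panel:

```{r eval=FALSE}
# Hypothetical refactoring sketch: one function per moderator variable.
plot_confidence_panel <- function(var, data = df) {
  f <- as.formula(paste0("Confidence ~ Answer / Condition / ", var,
                         " + (1|Participant) + (1|Item)"))
  model <- brms::brm(f, data = data, family = "beta", seed = 3, refresh = 0,
                     prior = set_prior("student_t(1, 0, 1)", class = "b"))
  link_data <- estimate_relation(model, at = c("Condition", "Answer", var), length = 30)
  ggplot(link_data, aes(x = .data[[var]], y = Predicted)) +
    geom_ribbon(aes(ymin = CI_low, ymax = CI_high, fill = Condition), alpha = 1/3) +
    geom_line(aes(color = Condition), size = 1) +
    facet_wrap(~Answer) +
    scale_color_manual(values = c("Polygraph" = "#FF5722", "Interrogation" = "#2196F3")) +
    scale_fill_manual(values = c("Polygraph" = "#FF5722", "Interrogation" = "#2196F3"))
}
# p1 <- plot_confidence_panel("ToM") + labs(x = "Theory of Mind", y = "Confidence")
```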


# References

```{r warning=FALSE, message=FALSE, results='asis'}
report::cite_packages(sessionInfo())
```


