set.seed(123456) # for reproducibility
pacman::p_load(tidyverse, psych)
pacman::p_load(ggpubr, gghalves, knitr, cowplot, ggbeeswarm)
pacman::p_load(rstatix, permuco, Superpower, MPTinR, MKinfer, boot)
pacman::p_load_gh("RLesur/klippy", "mitchelloharawild/icons")
options(knitr.kable.NA = '')
options(dplyr.summarise.inform = FALSE) # suppress the regrouping message
klippy::klippy()
nsims <- 1e4
## Excluding Ss
rm_subject <- function(df, rx){
  for (i in rx){
    df <- df %>% filter(SID != i) %>% droplevels()
  }
  cat(sprintf('%d removed & %d left',
              length(unique(rx)),
              length(unique(df$SID))))
  return(df)
}
## Plot
# stat summary plot marking the 25th and 75th percentiles
# https://bit.ly/3iFpV07
single_raincloud_plot <- function(df, Y, xMin, xMax, xBy, xLab){
  df %>% ggplot(aes(x = 1, y = Y)) +
    geom_half_violin(aes(y = Y), side = "r",
                     color = "grey70", fill = "grey70") +
    geom_half_point(aes(y = Y), side = "l", size = 2,
                    color = "grey50", fill = "grey50", alpha = .5) +
    geom_pointrange(stat = "summary",
                    fun.min = function(z) {quantile(z, 0.25)},
                    fun.max = function(z) {quantile(z, 0.75)},
                    fun = median, color = "darkred", size = 1) +
    scale_y_continuous(breaks = seq(xMin, xMax, by = xBy)) +
    coord_flip(ylim = c(xMin, xMax), clip = "on") +
    labs(y = xLab) +
    theme_bw(base_size = 18) +
    theme(panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          axis.title.y = element_blank(),
          axis.ticks.y = element_blank(),
          axis.text.y = element_blank(),
          aspect.ratio = .3)
}
ggdraw() + draw_image("fig/Fig_proc.png")
We tidy the raw data and check whether the online participants followed the instructions.
d <- read_csv('data/PIT2_dataN46.csv', show_col_types = FALSE)
d %>% summarise(SN = n_distinct(SID))
## # A tibble: 1 × 1
## SN
## <int>
## 1 46
table(d$SID)
##
## 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
## 256 256 256 256 256 256 256 256 256 256 256 256 256 256 256 256 256 256 256 256
## 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40
## 256 256 256 256 256 256 256 256 256 256 256 256 256 256 256 256 256 256 256 256
## 41 42 43 44 45 46
## 256 256 256 256 256 256
unique(d$Prime)
## [1] "nonpain" "pain"
unique(d$Target)
## [1] "pain" "nonpain"
table(d$Load, d$SID)
##
## 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
## High 128 128 128 128 128 128 128 128 128 128 128 128 128 128 128 128 128 128
## Low 128 128 128 128 128 128 128 128 128 128 128 128 128 128 128 128 128 128
##
## 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36
## High 128 128 128 128 128 128 128 128 128 128 128 128 128 128 128 128 128 128
## Low 128 128 128 128 128 128 128 128 128 128 128 128 128 128 128 128 128 128
##
## 37 38 39 40 41 42 43 44 45 46
## High 128 128 128 128 128 128 128 128 128 128
## Low 128 128 128 128 128 128 128 128 128 128
# group and block are counterbalancing variables;
# group: response-key counterbalancing
# (A, B; A - pain key on the left of the dominant hand, B - pain key on the right of the dominant hand)
table(d$group, d$SID)
##
## 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
## A 256 256 256 256 256 256 256 256 256 256 256 256 256 256 256 256 256 256 256
## B 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
##
## 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38
## A 256 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
## B 0 256 256 256 256 256 256 256 256 256 256 256 256 256 256 256 256 256 256
##
## 39 40 41 42 43 44 45 46
## A 0 0 0 0 256 256 0 256
## B 256 256 256 256 0 0 256 0
# block: block-order counterbalancing
# (H, L; H - starts with a high-load block, HLLHHLLH; L - starts with a low-load block, LHHLLHHL)
table(d$block, d$SID)
##
## 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
## H 256 256 256 256 256 256 256 256 256 256 0 0 0 0 0 0 0 0 0
## L 0 0 0 0 0 0 0 0 0 0 256 256 256 256 256 256 256 256 256
##
## 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38
## H 0 256 256 256 256 256 256 256 256 256 256 256 0 0 0 0 0 0 0
## L 256 0 0 0 0 0 0 0 0 0 0 0 256 256 256 256 256 256 256
##
## 39 40 41 42 43 44 45 46
## H 0 0 0 0 256 256 256 256
## L 256 256 256 256 0 0 0 0
table(d$Load, d$group)
##
## A B
## High 2944 2944
## Low 2944 2944
table(d$Load, d$block)
##
## H L
## High 3200 2688
## Low 3200 2688
table(d$sameDiff)
##
## 0 1
## 5888 5888
table(d$Congruent)
##
## 0 1
## 5888 5888
table(d$PIT_corr)
##
## 0 1
## 1227 10549
table(d$WM_corr)
##
## 0 1
## 1277 4611
table(d$Load, d$WM_corr)
##
## 0 1
## High 1277 4611
## Low 0 0
table(d$Load, d$PIT_corr)
##
## 0 1
## High 821 5067
## Low 406 5482
d %>% mutate(Cong = Prime == Target,
             Check = Cong != Congruent) %>%
  summarise(Check = sum(Check)) # the Prime and Target variables are redundant
## # A tibble: 1 × 1
## Check
## <int>
## 1 0
d$SID <- factor(d$SID)
d$Load <- factor(d$Load)
d$Prime <- factor(d$Prime, levels=c("pain","nonpain"), labels=c("Pain","Nonpain"))
d$Target <- factor(d$Target, levels=c("pain","nonpain"), labels=c("Pain","Nonpain"))
d$Congruency <- factor(d$Congruent, levels=c(1,0), labels=c("Congruent","Incongruent"))
d$sameDiff <- factor(d$sameDiff, levels=c(1, 0), labels=c("Same","Different"))

d <- d %>% select(SID, Load, sameDiff, WM_corr, WM_rt, Prime, Target, Congruency, PIT_corr, PIT_rt)
str(d)
## tibble [11,776 × 10] (S3: tbl_df/tbl/data.frame)
## $ SID : Factor w/ 46 levels "1","2","3","4",..: 1 1 1 1 1 1 1 1 1 1 ...
## $ Load : Factor w/ 2 levels "High","Low": 1 1 1 1 1 1 1 1 1 1 ...
## $ sameDiff : Factor w/ 2 levels "Same","Different": 1 1 1 2 2 2 2 1 2 1 ...
## $ WM_corr : num [1:11776] 1 0 1 0 1 1 0 0 1 0 ...
## $ WM_rt : num [1:11776] 0.796 1.436 0.65 0.804 1.327 ...
## $ Prime : Factor w/ 2 levels "Pain","Nonpain": 2 2 2 1 2 1 1 1 1 2 ...
## $ Target : Factor w/ 2 levels "Pain","Nonpain": 1 2 1 1 2 1 1 1 1 2 ...
## $ Congruency: Factor w/ 2 levels "Congruent","Incongruent": 2 1 2 1 1 1 1 1 1 1 ...
## $ PIT_corr : num [1:11776] 1 1 1 1 1 1 1 1 1 1 ...
## $ PIT_rt : num [1:11776] 1.103 0.83 0.917 1.282 1.052 ...
table(d$Load, d$Congruency)
##
## Congruent Incongruent
## High 2944 2944
## Low 2944 2944
# 256 trials per participant; 64 trials per participant per condition
d.sum <- d %>%
  filter(Load == 'High') %>%
  group_by(SID) %>%
  summarise(WMerror = (1 - mean(WM_corr))*100) %>%
  ungroup() %>%
  summarise(MN = mean(WMerror)
            , SD = sd(WMerror)
            , MIN = min(WMerror)
            , MAX = max(WMerror)
            , Q1 = quantile(WMerror, prob = .25)
            , MED = median(WMerror)
            , Q3 = quantile(WMerror, prob = .75)
            , IQR = IQR(WMerror)
            , Outlier = Q3 + 1.5 * IQR
            , Extreme = Q3 + 3 * IQR)

d.sum %>%
  kable(digits = 2, caption = "WM Error (%): Distribution")
MN | SD | MIN | MAX | Q1 | MED | Q3 | IQR | Outlier | Extreme |
---|---|---|---|---|---|---|---|---|---|
21.69 | 12.49 | 2.34 | 52.34 | 12.7 | 17.97 | 29.49 | 16.8 | 54.69 | 79.88 |
d %>%
  filter(Load == 'High') %>%
group_by(SID) %>%
summarise(WMerror = (1 - mean(WM_corr))*100) %>%
ungroup() %>%
filter(WMerror > 30) %>%
print(n = Inf)
## # A tibble: 11 × 2
## SID WMerror
## <fct> <dbl>
## 1 1 39.8
## 2 11 37.5
## 3 13 39.8
## 4 30 52.3
## 5 32 49.2
## 6 33 47.7
## 7 35 32.8
## 8 41 35.9
## 9 42 33.6
## 10 45 37.5
## 11 46 30.5
# These mostly overlap with the PIT outlier list below.
d %>%
  filter(Load == 'High') %>%
group_by(SID) %>%
summarise(WMerror = (1 - mean(WM_corr))*100) %>%
ungroup() %>%
identify_outliers(WMerror) # no outliers by the IQR criterion
## [1] SID WMerror is.outlier is.extreme
## <0 rows> (or 0-length row.names)
d %>%
  filter(Load == 'High') %>%
group_by(SID) %>%
summarise(WMerror = (1 - mean(WM_corr))*100) %>%
ungroup() %>%
single_raincloud_plot(.$WMerror, 0, 100, 10, "WM Error (%)") +
geom_hline(yintercept=50, linetype="dotted") +
geom_hline(yintercept=d.sum$Outlier, linetype='dashed', color='red', size=0.5)
The working-memory error rates are spread rather widely, which makes it hard to define outliers. Participants with error rates above roughly 30% largely overlap with the PIT error-rate outliers identified below.
In the 2 Load x 2 Congruency design, 64 trials were presented per condition.
# d %>% group_by(SID, Load, Congruency) %>%
# summarise(PITerror = (1 - mean(PIT_corr))*100,
# N = n()) %>%
# ungroup() %>%
#   print(n = Inf) # 4 conditions, 64 trials each
# Check high-load block responses: trials exceeding 2 s were recorded as PIT_rt = NA and PIT_corr = 0.
d %>% filter(Load == "High") %>%
  summarise(MN = mean(PIT_rt, na.rm = TRUE), # na.rm handles trials with no response
            SD = sd(PIT_rt, na.rm = TRUE),
            Max = max(PIT_rt, na.rm = TRUE))
## # A tibble: 1 × 3
## MN SD Max
## <dbl> <dbl> <dbl>
## 1 0.864 0.335 1.98
# In the high-load block, the maximum RT does not exceed 2 s.
# ( n2 <- d %>% filter(Load == "Low", PIT_rt > 2) %>% nrow() ) # same as the line below
( n2 <- d %>% filter(PIT_rt > 2) %>% nrow() ) # 139 low-load trials exceed 2 s
## [1] 139
100*n2/nrow(d) # 1.18 %
## [1] 1.180367
# Low-load trials exceeding 2 s are also scored as errors -> PIT_corr2
d <- d %>%
  mutate(PIT_corr2 = ifelse(Load == "High" | PIT_rt < 2, PIT_corr, 0))

d.sum <- d %>%
  group_by(SID) %>%
  summarise(PITerror = (1 - mean(PIT_corr2))*100) %>%
  ungroup() %>%
  summarise(MN = mean(PITerror)
            , SD = sd(PITerror)
            , MIN = min(PITerror)
            , MAX = max(PITerror)
            , Q1 = quantile(PITerror, prob = .25)
            , MED = median(PITerror)
            , Q3 = quantile(PITerror, prob = .75)
            , IQR = IQR(PITerror)
            , Outlier = Q3 + 1.5 * IQR
            , Extreme = Q3 + 3 * IQR)

d.sum %>%
  kable(digits = 2, caption = "PIT Error (%): Distribution")
MN | SD | MIN | MAX | Q1 | MED | Q3 | IQR | Outlier | Extreme |
---|---|---|---|---|---|---|---|---|---|
11.27 | 14.72 | 1.17 | 59.77 | 2.83 | 5.86 | 9.28 | 6.45 | 18.95 | 28.61 |
d %>%
  group_by(SID) %>%
summarise(PITerror = (1 - mean(PIT_corr2))*100) %>%
ungroup() %>%
identify_outliers(PITerror)
## # A tibble: 8 × 4
## SID PITerror is.outlier is.extreme
## <fct> <dbl> <lgl> <lgl>
## 1 1 21.1 TRUE FALSE
## 2 8 42.2 TRUE TRUE
## 3 30 43.0 TRUE TRUE
## 4 32 25.4 TRUE FALSE
## 5 33 51.2 TRUE TRUE
## 6 35 35.2 TRUE TRUE
## 7 45 44.9 TRUE TRUE
## 8 46 59.8 TRUE TRUE
d %>%
  group_by(SID) %>%
summarise(PITerror = (1 - mean(PIT_corr2))*100) %>%
ungroup() %>%
single_raincloud_plot(.$PITerror, 0, 100, 10, "PIT Error (%)") +
geom_hline(yintercept=50, linetype='dotted') +
geom_hline(yintercept=d.sum$Outlier, linetype='dashed', color='red', size=0.5)
Using the rstatix::identify_outliers() function, eight participants whose PIT error rates exceeded Q3 + 1.5*IQR = 18.95% were classified as outliers. These participants are excluded from the analysis.
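For reference, the cutoff follows directly from the PIT error distribution table above:

\[
Q_3 + 1.5 \times IQR = 9.28 + 1.5 \times 6.45 \approx 18.95\ \%
\]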
d2 <- rm_subject(d, c(1, 8, 30, 32, 33, 35, 45, 46)) # 38
8 removed & 38 left
# Convert WM_corr = NA on low-load trials to WM_corr = 1 -> WM_corr2
dd <- d2 %>%
  mutate(WM_corr2 = ifelse(Load == 'Low', 1, ifelse(WM_corr == 1, 1, 0)),
DL1s = ifelse(is.na(PIT_rt), 0, ifelse(PIT_rt >= 1.00, 0, 1)),
DL2s = ifelse(is.na(PIT_rt), 0, ifelse(PIT_rt >= 2.00, 0, 1)),
PIT_corr1s = PIT_corr2 * DL1s,
PIT_corr2s = PIT_corr2 * DL2s) %>%
select(SID, Load, sameDiff, Prime, Target, Congruency, WM_corr2, PIT_corr1s, PIT_corr2s, PIT_rt)
headTail(dd)
  SID Load  sameDiff   Prime  Target  Congruency WM_corr2 PIT_corr1s PIT_corr2s PIT_rt
1   2 High      Same    Pain Nonpain Incongruent        0          1          1      1
2   2 High      Same Nonpain    Pain Incongruent        1          1          1   0.95
3   2 High      Same    Pain Nonpain Incongruent        1          0          1   1.05
4   2 High Different Nonpain    Pain Incongruent        0          0          0   1.23
5 <NA> <NA>      <NA>    <NA>    <NA>        <NA>      ...        ...        ...    ...
6  44 High Different    Pain    Pain   Congruent        1          1          1   0.73
7  44 High      Same    Pain    Pain   Congruent        1          1          1   0.74
8  44 High      Same    Pain    Pain   Congruent        0          0          1   1.61
9  44 High Different Nonpain    Pain Incongruent        1          1          1   0.82
We analyzed working-memory error rates in the High Load condition.
# computed here for the later correlation analyses
wd <- dd %>%
  filter(Load == 'High') %>%
group_by(SID) %>%
summarise(WMerror = (1 - mean(WM_corr2))*100)
dd %>%
  filter(Load == 'High') %>%
group_by(SID) %>%
summarise(WMerror = (1 - mean(WM_corr2))*100) %>%
ungroup() %>%
get_summary_stats(WMerror, show = c("mean", "sd", "median", "iqr", "ci")) %>%
kable(digits = 2, caption = "High Load WM Error (%): Descriptive Stats")
variable | n | mean | sd | median | iqr | ci |
---|---|---|---|---|---|---|
WMerror | 38 | 18.2 | 9.55 | 17.58 | 11.33 | 3.14 |
Cowan's K was estimated (Rouder et al., 2011, PBR).
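With set size \(N = 4\), and writing \(H\) for the hit rate (accuracy on change, i.e., Different, trials) and \(FA\) for the false-alarm rate (\(1 -\) accuracy on Same trials), Cowan's formula is

\[
K = N \times (H - FA) = 4 \times (P_{\text{Same}} + P_{\text{Different}} - 1),
\]

which is the symmetric form computed by the mutate() call below (written there as 4 × (Same − (1 − Different))).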
wk <- dd %>%
  filter(Load == 'High') %>%
group_by(SID, sameDiff) %>%
summarise(WMacc = mean(WM_corr2)) %>%
ungroup() %>%
pivot_wider(id_cols = SID, names_from = sameDiff, values_from = WMacc) %>%
mutate(cowanK = 4 * (Same - (1 - Different))) %>%
select(SID, cowanK)
range(wk$cowanK)
## [1] 0.8125 3.8125
wk %>%
  get_summary_stats(cowanK, show = c("mean", "sd", "median", "iqr", "ci")) %>%
kable(digits = 4, caption = "High Load Cowan's K: Descriptive Stats")
variable | n | mean | sd | median | iqr | ci |
---|---|---|---|---|---|---|
cowanK | 38 | 2.544 | 0.764 | 2.594 | 0.906 | 0.251 |
wk %>%
  single_raincloud_plot(.$cowanK, 0, 4, 1, "Cowan's K")
( m3 <- dd %>% filter(Load == 'Low') %>% nrow() )
## [1] 4864
( n3 <- dd %>% filter(Load == 'Low', PIT_rt > 2) %>% nrow() ) # 58 low-load trials exceed 2 s
## [1] 58
n3*100/m3 # 1.19%
## [1] 1.192434
( m4 <- dd %>% filter(Load == 'High') %>% nrow() )
## [1] 4864
( n4 <- dd %>% filter(Load == 'High', PIT_rt < 2) %>% nrow() ) # 118 high-load trials have no response within 2 s
## [1] 4746
(m4-n4)*100/m4 # 2.43%
## [1] 2.425987
In the high-load block the PIT task ended after 2 s, whereas in the low-load block it ended only when the participant responded. Therefore, the 58 low-load trials (1.19%) with PIT RTs above 2 s were treated as 'no response (= error)'.
The 118 high-load trials (2.43%) with a missed response (i.e., no response entered within 2 s) were likewise treated as 'no response (= error)'.
To retain as many valid trials as possible, PIT performance was analyzed regardless of working-memory accuracy.
The analysis follows the rstatix ANOVA tutorial. Because the error rates violate the normality assumption, nonparametric permutation tests were run on all data.
There is no main effect of Load; only the main effect of Congruency (incongruent > congruent) is present.
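To make the permutation logic concrete, the lines below sketch the within-subject sign-flipping idea that permuco::aovperm() generalizes to the full factorial design. This is an illustration only; x and y are hypothetical paired condition means, not the study data.

# Minimal sketch of a paired permutation test (illustration only; x, y are made up).
x <- rnorm(38, 10, 3)                                   # e.g., congruent error rates
y <- rnorm(38, 12, 3)                                   # e.g., incongruent error rates
obs <- mean(y - x)                                      # observed paired effect
perm <- replicate(1e4, {
  flip <- sample(c(-1, 1), length(x), replace = TRUE)   # randomly flip signs within subjects
  mean(flip * (y - x))
})
mean(abs(perm) >= abs(obs))                             # two-tailed permutation p-value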
ed.slong <- dd %>%
  # filter(WM_corr2 == 1) %>% # include only correct WM trials
  group_by(SID, Load, Congruency) %>%
  summarise(PITerror = (1 - mean(PIT_corr2s))*100) %>%
  ungroup() %>%
  mutate(Load = factor(Load, levels = c("Low", "High"),
                       labels = c("Low", "High")))
# ed.slong %>% sample_n_by(Load, Congruency, size = 2) # random sample per condition
sumEd <- ed.slong %>%
  group_by(Load, Congruency) %>%
  get_summary_stats(PITerror, show = c("mean", "sd", "median", "iqr", "ci"))

sumEd %>%
  kable(digits = 2, caption = "PIT Error (%): Descriptive Stats")
Load | Congruency | variable | n | mean | sd | median | iqr | ci |
---|---|---|---|---|---|---|---|---|
Low | Congruent | PITerror | 38 | 4.19 | 3.74 | 3.12 | 4.30 | 1.23 |
Low | Incongruent | PITerror | 38 | 6.13 | 5.09 | 4.69 | 5.86 | 1.67 |
High | Congruent | PITerror | 38 | 4.56 | 3.97 | 3.91 | 5.86 | 1.30 |
High | Incongruent | PITerror | 38 | 5.72 | 4.70 | 5.47 | 6.25 | 1.54 |
ed.slong %>% ggplot(aes(x = factor(Load, level = c('Low', 'High')),
                        y = PITerror, color = Congruency)) +
geom_half_boxplot(side = "r", outlier.shape = NA) +
geom_half_point(side = "l", show.legend = FALSE) +
scale_color_manual(values=c("#E69F00", "#56B4E9"),
labels=c("Congruent", "Incongruent")) +
# coord_cartesian(ylim = c(0, 25), clip = "on") +
scale_y_continuous(breaks=seq(0, 25, by=5)) +
labs(x = "Load",
y = "Pain Identification Error (%)",
fill='Congruency') +
theme_bw(base_size = 18) +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank())
# Build the linear model
model <- lm(PITerror ~ Load*Congruency, data = ed.slong)
# # Create a QQ plot of residuals
# ggqqplot(residuals(model))
# ggqqplot(ed.slong, "PITerror", ggtheme = theme_bw()) +
# facet_grid(Load ~ Congruency)
ed.slong %>%
  group_by(Load, Congruency) %>%
shapiro_test(PITerror)
## # A tibble: 4 × 5
## Load Congruency variable statistic p
## <fct> <fct> <chr> <dbl> <dbl>
## 1 Low Congruent PITerror 0.801 0.0000111
## 2 Low Incongruent PITerror 0.879 0.000692
## 3 High Congruent PITerror 0.893 0.00160
## 4 High Incongruent PITerror 0.883 0.000870
# Compute Shapiro-Wilk test of normality
shapiro_test(residuals(model)) %>%
kable(digits = 2, caption = "Shapiro-Wilk test of normality")
variable | statistic | p.value |
---|---|---|
residuals(model) | 0.9 | 0 |
# Homogeneity of variance assumption
ed.slong %>% levene_test(PITerror ~ Load*Congruency) %>%
  kable(digits = 2, caption = "Homogeneity of variance assumption")
df1 | df2 | statistic | p |
---|---|---|---|
3 | 148 | 1.23 | 0.3 |
# parametric ANOVA: no use
ed.slong %>%
  anova_test(dv = PITerror, wid = SID,
within = c(Load, Congruency),
effect.size = "pes") %>%
get_anova_table(correction = "auto")
## ANOVA Table (type III tests)
##
## Effect DFn DFd F p p<.05 pes
## 1 Load 1 37 0.001 0.973 3.04e-05
## 2 Congruency 1 37 8.138 0.007 * 1.80e-01
## 3 Load:Congruency 1 37 0.822 0.370 2.20e-02
# two-way permutation anova
PITerror.perm <-
  aovperm(PITerror ~ Load * Congruency + Error(SID/(Load*Congruency)),
          data = ed.slong, np = nsims)
summary(PITerror.perm) %>%
kable(digits = c(0,2,2,2,2,2,2,2,3,3), caption = "Nonparametric ANOVA")
SSn | dfn | SSd | dfd | MSEn | MSEd | F | parametric P(>F) | resampled P(>F) | |
---|---|---|---|---|---|---|---|---|---|
Load | 0 | 1 | 527.94 | 37 | 0.02 | 14.27 | 0.00 | 0.97 | 0.974 |
Congruency | 90 | 1 | 410.75 | 37 | 90.35 | 11.10 | 8.14 | 0.01 | 0.006 |
Load:Congruency | 6 | 1 | 260.93 | 37 | 5.80 | 7.05 | 0.82 | 0.37 | 0.372 |
# effect size
dxEd <- ANOVA_design(
  design = "2w*2w",
  n = sumEd$n,
  mu = sumEd$mean,
  sd = sumEd$sd,
  labelnames = c("LOAD", "Low", "High",
                 "CONGRUENCY", "Cong", "Incong"),
  plot = FALSE
)
pwrEd <- ANOVA_power(dxEd, verbose = FALSE, nsims = nsims)
pwrEd$main_results %>%
  kable(digits = c(0,2,3), caption = "Power & Effect Size")
power | effect_size | |
---|---|---|
anova_LOAD | 5 | 0.03 |
anova_CONGRUENCY | 56 | 0.13 |
anova_LOAD:CONGRUENCY | 8 | 0.03 |
There were no anticipatory responses (RT < 200 ms). Applying a \(\mu \pm3SD\) criterion to each participant's RTs, 1.7% of trials were excluded as outliers. The per-participant means of the remaining RTs were fairly uniform, as shown below.
nCorr <- dd %>%
  # filter(WM_corr2 == 1) %>%
  filter(PIT_corr2s == 1) %>%
  nrow()
nAnticip <- dd %>%
  # filter(WM_corr2 == 1) %>%
  filter(PIT_corr2s == 1, PIT_rt < .2) %>%
  nrow()
nAnticip/nCorr # no anticipatory responses
## [1] 0
td <- dd %>%
  # filter(WM_corr2 == 1) %>%
filter(PIT_corr2s == 1) %>%
group_by(SID) %>%
nest() %>%
mutate(lbound = map(data, ~mean(.$PIT_rt)-3*sd(.$PIT_rt)),
ubound = map(data, ~mean(.$PIT_rt)+3*sd(.$PIT_rt))) %>%
unnest(c(lbound, ubound)) %>%
unnest(data) %>%
mutate(Outlier = (PIT_rt < lbound)|(PIT_rt > ubound)) %>%
filter(Outlier == FALSE) %>%
ungroup() %>%
select(SID, Load, Congruency, PIT_rt)
range(td$PIT_rt) # min & max
## [1] 0.2554 1.9541
nVal <- td %>% nrow()
(nCorr - nVal)/nCorr # 1.7% trimmed
## [1] 0.01712366
td %>% group_by(SID) %>%
  summarise(RT = mean(PIT_rt)) %>%
ungroup() %>%
single_raincloud_plot(.$RT, 0, 2, 0.2, "Pain Identification Time (s)")
The Load effect (high load > low load) and the Congruency effect (incongruent > congruent) are clear. No interaction is apparent.
td.slong <- td %>%
  group_by(SID, Load, Congruency) %>%
summarise(RT = mean(PIT_rt)*1000) %>%
ungroup() %>%
mutate(Load = factor(Load, levels = c("Low", "High"),
labels = c("Low", "High")))
str(td.slong)
## tibble [152 × 4] (S3: tbl_df/tbl/data.frame)
## $ SID : Factor w/ 38 levels "2","3","4","5",..: 1 1 1 1 2 2 2 2 3 3 ...
## $ Load : Factor w/ 2 levels "Low","High": 2 2 1 1 2 2 1 1 2 2 ...
## $ Congruency: Factor w/ 2 levels "Congruent","Incongruent": 1 2 1 2 1 2 1 2 1 2 ...
## $ RT : num [1:152] 716 827 667 764 942 ...
sumTd <- td.slong %>%
  group_by(Load, Congruency) %>%
  get_summary_stats(RT, show = c("mean", "sd", "median", "iqr", "ci"))

sumTd %>%
  kable(digits = 2, caption = "PIT RT (ms): Descriptive Stats")
Load | Congruency | variable | n | mean | sd | median | iqr | ci |
---|---|---|---|---|---|---|---|---|
Low | Congruent | RT | 38 | 662.32 | 133.66 | 634.67 | 146.35 | 43.94 |
Low | Incongruent | RT | 38 | 695.49 | 135.89 | 688.77 | 148.77 | 44.67 |
High | Congruent | RT | 38 | 815.01 | 169.94 | 818.82 | 272.25 | 55.86 |
High | Incongruent | RT | 38 | 855.82 | 173.60 | 845.51 | 256.77 | 57.06 |
td.slong %>%
  mutate(RT = RT / 1000) %>%
ggplot(aes(x = factor(Load, level = c('Low', 'High')),
y = RT, color = Congruency)) +
geom_half_boxplot(side = "r", outlier.shape = NA) +
geom_half_point(side = "l", show.legend = FALSE) +
# coord_cartesian(ylim = c(0.5, 1.2), clip = "on") +
scale_y_continuous(breaks=seq(0.5, 1.2, by=0.1)) +
scale_color_manual(values=c("#E69F00", "#56B4E9"),
labels=c("Congruent", "Incongruent")) +
labs(x = "Load",
y = "Pain Identification Time (s)",
fill='Congruency') +
theme_bw(base_size = 18) +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank())
# # Build the linear model
# model <- lm(RT ~ Load*Congruency, data = td.slong)
#
# # Create a QQ plot of residuals
# ggqqplot(residuals(model))
# ggqqplot(td.slong, "RT", ggtheme = theme_bw()) +
# facet_grid(Load ~ Congruency)
#
# td.slong %>%
# group_by(Load, Congruency) %>%
# shapiro_test(RT)
#
# # Compute Shapiro-Wilk test of normality
# shapiro_test(residuals(model)) %>%
# kable(digits = 4, caption = "Shapiro-Wilk test of normality: All trials")
#
# # Homogeneity of variance assumption
# td.slong %>% levene_test(RT ~ Load*Congruency) %>%
#   kable(digits = 4, caption = "Homogeneity of variance assumption")
# parametric ANOVA: no use
td.slong %>%
  anova_test(dv = RT, wid = SID,
within = c(Load, Congruency),
effect.size = "pes") %>%
get_anova_table(correction = "auto")
## ANOVA Table (type III tests)
##
## Effect DFn DFd F p p<.05 pes
## 1 Load 1 37 86.497 3.19e-11 * 0.700
## 2 Congruency 1 37 27.056 7.56e-06 * 0.422
## 3 Load:Congruency 1 37 0.764 3.88e-01 0.020
# two-way permutation anova
PITRT.perm <-
  aovperm(RT ~ Load * Congruency + Error(SID/(Load*Congruency)),
          data = td.slong, np = nsims)
summary(PITRT.perm) %>%
kable(digits = c(0,2,2,2,2,2,2,2,3,3), caption = "Nonparametric ANOVA")
SSn | dfn | SSd | dfd | MSEn | MSEd | F | parametric P(>F) | resampled P(>F) | |
---|---|---|---|---|---|---|---|---|---|
Load | 930838 | 1 | 398175.00 | 37 | 930838.19 | 10761.49 | 86.50 | 0.00 | 0.000 |
Congruency | 52000 | 1 | 71112.49 | 37 | 51999.92 | 1921.96 | 27.06 | 0.00 | 0.000 |
Load:Congruency | 556 | 1 | 26952.39 | 37 | 556.21 | 728.44 | 0.76 | 0.39 | 0.381 |
# effect size
dxTd <- ANOVA_design(
  design = "2w*2w",
  n = sumTd$n,
  mu = sumTd$mean,
  sd = sumTd$sd,
  labelnames = c("LOAD", "Low", "High",
                 "CONGRUENCY", "Cong", "Incong"),
  plot = FALSE
)
pwrTd <- ANOVA_power(dxTd, verbose = FALSE, nsims = nsims)
pwrTd$main_results %>%
  kable(digits = c(0,2,3), caption = "Power & Effect Size")
power | effect_size | |
---|---|---|
anova_LOAD | 100 | 0.51 |
anova_CONGRUENCY | 30 | 0.08 |
anova_LOAD:CONGRUENCY | 5 | 0.03 |
# PIT Error
datB <- ed.slong %>%
  unite("Var", c(Load:Congruency)) %>%
  pivot_wider(id_cols = "SID", names_from = "Var", values_from = "PITerror") %>%
  mutate(Low_CE = Low_Incongruent - Low_Congruent,
         High_CE = High_Incongruent - High_Congruent,
         WM = wk$cowanK) %>%
  select(SID, WM, ends_with("CE"))
tmpR <- datB %>% select(!SID) %>% cor_mat()
tmpP <- tmpR %>% cor_get_pval()

pbonB <- p.adjust(tmpP$WM[2:3], "bonferroni")
tibble(parameter = c("Pearson r", "Bonf.corr.p"),
lowLoad = c(tmpR$WM[2], pbonB[1]),
highLoad = c(tmpR$WM[3], pbonB[2])) %>%
kable(align = 'c', digits = 3, caption = "Congruency in error rates vs. Cowan's K")
parameter | lowLoad | highLoad |
---|---|---|
Pearson r | -0.280 | -0.45 |
Bonf.corr.p | 0.183 | 0.01 |
# PIT RT
datT <- td.slong %>%
  unite("Var", c(Load:Congruency)) %>%
  pivot_wider(id_cols = "SID", names_from = "Var", values_from = "RT") %>%
  mutate(Low_CE = Low_Incongruent - Low_Congruent,
         High_CE = High_Incongruent - High_Congruent,
         WM = wk$cowanK) %>%
  select(SID, WM, ends_with("CE"))
tmpR2 <- datT %>% select(!SID) %>% cor_mat()
tmpP2 <- tmpR2 %>% cor_get_pval()

pbonB2 <- p.adjust(tmpP2$WM[2:3], "bonferroni")
tibble(parameter = c("Pearson r", "Bonf.corr.p"),
lowLoad = c(tmpR2$WM[2], pbonB2[1]),
highLoad = c(tmpR2$WM[3], pbonB2[2])) %>%
kable(align = 'c', digits = 3, caption = "Congruency in RTs vs. Cowan's K")
parameter | lowLoad | highLoad |
---|---|---|
Pearson r | -0.170 | -0.300 |
Bonf.corr.p | 0.616 | 0.142 |
There are main effects of Load (high load > low load) and Congruency (incongruent > congruent).
edx.slong <- dd %>%
  # filter(WM_corr2 == 1) %>% # include only correct WM trials
group_by(SID, Load, Congruency) %>%
summarise(PITerror = (1 - mean(PIT_corr1s))*100) %>%
ungroup() %>%
mutate(Load = factor(Load, levels = c("Low", "High"),
labels = c("Low", "High")))
sumEdx <- edx.slong %>%
  group_by(Load, Congruency) %>%
  get_summary_stats(PITerror, show = c("mean", "sd", "median", "iqr", "ci"))

sumEdx %>%
  kable(digits = 2, caption = "PIT Error (%): Descriptive Stats")
Load | Congruency | variable | n | mean | sd | median | iqr | ci |
---|---|---|---|---|---|---|---|---|
Low | Congruent | PITerror | 38 | 12.42 | 11.93 | 10.16 | 10.94 | 3.92 |
Low | Incongruent | PITerror | 38 | 16.24 | 13.44 | 13.28 | 11.72 | 4.42 |
High | Congruent | PITerror | 38 | 27.59 | 17.37 | 22.66 | 28.12 | 5.71 |
High | Incongruent | PITerror | 38 | 31.78 | 18.34 | 28.91 | 28.91 | 6.03 |
# plot
edx.slong %>% ggplot(aes(x = factor(Load, level = c('Low', 'High')),
                         y = PITerror, color = Congruency)) +
geom_half_boxplot(side = "r", outlier.shape = NA) +
geom_half_point(side = "l", show.legend = FALSE) +
scale_color_manual(values=c("#E69F00", "#56B4E9"),
labels=c("Congruent", "Incongruent")) +
# coord_cartesian(ylim = c(0, 25), clip = "on") +
# scale_y_continuous(breaks=seq(0, 25, by=5)) +
labs(x = "Load",
y = "Pain Identification Error (%)",
fill='Congruency') +
theme_bw(base_size = 18) +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank())
# parametric ANOVA: no use
edx.slong %>%
  anova_test(dv = PITerror, wid = SID,
within = c(Load, Congruency),
effect.size = "pes") %>%
get_anova_table(correction = "auto")
## ANOVA Table (type III tests)
##
## Effect DFn DFd F p p<.05 pes
## 1 Load 1 37 56.487 5.89e-09 * 0.604
## 2 Congruency 1 37 11.198 2.00e-03 * 0.232
## 3 Load:Congruency 1 37 0.052 8.21e-01 0.001
# two-way permutation anova
PITerrorX.perm <-
  aovperm(PITerror ~ Load * Congruency + Error(SID/(Load*Congruency)),
          data = edx.slong, np = nsims)
summary(PITerrorX.perm) %>%
kable(digits = c(0,2,2,2,2,2,2,2,5,3), caption = "Nonparametric ANOVA")
SSn | dfn | SSd | dfd | MSEn | MSEd | F | parametric P(>F) | resampled P(>F) | |
---|---|---|---|---|---|---|---|---|---|
Load | 8963 | 1 | 5870.70 | 37 | 8962.68 | 158.67 | 56.49 | 0.00 | 0.0001 |
Congruency | 611 | 1 | 2018.03 | 37 | 610.75 | 54.54 | 11.20 | 0.00 | 0.0016 |
Load:Congruency | 1 | 1 | 925.82 | 37 | 1.30 | 25.02 | 0.05 | 0.82 | 0.8287 |
# effect size
dxEdx <- ANOVA_design(
  design = "2w*2w",
  n = sumEdx$n,
  mu = sumEdx$mean,
  sd = sumEdx$sd,
  labelnames = c("LOAD", "Low", "High",
                 "CONGRUENCY", "Cong", "Incong"),
  plot = FALSE
)
pwrEdx <- ANOVA_power(dxEdx, verbose = FALSE, nsims = nsims)
pwrEdx$main_results %>%
  kable(digits = c(0,2,3), caption = "Power & Effect Size")
power | effect_size | |
---|---|---|
anova_LOAD | 100 | 0.50 |
anova_CONGRUENCY | 34 | 0.08 |
anova_LOAD:CONGRUENCY | 5 | 0.03 |
tdx <- dd %>%
  # filter(WM_corr2 == 1) %>%
filter(PIT_corr1s == 1) %>%
group_by(SID) %>%
nest() %>%
mutate(lbound = map(data, ~mean(.$PIT_rt)-3*sd(.$PIT_rt)),
ubound = map(data, ~mean(.$PIT_rt)+3*sd(.$PIT_rt))) %>%
unnest(c(lbound, ubound)) %>%
unnest(data) %>%
mutate(Outlier = (PIT_rt < lbound)|(PIT_rt > ubound)) %>%
filter(Outlier == FALSE) %>%
ungroup() %>%
select(SID, Load, Congruency, PIT_rt)
range(tdx$PIT_rt) # min & max
## [1] 0.2554 0.9999
nCorr <- dd %>%
  # filter(WM_corr2 == 1) %>%
  filter(PIT_corr1s == 1) %>%
  nrow()
nVal <- tdx %>% nrow()
(nCorr - nVal)/nCorr # 0.3% trimmed
## [1] 0.002767892
tdx %>% group_by(SID) %>%
  summarise(RT = mean(PIT_rt)) %>%
ungroup() %>%
single_raincloud_plot(.$RT, 0, 2, 0.2, "Pain Identification Time (s)")
tdx.slong <- tdx %>%
  group_by(SID, Load, Congruency) %>%
summarise(RT = mean(PIT_rt)*1000) %>%
ungroup() %>%
mutate(Load = factor(Load, levels = c("Low", "High"),
labels = c("Low", "High")))
str(tdx.slong)
## tibble [152 × 4] (S3: tbl_df/tbl/data.frame)
## $ SID : Factor w/ 38 levels "2","3","4","5",..: 1 1 1 1 2 2 2 2 3 3 ...
## $ Load : Factor w/ 2 levels "Low","High": 2 2 1 1 2 2 1 1 2 2 ...
## $ Congruency: Factor w/ 2 levels "Congruent","Incongruent": 1 2 1 2 1 2 1 2 1 2 ...
## $ RT : num [1:152] 692 772 653 732 728 ...
sumTdx <- tdx.slong %>%
  group_by(Load, Congruency) %>%
  get_summary_stats(RT, show = c("mean", "sd", "median", "iqr", "ci"))

sumTdx %>%
  kable(digits = 2, caption = "PIT RT (ms): Descriptive Stats")
Load | Congruency | variable | n | mean | sd | median | iqr | ci |
---|---|---|---|---|---|---|---|---|
Low | Congruent | RT | 38 | 619.53 | 86.60 | 607.88 | 140.43 | 28.46 |
Low | Incongruent | RT | 38 | 641.67 | 86.00 | 635.43 | 125.10 | 28.27 |
High | Congruent | RT | 38 | 711.24 | 99.51 | 735.24 | 146.30 | 32.71 |
High | Incongruent | RT | 38 | 737.49 | 103.91 | 760.39 | 143.56 | 34.16 |
The Load effect (high load > low load) and the Congruency effect (incongruent > congruent) are clear. No interaction is apparent.
tdx.slong %>%
  mutate(RT = RT / 1000) %>%
ggplot(aes(x = factor(Load, level = c('Low', 'High')),
y = RT, color = Congruency)) +
geom_half_boxplot(side = "r", outlier.shape = NA) +
geom_half_point(side = "l", show.legend = FALSE) +
# coord_cartesian(ylim = c(0.5, 1.2), clip = "on") +
scale_y_continuous(breaks=seq(0.5, 1.2, by=0.1)) +
scale_color_manual(values=c("#E69F00", "#56B4E9"),
labels=c("Congruent", "Incongruent")) +
labs(x = "Load",
y = "Pain Identification Time (s)",
fill='Congruency') +
theme_bw(base_size = 18) +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank())
# parametric ANOVA: no use
tdx.slong %>%
  anova_test(dv = RT, wid = SID,
within = c(Load, Congruency),
effect.size = "pes") %>%
get_anova_table(correction = "auto")
## ANOVA Table (type III tests)
##
## Effect DFn DFd F p p<.05 pes
## 1 Load 1 37 87.943 2.57e-11 * 0.704
## 2 Congruency 1 37 33.646 1.17e-06 * 0.476
## 3 Load:Congruency 1 37 0.353 5.56e-01 0.009
# two-way permutation anova
PITRTx.perm <-
  aovperm(RT ~ Load * Congruency + Error(SID/(Load*Congruency)),
          data = tdx.slong, np = nsims)
summary(PITRTx.perm) %>%
kable(digits = c(0,2,2,2,2,2,2,2,5,3), caption = "Nonparametric ANOVA")
SSn | dfn | SSd | dfd | MSEn | MSEd | F | parametric P(>F) | resampled P(>F) | |
---|---|---|---|---|---|---|---|---|---|
Load | 334093 | 1 | 140562.5 | 37 | 334092.92 | 3798.99 | 87.94 | 0.00 | 0.0001 |
Congruency | 22250 | 1 | 24467.9 | 37 | 22250.06 | 661.29 | 33.65 | 0.00 | 0.0001 |
Load:Congruency | 160 | 1 | 16812.5 | 37 | 160.40 | 454.39 | 0.35 | 0.56 | 0.5531 |
# effect size
dxTdx <- ANOVA_design(
  design = "2w*2w",
  n = sumTdx$n,
  mu = sumTdx$mean,
  sd = sumTdx$sd,
  labelnames = c("LOAD", "Low", "High",
                 "CONGRUENCY", "Cong", "Incong"),
  plot = FALSE
)
pwrTdx <- ANOVA_power(dxTdx, verbose = FALSE, nsims = nsims)
pwrTdx$main_results %>%
  kable(digits = c(0,2,3), caption = "Power & Effect Size")
power | effect_size | |
---|---|---|
anova_LOAD | 100 | 0.50 |
anova_CONGRUENCY | 34 | 0.08 |
anova_LOAD:CONGRUENCY | 5 | 0.03 |
# PIT Error
datB <- edx.slong %>%
  unite("Var", c(Load:Congruency)) %>%
  pivot_wider(id_cols = "SID", names_from = "Var", values_from = "PITerror") %>%
  mutate(Low_CE = Low_Incongruent - Low_Congruent,
         High_CE = High_Incongruent - High_Congruent,
         WM = wk$cowanK) %>%
  select(SID, WM, ends_with("CE"))
tmpR <- datB %>% select(!SID) %>% cor_mat()
tmpP <- tmpR %>% cor_get_pval()

pbonB <- p.adjust(tmpP$WM[2:3], "bonferroni")
tibble(parameter = c("Pearson r", "Bonf.corr.p"),
lowLoad = c(tmpR$WM[2], pbonB[1]),
highLoad = c(tmpR$WM[3], pbonB[2])) %>%
kable(align = 'c', digits = 3, caption = "Congruency in error rates vs. Cowan's K")
parameter | lowLoad | highLoad |
---|---|---|
Pearson r | -0.300 | -0.320 |
Bonf.corr.p | 0.127 | 0.094 |
# PIT RT
datT <- tdx.slong %>%
  unite("Var", c(Load:Congruency)) %>%
  pivot_wider(id_cols = "SID", names_from = "Var", values_from = "RT") %>%
  mutate(Low_CE = Low_Incongruent - Low_Congruent,
         High_CE = High_Incongruent - High_Congruent,
         WM = wk$cowanK) %>%
  select(SID, WM, ends_with("CE"))
tmpR2 <- datT %>% select(!SID) %>% cor_mat()
tmpP2 <- tmpR2 %>% cor_get_pval()

pbonB2 <- p.adjust(tmpP2$WM[2:3], "bonferroni")
tibble(parameter = c("Pearson r", "Bonf.corr.p"),
lowLoad = c(tmpR2$WM[2], pbonB2[1]),
highLoad = c(tmpR2$WM[3], pbonB2[2])) %>%
kable(align = 'c', digits = 3, caption = "Congruency in RTs vs. Cowan's K")
parameter | lowLoad | highLoad |
---|---|---|
Pearson r | -0.099 | -0.250 |
Bonf.corr.p | 1.000 | 0.252 |
Note. Filled circles = 2s deadline. Empty circles = 1s deadline.
tmp1 <- edx.slong %>% # deadline 1s
  group_by(Load, Congruency) %>%
  get_summary_stats(PITerror, show = c("mean", "ci"))
tmp3 <- ed.slong %>% # deadline 2s
  group_by(Load, Congruency) %>%
  get_summary_stats(PITerror, show = c("mean", "ci"))
xSum <- rbind(tmp1, tmp3)
xSum$Deadline <- factor(rep(c('DL1s', 'DL2s'), each = 4))
xSum <- xSum %>%
  mutate(M = mean,
         lower.conf = mean - ci,
         upper.conf = mean + ci) %>%
  select(variable, Deadline, Load, Congruency, M, lower.conf, upper.conf)
str(xSum)
## tibble [8 × 7] (S3: tbl_df/tbl/data.frame)
## $ variable : chr [1:8] "PITerror" "PITerror" "PITerror" "PITerror" ...
## $ Deadline : Factor w/ 2 levels "DL1s","DL2s": 1 1 1 1 2 2 2 2
## $ Load : Factor w/ 2 levels "Low","High": 1 1 2 2 1 1 2 2
## $ Congruency: Factor w/ 2 levels "Congruent","Incongruent": 1 2 1 2 1 2 1 2
## $ M : num [1:8] 12.42 16.24 27.59 31.79 4.19 ...
## $ lower.conf: num [1:8] 8.5 11.82 21.88 25.75 2.96 ...
## $ upper.conf: num [1:8] 16.34 20.66 33.3 37.81 5.42 ...
error_width <- 0.3
dodge_size0 <- 0.05
dodge_size2 <- 0.6
dl1 <- xSum %>% filter(variable == "PITerror", Deadline == "DL1s")
dl2 <- xSum %>% filter(variable == "PITerror", Deadline == "DL2s")
yf1 <- ggplot() +
  geom_errorbar(data = dl1,
aes(x = as.numeric(Load)-dodge_size0, y = M,
ymin = lower.conf, ymax = upper.conf, color = Congruency),
width=error_width, size=1,
position = position_dodge(width = dodge_size2)) +
geom_errorbar(data = dl2,
aes(x = as.numeric(Load)+dodge_size0, y = M,
ymin = lower.conf, ymax = upper.conf, color = Congruency),
width=error_width, size=1,
position = position_dodge(width = dodge_size2)) +
geom_point(data = dl1,
aes(x = as.numeric(Load)-dodge_size0, y = M, color = Congruency),
size=4, shape = 21, fill = "white",
position = position_dodge(width = dodge_size2)) +
geom_point(data = dl2,
aes(x = as.numeric(Load)+dodge_size0, y = M,
color = Congruency, fill = Congruency),
size=4, shape = 21,
position = position_dodge(width = dodge_size2)) +
scale_color_manual(values=c("#E69F00", "#56B4E9")) +
scale_fill_manual(values=c("#E69F00", "#56B4E9")) +
scale_x_continuous(breaks = c(1, 2), label = c("Low", "High")) +
coord_cartesian(xlim = c(0.5, 2.5), ylim = c(0, 40), clip = "on") +
scale_y_continuous(breaks=seq(0, 40, by=10)) +
labs(x = "Load",
y = "Pain Identification Error (%)",
fill='Congruency') +
theme_bw(base_size = 18) +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
aspect.ratio = 1.3)
yf1
# response time
tmp2 <- tdx.slong %>% # deadline 1s
  group_by(Load, Congruency) %>%
  get_summary_stats(RT, show = c("mean", "ci"))
tmp4 <- td.slong %>% # deadline 2s
  group_by(Load, Congruency) %>%
  get_summary_stats(RT, show = c("mean", "ci"))
xSum2 <- rbind(tmp2, tmp4)
xSum2$Deadline <- factor(rep(c('DL1s', 'DL2s'), each = 4))
xSum2 <- xSum2 %>%
  mutate(M = mean,
         lower.conf = mean - ci,
         upper.conf = mean + ci) %>%
  select(variable, Deadline, Load, Congruency, M, lower.conf, upper.conf)
str(xSum2)
## tibble [8 × 7] (S3: tbl_df/tbl/data.frame)
## $ variable : chr [1:8] "RT" "RT" "RT" "RT" ...
## $ Deadline : Factor w/ 2 levels "DL1s","DL2s": 1 1 1 1 2 2 2 2
## $ Load : Factor w/ 2 levels "Low","High": 1 1 2 2 1 1 2 2
## $ Congruency: Factor w/ 2 levels "Congruent","Incongruent": 1 2 1 2 1 2 1 2
## $ M : num [1:8] 620 642 711 737 662 ...
## $ lower.conf: num [1:8] 591 613 679 703 618 ...
## $ upper.conf: num [1:8] 648 670 744 772 706 ...
dl3 <- xSum2 %>% filter(variable == "RT", Deadline == "DL1s")
dl4 <- xSum2 %>% filter(variable == "RT", Deadline == "DL2s")
yf2 <- ggplot() +
  geom_errorbar(data = dl3,
aes(x = as.numeric(Load)-dodge_size0, y = M,
ymin = lower.conf, ymax = upper.conf, color = Congruency),
width=error_width, size=1,
position = position_dodge(width = dodge_size2)) +
geom_errorbar(data = dl4,
aes(x = as.numeric(Load)+dodge_size0, y = M,
ymin = lower.conf, ymax = upper.conf, color = Congruency),
width=error_width, size=1,
position = position_dodge(width = dodge_size2)) +
geom_point(data = dl3,
aes(x = as.numeric(Load)-dodge_size0, y = M, color = Congruency),
size=4, shape = 21, fill = "white",
position = position_dodge(width = dodge_size2)) +
geom_point(data = dl4,
           aes(x = as.numeric(Load)+dodge_size0, y = M,
               color = Congruency, fill = Congruency),
           size=4, shape = 21,
position = position_dodge(width = dodge_size2)) +
scale_color_manual(values=c("#E69F00", "#56B4E9")) +
scale_fill_manual(values=c("#E69F00", "#56B4E9")) +
scale_x_continuous(breaks = c(1, 2), label = c("Low", "High")) +
coord_cartesian(xlim = c(0.5, 2.5), ylim = c(600, 900), clip = "on") +
scale_y_continuous(breaks=seq(600, 900, by=100)) +
labs(x = "Load",
y = "Pain Identification Time (ms)",
fill='Congruency') +
theme_bw(base_size = 18) +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
aspect.ratio = 1.3)
yf2
multiTree leaves room for human error during use. We expected multiTreeR to serve as an interface to R, but it did not work on either macOS or Windows. Instead, every step is scripted with the MPTinR package.
ggdraw() + draw_image("fig/Fig_mpt.png")
We define the tree structure and the constraints.
<- "
PIT2C.eqn # tree for Low, Pain prime, Pain target: correct, then incorrect
I1 + (1-I1)*U1 + (1-I1)*(1-U1)*B1
(1-I1)*(1-U1)*(1-B1)
# tree for Low, Pain prime, Nonpain target: correct, then incorrect
I1 + (1-I1)*(1-U1)*(1-B1)
(1-I1)*U1 + (1-I1)*(1-U1)*B1
# tree for Low, Nonpain prime, Pain target: correct, then incorrect
I1 + (1-I1)*(1-U1)*B1
(1-I1)*U1 + (1-I1)*(1-U1)*(1-B1)
# tree for Low, Nonpain prime, Nonpain target: correct, then incorrect
I1 + (1-I1)*U1 + (1-I1)*(1-U1)*(1-B1)
(1-I1)*(1-U1)*B1
# tree for High, Pain prime, Pain target: correct, then incorrect
I2 + (1-I2)*U2 + (1-I2)*(1-U2)*B2
(1-I2)*(1-U2)*(1-B2)
# tree for High, Pain prime, Nonpain target: correct, then incorrect
I2 + (1-I2)*(1-U2)*(1-B2)
(1-I2)*U2 + (1-I2)*(1-U2)*B2
# tree for High, Nonpain prime, Pain target: correct, then incorrect
I2 + (1-I2)*(1-U2)*B2
(1-I2)*U2 + (1-I2)*(1-U2)*(1-B2)
# tree for High, Nonpain prime, Nonpain target: correct, then incorrect
I2 + (1-I2)*U2 + (1-I2)*(1-U2)*(1-B2)
(1-I2)*(1-U2)*B2
"
.0 <- "
restrict"
.1 <- "
restrictI1 = I2
"
.2 <- "
restrictU1 = U2
"
.3 <- "
restrictB1 = B2
"
check.mpt(
  model.filename = textConnection(PIT2C.eqn),
  model.type = "easy"
)
$probabilities.eq.1
[1] TRUE

$n.trees
[1] 8

$n.model.categories
[1] 16

$n.independent.categories
[1] 8

$n.params
[1] 6

$parameters
[1] "B1" "B2" "I1" "I2" "U1" "U2"
We build the data structure MPTinR expects, then fit the baseline model to estimate the parameters.
cn <- dd %>%
  group_by(SID, Load, Prime, Target, PIT_corr1s) %>%
summarise(N = n()) %>%
ungroup() %>%
mutate(Prime2 = ifelse(Prime == "Pain", "P", "N"),
Target2 = ifelse(Target == "Pain", "P", "N"),
Corr2 = ifelse(PIT_corr1s == 1, "Corr", "Incorr")) %>%
select(SID, Load, Prime2, Target2, Corr2, N) %>%
arrange(SID, desc(Load), desc(Prime2), desc(Target2), Corr2) %>%
unite("Condition", Load:Corr2, sep = "") %>%
pivot_wider(names_from = "Condition", values_from = "N") %>%
select(!SID) %>%
replace(is.na(.), 0)
<- c("LowPPCorr", "LowPPIncorr",
col_order "LowPNCorr", "LowPNIncorr",
"LowNPCorr", "LowNPIncorr",
"LowNNCorr", "LowNNIncorr",
"HighPPCorr", "HighPPIncorr",
"HighPNCorr", "HighPNIncorr",
"HighNPCorr", "HighNPIncorr",
"HighNNCorr", "HighNNIncorr")
cn <- cn[, col_order]
# cn %>% print(n = Inf)
# model fitting
mC.0 <- fit.mpt(
  data = cn,
  model.filename = textConnection(PIT2C.eqn),
  fia = 200000
)
## [1] "Computing FIA: Iteration begins at 2022-09-20 19:04:03"
## [1] "Computing FIA: Iteration stopped at 2022-09-20 19:04:05"
## Time difference of 1.275761 secs
## Presenting the best result out of 5 minimization runs.
## [1] "Model fitting begins at 2022-09-20 19:04:05"
## Optimization routine for dataset(s) 4 12 13 17 18 30 38
## did not converge succesfully. Tried again with use.gradient == FALSE.
## Optimization for dataset(s) 4 12 13 17 18 30 38
## using numerically estimated gradients produced better results. Using those results.
## Old results saved in output == 'full' [['optim.runs']].
## [1] "Model fitting stopped at 2022-09-20 19:04:07"
## Time difference of 2.255377 secs
Now we examine the fit of the baseline model.
mC.0$goodness.of.fit$individual %>%
  mutate(SID = unique(td.slong$SID))
## Log.Likelihood G.Squared df p.value SID
## 1 -100.13218 1.4081388 2 4.945686e-01 2
## 2 -130.92696 0.6123544 2 7.362562e-01 3
## 3 -134.23379 1.8997596 2 3.867875e-01 4
## 4 -119.16954 1.3906686 2 4.989076e-01 5
## 5 -145.66643 4.3758035 2 1.121518e-01 6
## 6 -105.19152 2.1964629 2 3.334603e-01 7
## 7 -45.22586 7.8516657 2 1.972570e-02 9
## 8 -98.70813 1.8424463 2 3.980319e-01 10
## 9 -156.58672 0.5114994 2 7.743358e-01 11
## 10 -114.50364 4.8189309 2 8.986332e-02 12
## 11 -169.17009 11.9166680 2 2.584214e-03 13
## 12 -74.73781 7.7052782 2 2.122365e-02 14
## 13 -91.63495 3.8744302 2 1.441047e-01 15
## 14 -86.72567 2.4814926 2 2.891683e-01 16
## 15 -114.89134 2.8704485 2 2.380620e-01 17
## 16 -57.07647 9.6274123 2 8.117719e-03 18
## 17 -74.31167 2.3802128 2 3.041889e-01 19
## 18 -63.61689 2.4856654 2 2.885656e-01 20
## 19 -140.05587 1.8264752 2 4.012231e-01 21
## 20 -176.88186 54.2579694 2 1.652082e-12 22
## 21 -107.54796 1.0377157 2 5.952000e-01 23
## 22 -155.97618 1.1603546 2 5.597991e-01 24
## 23 -130.14711 1.4667557 2 4.802839e-01 25
## 24 -146.10490 1.8109995 2 4.043398e-01 26
## 25 -70.47207 0.7950919 2 6.719671e-01 27
## 26 -124.92913 2.3097399 2 3.150985e-01 28
## 27 -50.66526 2.7924402 2 2.475308e-01 29
## 28 -163.96024 3.7973819 2 1.497645e-01 31
## 29 -113.18278 1.2173873 2 5.440611e-01 34
## 30 -88.73664 2.7451790 2 2.534498e-01 36
## 31 -148.88447 6.9984597 2 3.022065e-02 37
## 32 -103.18878 4.9841672 2 8.273740e-02 38
## 33 -95.07542 4.3629561 2 1.128746e-01 39
## 34 -95.79796 12.8828427 2 1.594139e-03 40
## 35 -154.58331 1.5607839 2 4.582264e-01 41
## 36 -146.82969 2.1078925 2 3.485595e-01 42
## 37 -90.29681 8.8432557 2 1.201466e-02 43
## 38 -99.68214 1.1464693 2 5.636991e-01 44
# kable(digits = 4, caption = "Individual Fits")
# mC.0$goodness.of.fit$sum %>%
# kable(digits = 4, caption = "Sum of Indivisual Fits")
wES <- sqrt( mC.0$goodness.of.fit$aggregated$G.Squared / sum(cn) )

mC.0$goodness.of.fit$aggregated %>%
  mutate(w = wES) %>%
  kable(digits = 2, caption = "Aggregated Fit")
Log.Likelihood | G.Squared | df | p.value | w |
---|---|---|---|---|
-4942.76 | 5.72 | 2 | 0.06 | 0.02 |
The likelihood-ratio \(G^2\) statistic indexes the discrepancy between the data and the model's expected values. If the fit is good, this value should be small and the \(p\)-value should be non-significant (\(p\) > .05). The code block above prints the individual ($individual) fits; it is easy to see that the data of participants 13, 22, and 40 in particular do not fit the model.
The aggregated ($aggregated) result is shown at the bottom; it matches the multiTree result exactly.
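Formally, \(G^2\) compares observed (\(O_i\)) and model-expected (\(E_i\)) category frequencies,

\[
G^2 = 2 \sum_{i} O_i \ln\frac{O_i}{E_i},
\]

and is asymptotically \(\chi^2\)-distributed with the degrees of freedom shown in the table.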
The second criterion for evaluating model fit is effect size. Because a larger discrepancy between the data and the expected values yields a larger effect size, smaller effect sizes indicate better fit. The data are regarded as fitting the model when the \(r\)-family statistic \(w = \sqrt{\frac{\chi^2}{n}}\) is < .05.
Since the effect size here is \(w\) = 0.024, the fit can be considered adequate.
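Plugging in the aggregated fit above (\(G^2\) = 5.72 over \(n\) = 38 × 256 = 9728 trials):

\[
w = \sqrt{\frac{G^2}{n}} = \sqrt{\frac{5.72}{9728}} \approx 0.024.
\]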
The parameter estimates are as follows.
mC.0$parameters$aggregated %>%
  kable(digits = 2, caption = "Parameter Estimates")
estimates | lower.conf | upper.conf | |
---|---|---|---|
B1 | 0.54 | 0.50 | 0.58 |
B2 | 0.50 | 0.48 | 0.52 |
I1 | 0.71 | 0.69 | 0.73 |
I2 | 0.41 | 0.38 | 0.43 |
U1 | 0.13 | 0.07 | 0.20 |
U2 | 0.07 | 0.03 | 0.11 |
Now we fit the restricted models. If a parameter matters, the restricted model should fit worse than the baseline model: \(G^2\) should increase, the \(p\)-value should decrease, and the effect size \(w\) should increase.
mC.1 <- fit.mpt(
  data = cn,
  model.filename = textConnection(PIT2C.eqn),
  restrictions.filename = textConnection(restrict.1),
  fia = 20000
)
## [1] "Computing FIA: Iteration begins at 2022-09-20 19:04:07"
## [1] "Computing FIA: Iteration stopped at 2022-09-20 19:04:07"
## Time difference of 0.1137588 secs
## Presenting the best result out of 5 minimization runs.
## [1] "Model fitting begins at 2022-09-20 19:04:07"
## Optimization routine for dataset(s) 12 13 17 18 30
## did not converge succesfully. Tried again with use.gradient == FALSE.
## Optimization for dataset(s) 12 13 17 18 30
## using numerically estimated gradients produced better results. Using those results.
## Old results saved in output == 'full' [['optim.runs']].
## [1] "Model fitting stopped at 2022-09-20 19:04:09"
## Time difference of 1.67807 secs
mC.2 <- fit.mpt(
  data = cn,
  model.filename = textConnection(PIT2C.eqn),
  restrictions.filename = textConnection(restrict.2),
  fia = 20000
)
## [1] "Computing FIA: Iteration begins at 2022-09-20 19:04:09"
## [1] "Computing FIA: Iteration stopped at 2022-09-20 19:04:09"
## Time difference of 0.110883 secs
## Presenting the best result out of 5 minimization runs.
## [1] "Model fitting begins at 2022-09-20 19:04:09"
## Optimization routine for dataset(s) 12 17 18
## did not converge succesfully. Tried again with use.gradient == FALSE.
## Optimization for dataset(s) 12 17 18
## using numerically estimated gradients produced better results. Using those results.
## Old results saved in output == 'full' [['optim.runs']].
## [1] "Model fitting stopped at 2022-09-20 19:04:11"
## Time difference of 1.677922 secs
mC.3 <- fit.mpt(
  data = cn,
  model.filename = textConnection(PIT2C.eqn),
  restrictions.filename = textConnection(restrict.3),
  fia = 20000
)
## [1] "Computing FIA: Iteration begins at 2022-09-20 19:04:11"
## [1] "Computing FIA: Iteration stopped at 2022-09-20 19:04:11"
## Time difference of 0.1106639 secs
## Presenting the best result out of 5 minimization runs.
## [1] "Model fitting begins at 2022-09-20 19:04:11"
## Optimization routine for dataset(s) 18 38
## did not converge succesfully. Tried again with use.gradient == FALSE.
## Optimization for dataset(s) 18 38
## using numerically estimated gradients produced better results. Using those results.
## Old results saved in output == 'full' [['optim.runs']].
## [1] "Model fitting stopped at 2022-09-20 19:04:13"
## Time difference of 1.673912 secs
Each restricted alternative model was compared with the baseline model. The larger the difference between the two models, the larger \(\Delta G^2\) and the effect size \(w\), and the smaller the \(p\)-value.
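The code below repeats the same four-line computation for each restriction; a small helper like the hypothetical compare_to_baseline() sketched here (not part of the original script) could wrap it.

# Hypothetical helper wrapping the repeated model comparison below (sketch only).
compare_to_baseline <- function(restricted, baseline, n_total) {
  dG  <- restricted$goodness.of.fit$aggregated$G.Squared -
    baseline$goodness.of.fit$aggregated$G.Squared         # delta G^2
  ddf <- restricted$goodness.of.fit$aggregated$df -
    baseline$goodness.of.fit$aggregated$df                 # delta df
  c(G_squared = dG, df = ddf,
    p = pchisq(dG, ddf, lower.tail = FALSE),               # likelihood-ratio test p-value
    w = sqrt(dG / n_total))                                # effect size w
}
# e.g., compare_to_baseline(mC.1, mC.0, sum(cn))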
# C-dominating
( g.sq.I.equal <- mC.1[["goodness.of.fit"]][["aggregated"]][["G.Squared"]] -
    mC.0[["goodness.of.fit"]][["aggregated"]][["G.Squared"]] )
## [1] 340.9816
( df.I.equal <- mC.1[["goodness.of.fit"]][["aggregated"]][["df"]] -
    mC.0[["goodness.of.fit"]][["aggregated"]][["df"]] )
## [1] 1
( p.value.I.equal <- pchisq(g.sq.I.equal, df.I.equal, lower.tail = FALSE) )
## [1] 3.900261e-76
( w.I.equal <- sqrt(g.sq.I.equal/sum(cn)) )
## [1] 0.1872206
# format(round(p.value.I.equal,10), nsmall = 10)

( g.sq.U.equal <- mC.2[["goodness.of.fit"]][["aggregated"]][["G.Squared"]] -
    mC.0[["goodness.of.fit"]][["aggregated"]][["G.Squared"]] )
## [1] 2.336545
( df.U.equal <- mC.2[["goodness.of.fit"]][["aggregated"]][["df"]] -
    mC.0[["goodness.of.fit"]][["aggregated"]][["df"]] )
## [1] 1
( p.value.U.equal <- pchisq(g.sq.U.equal, df.U.equal, lower.tail = FALSE) )
## [1] 0.1263695
( w.U.equal <- sqrt(g.sq.U.equal/sum(cn)) )
## [1] 0.01549799

( g.sq.B.equal <- mC.3[["goodness.of.fit"]][["aggregated"]][["G.Squared"]] -
    mC.0[["goodness.of.fit"]][["aggregated"]][["G.Squared"]] )
## [1] 2.535747
( df.B.equal <- mC.3[["goodness.of.fit"]][["aggregated"]][["df"]] -
    mC.0[["goodness.of.fit"]][["aggregated"]][["df"]] )
## [1] 1
( p.value.B.equal <- pchisq(g.sq.B.equal, df.B.equal, lower.tail = FALSE) )
## [1] 0.1112942
( w.B.equal <- sqrt(g.sq.B.equal/sum(cn)) )
## [1] 0.01614512
mC.table <- tibble(Parameter = c("IE", "UE", "RB"),
                   G_squared = c(0, 0, 0), df = c(0, 0, 0),
                   p = c(0, 0, 0), w = c(0, 0, 0))
mC.table$G_squared[1] <- g.sq.I.equal
mC.table$df[1] <- df.I.equal
mC.table$p[1] <- p.value.I.equal
mC.table$w[1] <- w.I.equal

mC.table$G_squared[2] <- g.sq.U.equal
mC.table$df[2] <- df.U.equal
mC.table$p[2] <- p.value.U.equal
mC.table$w[2] <- w.U.equal

mC.table$G_squared[3] <- g.sq.B.equal
mC.table$df[3] <- df.B.equal
mC.table$p[3] <- p.value.B.equal
mC.table$w[3] <- w.B.equal

mC.table$bonferroni.p <- p.adjust(mC.table$p, "bonferroni")
mC.table %>%
  select(Parameter, G_squared, df, p, bonferroni.p, w) %>%
  kable(digits = c(0, 2, 0, 5, 2, 2), caption = "C-First, Deadline = 1s")
Parameter | G_squared | df | p | bonferroni.p | w |
---|---|---|---|---|---|
IE | 340.98 | 1 | 0.00000 | 0.00 | 0.19 |
UE | 2.34 | 1 | 0.12637 | 0.38 | 0.02 |
RB | 2.54 | 1 | 0.11129 | 0.33 | 0.02 |
Compared with the baseline model, the model that constrains the Intentional Empathy parameter to be equal in the high-load and low-load conditions fit significantly worse, \(\Delta G^2\)(1) = 340.982, \(p\) < .001, \(w\) = 0.187.
The model that constrains the Unintentional Empathy parameter to be equal in the high-load and low-load conditions did not differ significantly from the baseline model, \(\Delta G^2\)(1) = 2.337, \(p\) = 0.379 (Bonferroni-corrected), \(w\) = 0.015.
The model that constrains the Bias parameter to be equal in the high-load and low-load conditions did not differ significantly from the baseline model, \(\Delta G^2\)(1) = 2.536, \(p\) = 0.334 (Bonferroni-corrected), \(w\) = 0.016.
# organize individual parameter estimates
resParamInd <- as.data.frame.table(mC.0$parameters$individual) %>%
  filter(Var2 == "estimates") %>%
separate(col = "Var3", into = c("dum", "sid"), sep = ": ") %>%
rename(estimate = Freq) %>%
mutate(parameter = factor(rep(c('Bias', 'Intention', 'Unintention'), each = 2, times = 38)),
load = factor(rep(c('Low', 'High'), times = 3*38), levels = c('Low', 'High'),
labels = c('Low', 'High'))) %>%
select(sid, Var1, parameter, load, estimate)
sumParamInd <- resParamInd %>%
  group_by(parameter, load) %>%
get_summary_stats(estimate, show = c("mean", "sd", "ci"))
# bootstrapped CI: adjusted bootstrap percentile (bca) method
my.fun <- function(data, idx){
  df <- data[idx]
  return(mean(df))
}
tmpCI <- resParamInd %>%
  split(~parameter + load) %>%
  map_dfr(
    function(x){
      tmp <- boot.ci(boot(data = x$estimate, statistic = my.fun, R = nsims), type = "bca")$bca
      parameter <- x$parameter[1]
      load <- x$load[1]
      bootCI_lo <- tmp[4]
      bootCI_hi <- tmp[5]
      data.frame(parameter, load, bootCI_lo, bootCI_hi)
    }
  )
merge(sumParamInd, tmpCI, by = c("parameter", "load")) %>%
kable(digits = 2, caption = "Parameter Estimates: Descriptive Stats")
parameter | load | variable | n | mean | sd | ci | bootCI_lo | bootCI_hi |
---|---|---|---|---|---|---|---|---|
Bias | High | estimate | 38 | 0.52 | 0.16 | 0.05 | 0.47 | 0.56 |
Bias | Low | estimate | 38 | 0.54 | 0.23 | 0.08 | 0.47 | 0.62 |
Intention | High | estimate | 38 | 0.43 | 0.30 | 0.10 | 0.34 | 0.52 |
Intention | Low | estimate | 38 | 0.72 | 0.21 | 0.07 | 0.64 | 0.78 |
Unintention | High | estimate | 38 | 0.13 | 0.15 | 0.05 | 0.09 | 0.18 |
Unintention | Low | estimate | 38 | 0.23 | 0.27 | 0.09 | 0.15 | 0.32 |
TparamInd <- tibble(Parameter = c("IE", "UE", "RB"),
                    t = c(0, 0, 0), df = c(0, 0, 0),
                    p = c(0, 0, 0), bonferroni.p = c(0, 0, 0), d = c(0, 0, 0))
# permutation t test
# intention
tmp <- perm.t.test(
  filter(resParamInd, parameter == "Intention", load == "Low")$estimate,
  filter(resParamInd, parameter == "Intention", load == "High")$estimate,
  paired = TRUE, R = nsims)
TparamInd$t[1] <- tmp$statistic
TparamInd$df[1] <- tmp$parameter
TparamInd$p[1] <- tmp$perm.p.value

dxTmp <- ANOVA_design(
  design = "2w",
  n = filter(sumParamInd, parameter == "Intention")$n,
  mu = filter(sumParamInd, parameter == "Intention")$mean,
  sd = filter(sumParamInd, parameter == "Intention")$sd,
  labelnames = c("LOAD", "Low", "High"),
  plot = FALSE)
tmp <- ANOVA_power(dxTmp, verbose = FALSE, nsims = nsims)
TparamInd$d[1] <- tmp$pc_results$effect_size

# unintention
tmp <- perm.t.test(
  filter(resParamInd, parameter == "Unintention", load == "Low")$estimate,
  filter(resParamInd, parameter == "Unintention", load == "High")$estimate,
  paired = TRUE, R = nsims)
TparamInd$t[2] <- tmp$statistic
TparamInd$df[2] <- tmp$parameter
TparamInd$p[2] <- tmp$perm.p.value

dxTmp <- ANOVA_design(
  design = "2w",
  n = filter(sumParamInd, parameter == "Unintention")$n,
  mu = filter(sumParamInd, parameter == "Unintention")$mean,
  sd = filter(sumParamInd, parameter == "Unintention")$sd,
  labelnames = c("LOAD", "Low", "High"),
  plot = FALSE)
tmp <- ANOVA_power(dxTmp, verbose = FALSE, nsims = nsims)
TparamInd$d[2] <- tmp$pc_results$effect_size

# bias
tmp <- perm.t.test(
  filter(resParamInd, parameter == "Bias", load == "Low")$estimate,
  filter(resParamInd, parameter == "Bias", load == "High")$estimate,
  paired = TRUE, R = nsims)
TparamInd$t[3] <- tmp$statistic
TparamInd$df[3] <- tmp$parameter
TparamInd$p[3] <- tmp$perm.p.value

dxTmp <- ANOVA_design(
  design = "2w",
  n = filter(sumParamInd, parameter == "Bias")$n,
  mu = filter(sumParamInd, parameter == "Bias")$mean,
  sd = filter(sumParamInd, parameter == "Bias")$sd,
  labelnames = c("LOAD", "Low", "High"),
  plot = FALSE)
tmp <- ANOVA_power(dxTmp, verbose = FALSE, nsims = nsims)
TparamInd$d[3] <- tmp$pc_results$effect_size

TparamInd$bonferroni.p <- p.adjust(TparamInd$p, "bonferroni")
TparamInd %>%
  kable(digits = c(0, 2, 0, 2, 2, 2), caption = "Pairwise comparisons")
Parameter | t | df | p | bonferroni.p | d |
---|---|---|---|---|---|
IE | 7.16 | 37 | 0.00 | 0.00 | -1.14 |
UE | 2.26 | 37 | 0.03 | 0.09 | -0.45 |
RB | 0.53 | 37 | 0.60 | 1.00 | -0.11 |
# correlation between parameters and working memory
datParamInd <- resParamInd %>%
  unite("Var", c(load, parameter)) %>%
pivot_wider(id_cols = sid, names_from = Var, values_from = estimate) %>%
mutate(sid2 = unique(td.slong$SID),
SID = wd$SID,
WM = wk$cowanK) %>%
select(SID, WM, starts_with("Low"), starts_with("High"))
RR <- datParamInd %>% select(!SID) %>% cor_mat()
PP <- RR %>% cor_get_pval()

pbon <- p.adjust(PP$WM[2:7], "bonferroni")
tibble(parameter = c("Pearson r", "Bonf.corr.p"),
lowIE = c(RR$WM[3], pbon[2]),
lowUE = c(RR$WM[4], pbon[3]),
lowRB = c(RR$WM[2], pbon[1]),
highIE = c(RR$WM[6], pbon[5]),
highUE = c(RR$WM[7], pbon[6]),
highRB = c(RR$WM[5], pbon[4])) %>%
kable(align = 'c', digits = 3, caption = "Parameters vs. Cowan's K")
parameter | lowIE | lowUE | lowRB | highIE | highUE | highRB |
---|---|---|---|---|---|---|
Pearson r | 0.480 | -0.096 | -0.16 | 0.460 | -0.240 | -0.07 |
Bonf.corr.p | 0.015 | 1.000 | 1.00 | 0.024 | 0.906 | 1.00 |
rh1 <- datParamInd %>%
  ggplot(aes(x = WM, y = Low_Intention)) +
geom_point(size = 3) +
geom_smooth(method = lm, formula = y ~ x) +
scale_x_continuous(breaks=seq(0, 4, by=0.5)) +
scale_y_continuous(breaks=seq(0.0, 1.0, by=0.2)) +
coord_cartesian(ylim = c(0.0, 1.0), clip = "on") +
labs(x = expression(paste("Cowan's ", italic("K"))),
y = expression(italic("IE")["Low, 1-sec"])) +
theme_bw(base_size = 18) +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
aspect.ratio = 1) +
annotate(geom = "text", x = 2, y = .1, hjust = 0, size = 4,
label = "paste(italic(r), \" = .48, \", italic(p), \" < .05\")", parse = TRUE)
rh2 <- datParamInd %>%
  ggplot(aes(x = WM, y = High_Intention)) +
geom_point(size = 3) +
geom_smooth(method = lm, formula = y ~ x) +
scale_x_continuous(breaks=seq(0, 4, by=0.5)) +
scale_y_continuous(breaks=seq(0.0, 1.0, by=0.2)) +
coord_cartesian(ylim = c(0.0, 1.0), clip = "on") +
labs(x = expression(paste("Cowan's ", italic("K"))),
y = expression(italic("IE")["High, 1-sec"])) +
theme_bw(base_size = 18) +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
aspect.ratio = 1) +
annotate(geom = "text", x = 1.0, y = .55, hjust = 0, size = 4,
label = "paste(italic(r), \" = .46, \", italic(p), \" < .05\")", parse = TRUE)
plot_grid(rh1, rh2, labels = c("A", "B"), label_size = 22, nrow = 1)
cx <- dd %>%
  group_by(SID, Load, Prime, Target, PIT_corr2s) %>%
summarise(N = n()) %>%
ungroup() %>%
mutate(Prime2 = ifelse(Prime == "Pain", "P", "N"),
Target2 = ifelse(Target == "Pain", "P", "N"),
Corr2 = ifelse(PIT_corr2s == 1, "Corr", "Incorr")) %>%
select(SID, Load, Prime2, Target2, Corr2, N) %>%
arrange(SID, desc(Load), desc(Prime2), desc(Target2), Corr2) %>%
unite("Condition", Load:Corr2, sep = "") %>%
pivot_wider(names_from = "Condition", values_from = "N") %>%
select(!SID) %>%
replace(is.na(.), 0)
cx <- cx[, col_order]
# cx %>% print(n = Inf)
mC2.0 <- fit.mpt(
  data = cx,
  model.filename = textConnection(PIT2C.eqn),
  fia = 200000
)
## [1] "Computing FIA: Iteration begins at 2022-09-20 19:07:53"
## [1] "Computing FIA: Iteration stopped at 2022-09-20 19:07:55"
## Time difference of 1.269217 secs
## Presenting the best result out of 5 minimization runs.
## [1] "Model fitting begins at 2022-09-20 19:07:55"
## Optimization routine for dataset(s) 1 2 3 4 5 9 10 11 12 13 14 15 16 17 18 19 21 22 24 25 27 30 31 32 34 37 38
## did not converge succesfully. Tried again with use.gradient == FALSE.
## Optimization for dataset(s) 1 2 3 4 5 9 10 11 12 13 14 15 16 17 18 19 21 22 24 25 27 30 31 32 34 37 38
## using numerically estimated gradients produced better results. Using those results.
## Old results saved in output == 'full' [['optim.runs']].
## [1] "Model fitting stopped at 2022-09-20 19:07:58"
## Time difference of 3.638809 secs
mC2.0$goodness.of.fit$individual %>%
  mutate(SID = unique(td.slong$SID))
## Log.Likelihood G.Squared df p.value SID
## 1 -45.93371 3.674790e-01 2 0.8321525585 2
## 2 -54.43749 8.883605e-02 2 0.9565540096 3
## 3 -14.96267 1.113175e+00 2 0.5731615570 4
## 4 -49.78835 2.089308e+00 2 0.3518134755 5
## 5 -52.42155 2.183051e-01 2 0.8965936319 6
## 6 -28.10184 8.478568e+00 2 0.0144179101 7
## 7 -31.31133 5.997644e+00 2 0.0498457426 9
## 8 -24.35298 7.043620e+00 2 0.0295459060 10
## 9 -75.31411 4.914257e+00 2 0.0856806387 11
## 10 -35.34640 2.080056e+00 2 0.3534448307 12
## 11 -70.92739 1.339791e+00 2 0.5117620695 13
## 12 -50.96340 9.176825e+00 2 0.0101689875 14
## 13 -54.03875 2.689535e+00 2 0.2606002566 15
## 14 -35.93833 1.402168e+00 2 0.4960472862 16
## 15 -43.67418 6.359328e+00 2 0.0415996397 17
## 16 -22.94874 4.235138e+00 2 0.1203237790 18
## 17 -27.07204 1.469368e+00 2 0.4796571131 19
## 18 -48.84814 9.574650e-01 2 0.6195681878 20
## 19 -28.78034 2.048859e+00 2 0.3590012329 21
## 20 -119.94259 6.178533e+00 2 0.0455353491 22
## 21 -15.45110 4.202520e+00 2 0.1223022212 23
## 22 -37.93917 1.453558e+00 2 0.4834638081 24
## 23 -60.72027 3.316255e+00 2 0.1904953745 25
## 24 -53.75213 1.116982e+00 2 0.5720715727 26
## 25 -48.53539 3.705163e-01 2 0.8308897725 27
## 26 -52.00238 5.792738e+00 2 0.0552233835 28
## 27 -21.00927 3.561923e-01 2 0.8368619760 29
## 28 -53.87545 7.426397e+00 2 0.0243993581 31
## 29 -69.04723 4.564906e-01 2 0.7959289775 34
## 30 -44.46252 7.399632e+00 2 0.0247280759 36
## 31 -52.90923 2.183051e-01 2 0.8965936319 37
## 32 -19.20195 2.804336e+00 2 0.2460629101 38
## 33 -27.56246 4.562696e+00 2 0.1021464010 39
## 34 -61.32805 1.473240e+01 2 0.0006322651 40
## 35 -84.13210 1.665675e-01 2 0.9200900351 41
## 36 -80.72256 -2.842171e-14 2 1.0000000000 42
## 37 -31.62912 5.704572e-01 2 0.7518423691 43
## 38 -12.10937 3.561923e-01 2 0.8368619760 44
# kable(digits = 4, caption = "Individual Fits")
# mC2.0$goodness.of.fit$sum %>%
#   kable(digits = 4, caption = "Sum of Individual Fits")
mC2.0$goodness.of.fit$aggregated %>%
  mutate(w = sqrt(mC2.0$goodness.of.fit$aggregated$G.Squared / sum(cx))) %>%  # Cohen's w from the observations in cx
  kable(digits = 2, caption = "Aggregated Fit")
Log.Likelihood | G.Squared | df | p.value | w |
---|---|---|---|---|
-1966.16 | 5 | 2 | 0.08 | 0.02 |
mC2.0$parameters$aggregated %>%
  kable(digits = 2, caption = "Parameter Estimates")
parameter | estimates | lower.conf | upper.conf |
---|---|---|---|
B1 | 0.52 | 0.45 | 0.59 |
B2 | 0.44 | 0.38 | 0.51 |
I1 | 0.90 | 0.88 | 0.91 |
I2 | 0.90 | 0.88 | 0.91 |
U1 | 0.19 | 0.07 | 0.31 |
U2 | 0.10 | -0.02 | 0.22 |
mC2.1 <- fit.mpt(
  data = cx,
  model.filename = textConnection(PIT2C.eqn),
  restrictions.filename = textConnection(restrict.1),
  fia = 20000
)
## [1] "Computing FIA: Iteration begins at 2022-09-20 19:07:58"
## [1] "Computing FIA: Iteration stopped at 2022-09-20 19:07:58"
## Time difference of 0.1121221 secs
## Presenting the best result out of 5 minimization runs.
## [1] "Model fitting begins at 2022-09-20 19:07:58"
## Optimization routine for dataset(s) 1 2 3 4 5 9 10 12 13 14 15 16 17 18 19 21 22 24 25 30 31 32 34 37 38
## did not converge succesfully. Tried again with use.gradient == FALSE.
## Optimization for dataset(s) 1 2 3 4 5 9 10 12 13 14 15 16 17 18 19 21 22 24 25 30 31 32 34 37 38
## using numerically estimated gradients produced better results. Using those results.
## Old results saved in output == 'full' [['optim.runs']].
## [1] "Model fitting stopped at 2022-09-20 19:08:01"
## Time difference of 2.889705 secs
mC2.2 <- fit.mpt(
  data = cx,
  model.filename = textConnection(PIT2C.eqn),
  restrictions.filename = textConnection(restrict.2),
  fia = 20000
)
## [1] "Computing FIA: Iteration begins at 2022-09-20 19:08:02"
## [1] "Computing FIA: Iteration stopped at 2022-09-20 19:08:02"
## Time difference of 0.1110029 secs
## Presenting the best result out of 5 minimization runs.
## [1] "Model fitting begins at 2022-09-20 19:08:02"
## Optimization routine for dataset(s) 1 2 3 4 5 10 12 13 14 15 16 17 18 19 21 24 25 27 30 32 34 37 38
## did not converge succesfully. Tried again with use.gradient == FALSE.
## Optimization for dataset(s) 1 2 3 4 5 10 12 13 14 15 16 17 18 19 21 24 25 27 30 32 34 37 38
## using numerically estimated gradients produced better results. Using those results.
## Old results saved in output == 'full' [['optim.runs']].
## [1] "Model fitting stopped at 2022-09-20 19:08:05"
## Time difference of 3.124184 secs
mC2.3 <- fit.mpt(
  data = cx,
  model.filename = textConnection(PIT2C.eqn),
  restrictions.filename = textConnection(restrict.3),
  fia = 20000
)
## [1] "Computing FIA: Iteration begins at 2022-09-20 19:08:05"
## [1] "Computing FIA: Iteration stopped at 2022-09-20 19:08:05"
## Time difference of 0.1102879 secs
## Presenting the best result out of 5 minimization runs.
## [1] "Model fitting begins at 2022-09-20 19:08:05"
## Optimization routine for dataset(s) 3 10 12 17 18 21 24 27 30 32 38
## did not converge succesfully. Tried again with use.gradient == FALSE.
## Optimization for dataset(s) 3 10 12 17 18 21 24 27 30 32 38
## using numerically estimated gradients produced better results. Using those results.
## Old results saved in output == 'full' [['optim.runs']].
## [1] "Model fitting stopped at 2022-09-20 19:08:08"
## Time difference of 2.581181 secs
# C-dominating
( g.sq.I.equal2 <- mC2.1[["goodness.of.fit"]][["aggregated"]][["G.Squared"]] -
    mC2.0[["goodness.of.fit"]][["aggregated"]][["G.Squared"]] )
## [1] 0.001970386
( df.I.equal2 <- mC2.1[["goodness.of.fit"]][["aggregated"]][["df"]] -
    mC2.0[["goodness.of.fit"]][["aggregated"]][["df"]] )
## [1] 1
( p.value.I.equal2 <- pchisq(g.sq.I.equal2, df.I.equal2, lower.tail = FALSE) )
## [1] 0.9645943
( w.I.equal2 <- sqrt(g.sq.I.equal2 / sum(cx)) )
## [1] 0.0004500532

( g.sq.U.equal2 <- mC2.2[["goodness.of.fit"]][["aggregated"]][["G.Squared"]] -
    mC2.0[["goodness.of.fit"]][["aggregated"]][["G.Squared"]] )
## [1] 1.024434
( df.U.equal2 <- mC2.2[["goodness.of.fit"]][["aggregated"]][["df"]] -
    mC2.0[["goodness.of.fit"]][["aggregated"]][["df"]] )
## [1] 1
( p.value.U.equal2 <- pchisq(g.sq.U.equal2, df.U.equal2, lower.tail = FALSE) )
## [1] 0.3114695
( w.U.equal2 <- sqrt(g.sq.U.equal2 / sum(cx)) )
## [1] 0.01026196

( g.sq.B.equal2 <- mC2.3[["goodness.of.fit"]][["aggregated"]][["G.Squared"]] -
    mC2.0[["goodness.of.fit"]][["aggregated"]][["G.Squared"]] )
## [1] 2.206364
( df.B.equal2 <- mC2.3[["goodness.of.fit"]][["aggregated"]][["df"]] -
    mC2.0[["goodness.of.fit"]][["aggregated"]][["df"]] )
## [1] 1
( p.value.B.equal2 <- pchisq(g.sq.B.equal2, df.B.equal2, lower.tail = FALSE) )
## [1] 0.1374423
( w.B.equal2 <- sqrt(g.sq.B.equal2 / sum(cx)) )
## [1] 0.01506006
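Each of the three restriction tests above repeats the same four lines (delta G-squared, delta df, p, Cohen's w). A minimal helper along the following lines would capture the pattern once; it assumes the fitted objects keep MPTinR's goodness.of.fit$aggregated structure, and the name lr_test_mpt and its arguments are ours, not part of the original script.

# Hypothetical helper (not in the original script): likelihood-ratio test of a restricted
# MPT model against the unrestricted model, plus Cohen's w.
lr_test_mpt <- function(restricted, full, n_obs) {
  g.sq <- restricted[["goodness.of.fit"]][["aggregated"]][["G.Squared"]] -
    full[["goodness.of.fit"]][["aggregated"]][["G.Squared"]]
  df <- restricted[["goodness.of.fit"]][["aggregated"]][["df"]] -
    full[["goodness.of.fit"]][["aggregated"]][["df"]]
  data.frame(G_squared = g.sq,
             df = df,
             p = pchisq(g.sq, df, lower.tail = FALSE),  # chi-square test of the G-squared difference
             w = sqrt(g.sq / n_obs))                    # effect size based on the total observations
}
# e.g., lr_test_mpt(mC2.1, mC2.0, sum(cx)) should reproduce g.sq.I.equal2, df.I.equal2,
# p.value.I.equal2, and w.I.equal2 above.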
mC.table2 <- tibble(Parameter = c("IE", "UE", "RB"),
                    G_squared = c(0, 0, 0), df = c(0, 0, 0),
                    p = c(0, 0, 0), w = c(0, 0, 0))
mC.table2$G_squared[1] <- g.sq.I.equal2
mC.table2$df[1] <- df.I.equal2
mC.table2$p[1] <- p.value.I.equal2
mC.table2$w[1] <- w.I.equal2

mC.table2$G_squared[2] <- g.sq.U.equal2
mC.table2$df[2] <- df.U.equal2
mC.table2$p[2] <- p.value.U.equal2
mC.table2$w[2] <- w.U.equal2

mC.table2$G_squared[3] <- g.sq.B.equal2
mC.table2$df[3] <- df.B.equal2
mC.table2$p[3] <- p.value.B.equal2
mC.table2$w[3] <- w.B.equal2

mC.table2$bonferroni.p <- p.adjust(mC.table2$p, "bonferroni")
mC.table2 %>%
  select(Parameter, G_squared, df, p, bonferroni.p, w) %>%
  kable(digits = c(0, 2, 0, 2, 2, 2), caption = "C-First, Deadline = 2s")
Parameter | G_squared | df | p | bonferroni.p | w |
---|---|---|---|---|---|
IE | 0.00 | 1 | 0.96 | 1.00 | 0.00 |
UE | 1.02 | 1 | 0.31 | 0.93 | 0.01 |
RB | 2.21 | 1 | 0.14 | 0.41 | 0.02 |
# organize individual parameter estimates
resParamInd2 <- as.data.frame.table(mC2.0$parameters$individual) %>%
  filter(Var2 == "estimates") %>%
  separate(col = "Var3", into = c("dum", "sid"), sep = ": ") %>%
  rename(estimate = Freq) %>%
  mutate(parameter = factor(rep(c('Bias', 'Intention', 'Unintention'), each = 2, times = 38)),
         load = factor(rep(c('Low', 'High'), times = 3 * 38), levels = c('Low', 'High'),
                       labels = c('Low', 'High'))) %>%
  select(sid, Var1, parameter, load, estimate)

sumParamInd2 <- resParamInd2 %>%
  group_by(parameter, load) %>%
  get_summary_stats(estimate, show = c("mean", "sd", "ci"))
# bootstrapped CI: adjusted bootstrap percentile (BCa) method
tmpCI <- resParamInd2 %>%
  split(~parameter + load) %>%
  map_dfr(
    function(x){
      tmp <- boot.ci(boot(data = x$estimate, statistic = my.fun, R = nsims), type = "bca")$bca
      parameter <- x$parameter[1]
      load <- x$load[1]
      bootCI_lo <- tmp[4]
      bootCI_hi <- tmp[5]
      data.frame(parameter, load, bootCI_lo, bootCI_hi)
    }
  )

merge(sumParamInd2, tmpCI, by = c("parameter", "load")) %>%
  kable(digits = 2, caption = "Parameter Estimates: Descriptive Stats")
parameter | load | variable | n | mean | sd | ci | bootCI_lo | bootCI_hi |
---|---|---|---|---|---|---|---|---|
Bias | High | estimate | 38 | 0.49 | 0.32 | 0.11 | 0.39 | 0.59 |
Bias | Low | estimate | 38 | 0.49 | 0.34 | 0.11 | 0.38 | 0.59 |
Intention | High | estimate | 38 | 0.90 | 0.08 | 0.03 | 0.87 | 0.92 |
Intention | Low | estimate | 38 | 0.90 | 0.08 | 0.03 | 0.87 | 0.92 |
Unintention | High | estimate | 38 | 0.25 | 0.36 | 0.12 | 0.15 | 0.38 |
Unintention | Low | estimate | 38 | 0.22 | 0.28 | 0.09 | 0.15 | 0.32 |
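my.fun is defined earlier in the document and not shown here. boot() expects a statistic of the form function(data, indices), so for a bootstrapped mean it would look roughly like the sketch below; this is an assumption about its content, not a quote of the original.

# Assumed shape of my.fun (our sketch): boot() passes the resampled indices to the statistic.
my.fun <- function(data, indices) {
  mean(data[indices])  # mean of the resampled parameter estimates
}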
TparamInd2 <- tibble(Parameter = c("IE", "UE", "RB"),
                     t = c(0, 0, 0), df = c(0, 0, 0),
                     p = c(0, 0, 0), bonferroni.p = c(0, 0, 0), d = c(0, 0, 0))

# permutation t test
# intention
tmp <- perm.t.test(
  filter(resParamInd2, parameter == "Intention", load == "Low")$estimate,
  filter(resParamInd2, parameter == "Intention", load == "High")$estimate,
  paired = TRUE, R = nsims)
TparamInd2$t[1] <- tmp$statistic
TparamInd2$df[1] <- tmp$parameter
TparamInd2$p[1] <- tmp$perm.p.value
dxTmp <- ANOVA_design(
  design = "2w",
  n = filter(sumParamInd2, parameter == "Intention")$n,
  mu = filter(sumParamInd2, parameter == "Intention")$mean,
  sd = filter(sumParamInd2, parameter == "Intention")$sd,
  labelnames = c("LOAD", "Low", "High"),
  plot = FALSE)
tmp <- ANOVA_power(dxTmp, verbose = FALSE, nsims = nsims)
TparamInd2$d[1] <- tmp$pc_results$effect_size
# unintention
tmp <- perm.t.test(
  filter(resParamInd2, parameter == "Unintention", load == "Low")$estimate,
  filter(resParamInd2, parameter == "Unintention", load == "High")$estimate,
  paired = TRUE, R = nsims)
TparamInd2$t[2] <- tmp$statistic
TparamInd2$df[2] <- tmp$parameter
TparamInd2$p[2] <- tmp$perm.p.value
dxTmp <- ANOVA_design(
  design = "2w",
  n = filter(sumParamInd2, parameter == "Unintention")$n,
  mu = filter(sumParamInd2, parameter == "Unintention")$mean,
  sd = filter(sumParamInd2, parameter == "Unintention")$sd,
  labelnames = c("LOAD", "Low", "High"),
  plot = FALSE)
tmp <- ANOVA_power(dxTmp, verbose = FALSE, nsims = nsims)
TparamInd2$d[2] <- tmp$pc_results$effect_size
# bias
tmp <- perm.t.test(
  filter(resParamInd2, parameter == "Bias", load == "Low")$estimate,
  filter(resParamInd2, parameter == "Bias", load == "High")$estimate,
  paired = TRUE, R = nsims)
TparamInd2$t[3] <- tmp$statistic
TparamInd2$df[3] <- tmp$parameter
TparamInd2$p[3] <- tmp$perm.p.value
dxTmp <- ANOVA_design(
  design = "2w",
  n = filter(sumParamInd2, parameter == "Bias")$n,
  mu = filter(sumParamInd2, parameter == "Bias")$mean,
  sd = filter(sumParamInd2, parameter == "Bias")$sd,
  labelnames = c("LOAD", "Low", "High"),
  plot = FALSE)
tmp <- ANOVA_power(dxTmp, verbose = FALSE, nsims = nsims)
TparamInd2$d[3] <- tmp$pc_results$effect_size

TparamInd2$bonferroni.p <- p.adjust(TparamInd2$p, "bonferroni")
TparamInd2 %>%
  kable(digits = c(0, 2, 0, 2, 2, 2), caption = "Pairwise comparisons")
Parameter | t | df | p | bonferroni.p | d |
---|---|---|---|---|---|
IE | -0.04 | 37 | 0.97 | 1 | 0.0 |
UE | -0.42 | 37 | 0.68 | 1 | 0.1 |
RB | 0.02 | 37 | 0.99 | 1 | 0.0 |
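The Intention, Unintention, and Bias blocks above differ only in the parameter they filter on. A hedged sketch of how the pattern could be factored out is below; the wrapper name compare_load is ours, and it simply reuses the perm.t.test (MKinfer) and ANOVA_design/ANOVA_power (Superpower) calls already shown.

# Hypothetical refactoring of the three repeated blocks above (not in the original script).
compare_load <- function(param) {
  low  <- filter(resParamInd2, parameter == param, load == "Low")$estimate
  high <- filter(resParamInd2, parameter == param, load == "High")$estimate
  perm <- perm.t.test(low, high, paired = TRUE, R = nsims)   # permutation t test
  des  <- ANOVA_design(design = "2w",
                       n  = filter(sumParamInd2, parameter == param)$n,
                       mu = filter(sumParamInd2, parameter == param)$mean,
                       sd = filter(sumParamInd2, parameter == param)$sd,
                       labelnames = c("LOAD", "Low", "High"),
                       plot = FALSE)
  pow  <- ANOVA_power(des, verbose = FALSE, nsims = nsims)   # simulated effect size
  tibble(t = perm$statistic, df = perm$parameter,
         p = perm$perm.p.value, d = pow$pc_results$effect_size)
}
# e.g., map_dfr(c("Intention", "Unintention", "Bias"), compare_load)
# would rebuild the t, df, p, and d columns of TparamInd2.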
datParamInd2 <- resParamInd2 %>%
  unite("Var", c(load, parameter)) %>%
  pivot_wider(id_cols = sid, names_from = Var, values_from = estimate) %>%
  mutate(sid2 = unique(td.slong$SID),
         SID = wd$SID,
         WM = wk$cowanK) %>%
  select(SID, WM, starts_with("Low"), starts_with("High"))

RR2 <- datParamInd2 %>% select(!SID) %>% cor_mat()
PP2 <- RR2 %>% cor_get_pval()

pbon2 <- p.adjust(PP2$WM[2:7], "bonferroni")
tibble(parameter = c("Pearson r", "Bonf.corr.p"),
lowIE = c(RR2$WM[3], pbon2[2]),
lowUE = c(RR2$WM[4], pbon2[3]),
lowRB = c(RR2$WM[2], pbon2[1]),
highIE = c(RR2$WM[6], pbon2[5]),
highUE = c(RR2$WM[7], pbon2[6]),
highRB = c(RR2$WM[5], pbon2[4])) %>%
kable(align = 'c', digits = 3, caption = "Parameters vs. Cowan's K")
parameter | lowIE | lowUE | lowRB | highIE | highUE | highRB |
---|---|---|---|---|---|---|
Pearson r | 0.450 | -0.22 | 0.003 | 0.67 | -0.22 | -0.068 |
Bonf.corr.p | 0.027 | 1.00 | 1.000 | 0.00 | 1.00 | 1.000 |
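As a quick sanity check on the rstatix pipeline, any single cell of the table can be reproduced with base R; the one-liner below is our addition, not part of the original script.

# Hypothetical spot check: base-R equivalent of one cell above.
cor.test(datParamInd2$WM, datParamInd2$High_Intention)  # Pearson r should match the highIE entry (about .67)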
rh3 <- datParamInd2 %>%
  ggplot(aes(x = WM, y = Low_Intention)) +
  geom_point(size = 3) +
  geom_smooth(method = lm, formula = y ~ x) +
  scale_x_continuous(breaks = seq(0, 4, by = 0.5)) +
  scale_y_continuous(breaks = seq(0.0, 1.0, by = 0.2)) +
  coord_cartesian(ylim = c(0.0, 1.0), clip = "on") +
  labs(x = expression(paste("Cowan's ", italic("K"))),
       y = expression(italic("IE")["Low, 2-sec"])) +
  theme_bw(base_size = 18) +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        aspect.ratio = 1) +
  annotate(geom = "text", x = 2, y = .1, hjust = 0, size = 4,
           label = "paste(italic(r), \" = .45, \", italic(p), \" < .05\")", parse = TRUE)
rh4 <- datParamInd2 %>%
  ggplot(aes(x = WM, y = High_Intention)) +
  geom_point(size = 3) +
  geom_smooth(method = lm, formula = y ~ x) +
  scale_x_continuous(breaks = seq(0, 4, by = 0.5)) +
  scale_y_continuous(breaks = seq(0.0, 1.0, by = 0.2)) +
  coord_cartesian(ylim = c(0.0, 1.0), clip = "on") +
  labs(x = expression(paste("Cowan's ", italic("K"))),
       y = expression(italic("IE")["High, 2-sec"])) +
  theme_bw(base_size = 18) +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        aspect.ratio = 1) +
  annotate(geom = "text", x = 2, y = .1, hjust = 0, size = 4,
           label = "paste(italic(r), \" = .67, \", italic(p), \" < .05\")", parse = TRUE)
plot_grid(rh3, rh4, labels = c("A", "B"), label_size = 22, nrow = 1)
# merge parameters over 2 deadlines
xParam <- rbind(mC.0[["parameters"]][["aggregated"]],  # 1s
                mC2.0[["parameters"]][["aggregated"]]) # 2s
class(xParam)
## [1] "data.frame"
xParam$Deadline <- factor(rep(c('DL1s', 'DL2s'), each = 6))
xParam$parameter <- factor(rep(c('Bias', 'Intention', 'Unintention'), each = 2, times = 2))
xParam$Load <- factor(rep(c('Low', 'High'), 6), levels = c('Low', 'High'),
                      labels = c('Low', 'High'))
xParam <- xParam %>% select(parameter, Deadline, Load, estimates, lower.conf, upper.conf)
str(xParam)
## 'data.frame': 12 obs. of 6 variables:
## $ parameter : Factor w/ 3 levels "Bias","Intention",..: 1 1 2 2 3 3 1 1 2 2 ...
## $ Deadline : Factor w/ 2 levels "DL1s","DL2s": 1 1 1 1 1 1 2 2 2 2 ...
## $ Load : Factor w/ 2 levels "Low","High": 1 2 1 2 1 2 1 2 1 2 ...
## $ estimates : num 0.538 0.5 0.713 0.406 0.134 ...
## $ lower.conf: num 0.4982 0.4772 0.6938 0.3806 0.0656 ...
## $ upper.conf: num 0.577 0.524 0.733 0.432 0.202 ...
# plot
dodge_size <- 0.4
error_width <- 0.3
nudge_size <- 0.1
ypos <- xParam %>% filter(parameter == "Intention", Deadline == "DL1s")
zf1 <- xParam %>%
  filter(parameter == "Intention") %>%
ggplot(aes(x = Load, y = estimates, fill = Deadline)) +
geom_errorbar(aes(ymin = lower.conf, ymax = upper.conf), width=error_width, size=1,
position = position_dodge(width = dodge_size) ) +
geom_point(size=4, shape=21, position = position_dodge(width = dodge_size)) +
coord_cartesian(ylim = c(0.2, 1.0), clip = "on") +
scale_y_continuous(breaks=seq(0.2, 1.0, by=0.1)) +
scale_fill_manual(values=c("white", "black"), labels=c("1 sec", "2 sec")) +
labs(y = expression(paste("Intentional Empathy (", italic("IE"), ")")),
fill = expression("Post-hoc\nDeadline")) +
theme_bw(base_size = 18) +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
aspect.ratio = 1.3) +
draw_line(x = c(1-nudge_size, 2-nudge_size), color = "darkgray",
y = c(ypos$estimates[2]-nudge_size, ypos$estimates[2]-nudge_size)) +
draw_line(x = c(1-nudge_size, 1-nudge_size), color = "darkgray",
y = c(ypos$estimates[1]-nudge_size*3/4, ypos$estimates[2]-nudge_size)) +
draw_line(x = c(2-nudge_size, 2-nudge_size), color = "darkgray",
y = c(ypos$estimates[2]-nudge_size*3/4, ypos$estimates[2]-nudge_size)) +
draw_label("***", color = "darkgray",
x = 1.5-nudge_size, y = ypos$estimates[2]-nudge_size*6/4,
vjust = 0.5, hjust = 0.5, size = 22, fontface = 'bold')
zf1
zf2 <- xParam %>%
  filter(parameter == "Unintention") %>%
ggplot(aes(x = Load, y = estimates, fill = Deadline)) +
geom_errorbar(aes(ymin = lower.conf, ymax = upper.conf), width=error_width, size=1,
position = position_dodge(width = dodge_size) ) +
geom_point(size=4, shape=21, position = position_dodge(width = dodge_size)) +
coord_cartesian(ylim = c(0.0, 0.8), clip = "on") +
scale_y_continuous(breaks=seq(0.0, 0.8, by=0.1)) +
scale_fill_manual(values=c("white", "black"), labels=c("1 sec", "2 sec")) +
labs(y = expression(paste("Unintentional Empathy (", italic("UE"), ")")),
fill = expression("Post-hoc\nDeadline")) +
theme_bw(base_size = 18) +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
aspect.ratio = 1.3)
zf2
zf3 <- xParam %>%
  filter(parameter == "Bias") %>%
ggplot(aes(x = Load, y = estimates, fill = Deadline)) +
geom_errorbar(aes(ymin = lower.conf, ymax = upper.conf), width=error_width, size=1,
position = position_dodge(width = dodge_size) ) +
geom_point(size=4, shape=21, position = position_dodge(width = dodge_size)) +
coord_cartesian(ylim = c(0.1, 0.9), clip = "on") +
scale_y_continuous(breaks=seq(0.1, 0.9, by=0.1)) +
scale_fill_manual(values=c("white", "black"), labels=c("1 sec", "2 sec")) +
labs(y = expression(paste("Response Bias (", italic("RB"), ")")),
fill = expression("Post-hoc\nDeadline")) +
theme_bw(base_size = 18) +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
aspect.ratio = 1.3)
zf3
# behavior
prow2 <- plot_grid(yf1 + theme(legend.position = "none"),
                   yf2 + theme(legend.position = "none"),
                   labels = c("A", "B"), label_size = 22, nrow = 1)
legend2 <- get_legend(yf2 + theme(legend.box.margin = margin(80, 0, 0, 0)))
legend <- get_legend(zf3 + theme(legend.box.margin = margin(-100, 0, 0, 0)))
legendX <- plot_grid(legend2, legend, ncol = 1, align = "v")

plot_grid(NULL, prow2, legendX, NULL, rel_widths = c(0.5, 2, .5, 0.5), nrow = 1)
# model parameters
prow <- plot_grid(zf1 + theme(legend.position = "none"),
                  zf2 + theme(legend.position = "none"),
                  zf3 + theme(legend.position = "none"),
                  labels = c("A", "B", "C"), label_size = 22, nrow = 1)
legend <- get_legend(zf3 + theme(legend.box.margin = margin(-50, 0, 0, 0)))

plot_grid(prow, legend, rel_widths = c(3, .5))
ggdraw() + draw_image("fig/mpt_AFirst.png")
PIT2A.eqn <- "
# tree for Low, Pain prime, Pain target: correct, then incorrect
U1 + (1-U1)*I1 + (1-U1)*(1-I1)*B1
(1-U1)*(1-I1)*(1-B1)
# tree for Low, Pain prime, Nonpain target: correct, then incorrect
(1-U1)*I1 + (1-U1)*(1-I1)*(1-B1)
U1 + (1-U1)*(1-I1)*B1
# tree for Low, Nonpain prime, Pain target: correct, then incorrect
(1-U1)*I1 + (1-U1)*(1-I1)*B1
U1 + (1-U1)*(1-I1)*(1-B1)
# tree for Low, Nonpain prime, Nonpain target: correct, then incorrect
U1 + (1-U1)*I1 + (1-U1)*(1-I1)*(1-B1)
(1-U1)*(1-I1)*B1
# tree for High, Pain prime, Pain target: correct, then incorrect
U2 + (1-U2)*I2 + (1-U2)*(1-I2)*B2
(1-U2)*(1-I2)*(1-B2)
# tree for High, Pain prime, Nonpain target: correct, then incorrect
(1-U2)*I2 + (1-U2)*(1-I2)*(1-B2)
U2 + (1-U2)*(1-I2)*B2
# tree for High, Nonpain prime, Pain target: correct, then incorrect
(1-U2)*I2 + (1-U2)*(1-I2)*B2
U2 + (1-U2)*(1-I2)*(1-B2)
# tree for High, Nonpain prime, Nonpain target: correct, then incorrect
U2 + (1-U2)*I2 + (1-U2)*(1-I2)*(1-B2)
(1-U2)*(1-I2)*B2
"
check.mpt(
  model.filename = textConnection(PIT2A.eqn),
  model.type = "easy"
)
## $probabilities.eq.1
## [1] TRUE
##
## $n.trees
## [1] 8
##
## $n.model.categories
## [1] 16
##
## $n.independent.categories
## [1] 8
##
## $n.params
## [1] 6
##
## $parameters
## [1] "B1" "B2" "I1" "I2" "U1" "U2"
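check.mpt already confirms that the category probabilities in each tree sum to 1. The short sketch below is our addition, with arbitrary parameter values, doing the same check by hand for the first Low-load tree just to make the tree structure concrete.

# Hypothetical hand check (not in the original script): for any u1, i1, b1 in [0, 1],
# the two branches of the Low / Pain prime / Pain target tree must sum to 1.
u1 <- 0.3; i1 <- 0.7; b1 <- 0.5
p_corr   <- u1 + (1 - u1) * i1 + (1 - u1) * (1 - i1) * b1
p_incorr <- (1 - u1) * (1 - i1) * (1 - b1)
p_corr + p_incorr  # always 1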
# model fitting
mA.0 <- fit.mpt(
  data = cn,
  model.filename = textConnection(PIT2A.eqn),
  fia = 200000
)
## [1] "Computing FIA: Iteration begins at 2022-09-20 19:11:50"
## [1] "Computing FIA: Iteration stopped at 2022-09-20 19:11:51"
## Time difference of 1.266047 secs
## Presenting the best result out of 5 minimization runs.
## [1] "Model fitting begins at 2022-09-20 19:11:51"
## Optimization routine for dataset(s) 4 12 13 17 18 30 38
## did not converge succesfully. Tried again with use.gradient == FALSE.
## Optimization for dataset(s) 4 12 13 17 18 30 38
## using numerically estimated gradients produced better results. Using those results.
## Old results saved in output == 'full' [['optim.runs']].
## [1] "Model fitting stopped at 2022-09-20 19:11:53"
## Time difference of 2.59101 secs
mA.0$goodness.of.fit$aggregated %>%
  mutate(w = sqrt(mA.0$goodness.of.fit$aggregated$G.Squared / sum(cn))) %>%
  kable(digits = c(2, 2, 2, 3, 2), caption = "Aggregated Fit")
Log.Likelihood | G.Squared | df | p.value | w |
---|---|---|---|---|
-4942.76 | 5.72 | 2 | 0.057 | 0.02 |
mA.0$parameters$aggregated %>%
  kable(digits = 2, caption = "Parameter Estimates")
parameter | estimates | lower.conf | upper.conf |
---|---|---|---|
B1 | 0.54 | 0.50 | 0.58 |
B2 | 0.50 | 0.48 | 0.52 |
I1 | 0.74 | 0.72 | 0.77 |
I2 | 0.42 | 0.40 | 0.45 |
U1 | 0.04 | 0.02 | 0.06 |
U2 | 0.04 | 0.02 | 0.07 |
mA.1 <- fit.mpt(
  data = cn,
  model.filename = textConnection(PIT2A.eqn),
  restrictions.filename = textConnection(restrict.1),
  fia = 20000
)
## [1] "Computing FIA: Iteration begins at 2022-09-20 19:11:54"
## [1] "Computing FIA: Iteration stopped at 2022-09-20 19:11:54"
## Time difference of 0.111944 secs
## Presenting the best result out of 5 minimization runs.
## [1] "Model fitting begins at 2022-09-20 19:11:54"
## Optimization routine for dataset(s) 12 13 18
## did not converge succesfully. Tried again with use.gradient == FALSE.
## Optimization for dataset(s) 12 13 18
## using numerically estimated gradients produced better results. Using those results.
## Old results saved in output == 'full' [['optim.runs']].
## [1] "Model fitting stopped at 2022-09-20 19:11:55"
## Time difference of 1.761771 secs
mA.2 <- fit.mpt(
  data = cn,
  model.filename = textConnection(PIT2A.eqn),
  restrictions.filename = textConnection(restrict.2),
  fia = 20000
)
## [1] "Computing FIA: Iteration begins at 2022-09-20 19:11:56"
## [1] "Computing FIA: Iteration stopped at 2022-09-20 19:11:56"
## Time difference of 0.112844 secs
## Presenting the best result out of 5 minimization runs.
## [1] "Model fitting begins at 2022-09-20 19:11:56"
## Optimization routine for dataset(s) 4 12 17 18 34 38
## did not converge succesfully. Tried again with use.gradient == FALSE.
## Optimization for dataset(s) 4 12 17 18 34 38
## using numerically estimated gradients produced better results. Using those results.
## Old results saved in output == 'full' [['optim.runs']].
## [1] "Model fitting stopped at 2022-09-20 19:11:58"
## Time difference of 2.10513 secs
mA.3 <- fit.mpt(
  data = cn,
  model.filename = textConnection(PIT2A.eqn),
  restrictions.filename = textConnection(restrict.3),
  fia = 20000
)
## [1] "Computing FIA: Iteration begins at 2022-09-20 19:11:58"
## [1] "Computing FIA: Iteration stopped at 2022-09-20 19:11:58"
## Time difference of 0.1115551 secs
## Presenting the best result out of 5 minimization runs.
## [1] "Model fitting begins at 2022-09-20 19:11:58"
## Optimization routine for dataset(s) 18 38
## did not converge succesfully. Tried again with use.gradient == FALSE.
## Optimization for dataset(s) 18 38
## using numerically estimated gradients produced better results. Using those results.
## Old results saved in output == 'full' [['optim.runs']].
## [1] "Model fitting stopped at 2022-09-20 19:12:00"
## Time difference of 1.879295 secs
# A-dominating
( g.sq.I.equal3 <- mA.1[["goodness.of.fit"]][["aggregated"]][["G.Squared"]] -
    mA.0[["goodness.of.fit"]][["aggregated"]][["G.Squared"]] )
## [1] 272.1413
( df.I.equal3 <- mA.1[["goodness.of.fit"]][["aggregated"]][["df"]] -
    mA.0[["goodness.of.fit"]][["aggregated"]][["df"]] )
## [1] 1
( p.value.I.equal3 <- pchisq(g.sq.I.equal3, df.I.equal3, lower.tail = FALSE) )
## [1] 3.874688e-61
( w.I.equal3 <- sqrt(g.sq.I.equal3 / sum(cn)) )
## [1] 0.1672574

( g.sq.U.equal3 <- mA.2[["goodness.of.fit"]][["aggregated"]][["G.Squared"]] -
    mA.0[["goodness.of.fit"]][["aggregated"]][["G.Squared"]] )
## [1] 0.0489538
( df.U.equal3 <- mA.2[["goodness.of.fit"]][["aggregated"]][["df"]] -
    mA.0[["goodness.of.fit"]][["aggregated"]][["df"]] )
## [1] 1
( p.value.U.equal3 <- pchisq(g.sq.U.equal3, df.U.equal3, lower.tail = FALSE) )
## [1] 0.8248938
( w.U.equal3 <- sqrt(g.sq.U.equal3 / sum(cn)) )
## [1] 0.002243269

( g.sq.B.equal3 <- mA.3[["goodness.of.fit"]][["aggregated"]][["G.Squared"]] -
    mA.0[["goodness.of.fit"]][["aggregated"]][["G.Squared"]] )
## [1] 2.535747
( df.B.equal3 <- mA.3[["goodness.of.fit"]][["aggregated"]][["df"]] -
    mA.0[["goodness.of.fit"]][["aggregated"]][["df"]] )
## [1] 1
( p.value.B.equal3 <- pchisq(g.sq.B.equal3, df.B.equal3, lower.tail = FALSE) )
## [1] 0.1112942
( w.B.equal3 <- sqrt(g.sq.B.equal3 / sum(cn)) )
## [1] 0.01614512
mC.table3 <- tibble(Parameter = c("IE", "UE", "RB"),
                    G_squared = c(0, 0, 0), df = c(0, 0, 0),
                    p = c(0, 0, 0), w = c(0, 0, 0))
mC.table3$G_squared[1] <- g.sq.I.equal3
mC.table3$df[1] <- df.I.equal3
mC.table3$p[1] <- p.value.I.equal3
mC.table3$w[1] <- w.I.equal3

mC.table3$G_squared[2] <- g.sq.U.equal3
mC.table3$df[2] <- df.U.equal3
mC.table3$p[2] <- p.value.U.equal3
mC.table3$w[2] <- w.U.equal3

mC.table3$G_squared[3] <- g.sq.B.equal3
mC.table3$df[3] <- df.B.equal3
mC.table3$p[3] <- p.value.B.equal3
mC.table3$w[3] <- w.B.equal3

mC.table3 %>% kable(digits = c(0, 2, 2, 5, 2), caption = "A-First, Deadline = 1s")
Parameter | G_squared | df | p | w |
---|---|---|---|---|
IE | 272.14 | 1 | 0.00000 | 0.17 |
UE | 0.05 | 1 | 0.82489 | 0.00 |
RB | 2.54 | 1 | 0.11129 | 0.02 |
mcomp.res1 <- select.mpt(list(mC.0, mA.0), output = "full")
mcomp.res1[, c("model", "n.parameters", "G.Squared.aggregated",
               "FIA.aggregated", "AIC.aggregated", "BIC.aggregated")] %>%
  mutate(model = c("C-First", "A-First")) %>%
  kable(digits = 4, caption = "Model Comparison")
model | n.parameters | G.Squared.aggregated | FIA.aggregated | AIC.aggregated | BIC.aggregated |
---|---|---|---|---|---|
C-First | 6 | 5.718 | 23.1691 | 17.718 | 60.8146 |
A-First | 6 | 5.718 | 23.1659 | 17.718 | 60.8146 |
The two models are nearly equivalent: which one comes out ahead changes from run to run. Their parameter estimates differ, however; in the A-dominating model, UE is very low.
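Because the FIA term is estimated by Monte Carlo sampling, re-running the comparison illustrates this instability. A minimal sketch, assuming mC.0 and mA.0 were both fit to cn with PIT2C.eqn and PIT2A.eqn respectively (our addition, not in the original script):

# Hypothetical re-run: refit both models with fresh FIA samples and compare again;
# the FIA ordering of the two models can flip across such runs.
mC.0b <- fit.mpt(data = cn, model.filename = textConnection(PIT2C.eqn), fia = 200000)
mA.0b <- fit.mpt(data = cn, model.filename = textConnection(PIT2A.eqn), fia = 200000)
select.mpt(list(mC.0b, mA.0b))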
sessionInfo()
## R version 4.2.1 (2022-06-23)
## Platform: x86_64-apple-darwin17.0 (64-bit)
## Running under: macOS Big Sur ... 10.16
##
## Matrix products: default
## BLAS: /Library/Frameworks/R.framework/Versions/4.2/Resources/lib/libRblas.0.dylib
## LAPACK: /Library/Frameworks/R.framework/Versions/4.2/Resources/lib/libRlapack.dylib
##
## locale:
## [1] en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8
##
## attached base packages:
## [1] stats graphics grDevices utils datasets methods base
##
## other attached packages:
## [1] icons_0.2.0 klippy_0.0.0.9500 boot_1.3-28 MKinfer_0.7
## [5] MPTinR_1.14.1 Superpower_0.2.0 permuco_1.1.2 rstatix_0.7.0
## [9] ggbeeswarm_0.6.0 cowplot_1.1.1 knitr_1.40 gghalves_0.1.3
## [13] ggpubr_0.4.0 psych_2.2.5 forcats_0.5.2 stringr_1.4.1
## [17] dplyr_1.0.10 purrr_0.3.4 readr_2.1.2 tidyr_1.2.1
## [21] tibble_3.1.8 ggplot2_3.3.6 tidyverse_1.3.2
##
## loaded via a namespace (and not attached):
## [1] googledrive_2.0.0 minqa_1.2.4 colorspace_2.0-3
## [4] ggsignif_0.6.3 ellipsis_0.3.2 estimability_1.4.1
## [7] fs_1.5.2 rstudioapi_0.14 farver_2.1.1
## [10] bit64_4.0.5 fansi_1.0.3 mvtnorm_1.1-3
## [13] lubridate_1.8.0 MKdescr_0.7 xml2_1.3.3
## [16] splines_4.2.1 mnormt_2.1.0 cachem_1.0.6
## [19] afex_1.1-1 jsonlite_1.8.0 nloptr_2.0.3
## [22] broom_1.0.1 dbplyr_2.2.1 compiler_4.2.1
## [25] httr_1.4.4 emmeans_1.8.1-1 backports_1.4.1
## [28] assertthat_0.2.1 Matrix_1.5-1 fastmap_1.1.0
## [31] gargle_1.2.1 cli_3.4.0 htmltools_0.5.3
## [34] tools_4.2.1 gmp_0.6-6 lmerTest_3.1-3
## [37] coda_0.19-4 gtable_0.3.1 glue_1.6.2
## [40] reshape2_1.4.4 rappdirs_0.3.3 Rcpp_1.0.9
## [43] carData_3.0-5 cellranger_1.1.0 jquerylib_0.1.4
## [46] vctrs_0.4.1 arrangements_1.1.9 nlme_3.1-159
## [49] xfun_0.33 lme4_1.1-30 rvest_1.0.3
## [52] lifecycle_1.0.2 pacman_0.5.1 googlesheets4_1.0.1
## [55] MASS_7.3-58.1 scales_1.2.1 vroom_1.5.7
## [58] hms_1.1.2 Brobdingnag_1.2-7 parallel_4.2.1
## [61] yaml_2.3.5 sass_0.4.2 stringi_1.7.8
## [64] highr_0.9 permute_0.9-7 rlang_1.0.5
## [67] pkgconfig_2.0.3 evaluate_0.16 lattice_0.20-45
## [70] labeling_0.4.2 bit_4.0.4 tidyselect_1.1.2
## [73] plyr_1.8.7 magrittr_2.0.3 R6_2.5.1
## [76] magick_2.7.3 generics_0.1.3 DBI_1.1.3
## [79] mgcv_1.8-40 pillar_1.8.1 haven_2.5.1
## [82] withr_2.5.0 abind_1.4-5 modelr_0.1.9
## [85] crayon_1.5.1 car_3.1-0 utf8_1.2.2
## [88] tzdb_0.3.0 rmarkdown_2.16 grid_4.2.1
## [91] readxl_1.4.1 reprex_2.0.2 digest_0.6.29
## [94] xtable_1.8-4 numDeriv_2016.8-1.1 munsell_0.5.0
## [97] beeswarm_0.4.0 vipor_0.4.5 bslib_0.4.0