set.seed(12345) # for reproducibility
pacman::p_load(tidyverse, knitr, psych, ggpubr)
pacman::p_load(afex, emmeans, rstatix)
pacman::p_load_gh("thomasp85/patchwork", "RLesur/klippy", "mitchelloharawild/icons")
source('HmiscHelper.R')
# 210615. Rmisc::summarySEwithin returns the mean of the *normalized* data.
# We therefore use the function of the same name defined in the sourced R file
# above instead of the package version; unlike Rmisc::summarySEwithin, it also
# returns the non-normalized means. It was modified to call plyr functions
# explicitly so that it does not clash with dplyr.
# http://www.cookbook-r.com/Graphs/Plotting_means_and_error_bars_(ggplot2)/
# See also: https://github.com/ccoolbaugh/FrontPhysiol_Coolbaugh_Cooling_Protocol
options(knitr.kable.NA = '')
options(dplyr.summarise.inform=FALSE) # suppress warning in regards to regrouping
set_sum_contrasts() # see Singmann & Kellen (2020)
klippy::klippy()
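The helper sourced above replaces `Rmisc::summarySEwithin`, as noted in the comments. For orientation, here is a minimal sketch (assuming the Cookbook for R approach linked above; not the actual contents of `HmiscHelper.R`, and with hypothetical argument names) of the within-subject normalization involved:

```r
# Minimal sketch of Cousineau-style re-centering used by summarySEwithin-type
# functions: remove each participant's offset, then summarize each condition.
normed_summary_sketch <- function(df, dv, id, within) {
  grand_mean <- mean(df[[dv]])
  df %>%
    group_by(.data[[id]]) %>%
    mutate(normed = .data[[dv]] - mean(.data[[dv]]) + grand_mean) %>%  # subject offsets removed
    ungroup() %>%
    group_by(.data[[within]]) %>%
    summarise(raw_mean    = mean(.data[[dv]]),       # non-normalized condition mean
              normed_mean = mean(normed),            # what Rmisc::summarySEwithin reports
              se          = sd(normed) / sqrt(n()),  # within-subject SE
              .groups     = "drop")
  # The full summarySEwithin additionally applies Morey's (2008) correction
  # factor sqrt(M / (M - 1)), where M is the number of within-subject levels;
  # it is omitted here for brevity.
}
```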
## Plots
# stat summary plot to 25% quartile and 75% quartile: https://bit.ly/3iFpV07
single_hori_plot <- function(df, X, xMin, xMax, xBy, xLab){
  df %>% ggplot(aes(x = 1, y = X)) +
    geom_violin(width = .9, trim = TRUE) +
    ggbeeswarm::geom_quasirandom(dodge.width = 0.5, color = "blue", size = 3,
                                 alpha = 0.2, show.legend = FALSE) +
    # stat_summary(fun.data = "mean_cl_boot", color = "darkred", size = 1) +
    geom_pointrange(stat = "summary",
                    fun.min = function(z) {quantile(z, 0.25)},
                    fun.max = function(z) {quantile(z, 0.75)},
                    fun = median, color = "darkred", size = 1) +
    scale_y_continuous(breaks = seq(xMin, xMax, by = xBy)) +
    coord_flip(ylim = c(xMin, xMax), clip = "on") +
    labs(y = xLab) +
    theme_bw(base_size = 18) +
    theme(panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          axis.title.y = element_blank(),
          axis.ticks.y = element_blank(),
          axis.text.y = element_blank(),
          aspect.ratio = .3)
}
plot2x2spaghetti <- function(df, ylabel, yrange){
  df.w <- df %>% pivot_wider(id_cols = c('Group', 'SID'),
                             names_from = 'Session', values_from = V)
  p <- ggplot(data = df, aes(x = Group, y = V, fill = Session)) +
    geom_violin(width = 0.5, trim = TRUE) +
    geom_point(position = position_dodge(0.5),
               color = "gray80", size = 1.8, show.legend = FALSE) +
    geom_segment(data = filter(df.w, Group == "Real"), inherit.aes = FALSE,
                 aes(x = 1 - .12, y = filter(df.w, Group == "Real")$Pre,
                     xend = 1 + .12, yend = filter(df.w, Group == "Real")$Post),
                 color = "gray80") +
    geom_segment(data = filter(df.w, Group == "Sham"), inherit.aes = FALSE,
                 aes(x = 2 - .12, y = filter(df.w, Group == "Sham")$Pre,
                     xend = 2 + .12, yend = filter(df.w, Group == "Sham")$Post),
                 color = "gray80") +
    geom_pointrange(data = summarySEwithin(data = df, measurevar = "V", idvar = "SID",
                                           withinvars = "Session", betweenvars = "Group"),
                    aes(x = Group, ymin = V - ci, ymax = V + ci, group = Session),
                    position = position_dodge(0.5), color = "darkred", size = 1, show.legend = FALSE) +
    scale_fill_manual(values = c("#E69F00", "#56B4E9"),
                      labels = c("Pre", "Post")) +
    labs(x = "Group",
         y = ylabel,
         fill = 'Session') +
    theme_bw(base_size = 18) +
    theme(panel.grid.major = element_blank(),
          panel.grid.minor = element_blank())
  if(missing(yrange)){
    p
  } else {
    p + coord_cartesian(ylim = yrange, clip = "on")
  }
}
## Excluding Ss
rm_subject <- function(df, rx){
  for (i in rx){
    df <- df %>% filter(SID != i) %>% droplevels()
  }
  cat(sprintf('%d removed & %d left',
              length(unique(rx)),
              length(unique(df$SID))))
  return(df)
}
We first identify outliers.
d <- read_csv('metaRDMmain.csv', show_col_types = FALSE)
d$Group <- factor(d$Group, levels=c(1,2), labels=c("Real","Sham"))
d$Session <- factor(d$Session, levels=c(1,2), labels=c("Pre","Post"))
d$TargLoc <- factor(d$TargLoc, levels=c(1,2), labels=c("L","R"))
unique(d$Resp) # 0 = no response
## [1] 1 2 0
unique(d$Correct)
## [1] 0 1
unique(d$Conf) # 0, 6 = no response or multiple keys
## [1] 2 1 3 4 0 6
We computed the proportion of trials in which no response was entered.
# % excluded trials: no perceptual response, or no/invalid confidence response
d %>% filter(Session == 'Pre') %>%
  mutate(X = ifelse(Resp==0, 1, ifelse((Conf==0)|(Conf==5), 1, 0))) %>%
  group_by(SID) %>%
  summarise(Excl = mean(X)*100) %>%
  ungroup() %>%
  print(n = Inf) # note SID 26
## # A tibble: 32 × 2
## SID Excl
## <dbl> <dbl>
## 1 1 4.17
## 2 2 3.33
## 3 3 0
## 4 4 15
## 5 5 0
## 6 6 5.83
## 7 7 0.833
## 8 8 0
## 9 9 1.67
## 10 10 0.833
## 11 11 1.25
## 12 12 2.92
## 13 13 0
## 14 14 0
## 15 15 0
## 16 16 1.67
## 17 17 0
## 18 18 0
## 19 19 0.833
## 20 20 0.833
## 21 21 4.58
## 22 22 0
## 23 23 0
## 24 24 0.417
## 25 25 0.833
## 26 26 44.6
## 27 27 5
## 28 28 0
## 29 29 0
## 30 30 0.417
## 31 31 0.833
## 32 32 3.75
d26 <- d %>% filter(SID == 26)
table(d26$Resp, d26$Session) # roughly 50% of Pre trials had no response
##
## Pre Post
## 0 106 0
## 1 106 189
## 2 28 51
F1 <- d %>% filter(Session == 'Pre') %>%
  mutate(X = ifelse(Resp==0, 1, ifelse((Conf==0)|(Conf==5), 1, 0))) %>%
  group_by(SID) %>%
  summarise(Excl = mean(X)*100) %>%
  ungroup() %>%
  single_hori_plot(.$Excl, 0, 100, 20, "[Pre] % Excluded")
F2 <- d %>% filter(Session == 'Post') %>%
  mutate(X = ifelse(Resp==0, 1, ifelse((Conf==0)|(Conf==5), 1, 0))) %>%
  group_by(SID) %>%
  summarise(Excl = mean(X)*100) %>%
  ungroup() %>%
  single_hori_plot(.$Excl, 0, 100, 20, "[Post] % Excluded")
F1 + F2
Participant 26 failed to enter a perceptual response on about half of the pre-tDCS trials.
F1 <- d %>% filter(Session == 'Pre') %>%
  filter(Resp > 0, Conf > 0, Conf < 5) %>%
  group_by(SID) %>%
  summarise(M = mean(Conf)) %>%
  ungroup() %>%
  single_hori_plot(.$M, 1, 4, 0.5, "[Pre] Mean Confidence")
F2 <- d %>% filter(Session == 'Post') %>%
  filter(Resp > 0, Conf > 0, Conf < 5) %>%
  group_by(SID) %>%
  summarise(M = mean(Conf)) %>%
  ungroup() %>%
  single_hori_plot(.$M, 1, 4, 0.5, "[Post] Mean Confidence")
F1 + F2
F1 <- d %>% filter(Session == 'Pre') %>%
  filter(Resp > 0) %>%
  group_by(SID) %>%
  summarise(M = mean(Correct)) %>%
  ungroup() %>%
  single_hori_plot(.$M, 0.5, 1, .1, "[Pre] Perceptual Accuracy")
F2 <- d %>% filter(Session == 'Post') %>%
  filter(Resp > 0) %>%
  group_by(SID) %>%
  summarise(M = mean(Correct)) %>%
  ungroup() %>%
  single_hori_plot(.$M, 0.5, 1, .1, "[Post] Perceptual Accuracy")
F1 + F2
F1 <- d %>% filter(Session == 'Pre') %>%
  filter(Resp > 0) %>%
  group_by(SID) %>%
  summarise(M = mean(Difficulty)) %>%
  ungroup() %>%
  single_hori_plot(.$M, 0, 1, .2, "[Pre] Motion Coherence")
F2 <- d %>% filter(Session == 'Post') %>%
  filter(Resp > 0) %>%
  group_by(SID) %>%
  summarise(M = mean(Difficulty)) %>%
  ungroup() %>%
  single_hori_plot(.$M, 0, 1, .2, "[Post] Motion Coherence")
F1 + F2
We computed \(d'\), meta-\(d'\), and the M-ratio (meta-\(d'/d'\)) with Maniscalco's code. The inputs to the trials2counts() and fit_meta_d_MLE() functions were as shown in the code below. Because many cells of the 4-point confidence scale had zero counts, the ratings were collapsed to a 2-point scale, following the fit_meta_d_MLE() help. Since some counts were still zero, a small constant (adj_f = 1/length(nR_S1) = 0.25) was added to every cell.
% raw data: metaRDMmain.csv
stimID = TargLoc - 1;
response = Resp - 1;
rating = Conf;
nRatings = 2;
cellpad = 1; % pad cells whose confidence-rating count is 0.
[nR_S1, nR_S2] = trials2counts(stimID, response, rating, nRatings, cellpad);
fit = fit_meta_d_MLE(nR_S1, nR_S2);
% output: tDCS1_estimated.csv
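To make the padding concrete, a small R illustration follows (not Maniscalco's code; the counts are hypothetical). With the 2-point scale, nR_S1 and nR_S2 each contain 2 × nRatings = 4 cells, so adj_f = 1/4 = 0.25:

```r
nR_S1 <- c(41, 59, 27, 0)   # hypothetical S1 response counts; note the empty cell
adj_f <- 1 / length(nR_S1)  # = 0.25
nR_S1 + adj_f               # padding adds the same small constant to every cell
```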
mtb2 <- read_csv('tDCS1_estimated_2scale.csv') %>% filter(SID != 26)
## Rows: 32 Columns: 15
## ── Column specification ────────────────────────────────────────────────────────
## Delimiter: ","
## dbl (15): #, SID, Group, p1da, p1meta_da, p1M_diff, p1M_ratio, p1meta_ca, ex...
##
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
selectMtb <- function(mtb, V1, V2){
  df <- mtb %>% select(Group, SID, all_of(V1), all_of(V2)) %>%
    pivot_longer(cols = c(all_of(V1), all_of(V2)), names_to = "Session", values_to = "V") %>%
    mutate(Group = fct_recode(as_factor(Group), "Real" = '1', "Sham" = '2')) %>%
    mutate(Session = fct_recode(Session, "Pre" = V1, "Post" = V2))
  return(df)
}
# d'
td <- mtb2 %>% selectMtb('p1da', 'p2da')

F1 <- td %>% filter(Session == 'Pre') %>%
  single_hori_plot(.$V, 0, 2, .5, expression(paste("[Pre] ", italic(d), "'")))
F2 <- td %>% filter(Session == 'Post') %>%
  single_hori_plot(.$V, 0, 2, .5, expression(paste("[Post] ", italic(d), "'")))
F1 + F2
# meta-d'
tm <- mtb2 %>% selectMtb('p1meta_da', 'p2meta_da')

F1 <- tm %>% filter(Session == 'Pre') %>%
  single_hori_plot(.$V, -1, 3, 1, expression(paste("[Pre] meta-", italic(d), "'")))
F2 <- tm %>% filter(Session == 'Post') %>%
  single_hori_plot(.$V, -1, 3, 1, expression(paste("[Post] meta-", italic(d), "'")))
F1 + F2
Participant 8, whose post-session value exceeded \(\mu \pm 3SD\) (red dashed line), was classified as an outlier.
# meta-ratio
tr <- mtb2 %>% selectMtb('p1M_ratio', 'p2M_ratio')

tr %>% filter(Session == 'Pre') %>%
  mutate(lbound = mean(.$V) - 3*sd(.$V),
         ubound = mean(.$V) + 3*sd(.$V)) %>%
  mutate(Outlier = (V < lbound)|(V > ubound)) %>%
  filter(Outlier == TRUE) %>%
  droplevels()
## # A tibble: 0 × 7
## # … with 7 variables: Group <fct>, SID <dbl>, Session <fct>, V <dbl>,
## # lbound <dbl>, ubound <dbl>, Outlier <lgl>
tr %>% filter(Session == 'Post') %>%
  mutate(lbound = mean(.$V) - 3*sd(.$V),
         ubound = mean(.$V) + 3*sd(.$V)) %>%
  mutate(Outlier = (V < lbound)|(V > ubound)) %>%
  filter(Outlier == TRUE) %>%
  droplevels()
## # A tibble: 1 × 7
## Group SID Session V lbound ubound Outlier
## <fct> <dbl> <fct> <dbl> <dbl> <dbl> <lgl>
## 1 Sham 8 Post 2.96 -0.754 2.67 TRUE
F1 <- tr %>% filter(Session == 'Pre') %>%
  single_hori_plot(.$V, -1, 3, 1, '[Pre] M-Ratio')
F2 <- tr %>% filter(Session == 'Post') %>%
  single_hori_plot(.$V, -1, 3, 1, '[Post] M-Ratio') +
  geom_hline(yintercept=2.96, linetype='dashed', color='red', size=0.5)
F1 + F2
Participants 26 and 8 were excluded from the final analysis (Real group: 16, Sham group: 14).
d2 <- d %>% rm_subject(c(8, 26)) %>% filter(Resp > 0, Conf > 0, Conf < 5)
## 2 removed & 30 left
mtb2 <- mtb2 %>% rm_subject(c(8, 26))
## 2 removed & 30 left
Accuracy in every condition stayed close to 70.7%, the level targeted by the 1-up-2-down staircase (Levitt, 1971).
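The target value follows from the staircase rule itself: coherence is lowered only after two consecutive correct responses, so the procedure converges where the probability of a downward step equals 0.5,

\[
p_c^2 = 0.5 \quad\Longrightarrow\quad p_c = \sqrt{0.5} \approx 0.707.
\]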
accuracy <- d2 %>%
  group_by(Group, Session, SID) %>%
  summarize(V = mean(Correct)*100) %>%
  ungroup()

accuracy %>%
  group_by(Group, Session) %>%
  summarize(Accuracy = mean(V)) %>%
  ungroup() %>%
  pivot_wider(names_from = 'Session', values_from = 'Accuracy') %>%
  kable(digits = 4, caption = 'Mean Type-1 Accuracy')
Group | Pre | Post |
---|---|---|
Real | 70.6339 | 70.2332 |
Sham | 73.6954 | 71.3001 |
accuracy %>%
  aov_ez(id = 'SID', dv = 'V',
         between = 'Group', within = 'Session') %>%
  anova(es="pes") %>%
  kable(digits = 4, caption = 'Type-1 Accuracy ANOVA')
| | num Df | den Df | MSE | F | pes | Pr(>F) |
---|---|---|---|---|---|---|
Group | 1 | 28 | 37.2686 | 1.7073 | 0.0575 | 0.2020 |
Session | 1 | 28 | 30.3294 | 0.9624 | 0.0332 | 0.3350 |
Group:Session | 1 | 28 | 30.3294 | 0.4897 | 0.0172 | 0.4898 |
plot2x2spaghetti(accuracy, "Type-1 Accuracy")
Because the staircase fixed accuracy, any effect of tDCS on perceptual decisions should have been reflected in the motion coherence levels. The between-condition differences in coherence, however, were not notable.
coherence <- d2 %>%
  group_by(Group, Session, SID) %>%
  summarize(V = mean(Difficulty)) %>%
  ungroup()

coherence %>%
  group_by(Group, Session) %>%
  summarize(Coherence = mean(V)) %>%
  ungroup() %>%
  pivot_wider(names_from = 'Session', values_from = 'Coherence') %>%
  kable(digits = 4, caption = 'Mean Motion Coherence')
Group | Pre | Post |
---|---|---|
Real | 0.1754 | 0.1755 |
Sham | 0.2057 | 0.1886 |
coherence %>%
  aov_ez(id = 'SID', dv = 'V',
         between = 'Group', within = 'Session') %>%
  anova(es="pes") %>%
  kable(digits = 4, caption = 'Mean Motion Coherence ANOVA')
| | num Df | den Df | MSE | F | pes | Pr(>F) |
---|---|---|---|---|---|---|
Group | 1 | 28 | 0.0041 | 1.7005 | 0.0573 | 0.2028 |
Session | 1 | 28 | 0.0013 | 0.7971 | 0.0277 | 0.3796 |
Group:Session | 1 | 28 | 0.0013 | 0.8201 | 0.0285 | 0.3729 |
plot2x2spaghetti(coherence, "Coherence")
To check whether tDCS affected confidence, we analyzed mean confidence ratings; no noticeable effect emerged. (Note that, compared with the other measures, each participant's mean confidence appears to stay quite stable across sessions, and the confidence intervals are small.)
confidence <- d2 %>% group_by(Group, Session, SID) %>%
  summarise(V = mean(Conf)) %>%
  ungroup()

confidence %>%
  group_by(Group, Session) %>%
  summarize(Confidence = mean(V)) %>%
  ungroup() %>%
  pivot_wider(names_from = 'Session', values_from = 'Confidence') %>%
  kable(digits = 4, caption = 'Mean Confidence')
Group | Pre | Post |
---|---|---|
Real | 2.2947 | 2.2701 |
Sham | 2.7090 | 2.5926 |
confidence %>%
  aov_ez(id = 'SID', dv = 'V',
         between = 'Group', within = 'Session') %>%
  anova(es="pes") %>%
  kable(digits = 4, caption = 'Mean Confidence ANOVA')
| | num Df | den Df | MSE | F | pes | Pr(>F) |
---|---|---|---|---|---|---|
Group | 1 | 28 | 0.5167 | 3.9219 | 0.1229 | 0.0576 |
Session | 1 | 28 | 0.0457 | 1.6227 | 0.0548 | 0.2132 |
Group:Session | 1 | 28 | 0.0457 | 0.6900 | 0.0240 | 0.4132 |
plot2x2spaghetti(confidence, "Confidence", c(1,4))
\(d'\) was not affected by tDCS at all.
td <- mtb2 %>% selectMtb('p1da', 'p2da')

td %>% group_by(Group, Session) %>%
  summarise(Sensitivity = mean(V)) %>%
  ungroup() %>%
  pivot_wider(names_from = 'Session', values_from = 'Sensitivity') %>%
  kable(digits = 4, caption = 'Mean Sensitivity')
Group | Pre | Post |
---|---|---|
Real | 1.2115 | 1.1788 |
Sham | 1.3662 | 1.2234 |
td %>% aov_ez(id = 'SID', dv = 'V',
              between = 'Group', within = 'Session') %>%
  anova(es="pes") %>%
  kable(digits = 4, caption = 'Sensitivity ANOVA')
| | num Df | den Df | MSE | F | pes | Pr(>F) |
---|---|---|---|---|---|---|
Group | 1 | 28 | 0.1192 | 1.2442 | 0.0425 | 0.2741 |
Session | 1 | 28 | 0.1295 | 0.8871 | 0.0307 | 0.3543 |
Group:Session | 1 | 28 | 0.1295 | 0.3488 | 0.0123 | 0.5595 |
td %>% plot2x2spaghetti(ylabel = expression(paste(italic(d), "'")))
Notably, participants' \(d'\) was not stable across the two sessions, suggesting that the present task does not estimate \(d'\) reliably.
Meta-\(d'\) was likewise unaffected by tDCS.
## meta-d'
tm <- mtb2 %>% selectMtb('p1meta_da', 'p2meta_da')

tm %>% group_by(Group, Session) %>%
  summarise(MetaSensitivity = mean(V)) %>%
  ungroup() %>%
  pivot_wider(names_from = 'Session', values_from = 'MetaSensitivity') %>%
  kable(digits = 4, caption = 'Mean Meta-Sensitivity')
Group | Pre | Post |
---|---|---|
Real | 0.8537 | 0.9993 |
Sham | 1.3540 | 1.0626 |
tm %>% aov_ez(id = 'SID', dv = 'V',
              between = 'Group', within = 'Session') %>%
  anova(es="pes") %>%
  kable(digits = 4, caption = 'Meta-Sensitivity ANOVA')
| | num Df | den Df | MSE | F | pes | Pr(>F) |
---|---|---|---|---|---|---|
Group | 1 | 28 | 0.4311 | 2.7511 | 0.0895 | 0.1083 |
Session | 1 | 28 | 0.1833 | 0.4336 | 0.0152 | 0.5156 |
Group:Session | 1 | 28 | 0.1833 | 3.8888 | 0.1219 | 0.0586 |
tm %>% plot2x2spaghetti(ylabel = expression(paste("Meta-", italic(d), "'")))
As with \(d'\), participants' meta-\(d'\) was not stable across the two sessions.
For the metacognitive efficiency measure, the two-way interaction was significant: metacognitive efficiency improved significantly in the Real group.
## Ratio
tr <- mtb2 %>% selectMtb('p1M_ratio', 'p2M_ratio')

tr %>% group_by(Group, Session) %>%
  summarise(Mratio = mean(V)) %>%
  ungroup() %>%
  pivot_wider(names_from = 'Session', values_from = 'Mratio') %>%
  kable(digits = 4, caption = 'Mean M-Ratio')
Group | Pre | Post |
---|---|---|
Real | 0.6886 | 0.9027 |
Sham | 1.0169 | 0.8826 |
tmp <- tr %>% aov_ez(id = 'SID', dv = 'V',
                     between = 'Group', within = 'Session')

tmp %>%
  anova(es="pes") %>%
  kable(digits = 4, caption = 'M-Ratio ANOVA')
| | num Df | den Df | MSE | F | pes | Pr(>F) |
---|---|---|---|---|---|---|
Group | 1 | 28 | 0.2833 | 1.2526 | 0.0428 | 0.2726 |
Session | 1 | 28 | 0.0691 | 0.3437 | 0.0121 | 0.5624 |
Group:Session | 1 | 28 | 0.0691 | 6.5588 | 0.1898 | 0.0161 |
tmp2 <- tmp %>% emmeans::emmeans(pairwise ~ Session | Group)
tmp2$contrasts %>% kable(digits = 4, caption = 'Post hoc')
contrast | Group | estimate | SE | df | t.ratio | p.value |
---|---|---|---|---|---|---|
Pre - Post | Real | -0.2141 | 0.0929 | 28 | -2.3036 | 0.0289 |
Pre - Post | Sham | 0.1343 | 0.0993 | 28 | 1.3520 | 0.1872 |
tr %>% plot2x2spaghetti(ylabel = expression(paste("meta-", italic(d), "'/", italic(d), "'")))
We now examine age and sex ratio by group for the 30 participants remaining after the two exclusions.
demo <- read.csv('tDCS1_demographic.csv', header = TRUE)
head(demo)
## SN Sex Group Age Excluded
## 1 1 Male Real 25 0
## 2 7 Male Real 25 0
## 3 10 Male Real 24 0
## 4 23 Male Real 27 0
## 5 27 Male Real 28 0
## 6 4 Female Real 22 0
demo <- demo %>% filter(Excluded == 0)
table(demo$Group, demo$Sex) %>%
  kable(caption = 'Group x Sex')
| | Female | Male |
---|---|---|
Real | 11 | 5 |
Sham | 7 | 7 |
demo %>% group_by(Group) %>%
  summarise(M = mean(Age),
            SD = sd(Age)) %>%
  ungroup() %>%
  kable(digits = 2, caption = 'Group x Age')
Group | M | SD |
---|---|---|
Real | 23.44 | 2.45 |
Sham | 23.86 | 2.57 |
In a \(\chi^2\)-test on a 2 × 2 (four-cell) table, the p-value tends to be too small, inflating the Type I error rate, so Yates' continuity correction is applied (correct = TRUE).
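For reference, the corrected statistic shrinks each absolute deviation from expectation by 0.5 before squaring:

\[
\chi^2_{\text{Yates}} = \sum_i \frac{\left(\left|O_i - E_i\right| - 0.5\right)^2}{E_i},
\]

where \(O_i\) and \(E_i\) are the observed and expected counts of the four cells.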
# chi-squared test
t <- table(demo$Group, demo$Sex)
chisq.test(t, correct = TRUE)
##
## Pearson's Chi-squared test with Yates' continuity correction
##
## data: t
## X-squared = 0.45201, df = 1, p-value = 0.5014
The group difference in sex ratio was not significant.
t.test(data = demo,
       Age ~ Group,
       alternative = "two.sided",
       paired = FALSE,
       var.equal = TRUE)
##
## Two Sample t-test
##
## data: Age by Group
## t = -0.45782, df = 28, p-value = 0.6506
## alternative hypothesis: true difference in means between group Real and group Sham is not equal to 0
## 95 percent confidence interval:
## -2.297230 1.457945
## sample estimates:
## mean in group Real mean in group Sham
## 23.43750 23.85714
The group difference in age was also not significant. The effect size can be computed as follows.
# https://www.datanovia.com/en/lessons/t-test-effect-size-using-cohens-d-measure/
cohens_d(data = demo,
         Age ~ Group,
         var.equal = TRUE,
         hedges.correction = TRUE)
## # A tibble: 1 × 7
## .y. group1 group2 effsize n1 n2 magnitude
## * <chr> <chr> <chr> <dbl> <int> <int> <ord>
## 1 Age Real Sham -0.163 16 14 negligible
# With sample sizes below 50, use Hedges' corrected version (Hedges & Olkin, 1985).
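For reference, the Hedges correction multiplies Cohen's \(d\) by a small-sample factor; a commonly used approximation is

\[
g = d \left(1 - \frac{3}{4(n_1 + n_2) - 9}\right),
\]

which for \(n_1 = 16\) and \(n_2 = 14\) gives a factor of about 0.973, consistent with the reported effect size of \(-0.163\).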
sessionInfo()
## R version 4.1.2 (2021-11-01)
## Platform: x86_64-apple-darwin17.0 (64-bit)
## Running under: macOS Big Sur 10.16
##
## Matrix products: default
## BLAS: /Library/Frameworks/R.framework/Versions/4.1/Resources/lib/libRblas.0.dylib
## LAPACK: /Library/Frameworks/R.framework/Versions/4.1/Resources/lib/libRlapack.dylib
##
## locale:
## [1] en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8
##
## attached base packages:
## [1] stats graphics grDevices utils datasets methods base
##
## other attached packages:
## [1] icons_0.2.0 klippy_0.0.0.9500 patchwork_1.1.1 rstatix_0.7.0
## [5] emmeans_1.7.2 afex_1.0-1 lme4_1.1-27.1 Matrix_1.4-0
## [9] ggpubr_0.4.0 psych_2.1.9 knitr_1.37 forcats_0.5.1
## [13] stringr_1.4.0 dplyr_1.0.7 purrr_0.3.4 readr_2.1.1
## [17] tidyr_1.1.4 tibble_3.1.6 ggplot2_3.3.5 tidyverse_1.3.1
##
## loaded via a namespace (and not attached):
## [1] ggbeeswarm_0.6.0 TH.data_1.1-0 minqa_1.2.4
## [4] colorspace_2.0-2 ggsignif_0.6.3 ellipsis_0.3.2
## [7] estimability_1.3 fs_1.5.2 rstudioapi_0.13
## [10] farver_2.1.0 bit64_4.0.5 fansi_1.0.2
## [13] mvtnorm_1.1-3 lubridate_1.8.0 xml2_1.3.3
## [16] codetools_0.2-18 splines_4.1.2 mnormt_2.0.2
## [19] jsonlite_1.7.3 nloptr_1.2.2.3 broom_0.7.11
## [22] dbplyr_2.1.1 compiler_4.1.2 httr_1.4.2
## [25] backports_1.4.1 assertthat_0.2.1 fastmap_1.1.0
## [28] cli_3.1.1 htmltools_0.5.2 tools_4.1.2
## [31] lmerTest_3.1-3 coda_0.19-4 gtable_0.3.0
## [34] glue_1.6.1 reshape2_1.4.4 rappdirs_0.3.3
## [37] Rcpp_1.0.8 carData_3.0-5 cellranger_1.1.0
## [40] jquerylib_0.1.4 vctrs_0.3.8 nlme_3.1-155
## [43] xfun_0.29 rvest_1.0.2 lifecycle_1.0.1
## [46] pacman_0.5.1 MASS_7.3-55 zoo_1.8-9
## [49] scales_1.1.1 vroom_1.5.7 hms_1.1.1
## [52] parallel_4.1.2 sandwich_3.0-1 yaml_2.2.1
## [55] sass_0.4.0 stringi_1.7.6 highr_0.9
## [58] boot_1.3-28 rlang_0.4.12 pkgconfig_2.0.3
## [61] evaluate_0.14 lattice_0.20-45 labeling_0.4.2
## [64] bit_4.0.4 tidyselect_1.1.1 plyr_1.8.6
## [67] magrittr_2.0.1 R6_2.5.1 generics_0.1.1
## [70] multcomp_1.4-18 DBI_1.1.2 pillar_1.6.4
## [73] haven_2.4.3 withr_2.4.3 survival_3.2-13
## [76] abind_1.4-5 modelr_0.1.8 crayon_1.4.2
## [79] car_3.0-12 utf8_1.2.2 tmvnsim_1.0-2
## [82] tzdb_0.2.0 rmarkdown_2.11 grid_4.1.2
## [85] readxl_1.3.1 reprex_2.0.1 digest_0.6.29
## [88] xtable_1.8-4 numDeriv_2016.8-1.1 munsell_0.5.0
## [91] beeswarm_0.4.0 vipor_0.4.5 bslib_0.3.1