Functions performing data analysis and facilitating data
reporting
Functions computing the correlations (the variables included depend on the study)
CorS1 <- function(x){
  cor(dplyr::select(x, Moral_DIL, Heure_TOT))
}
CorS2S3 <- function(x){
  cor(dplyr::select(x, Moral_SCA, Heure_TOT))
}
CorS4 <- function(x){
  cor(dplyr::select(x, Moral_DIL, Moral_SCA, Moral_CAR, Heure_TOT))
}
CorS5S6 <- function(x){
  cor(dplyr::select(x, Moral_DIL, Moral_SCA, Heure_TOT))
}
CorFUNCTION <- function(x) {
  # Select the correlation function matching the (single) study contained in x
  study <- unique(x$Study)
  if (study == 1) {
    CorS1(x)
  } else if (study %in% c(2, 3)) {
    CorS2S3(x)
  } else if (study == 4) {
    CorS4(x)
  } else if (study %in% c(5, 6)) {
    CorS5S6(x)
  } else {
    NA
  }
}
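To illustrate how these helpers are meant to be used together, the following sketch (not part of the original script) assumes a hypothetical pooled data frame Data.prim with one row per participant, a Study column coded 1 to 6, and the sleep/moral variables selected above; it returns one correlation matrix per study.
# Illustrative sketch only: `Data.prim` is a hypothetical pooled data frame
List.Cor.prim.sketch <- lapply(split(Data.prim, Data.prim$Study), CorFUNCTION)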
Function computing variance/covariance matrix based on the
equation approach (Steiger, 1980). All the credit for this function
goes to Prof. Wolfgang Viechtbauer.
rmat <- function(x, n, upper=TRUE, simplify=TRUE, rtoz=FALSE, data) {
if (inherits(x, "formula")) {
options(na.action = "na.pass")
if (missing(data))
stop("Must specify 'data' argument when 'x' is a formula.")
if (!is.data.frame(data))
data <- data.frame(data)
dat <- get_all_vars(x, data=data)
if (ncol(dat) != 4)
stop("Incorrect number of variables specified in formula.")
id <- dat[,4]
dat <- split(dat, id)
res <- list()
for (i in 1:length(dat)) {
ri <- dat[[i]][[1]]
var1 <- as.character(dat[[i]][[2]])
var2 <- as.character(dat[[i]][[3]])
vars <- sort(unique(c(var1, var2)))
R <- matrix(NA, nrow=length(vars), ncol=length(vars))
diag(R) <- 1
rownames(R) <- colnames(R) <- vars
for (j in 1:length(var1)) {
R[var1[j],var2[j]] <- R[var2[j],var1[j]] <- ri[j]
}
res[[i]] <- R
}
return(rmat(res, n=n, simplify=TRUE, rtoz=rtoz))
}
if (is.list(x)) {
k <- length(x)
if (length(x) != length(n))
stop("Argument 'n' must be of the same length as there are elements in 'x'.")
res <- list()
for (i in 1:k) {
res[[i]] <- rmat(x[[i]], n[i], upper=upper, rtoz=rtoz)
}
if (simplify) {
ki <- sapply(res, function(x) ifelse(is.null(x$dat), 0, nrow(x$dat)))
dat <- cbind(id=rep(1:k, times=ki), do.call(rbind, lapply(res, "[[", "dat")))
V <- bldiag(lapply(res[ki > 0], "[[", "V"))
rownames(V) <- colnames(V) <- unlist(lapply(res, function(x) rownames(x$V)))
return(list(dat=dat, V=V))
} else {
return(res)
}
}
if (!is.matrix(x))
stop("Argument 'x' must be a matrix (or list thereof).")
if (dim(x)[1] != dim(x)[2])
stop("Argument 'x' must be a square matrix (or list thereof).")
dimsx <- nrow(x)
dnames <- paste0("x", 1:dimsx)
### in case x has dimension names, use those
if (!is.null(rownames(x)))
dnames <- rownames(x)
if (!is.null(colnames(x)))
dnames <- colnames(x)
### in case x is a 1x1 (or 0x0) matrix, return nothing
if (dimsx <= 1L)
return(list(dat=NULL, V=NULL))
### make x symmetric, depending on whether we use upper or lower part
if (upper) {
x[lower.tri(x)] <- t(x)[lower.tri(x)]
} else {
x[upper.tri(x)] <- t(x)[upper.tri(x)]
}
### check if x is symmetric (can be skipped since x must now be symmetric)
#if (!isSymmetric(x))
# stop("x must be a symmetric matrix.")
### stack upper/lower triangular part of x into a column vector (this is always done column-wise!)
if (upper) {
ri <- cbind(x[upper.tri(x)])
} else {
ri <- cbind(x[lower.tri(x)])
}
### apply r-to-z transformation if requested
if (rtoz)
ri <- 1/2 * log((1 + ri)/(1 - ri))
### I and J are matrices with 1:dimsx for rows and columns, respectively
I <- matrix(1:dimsx, nrow=dimsx, ncol=dimsx)
J <- matrix(1:dimsx, nrow=dimsx, ncol=dimsx, byrow=TRUE)
### get upper/lower triangular elements of I and J
if (upper) {
I <- I[upper.tri(I)]
J <- J[upper.tri(J)]
} else {
I <- I[lower.tri(I)]
J <- J[lower.tri(J)]
}
### dimensions in V (must be dimsx*(dimsx-1)/2)
dimsV <- length(ri)
### set up V matrix
V <- matrix(NA, nrow=dimsV, ncol=dimsV)
for (ro in 1:dimsV) {
for (co in 1:dimsV) {
i <- I[ro]
j <- J[ro]
k <- I[co]
l <- J[co]
### Olkin & Finn (1995), equation 5, page 157
V[ro,co] <- 1/2 * x[i,j]*x[k,l] * (x[i,k]^2 + x[i,l]^2 + x[j,k]^2 + x[j,l]^2) +
x[i,k]*x[j,l] + x[i,l]*x[j,k] -
(x[i,j]*x[i,k]*x[i,l] + x[j,i]*x[j,k]*x[j,l] + x[k,i]*x[k,j]*x[k,l] + x[l,i]*x[l,j]*x[l,k])
### Steiger (1980), equation 2, page 245 (provides the same result - checked)
#V[ro,co] <- 1/2 * ((x[i,k] - x[i,j]*x[j,k]) * (x[j,l] - x[j,k]*x[k,l]) +
# (x[i,l] - x[i,k]*x[k,l]) * (x[j,k] - x[j,i]*x[i,k]) +
# (x[i,k] - x[i,l]*x[l,k]) * (x[j,l] - x[j,i]*x[i,l]) +
# (x[i,l] - x[i,j]*x[j,l]) * (x[j,k] - x[j,l]*x[l,k]))
### Steiger (1980), equation 11, page 247 for r-to-z transformed values
if (rtoz)
V[ro,co] <- V[ro,co] / ((1 - x[i,j]^2) * (1 - x[k,l]^2))
}
}
### divide V by (n-1) for raw correlations and by (n-3) for r-to-z transformed correlations
if (rtoz) {
V <- V/(n-3)
} else {
V <- V/(n-1)
}
### create matrix with var1 and var2 names and sort rowwise
dmat <- cbind(dnames[I], dnames[J])
dmat <- t(apply(dmat, 1, sort))
### set row/column names for V
var1var2 <- paste0(dmat[,1], ".", dmat[,2])
rownames(V) <- colnames(V) <- var1var2
return(list(dat=data.frame(yi=ri, var1=dmat[,1], var2=dmat[,2], var1var2=var1var2, stringsAsFactors=FALSE), V=V))
}
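To make the input and output of rmat concrete, here is a purely illustrative toy example (not part of the original analysis): a 3 x 3 correlation matrix observed in n = 100 participants is turned into the stacked correlations ($dat) and their sampling variance/covariance matrix ($V).
# Toy example (illustrative only)
R.toy <- matrix(c(1.0, 0.3, 0.2,
                  0.3, 1.0, 0.4,
                  0.2, 0.4, 1.0),
                nrow = 3,
                dimnames = list(c("A", "B", "C"), c("A", "B", "C")))
rmat(R.toy, n = 100)               # raw correlations
rmat(R.toy, n = 100, rtoz = TRUE)  # Fisher's z scale, as used below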
Function computing the correlation and variance/covariance
matrix while converting Pearson’s r to Fisher’s z
RmatFUNCTION<-function(x,y){
rmat(x, n=y, rtoz=TRUE)}
Function extracting adjusted confidence intervals for
correlations
CorCIFUNCTION<-function(x,y){
CorCI(x,y, conf.level = (1-(0.05)))
}
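CorCI() comes from the DescTools package and returns the correlation together with its lower and upper confidence bounds. A quick illustrative call with arbitrary values:
# Illustrative only: returns cor, lwr.ci and upr.ci for r = -0.18, n = 121
CorCIFUNCTION(-0.18, 121)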
Function extracting critical information from meta-analyses
(performed using metafor)
ExtractMetaFUNCTION<-function(x){
cbind(
Raw.r = FisherZInv(as.numeric(as.character(x$b))),
p.val = as.numeric(as.character(x$pval)),
Adj.p.val=as.numeric(as.character(x$pval)),
Adj.CIlow=FisherZInv(x$ci.lb),
Adj.CIup=FisherZInv(x$ci.ub),
TOST1=abs(FisherZInv(as.numeric(as.character(x$b))-as.numeric(as.character(x$se*qnorm(1-2*(0.05/(2))))))),
TOST2=abs(FisherZInv(as.numeric(as.character(x$b))+as.numeric(as.character(x$se*qnorm(1-2*(0.05/(2))))))),
Cochran.Q=x$QE,
Cochran.Q.p.val=x$QEp)
}
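Note that TOST1 and TOST2 back-transform the bounds of a 90% confidence interval around the pooled Fisher's z estimate (b ± 1.645 × se), the interval used for the two one-sided equivalence tests; the line below simply verifies the critical value appearing in the code above.
# Critical value used in TOST1/TOST2 above
qnorm(1 - 2 * (0.05/2))   # = qnorm(0.95), approximately 1.645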
Function extracting critical information about
heterogeneity
# x is the rma object and y the variance-covariance matrix
ExtractHeterogeneityFUNCTION<-function(x, y){
W<-solve(y)
X<-model.matrix(x)
P <- W - W %*% X %*% solve(t(X) %*% W %*% X) %*% t(X) %*% W
cbind(
stud.out=as.character(data.frame(x$g.levels.k)[,1]),
N=data.frame(x$g.levels.k)[,2],
Tau.sq=x$tau2,
Tau=sqrt(x$tau2),
Cochran.Q=x$QE,
Cochran.Q.p.val=x$QEp,
I.sq=100 *x$tau2 / (x$tau2 + (x$k-x$p)/sum(diag(P))))
}
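For reference, the I.sq column computed above restates, in formula form, the generalization of I² commonly used for models fitted with rma.mv:

$$ I^2 = 100 \cdot \frac{\hat\tau^2}{\hat\tau^2 + (k - p)/\operatorname{tr}(P)}, \qquad P = W - W X (X^\top W X)^{-1} X^\top W, \quad W = V^{-1}, $$

with k the number of effect sizes and p the number of model coefficients.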
Function selecting the relevant entries of the correlation or variance/covariance matrix (i.e., only the information regarding the relationship between sleep and moral outcomes, and not, for example, the relationship between two sleep indicators)
PredictFUNCTION<-function(x){
x[str_detect(row.names(x), "Heure") & str_detect(row.names(x), "Moral"),
str_detect(colnames(x), "Heure") & str_detect(colnames(x), "Moral")]}
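A toy illustration (not part of the original script, and assuming stringr is loaded) of what PredictFUNCTION keeps: only rows and columns whose label contains both "Heure" and "Moral" survive, so sleep-sleep and moral-moral entries are dropped.
# Toy example (illustrative only)
V.toy <- diag(3)
rownames(V.toy) <- colnames(V.toy) <- c("Heure_TOT.Moral_DIL",
                                        "Heure_TOT.Moral_SCA",
                                        "Moral_DIL.Moral_SCA")
PredictFUNCTION(V.toy)   # keeps only the first two rows/columns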
S1b. Meta-analysis
# Correlation (Fisher's z) between all sleep indicators and all outcomes for each study
ES.prim_transit<-do.call(rbind, lapply(List.Cor.Varcovar.prim, function(x) x$dat))
ES.prim_transit1<-data.frame(cbind(
stud.out=row.names(ES.prim_transit), ES.prim_transit))
ES.prim_transit2<-separate(ES.prim_transit1, stud.out,
into=c("Study"))
## Warning: Expected 1 pieces. Additional pieces discarded in 12 rows [4, 5, 6, 7,
## 8, 9, 10, 11, 12, 13, 14, 15].
# Here, we remove all correlations that do not concern the sleep-moral outcome relationship (k=40)
ES.Prim<-ES.prim_transit2 %>%
rename("Outcome"=var2,
"Predictor"=var1,
"Out.Pred"=var1var2) %>%
mutate(
Asso.of.int=case_when(
str_detect(Outcome, "Heure") & str_detect(Predictor, "Moral") ~ 1,
str_detect(Outcome, "Moral") & str_detect(Predictor, "Heure") ~ 1)) %>%
filter(Asso.of.int==1)
ES.Prim$N<-rep(N.prim, c(1,1,1,3,2,2))
ES.Prim$ES.ID<-1:nrow(ES.Prim)
#Variance-covariance matrix for each study (N=6)
List.Varcovar.prim_transit1<-lapply(List.Cor.Varcovar.prim, function(x) x$V)
# We apply PredictFUNCTION to keep only the associations of interest
List.Varcovar.prim<-lapply(List.Varcovar.prim_transit1, PredictFUNCTION)
V.Prim = bldiag(list(
List.Varcovar.prim[[1]],
List.Varcovar.prim[[2]],
List.Varcovar.prim[[3]],
List.Varcovar.prim[[4]],
List.Varcovar.prim[[5]],
List.Varcovar.prim[[6]]))
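Since List.Varcovar.prim is already a list of matrices, an equivalent and more compact construction would also work (shown here only as a note, not used below):
# Equivalent alternative: bldiag() also accepts a list of matrices directly
# V.Prim <- bldiag(List.Varcovar.prim)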
# Primary analysis
Meta.Prim<-rma.mv(yi,V.Prim,
random = ~ Out.Pred | Study,
struct="UN",
data=ES.Prim)
# Moderation analysis
Meta.Deux<-rma.mv(yi,V.Prim,
mods=~Outcome-1,
random = ~ Out.Pred | Study,
struct="UN",
data=ES.Prim)
Meta.Prim
##
## Multivariate Meta-Analysis Model (k = 10; method: REML)
##
## Variance Components:
##
## outer factor: Study (nlvls = 6)
## inner factor: Out.Pred (nlvls = 3)
##
## estim sqrt k.lvl fixed level
## tau^2.1 0.0318 0.1785 1 no Heure_TOT.Moral_CAR
## tau^2.2 0.0072 0.0849 4 no Heure_TOT.Moral_DIL
## tau^2.3 0.0411 0.2028 5 no Heure_TOT.Moral_SCA
##
##                     rho.H_TOT.M_C rho.H_TOT.M_D rho.H_TOT.M_S   H_TOT.M_C H_TOT.M_D H_TOT.M_S
## Heure_TOT.Moral_CAR             1                                       -         1         1
## Heure_TOT.Moral_DIL        0.2994             1                        no         -         3
## Heure_TOT.Moral_SCA        0.9790        0.4876             1          no        no         -
##
## Test for Heterogeneity:
## Q(df = 9) = 23.5454, p-val = 0.0051
##
## Model Results:
##
## estimate se zval pval ci.lb ci.ub
## -0.1359 0.0605 -2.2468 0.0247 -0.2544 -0.0173 *
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Meta.Deux
##
## Multivariate Meta-Analysis Model (k = 10; method: REML)
##
## Variance Components:
##
## outer factor: Study (nlvls = 6)
## inner factor: Out.Pred (nlvls = 3)
##
## estim sqrt k.lvl fixed level
## tau^2.1 0.0022 0.0468 1 no Heure_TOT.Moral_CAR
## tau^2.2 0.0087 0.0932 4 no Heure_TOT.Moral_DIL
## tau^2.3 0.0119 0.1092 5 no Heure_TOT.Moral_SCA
##
##                     rho.H_TOT.M_C rho.H_TOT.M_D rho.H_TOT.M_S   H_TOT.M_C H_TOT.M_D H_TOT.M_S
## Heure_TOT.Moral_CAR             1                                       -         1         1
## Heure_TOT.Moral_DIL        0.0000             1                        no         -         3
## Heure_TOT.Moral_SCA        0.0000        1.0000             1          no        no         -
##
## Test for Residual Heterogeneity:
## QE(df = 7) = 10.1715, p-val = 0.1791
##
## Test of Moderators (coefficients 1:3):
## QM(df = 3) = 13.4814, p-val = 0.0037
##
## Model Results:
##
## estimate se zval pval ci.lb ci.ub
## OutcomeMoral_CAR 0.0483 0.1103 0.4375 0.6617 -0.1679 0.2645
## OutcomeMoral_DIL -0.1866 0.0671 -2.7828 0.0054 -0.3180 -0.0552 **
## OutcomeMoral_SCA 0.0330 0.0712 0.4631 0.6433 -0.1066 0.1725
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Summary of the results
#individual studies
ES.Prim$Raw.r<-FisherZInv(ES.Prim$yi)
df.ES.Prim<-data.frame(cbind(
ES.Prim,
t(apply(ES.Prim[,c('Raw.r','N')], 1,
function(x) CorCIFUNCTION(x[1], x[2])))[,c('lwr.ci','upr.ci')]))
colnames(df.ES.Prim)[c(10:11)] <- c("CIlow", "CIup")
#Meta-analyses
df.Meta.Res1.Prim<-as.data.frame(ExtractMetaFUNCTION(Meta.Prim))
df.Meta.Prim.z<- df.Meta.Res1.Prim %>%
dplyr::rename(
"CIlow" = "Adj.CIlow",
"CIup" = "Adj.CIup") %>%
dplyr::mutate(
Study="Pooled Effect Size",
Outcome="All outcomes",
N=sum(N.prim)) %>%
rowwise() %>%
dplyr::mutate(TOST=max(TOST1, TOST2)) %>%
dplyr::select(-c(TOST1,TOST2))
df.Meta.Prim<-df.Meta.Prim.z[,c(8:10,1:2, 4:7, 11)]
df.Results.Prim<-data.frame(bind_rows(df.Meta.Prim, df.ES.Prim[,-c(3,5,6)]))
df.Results.Prim$Outcome<-dplyr::recode(df.Results.Prim$Outcome,
Moral_DIL="Dilemmas",
Moral_SCA="Scale",
Moral_CAR="Autonomous.Cars")
# Summary of the meta-analysis
kable(df.Results.Prim[1,])
| Study              | Outcome      | N   | Raw.r      | p.val     | CIlow      | CIup       | Cochran.Q | Cochran.Q.p.val | TOST      | yi | ES.ID |
|--------------------|--------------|-----|------------|-----------|------------|------------|-----------|-----------------|-----------|----|-------|
| Pooled Effect Size | All outcomes | 488 | -0.1350288 | 0.0246526 | -0.2490246 | -0.0173427 | 23.54538  | 0.005081        | 0.2310692 | NA | NA    |
#Summary of the primary studies
kable(df.Results.Prim[2:nrow(df.Results.Prim),c(
'Study', 'Outcome', 'N', 'Raw.r', 'CIlow', 'CIup')])
|    | Study | Outcome         | N   | Raw.r      | CIlow      | CIup       |
|----|-------|-----------------|-----|------------|------------|------------|
| 2  | 1     | Dilemmas        | 121 | -0.1839007 | -0.3508794 | -0.0055876 |
| 3  | 2     | Scale           | 101 | 0.0412951  | -0.1553983 | 0.2348390  |
| 4  | 3     | Scale           | 18  | -0.1891369 | -0.6027801 | 0.3046330  |
| 5  | 4     | Dilemmas        | 100 | -0.2538084 | -0.4288473 | -0.0604014 |
| 6  | 4     | Scale           | 100 | 0.0187643  | -0.1783110 | 0.2143922  |
| 7  | 4     | Autonomous.Cars | 100 | 0.0592862  | -0.1387476 | 0.2527609  |
| 8  | 5     | Dilemmas        | 48  | -0.2929617 | -0.5327500 | -0.0096284 |
| 9  | 5     | Scale           | 48  | -0.2088947 | -0.4654056 | 0.0799876  |
| 10 | 6     | Dilemmas        | 100 | 0.0145262  | -0.1824124 | 0.2103441  |
| 11 | 6     | Scale           | 100 | 0.2394571  | 0.0451631  | 0.4162953  |
Heterogeneity
Heterogeneity.Prim<-as.data.frame(ExtractHeterogeneityFUNCTION(Meta.Prim, V.Prim))
Heterogeneity.Prim2<-data.frame(apply(Heterogeneity.Prim[,3:7], 2, function(x) as.numeric(as.character(x))))
kable(cbind(Heterogeneity.Prim[,1:2], Heterogeneity.Prim2))
| stud.out            | N | Tau.sq    | Tau       | Cochran.Q | Cochran.Q.p.val | I.sq     |
|---------------------|---|-----------|-----------|-----------|-----------------|----------|
| Heure_TOT.Moral_CAR | 1 | 0.0318445 | 0.1784503 | 23.54538  | 0.005081        | 75.04747 |
| Heure_TOT.Moral_DIL | 4 | 0.0072100 | 0.0849119 | 23.54538  | 0.005081        | 40.51032 |
| Heure_TOT.Moral_SCA | 5 | 0.0411452 | 0.2028428 | 23.54538  | 0.005081        | 79.53349 |
#Overall heterogeneity is low. The highest heterogeneity is found for the association between moral dilemmas and chronic sleep quality. Despite this, effect sizes remained systematically low.
kable(df.Results.Prim[df.Results.Prim$Outcome=="Dilemmas",1:7])
|    | Study | Outcome  | N   | Raw.r      | p.val | CIlow      | CIup       |
|----|-------|----------|-----|------------|-------|------------|------------|
| 2  | 1     | Dilemmas | 121 | -0.1839007 | NA    | -0.3508794 | -0.0055876 |
| 5  | 4     | Dilemmas | 100 | -0.2538084 | NA    | -0.4288473 | -0.0604014 |
| 8  | 5     | Dilemmas | 48  | -0.2929617 | NA    | -0.5327500 | -0.0096284 |
| 10 | 6     | Dilemmas | 100 | 0.0145262  | NA    | -0.1824124 | 0.2103441  |
Plot of the primary meta-analysis
df.graph.prim<-df.Results.Prim
df.graph.prim$pooled <- factor(ifelse(df.graph.prim$Outcome == "All outcomes", 1, 0))
df.graph.prim$Study<- factor(df.graph.prim$Study)
df.graph.prim$Study <- factor(df.graph.prim$Study,
levels=rev(levels(df.graph.prim$Study)))
df.graph.prim2 <- df.graph.prim[order(df.graph.prim$Outcome),]
p<-ggplot(data=df.graph.prim2) +
geom_hline(yintercept =0, linetype=2) +
geom_pointrange(position=position_dodge2(width=8),
aes(x = Study, y = Raw.r,
ymin = CIlow, ymax = CIup), size = 0)+
geom_point(aes(x = Study, y = Raw.r, color=pooled, size=pooled, fill = pooled, shape = pooled),
position=position_dodge2(width=8))+
xlab('')+ ylab("Correlation coefficient\n (95% Confidence Interval)")+
scale_color_manual(values=c("#999999", "red")) +
scale_fill_manual(values=c("#999999", "red")) +
scale_size_manual(values=c(2, 5)) +
scale_shape_manual(values=c(21, 22)) +
coord_flip()+
theme_bw() +
theme(legend.position="right",
axis.text.y=element_blank(),
axis.ticks.y=element_blank(),
axis.text.x=element_text(size=10, face="bold"),
axis.title.y=element_text(size=12,face="bold"),
axis.title.x=element_text(size=12,face="bold"),
strip.text.x = element_text(hjust=0.5,vjust =0.5,angle=0,
size=11),
legend.background = element_rect(linewidth=0.5, linetype="solid",
colour ="black"),
legend.box = "vertical")
p + guides(fill = "none", color = "none", shape = "none", size = "none", linetype = "none")
[Figure: forest-style plot of the raw correlations and 95% confidence intervals for each study, with the pooled effect size highlighted.]