## Loading tidyverse: ggplot2
## Loading tidyverse: tibble
## Loading tidyverse: tidyr
## Loading tidyverse: readr
## Loading tidyverse: purrr
## Loading tidyverse: dplyr
## Conflicts with tidy packages ----------------------------------------------
## filter(): dplyr, stats
## lag():    dplyr, stats
## Loading required package: Matrix
## 
## Attaching package: 'Matrix'
## The following object is masked from 'package:tidyr':
## 
##     expand
## Parsed with column specification:
## cols(
##   .default = col_double(),
##   participant_ID = col_integer(),
##   program_ID = col_integer(),
##   beep_ID = col_character(),
##   challenge = col_integer(),
##   good_at = col_integer(),
##   interest = col_integer(),
##   important = col_integer(),
##   future_goals = col_integer(),
##   enjoy = col_integer(),
##   learning = col_integer(),
##   happy = col_integer(),
##   excited = col_integer(),
##   frustrated = col_integer(),
##   bored = col_integer(),
##   stressed = col_integer(),
##   additional_comments = col_character(),
##   whom_working_with = col_number(),
##   response_date = col_date(format = ""),
##   time_after_signal_seconds = col_time(format = ""),
##   sixth_math_sociedad = col_integer()
##   # ... with 12 more columns
## )
## See spec(...) for full column specifications.
## Warning in rbind(names(probs), probs_f): number of columns of result is not
## a multiple of vector length (arg 1)
## Warning: 18 parsing failures.
## row # A tibble: 5 x 5 col     row                       col   expected           actual expected   <int>                     <chr>      <chr>            <chr> actual 1   417 time_after_signal_seconds valid date 12:20:591.000000 file 2   579 time_after_signal_seconds valid date 13:38:591.000000 row 3   582 time_after_signal_seconds valid date 14:35:591.000000 col 4   614 time_after_signal_seconds valid date 14:43:591.000000 expected 5   789 time_after_signal_seconds valid date 14:43:591.000000 actual # ... with 1 more variables: file <chr>
## ... ................. ... ............................................................. ........ ............................................................. ...... ............................................................. .... ............................................................. ... ............................................................. ... ............................................................. ........ ............................................................. ...... .......................................
## See problems(...) for more details.
## Parsed with column specification:
## cols(
##   program_ID = col_integer(),
##   program_name = col_character()
## )
## Parsed with column specification:
## cols(
##   .default = col_integer(),
##   sociedad_class = col_character(),
##   duration = col_time(format = ""),
##   problem_solving = col_time(format = ""),
##   study_related_survey = col_time(format = ""),
##   elaboration_on_content = col_time(format = ""),
##   showing_video = col_time(format = ""),
##   response_date = col_date(format = ""),
##   youth_activity = col_character(),
##   who_working_with = col_character(),
##   location = col_character()
## )
## See spec(...) for full column specifications.

So, the means for the two dependent variables are definitely higher than the intercepts:

# Composite engagement score — presumably a row-wise mean of the three items
# (hard_working, concentrating, enjoy); confirm against jmRtools docs.
df$overall_engagement <- jmRtools::composite_mean_maker(df, hard_working, concentrating, enjoy)

# Grand means of the two dependent variables.
# Use TRUE, not T: T is an ordinary variable and can be reassigned.
mean(df$interest, na.rm = TRUE)
## [1] 2.889899
mean(df$overall_engagement, na.rm = TRUE)
## [1] 2.864029

Correlations don’t seem too problematic:

# Lower-triangle correlation matrix of the engagement measures,
# formatted for display (corrr: correlate -> shave -> fashion).
df %>%
  select(overall_engagement, interest, challenge, relevance, learning, positive_affect) %>%
  correlate() %>%
  shave() %>%
  fashion()
##              rowname overall_engagement interest challenge relevance
## 1 overall_engagement                                                
## 2           interest                .69                             
## 3          challenge                .31      .28                    
## 4          relevance                .65      .61       .39          
## 5           learning                .68      .56       .30       .65
## 6    positive_affect                .65      .56       .27       .52
##   learning positive_affect
## 1                         
## 2                         
## 3                         
## 4                         
## 5                         
## 6      .48

Wondering if the issue has to do with several of the relationships between interest and the predictor variables being strongly positive — so that, as we move along the x axis on all of the variables, the intercept (holding the other variables constant) is even lower than we’d expect:

# One panel per predictor: interest (y) against the predictor's value (x),
# jittered points with a linear fit overlaid to eyeball the slopes.
df %>%
  select(interest, challenge, relevance, learning, positive_affect) %>%
  gather(predictor, value, -interest) %>%
  ggplot(aes(x = value, y = interest)) +
  facet_wrap("predictor") +
  geom_jitter(color = "gray") +
  geom_smooth(method = "lm", color = "red")
## Warning: Removed 1 rows containing non-finite values (stat_smooth).
## Warning: Removed 1 rows containing missing values (geom_point).

Correlations for the fixed effects don’t look too high:

# Mixed model for momentary interest: four fixed within-moment predictors plus
# crossed random intercepts for program, participant, and signal (beep),
# fit with lme4 via REML (the lmer default).
m3i <- lmer(interest ~ 1 +
                challenge + relevance + learning + positive_affect +
                (1|program_ID) + (1|participant_ID) + (1|beep_ID_new),
            data = df)

summary(m3i)
## Linear mixed model fit by REML ['lmerMod']
## Formula: 
## interest ~ 1 + challenge + relevance + learning + positive_affect +  
##     (1 | program_ID) + (1 | participant_ID) + (1 | beep_ID_new)
##    Data: df
## 
## REML criterion at convergence: 6671.2
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -4.3605 -0.4882  0.0061  0.5274  3.8503 
## 
## Random effects:
##  Groups         Name        Variance Std.Dev.
##  beep_ID_new    (Intercept) 0.019152 0.1384  
##  participant_ID (Intercept) 0.057106 0.2390  
##  program_ID     (Intercept) 0.009449 0.0972  
##  Residual                   0.498061 0.7057  
## Number of obs: 2969, groups:  
## beep_ID_new, 248; participant_ID, 203; program_ID, 9
## 
## Fixed effects:
##                 Estimate Std. Error t value
## (Intercept)      0.64679    0.06855   9.435
## challenge        0.02191    0.01488   1.473
## relevance        0.36162    0.02190  16.513
## learning         0.17436    0.01774   9.830
## positive_affect  0.27938    0.01775  15.741
## 
## Correlation of Fixed Effects:
##             (Intr) chllng relvnc lernng
## challenge   -0.259                     
## relevance   -0.230 -0.190              
## learning    -0.171 -0.076 -0.428       
## positv_ffct -0.299 -0.034 -0.279 -0.220
# Same specification as m3i, with the composite overall_engagement as the
# outcome instead of interest (same predictors and random-intercept structure).
m3v <- lmer(overall_engagement ~ 1 + 
                challenge + relevance + learning + positive_affect +
                (1|program_ID) + (1|participant_ID) + (1|beep_ID_new),
            data = df)

summary(m3v)
## Linear mixed model fit by REML ['lmerMod']
## Formula: 
## overall_engagement ~ 1 + challenge + relevance + learning + positive_affect +  
##     (1 | program_ID) + (1 | participant_ID) + (1 | beep_ID_new)
##    Data: df
## 
## REML criterion at convergence: 4437.9
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -5.6231 -0.5270  0.0259  0.5303  3.9380 
## 
## Random effects:
##  Groups         Name        Variance Std.Dev.
##  beep_ID_new    (Intercept) 0.008282 0.09101 
##  participant_ID (Intercept) 0.059744 0.24443 
##  program_ID     (Intercept) 0.000000 0.00000 
##  Residual                   0.226573 0.47600 
## Number of obs: 2969, groups:  
## beep_ID_new, 248; participant_ID, 203; program_ID, 9
## 
## Fixed effects:
##                 Estimate Std. Error t value
## (Intercept)      0.70115    0.04515  15.530
## challenge        0.03479    0.01039   3.347
## relevance        0.22660    0.01529  14.820
## learning         0.26682    0.01215  21.959
## positive_affect  0.27640    0.01240  22.295
## 
## Correlation of Fixed Effects:
##             (Intr) chllng relvnc lernng
## challenge   -0.287                     
## relevance   -0.284 -0.172              
## learning    -0.187 -0.081 -0.406       
## positv_ffct -0.340 -0.033 -0.264 -0.215