diff --git a/R/p_significance.R b/R/p_significance.R
index dc3d02091..78d2c2287 100644
--- a/R/p_significance.R
+++ b/R/p_significance.R
@@ -151,8 +151,16 @@ p_significance.lm <- function(x, threshold = "default", ci = 0.95, verbose = TRU
   out <- result$out
   posterior <- result$posterior
 
-  # calculate the ROPE range
-  if (all(threshold == "default")) {
+  # calculate the ROPE range - for multiple thresholds, we have to check
+  # each list element for "default", to replace it with the appropriate range
+  if (is.list(threshold)) {
+    threshold <- lapply(threshold, function(i) {
+      if (all(i == "default")) {
+        i <- bayestestR::rope_range(x, verbose = verbose)
+      }
+      i
+    })
+  } else if (all(threshold == "default")) {
     threshold <- bayestestR::rope_range(x, verbose = verbose)
   }
 
diff --git a/tests/testthat/test-p_significance.R b/tests/testthat/test-p_significance.R
index 16c71f4f2..d40305d29 100644
--- a/tests/testthat/test-p_significance.R
+++ b/tests/testthat/test-p_significance.R
@@ -1,33 +1,38 @@
 skip_on_cran()
-skip_if_not_installed("bayestestR", minimum_version = "0.14.1")
+skip_if_not_installed("bayestestR", minimum_version = "0.14.0.10")
+skip_if_not_installed("distributional")
+skip_if_not_installed("withr")
 
-test_that("p_significance", {
-  data(mtcars)
-  m <- lm(mpg ~ gear + wt + cyl + hp, data = mtcars)
-  set.seed(123)
-  x <- p_significance(m)
-  expect_identical(c(nrow(x), ncol(x)), c(5L, 5L))
-  expect_named(x, c("Parameter", "CI", "CI_low", "CI_high", "ps"))
-  expect_snapshot(print(x))
+withr::with_environment(
+  new.env(),
+  test_that("p_significance", {
+    data(mtcars)
+    m <<- lm(mpg ~ gear + wt + cyl + hp, data = mtcars)
+    set.seed(123)
+    x <- p_significance(m)
+    expect_identical(c(nrow(x), ncol(x)), c(5L, 5L))
+    expect_named(x, c("Parameter", "CI", "CI_low", "CI_high", "ps"))
+    expect_snapshot(print(x))
 
-  mp <- model_parameters(m)
-  set.seed(123)
-  x2 <- p_significance(mp)
-  expect_equal(x$ps, x2$ps, tolerance = 1e-4)
+    mp <- model_parameters(m)
+    set.seed(123)
+    x2 <- p_significance(mp)
+    expect_equal(x$ps, x2$ps, tolerance = 1e-4)
 
-  set.seed(123)
-  x <- p_significance(m, ci = 0.8)
-  expect_equal(x$ps, c(1, 0.3983, 0.9959, 0.6188, 0), tolerance = 1e-3)
+    set.seed(123)
+    x <- p_significance(m, ci = 0.8)
+    expect_equal(x$ps, c(1, 0.3983, 0.9959, 0.6188, 0), tolerance = 1e-3)
 
-  set.seed(123)
-  x <- p_significance(m, threshold = 0.5)
-  expect_equal(x$ps, c(1, 0.4393, 0.9969, 0.6803, 0), tolerance = 1e-4)
+    set.seed(123)
+    x <- p_significance(m, threshold = 0.5)
+    expect_equal(x$ps, c(1, 0.4393, 0.9969, 0.6803, 0), tolerance = 1e-4)
 
-  set.seed(123)
-  # Test p_significance with custom thresholds for specific parameters
-  x <- p_significance(m, threshold = list(cyl = 0.5, wt = 0.7))
-  expect_equal(x$ps, c(1, 0.6002, 0.995, 0.6805, 0), tolerance = 1e-4)
-})
+    set.seed(123)
+    # Test p_significance with custom thresholds for specific parameters
+    x <- p_significance(m, threshold = list(cyl = 0.5, wt = 0.7))
+    expect_equal(x$ps, c(1, 0.5982, 0.9955, 0.6803, 1e-04), tolerance = 1e-4)
+  })
+)
 
 test_that("p_significance, glmmTMB", {
   skip_if_not_installed("glmmTMB")