## ----echo=FALSE, results="hide", message=FALSE--------------------------------
require(knitr)
opts_chunk$set(error=FALSE, message=FALSE, warning=FALSE)

## ----style, echo=FALSE, results='asis'----------------------------------------
BiocStyle::markdown()

## ----install-bioc, eval = FALSE-----------------------------------------------
#  if (!requireNamespace("BiocManager", quietly = TRUE))
#      install.packages("BiocManager")
#  BiocManager::install("benchmarkfdrData2019")

## ----load-pkgs----------------------------------------------------------------
suppressPackageStartupMessages({
    library(ExperimentHub)
    library(benchmarkfdrData2019)
    library(SummarizedBenchmark)
    library(dplyr)
    library(ggplot2)
    library(rlang)
})

## ----eval = FALSE-------------------------------------------------------------
#  BiocManager::install("areyesq89/SummarizedBenchmark", ref = "fdrbenchmark")
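## (Not evaluated: this installs the fdrbenchmark branch of
## SummarizedBenchmark from GitHub, presumably the version used to generate
## the stored benchmark objects.)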

## -----------------------------------------------------------------------------
hub <- ExperimentHub()
bfdrData <- query(hub, "benchmarkfdrData2019")
bfdrData
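## query() narrows the hub down to the benchmarkfdrData2019 records; the
## resource metadata can be browsed before choosing one, e.g. (not run):
#  bfdrData$title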

## ----getid-chipres------------------------------------------------------------
cbp_id <- bfdrData$ah_id[bfdrData$title == "cbp-csaw-benchmark"]
cbp_id

## ----load-chipres-------------------------------------------------------------
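## Single-bracket subsetting only displays the record metadata; double-bracket
## subsetting downloads the resource (caching it locally) and loads it into
## the session as a SummarizedBenchmark object.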
bfdrData[cbp_id]

chipres <- bfdrData[[cbp_id]]
chipres

## ----load-yeastres------------------------------------------------------------
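## Resources can also be loaded through the accessor functions that
## benchmarkfdrData2019 exports under each resource title, here
## `yeast-results-de5`(). This resource is a list of SummarizedBenchmark
## objects, one per in silico replication.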
yeast_id <- bfdrData$ah_id[bfdrData$title == "yeast-results-de5"]
bfdrData[yeast_id]

yeastres <- `yeast-results-de5`()
length(yeastres)
yeastres[[1]]

## ----fix-sbobjs---------------------------------------------------------------
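## The BenchDesign slots stored with these objects appear to have been built
## with an older version of SummarizedBenchmark; replacing them with empty
## BenchDesign() objects keeps the objects compatible with the current class
## definition, while the assays holding the benchmark results are untouched.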
chipres@BenchDesign <- BenchDesign()
yeastres <- lapply(yeastres, function(x) { x@BenchDesign <- BenchDesign(); x })

## ----colnames-----------------------------------------------------------------
colnames(chipres)

## ----show-assay---------------------------------------------------------------
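## The "bench" assay stores one row per tested hypothesis and one column per
## method (the column names listed above), holding each method's significance
## results alongside the unadjusted p-values.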
dim(assay(chipres, "bench"))
head(assay(chipres, "bench"))

## ----default-metrics----------------------------------------------------------
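## availableMetrics() lists the performance metrics predefined in
## SummarizedBenchmark (e.g. rejections, TPR, FDR), some of which require
## ground-truth annotations to be present in the object.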
availableMetrics()

## ----chip-add-metrics---------------------------------------------------------
chipres <- addPerformanceMetric(chipres,
                                evalMetric = "rejections",
                                assay = "bench")
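## The metric definition is stored in the object and can be inspected before
## evaluation, e.g. (not run):
#  names(performanceMetrics(chipres)[["bench"]])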

## ----chip-est-metrics---------------------------------------------------------
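## estimatePerformanceMetrics() evaluates each stored metric at every alpha
## cutoff supplied; with tidy = TRUE the result is a long-format data.frame
## with one row per method, metric, and alpha combination, plus columns for
## the method parameters (e.g. param.alpha).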
chipdf <- estimatePerformanceMetrics(chipres,
                                     alpha = seq(0.01, 0.10, by = .01),
                                     tidy = TRUE)

dim(chipdf)
head(chipdf)

## ----chip-clean-cols----------------------------------------------------------
## keep only the IHW results computed at the matching alpha cutoff and drop
## the alpha suffix from the method label
chipdf <- dplyr::filter(chipdf, !(grepl("ihw", label) & param.alpha != alpha))
chipdf <- dplyr::mutate(chipdf, label = gsub("(ihw)-a\\d+", "\\1", label))

## keep only the default (df03) setting for the BL method
chipdf <- dplyr::filter(chipdf, ! label %in% paste0("bl-df0", c(2, 4, 5)))

## ----chip-subset-cols---------------------------------------------------------
chipdf <- dplyr::select(chipdf, label, performanceMetric, alpha, value)
chipdf <- dplyr::filter(chipdf, !is.na(value))
head(chipdf)

## ----chip-plot-rejs, fig.width = 8, fig.height = 5----------------------------
ggplot(chipdf, aes(x = alpha, y = value, color = label)) +
    geom_point() +
    geom_line() +
    scale_color_viridis_d("Method") +
    scale_x_continuous(breaks = seq(0, 1, .01), limits = c(0, .11)) +
    ylab("Rejections") +
    theme_bw() +
    ggtitle("Number of rejections across multiple-testing methods",
            "ChIP-seq CBP differential analysis with informative covariate")

## ----yeast-subset-------------------------------------------------------------
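## Only the first 10 replicates are used below to keep the example quick.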
yeastres10 <- yeastres[1:10]

## ----yeast-already-metrics----------------------------------------------------
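## The yeast objects already come with performance metrics defined for the
## "qvalue" assay (including FDR and TPR, which rely on the ground truth
## stored with the simulated data), so addPerformanceMetric() is not needed
## here.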
names(performanceMetrics(yeastres10[[1]])[["qvalue"]])

## ----yeast-eval-metrics-------------------------------------------------------
yeastdf <- lapply(yeastres10, estimatePerformanceMetrics,
                  alpha = seq(0.01, 0.10, by = .01), tidy = TRUE)

## ----yeast-merge-reps---------------------------------------------------------
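## Stack the per-replicate data.frames into one; .id = "rep" records which
## replicate each row came from.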
yeastdf <- dplyr::bind_rows(yeastdf, .id = "rep")

## ----yeast-clean-cols---------------------------------------------------------
## keep only the IHW results computed at the matching alpha cutoff and drop
## the alpha suffix from the method label
yeastdf <- dplyr::filter(yeastdf,
                         !(grepl("ihw", label) & param.alpha != alpha))
yeastdf <- dplyr::mutate(yeastdf, label = gsub("(ihw)-a\\d+", "\\1", label))

## keep only the default (df03) setting for the BL method
yeastdf <- dplyr::filter(yeastdf, ! label %in% paste0("bl-df0", c(2, 4, 5)))

yeastdf <- dplyr::select(yeastdf, rep, label, performanceMetric, alpha, value)
yeastdf <- dplyr::filter(yeastdf, !is.na(value))
head(yeastdf)

## ----yeast-summarize----------------------------------------------------------
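## Average each metric across replicates and compute a standard error
## (sd / sqrt(n)) for every method, metric, and alpha combination.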
yeastdf <- dplyr::group_by(yeastdf, label, performanceMetric, alpha)
yeastdf <- dplyr::summarize(yeastdf,
                            meanValue = mean(value),
                            seValue = sd(value) / sqrt(n()))
yeastdf <- dplyr::ungroup(yeastdf)

## ----yeast-plot-all, fig.width = 9, fig.height = 5----------------------------
yeastdf %>%
    dplyr::filter(performanceMetric %in% c("FDR", "TPR")) %>%
    ggplot(aes(x = alpha, y = meanValue,
               color = label,
               ymin = meanValue - seValue,
               ymax = meanValue + seValue)) + 
    geom_point() +
    geom_errorbar(width = .01 / 4, alpha = 1/4) +
    geom_line(alpha = 1/2) +
    scale_color_viridis_d("Method") +
    scale_x_continuous(breaks = seq(0, 1, .01), limits = c(0, .11)) +
    facet_wrap(~ performanceMetric, scales = 'free_y', nrow = 1) +
    ylab("average across replicates") +
    theme_bw() +
    geom_abline(aes(intercept = i_, slope = s_), color = 'red', linetype = 2,
                data = tibble(performanceMetric = 'FDR', i_ = 0, s_ = 1)) +
    ggtitle("FDR and TPR across multiple-testing methods",
            "yeast in silico experiment with informative covariate")

## ----chip-show-data-----------------------------------------------------------
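## To benchmark a new method on the same data, the inputs are pulled back out
## of the stored object: the unadjusted p-values from the assay and the
## informative covariate kept in the rowData.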
dat <- tibble(pval = assay(chipres)[, "unadjusted"],
              covariate = rowData(chipres)$ind_covariate)
dat

## ----chip-new-benchdesign-----------------------------------------------------
bh_method <- BDMethod(x = p.adjust,
                      params = rlang::quos(p = pval,
                                           method = "BH"))
new_design <- BenchDesign(newBH = bh_method, data = dat)
new_design
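## Further methods could be added to the same design before running it, e.g.
## a Bonferroni correction via addMethod() (an illustrative, not-run sketch;
## the "newBonf" label is arbitrary):
#  new_design <- addMethod(new_design, label = "newBonf",
#                          func = p.adjust,
#                          params = rlang::quos(p = pval,
#                                               method = "bonferroni"))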

## ----chip-eval-buildBench-----------------------------------------------------
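## buildBench() runs every method in the design on the supplied data and
## returns a SummarizedBenchmark object; the results are stored in an assay
## named "default", which is the assay referenced in the next chunk.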
new_res <- buildBench(new_design)
new_res

## ----chip-new-metrics---------------------------------------------------------
new_res <- addPerformanceMetric(new_res,
                                evalMetric = "rejections",
                                assay = "default")
new_df <- estimatePerformanceMetrics(new_res,
                                     alpha = seq(0.01, 0.10, by = 0.01),
                                     tidy = TRUE)

## ----chip-new-result----------------------------------------------------------
new_df <- dplyr::select(new_df, label, value, performanceMetric, alpha)
new_df

## ----chip-verify-bh-----------------------------------------------------------
dplyr::filter(chipdf, label == "bh")
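## The rejection counts obtained for the freshly run "newBH" method should
## match the "bh" results recorded in the stored benchmark at each alpha.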

## -----------------------------------------------------------------------------
sessionInfo()