---
title: "Breast survival dataset using network from STRING DB"
author: "André Veríssimo"
date: "`r Sys.Date()`"
output: 
  BiocStyle::html_document:
    number_sections: yes
    toc: true
vignette: >
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteIndexEntry{Breast survival dataset using network from STRING DB}
  %\VignetteEncoding{UTF-8}
params:
  seed: !r 20188102
---

# Installation

```{r, eval=FALSE}
if (!require("BiocManager")) {
    install.packages("BiocManager")
}
BiocManager::install("glmSparseNet")
```

# Required Packages

```{r packages, message=FALSE, warning=FALSE, results='hide'}
library(dplyr)
library(Matrix)
library(ggplot2)
library(forcats)
library(parallel)
library(reshape2)
library(survival)
library(VennDiagram)
library(futile.logger)
library(curatedTCGAData)
library(MultiAssayExperiment)
library(TCGAutils)
#
library(glmSparseNet)
#
#
# Some general options for futile.logger, the logging package
flog.layout(layout.format("[~l] ~m"))
options(
    "glmSparseNet.show_message" = FALSE,
    "glmSparseNet.base_dir" = withr::local_tempdir()
)
# Setting ggplot2 default theme as minimal
theme_set(ggplot2::theme_minimal())
```

# Overview

This vignette uses the STRING database (https://string-db.org/) of 
protein-protein interactions as the network-based penalizer in generalized 
linear models, applied to the Breast invasive carcinoma (BRCA) sample dataset.

The degree vector is calculated manually to account for genes that are not 
present in the STRING database: these genes have no interactions (edges), so 
their degree is 0.
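
As a minimal sketch of that idea (not evaluated here), assuming a hypothetical 
character vector `xdataGenes` holding the gene identifiers of the expression 
data and the `degreeNetworkVector` computed below, genes missing from the 
network simply keep a degree of 0:

```{r, eval=FALSE}
# Sketch only: `xdataGenes` is a hypothetical vector of gene identifiers;
# genes absent from the STRING network keep the default degree of 0.
degreeAll <- setNames(rep(0, length(xdataGenes)), xdataGenes)
common <- intersect(xdataGenes, names(degreeNetworkVector))
degreeAll[common] <- degreeNetworkVector[common]
```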

## Download Data from STRING

Retrieve all interactions from the [STRING database](https://string-db.org/). 
We have included a helper function that retrieves the Homo sapiens known 
interactions.

For this vignette, we use a cached version of all interactions retrieved with 
`score_threshold = 700`.

*Note*: Text-based interactions are excluded from the network.

```{r, eval=FALSE}
# Not evaluated in vignette as it takes too long to download and process
allInteractions700 <- stringDBhomoSapiens(scoreThreshold = 700)
stringNetwork <- buildStringNetwork(allInteractions700, "external")
```

```{r, include=FALSE}
data("string.network.700.cache", package = "glmSparseNet")
stringNetwork <- string.network.700.cache
```

# Build network matrix

Build a sparse adjacency matrix for the network: the directed STRING matrix is 
symmetrized by adding its transpose and then binarized, so an interaction in 
either direction becomes an (unweighted) edge.

```{r}
# Symmetrize: keep an edge if it exists in either direction
stringNetworkUndirected <- stringNetwork + Matrix::t(stringNetwork)
# Binarize the adjacency matrix (1 = edge, 0 = no edge)
stringNetworkUndirected <- (stringNetworkUndirected != 0) * 1
```

# Network Statistics

## Graph information

```{r, echo=FALSE, collapse=TRUE}
flog.info("Directed graph (score_threshold = %d)", 700)
flog.info("  *       total edges: %d", sum(stringNetwork != 0))
flog.info("  *    unique protein: %d", nrow(stringNetwork))
flog.info(
    "  * edges per protein: %f",
    sum(stringNetwork != 0) / nrow(stringNetwork)
)
flog.info("")
flog.info("Undirected graph (score_threshold = %d)", 700)
flog.info("  *       total edges: %d", sum(stringNetworkUndirected != 0) / 2)
flog.info("  *    unique protein: %d", nrow(stringNetworkUndirected))
flog.info(
    "  * edges per protein: %f",
    sum(stringNetworkUndirected != 0) / 2 / nrow(stringNetworkUndirected)
)
```

## Summary of degree *(indegree + outdegree)*

```{r, echo=FALSE}
stringNetworkBinary <- (stringNetworkUndirected != 0) * 1
# In the symmetric adjacency matrix every edge is counted twice
#  (once in the row sums and once in the column sums), hence the division by 2
degreeNetworkVector <- (
    Matrix::rowSums(stringNetworkBinary) +
        Matrix::colSums(stringNetworkBinary)
) / 2

flog.info("Summary of degree:", summary(degreeNetworkVector), capture = TRUE)
```

## Histogram of degree *(up until 99.999% quantile)*

```{r, warning=FALSE}
qplot(
    degreeNetworkVector[
        degreeNetworkVector <= quantile(degreeNetworkVector, probs = .99999)
    ],
    geom = "histogram", fill = my.colors(2), bins = 100
) +
    theme(legend.position = "none") + xlab("Degree (up until 99.999% quantile)")
```

# `glmSparseNet`

* Dataset from `curatedTCGAData`

```{r, include=FALSE}
# chunk not included as it produces too many unnecessary messages
brca <- tryCatch(
    {
        curatedTCGAData(
            diseaseCode = "BRCA",
            assays = "RNASeq2GeneNorm",
            version = "1.1.38",
            dry.run = FALSE
        )
    },
    error = function(err) {
        NULL
    }
)
```

```{r, eval=FALSE}
brca <- curatedTCGAData(
    diseaseCode = "BRCA", assays = "RNASeq2GeneNorm",
    version = "1.1.38", dry.run = FALSE
)
```

Build the survival data from the clinical columns.

* Select only primary solid tumour samples.
* Merge the survival times for each patient, which are stored in different 
columns depending on whether the patient is alive or dead.
* Build the two matrix objects, `xdata` and `ydata`, that hold the data.

```{r data.show, warning=FALSE, error=FALSE, eval=!is.null(brca)}
# keep only solid tumour (code: 01)
brcaPrimarySolidTumor <- TCGAutils::TCGAsplitAssays(brca, "01")
xdataRaw <- t(assay(brcaPrimarySolidTumor[[1]]))

# Get survival information
ydataRaw <- colData(brcaPrimarySolidTumor) |>
    as.data.frame() |>
    # Convert days to integer
    dplyr::mutate(
        Days.to.date.of.Death = as.integer(Days.to.date.of.Death),
        Days.to.Last.Contact  = as.integer(Days.to.Date.of.Last.Contact)
    ) |>
    # Find max time between all days (ignoring missings)
    dplyr::rowwise() |>
    dplyr::mutate(
        time = max(days_to_last_followup, Days.to.date.of.Death,
            Days.to.Last.Contact, days_to_death,
            na.rm = TRUE
        )
    ) |>
    # Keep only survival variables and codes
    dplyr::select(patientID, status = vital_status, time) |>
    # Discard individuals with survival time less or equal to 0
    dplyr::filter(!is.na(time) & time > 0) |>
    as.data.frame()

# Set index as the patientID
rownames(ydataRaw) <- ydataRaw$patientID

# keep only features that are present in the network with degree > 0
validFeatures <- colnames(xdataRaw)[
    colnames(xdataRaw) %in% names(degreeNetworkVector[degreeNetworkVector > 0])
]
xdataRaw <- xdataRaw[
    TCGAbarcode(rownames(xdataRaw)) %in% rownames(ydataRaw), validFeatures
]
xdataRaw <- scale(xdataRaw)

# Order ydata the same as assay
ydataRaw <- ydataRaw[TCGAbarcode(rownames(xdataRaw)), ]

# Use only a subset of previously selected genes to keep this example short.
set.seed(params$seed)
smallSubset <- c(
    "AAK1", "ADRB1", "AK7", "ALK", "APOBEC3F", "ARID1B", "BAMBI",
    "BRAF", "BTG1", "CACNG8", "CASP12", "CD5", "CDA", "CEP72",
    "CPD", "CSF2RB", "CSN3", "DCT", "DLG3", "DLL3", "DPP4",
    "DSG1", "EDA2R", "ERP27", "EXD1", "GABBR2", "GADD45A",
    "GBP1", "HTR1F", "IFNK", "IRF2", "IYD", "KCNJ11", "KRTAP5-6",
    "MAFA", "MAGEB4", "MAP2K6", "MCTS1", "MMP15", "MMP9",
    "NFKBIA", "NLRC4", "NT5C1A", "OPN4", "OR13C5", "OR13C8",
    "OR2T6", "OR4K2", "OR52E6", "OR5D14", "OR5H1", "OR6C4",
    "OR7A17", "OR8J3", "OSBPL1A", "PAK6", "PDE11A", "PELO",
    "PGK1", "PIK3CB", "PMAIP1", "POLR2B", "POP1", "PPFIA3",
    "PSME1", "PSME2", "PTEN", "PTGES3", "QARS", "RABGAP1",
    "RBM3", "RFC3", "RGPD8", "RPGRIP1L", "SAV1", "SDC1", "SDC3",
    "SEC16B", "SFPQ", "SFRP5", "SIPA1L1", "SLC2A14", "SLC6A9",
    "SPATA5L1", "SPINT1", "STAR", "STXBP5", "SUN3", "TACC2",
    "TACR1", "TAGLN2", "THPO", "TNIP1", "TP53", "TRMT2B", "TUBB1",
    "VDAC1", "VSIG8", "WNT3A", "WWOX", "XRCC4", "YME1L1",
    "ZBTB11", "ZSCAN21"
) |>
    sample(size = 50) |>
    sort()

# top up with random genes so that we end up with roughly 100 unique genes
smallSubset <- c(smallSubset, sample(colnames(xdataRaw), 51)) |>
    unique() |>
    sort()

xdata <- xdataRaw[, smallSubset[smallSubset %in% colnames(xdataRaw)]]
ydata <- ydataRaw |>
    dplyr::select(time, status) |>
    dplyr::filter(!is.na(time) & time > 0)
```

* Build the dataset that overlaps with the STRING data

```{r, eval=!is.null(brca)}
# Subset the degree vector and the STRING network to the selected genes

myDegree <- degreeNetworkVector[smallSubset]
myString <- stringNetworkBinary[smallSubset, smallSubset]
```

Degree distribution of the sampled gene features *(in `xdata`)*.

```{r, echo=FALSE, warning=FALSE, eval=!is.null(brca)}
qplot(myDegree, bins = 100, fill = myColors(3)) +
    theme(legend.position = "none")
```

## Select balanced folds for cross-validation

```{r, eval=!is.null(brca)}
set.seed(params$seed)
foldid <- glmSparseNet:::balancedCvFolds(ydata$status)$output
```


```{r, include=FALSE, eval=!is.null(brca)}
# List that will store all selected genes
selectedGenes <- list()
```

## glmHub model

Penalizes using the Hub heuristic; see the `hubHeuristic` function definition 
for more details.
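
As a rough intuition only, and not the exact transformation implemented by 
`hubHeuristic`, the idea is that highly connected genes (hubs) receive smaller 
penalty factors, so they are more likely to stay in the model. A hypothetical 
weighting in that spirit (not evaluated here) could look like:

```{r, eval=FALSE}
# Illustration only (not the hubHeuristic formula used by glmSparseNet).
# Genes with higher degree (hubs) get a smaller penalty factor.
illustrativeHubWeights <- 1 / (1 + myDegree / max(myDegree, na.rm = TRUE))
head(sort(illustrativeHubWeights))
```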

```{r, warning=FALSE, error=FALSE, eval=!is.null(brca)}
resultCVHub <- cv.glmHub(xdata,
    Surv(ydata$time, ydata$status),
    family = "cox",
    foldid = foldid,
    network = myString,
    network.options = networkOptions(minDegree = 0.2)
)
```

Kaplan-Meier estimator separating individuals into low- and high-risk groups 
*(based on the model's coefficients)*.

```{r, echo=FALSE, eval=!is.null(brca)}
separate2GroupsCox(
    as.vector(coef(resultCVHub, s = "lambda.min")[, 1]),
    xdata, ydata,
    plot.title = "Full dataset",
    legend.outside = FALSE
)

selectedGenes[["Hub"]] <- Filter(
    function(.x) .x != 0,
    coef(resultCVHub, s = "lambda.min")[, 1]
) |>
    names() |>
    geneNames() |>
    magrittr::extract2("external_gene_name")
```

## glmOrphan model

Penalizes using the Orphan heuristic; see the `orphanHeuristic` function 
definition for more details.

```{r, warning=FALSE, error=FALSE, eval=!is.null(brca)}
resultCVOrphan <- cv.glmOrphan(xdata,
    Surv(ydata$time, ydata$status),
    family = "cox",
    foldid = foldid,
    network = myString,
    network.options = networkOptions(minDegree = 0.2)
)
```

Kaplan-Meier estimator separating individuals into low- and high-risk groups 
*(based on the model's coefficients)*.

```{r, echo=FALSE, eval=!is.null(brca)}
separate2GroupsCox(
    as.vector(coef(resultCVOrphan, s = "lambda.min")[, 1]),
    xdata, ydata,
    plot.title = "Full dataset",
    legend.outside = FALSE
)

selectedGenes[["Orphan"]] <- Filter(
    function(.x) .x != 0,
    coef(resultCVOrphan, s = "lambda.min")[, 1]
) |>
    names() |>
    geneNames() |>
    magrittr::extract2("external_gene_name")
```

## Elastic Net model *(without network-penalization)*

Uses a regular glmnet model as a simple baseline.

```{r, warning=FALSE, error=FALSE, eval=!is.null(brca)}
library(glmnet)
resultCVGlmnet <- cv.glmnet(xdata,
    Surv(ydata$time, ydata$status),
    family = "cox",
    foldid = foldid
)
```

Kaplan-Meier estimator separating individuals into low- and high-risk groups 
*(based on the model's coefficients)*.

```{r, echo=FALSE, eval=!is.null(brca)}
separate2GroupsCox(
    as.vector(coef(resultCVGlmnet, s = "lambda.min")[, 1]),
    xdata, ydata,
    plotTitle = "Full dataset",
    legendOutside = FALSE
)

selectedGenes[["GLMnet"]] <- Filter(
    function(.x) .x != 0,
    coef(resultCVGlmnet, s = "lambda.min")[, 1]
) |>
    names() |>
    geneNames() |>
    magrittr::extract2("external_gene_name")
```

## Selected genes

Venn diagram of overlapping genes.

```{r, echo=FALSE, warning=FALSE, eval=!is.null(brca)}
vennPlot <- venn.diagram(
    selectedGenes,
    NULL,
    fill = c(
        myColors(5), myColors(3),
        myColors(4)
    ),
    alpha = c(0.3, 0.3, .3),
    cex = 2,
    cat.fontface = 4,
    category.names = names(selectedGenes)
)
grid.draw(vennPlot)
```

Descriptive plot showing which genes were selected by each model.

We can observe that elastic net, without network-based penalization, selects a 
best model with roughly 40% more genes than glmOrphan and glmHub, which lose 
no accuracy despite using fewer genes.
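
The gene counts behind this observation can be inspected directly from the 
`selectedGenes` list:

```{r, eval=!is.null(brca)}
# Number of genes selected by each model
vapply(selectedGenes, length, integer(1L))
```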

*Note*: the size of each circle represents the degree of that gene in the 
network.

```{r, echo=FALSE, warning=FALSE, eval=!is.null(brca)}
melt(selectedGenes) |>
    mutate(
        Degree = myDegree[value],
        value = factor(value),
        L1 = factor(L1)
    ) |>
    mutate(value = fct_reorder(value, Degree)) |>
    as.data.frame() |>
    ggplot() +
    geom_point(
        aes(value, L1, size = Degree),
        shape = mySymbols(3), color = myColors(3)
    ) +
    theme(
        axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0),
        legend.position = "top"
    ) +
    ylab("Model") +
    xlab("Gene") +
    scale_size_continuous(trans = "log10")
```


# Session Info

```{r sessionInfo}
sessionInfo()
```