# \donttest{
# Attach packages
library(cvms)
library(dplyr)
# Load data
data <- participant.scores
# Fit models
gaussian_model <- lm(age ~ diagnosis, data = data)
binomial_model <- glm(diagnosis ~ score, data = data, family = "binomial")
# Add predictions
data[["gaussian_predictions"]] <- predict(gaussian_model, data,
type = "response",
allow.new.levels = TRUE
)
data[["binomial_predictions"]] <- predict(binomial_model, data,
allow.new.levels = TRUE
)
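# Optionally inspect the targets alongside the added predictions
head(data[, c("age", "gaussian_predictions", "diagnosis", "binomial_predictions")])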
# Gaussian evaluation
evaluate(
  data = data, target_col = "age",
  prediction_cols = "gaussian_predictions",
  type = "gaussian"
)
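# The evaluation can also be assigned and single metrics extracted;
# a minimal sketch, assuming the Gaussian output contains an `RMSE` column:
gaussian_eval <- evaluate(
  data = data, target_col = "age",
  prediction_cols = "gaussian_predictions",
  type = "gaussian"
)
gaussian_eval[["RMSE"]]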
# Binomial evaluation
evaluate(
  data = data, target_col = "diagnosis",
  prediction_cols = "binomial_predictions",
  type = "binomial"
)
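# Binomial evaluations also nest the confusion matrix as a list column;
# a minimal sketch, assuming the output has a `Confusion Matrix` column:
binomial_eval <- evaluate(
  data = data, target_col = "diagnosis",
  prediction_cols = "binomial_predictions",
  type = "binomial"
)
binomial_eval[["Confusion Matrix"]][[1]]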
#
# Multinomial
#
# Create a tibble with predicted probabilities and targets
data_mc <- multiclass_probability_tibble(
  num_classes = 3, num_observations = 45,
  apply_softmax = TRUE, FUN = runif,
  class_name = "class_",
  add_targets = TRUE
)
class_names <- paste0("class_", 1:3)
# Multinomial evaluation
evaluate(
  data = data_mc, target_col = "Target",
  prediction_cols = class_names,
  type = "multinomial"
)
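# The multinomial output also nests one-vs-all results per class;
# a minimal sketch, assuming the output has a `Class Level Results` column:
multinomial_eval <- evaluate(
  data = data_mc, target_col = "Target",
  prediction_cols = class_names,
  type = "multinomial"
)
multinomial_eval[["Class Level Results"]][[1]]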
#
# ID evaluation
#
# Gaussian ID evaluation
# Note that 'age' is the same for all observations
# of a participant
evaluate(
  data = data, target_col = "age",
  prediction_cols = "gaussian_predictions",
  id_col = "participant",
  type = "gaussian"
)
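# The predictions are aggregated per ID before evaluation:
# `id_method = "mean"` averages the predictions (e.g. probabilities)
# per ID, while `id_method = "majority"` uses the most frequently
# predicted class per ID.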
# Binomial ID evaluation
evaluate(
  data = data, target_col = "diagnosis",
  prediction_cols = "binomial_predictions",
  id_col = "participant",
  id_method = "mean", # alternatively: "majority"
  type = "binomial"
)
# Multinomial ID evaluation
# Add IDs and new targets (must be constant within IDs)
data_mc[["Target"]] <- NULL
data_mc[["ID"]] <- rep(1:9, each = 5)
id_classes <- tibble::tibble(
  "ID" = 1:9,
  "Target" = sample(x = class_names, size = 9, replace = TRUE)
)
data_mc <- data_mc %>%
  dplyr::left_join(id_classes, by = "ID")
# Perform ID evaluation
evaluate(
  data = data_mc, target_col = "Target",
  prediction_cols = class_names,
  id_col = "ID",
  id_method = "mean", # alternatively: "majority"
  type = "multinomial"
)
#
# Training and evaluating a multinomial model with nnet
#
# Only run if `nnet` is installed
if (requireNamespace("nnet", quietly = TRUE)){
# Create a data frame with some predictors and a target column
class_names <- paste0("class_", 1:4)
data_for_nnet <- multiclass_probability_tibble(
num_classes = 3, # Here, number of predictors
num_observations = 30,
apply_softmax = FALSE,
FUN = rnorm,
class_name = "predictor_"
) %>%
dplyr::mutate(Target = sample(
class_names,
size = 30,
replace = TRUE
))
# Train multinomial model using the nnet package
mn_model <- nnet::multinom(
"Target ~ predictor_1 + predictor_2 + predictor_3",
data = data_for_nnet
)
# Predict the targets in the dataset
# (we would usually use a test set instead)
predictions <- predict(
mn_model,
data_for_nnet,
type = "probs"
) %>%
dplyr::as_tibble()
# Add the targets
predictions[["Target"]] <- data_for_nnet[["Target"]]
# Evaluate predictions
evaluate(
data = predictions,
target_col = "Target",
prediction_cols = class_names,
type = "multinomial"
)
}
# }