library(yardstick)
library(dplyr)
# Multiple regression metrics
multi_metric <- metric_set(rmse, rsq, ccc)
# The returned function has arguments:
# fn(data, truth, estimate, na_rm = TRUE, ...)
multi_metric(solubility_test, truth = solubility, estimate = prediction)
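# Not part of the original example: the returned function also exposes the
# `na_rm` argument shown above, so it can be toggled like any other metric
# argument (with `na_rm = FALSE`, missing values propagate to an NA estimate).
multi_metric(solubility_test, truth = solubility, estimate = prediction, na_rm = FALSE)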
# Groups are respected on the new metric function
class_metrics <- metric_set(accuracy, kap)
hpc_cv %>%
  group_by(Resample) %>%
  class_metrics(obs, estimate = pred)
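# A follow-on sketch (not from the original example): the grouped result is an
# ordinary tibble with `.metric`, `.estimator`, and `.estimate` columns, so it
# can be summarized further with dplyr, e.g. averaging across resamples.
hpc_cv %>%
  group_by(Resample) %>%
  class_metrics(obs, estimate = pred) %>%
  group_by(.metric) %>%
  summarise(mean_estimate = mean(.estimate))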
# ---------------------------------------------------------------------------
# If you need to set options for certain metrics,
# do so by wrapping the metric and setting the options inside the wrapper,
# passing along truth and estimate as quoted arguments.
# Then attach the function class of the underlying wrapped metric,
# along with its direction of optimization.
ccc_with_bias <- function(data, truth, estimate, na_rm = TRUE, ...) {
  ccc(
    data = data,
    truth = !!rlang::enquo(truth),
    estimate = !!rlang::enquo(estimate),
    # set bias = TRUE
    bias = TRUE,
    na_rm = na_rm,
    ...
  )
}
# Use `new_numeric_metric()` to formalize this new metric function
ccc_with_bias <- new_numeric_metric(ccc_with_bias, "maximize")
multi_metric2 <- metric_set(rmse, rsq, ccc_with_bias)
multi_metric2(solubility_test, truth = solubility, estimate = prediction)
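# The same wrapping pattern works for other metric options. As an additional
# sketch (not in the original example), huber_loss() has a `delta` argument
# that can be fixed the same way; note the direction here is "minimize".
huber_loss_delta2 <- function(data, truth, estimate, na_rm = TRUE, ...) {
  huber_loss(
    data = data,
    truth = !!rlang::enquo(truth),
    estimate = !!rlang::enquo(estimate),
    # set delta = 2
    delta = 2,
    na_rm = na_rm,
    ...
  )
}
huber_loss_delta2 <- new_numeric_metric(huber_loss_delta2, "minimize")
multi_metric3 <- metric_set(rmse, huber_loss_delta2)
multi_metric3(solubility_test, truth = solubility, estimate = prediction)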
# ---------------------------------------------------------------------------
# A class probability example:
# Note that, when given class or class prob functions,
# metric_set() returns a function with signature:
# fn(data, truth, ..., estimate)
# to be able to mix class and class prob metrics.
# You must provide the `estimate` column by explicitly naming
# the argument
class_and_probs_metrics <- metric_set(roc_auc, pr_auc, accuracy)
hpc_cv %>%
  group_by(Resample) %>%
  class_and_probs_metrics(obs, VF:L, estimate = pred)
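# A further sketch (not in the original example): the same mixed signature
# applies to binary problems, e.g. yardstick's `two_class_example` data,
# where `Class1` is the probability column for the first factor level.
binary_metrics <- metric_set(roc_auc, accuracy)
binary_metrics(two_class_example, truth = truth, Class1, estimate = predicted)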