# \donttest{
library(grf)
# Train a causal forest to estimate a CATE-based priority ranking.
n <- 1500
p <- 5
X <- matrix(rnorm(n * p), n, p)
W <- rbinom(n, 1, 0.5)
event.prob <- 1 / (1 + exp(2*(pmax(2*X[, 1], 0) * W - X[, 2])))
Y <- rbinom(n, 1, event.prob)
train <- sample(1:n, n / 2)
cf.priority <- causal_forest(X[train, ], Y[train], W[train])
# Compute a prioritization based on estimated treatment effects.
# -1: in this example the treatment should reduce the risk of an event occurring.
priority.cate <- -1 * predict(cf.priority, X[-train, ])$predictions
# Estimate AUTOC on held out data.
cf.eval <- causal_forest(X[-train, ], Y[-train], W[-train])
rate <- rank_average_treatment_effect(cf.eval, priority.cate)
rate
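# The same ranking can also be summarized with the Qini weighting instead of AUTOC
# (a sketch, not part of the original example; assumes the `target` argument of
# rank_average_treatment_effect available in recent grf versions).
rate.qini <- rank_average_treatment_effect(cf.eval, priority.cate, target = "QINI")
rate.qini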
# Plot the Targeting Operator Characteristic curve.
plot(rate)
# Compute a prioritization based on baseline risk,
# estimated on the control units in the training sample.
rf.risk <- regression_forest(X[train[W[train] == 0], ], Y[train[W[train] == 0]])
priority.risk <- predict(rf.risk, X[-train, ])$predictions
# Test if two RATEs are equal.
rate.diff <- rank_average_treatment_effect(cf.eval, cbind(priority.cate, priority.risk))
rate.diff
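# A rough two-sided p-value for each estimate and their difference, assuming the
# estimates are approximately normal (a sketch, not part of the original example).
2 * pnorm(-abs(rate.diff$estimate / rate.diff$std.err))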
# Construct a 95% confidence interval.
# (A significant result suggests that there are HTEs and that the prioritization rule is effective
# at stratifying the sample based on them. Conversely, a non-significant result suggests that either
# there are no HTEs or the treatment prioritization rule does not predict them effectively.)
rate.diff$estimate + data.frame(lower = -1.96 * rate.diff$std.err,
                                upper = 1.96 * rate.diff$std.err,
                                row.names = rate.diff$target)
# }