## Default method:
h2o.randomForest(x, y, data, classification = TRUE, ntree = 50, depth = 20,
sample.rate = 2/3, classwt = NULL, nbins = 100, seed = -1, importance = FALSE,
validation, nodesize = 1, balance.classes = FALSE, max.after.balance.size = 5,
use_non_local = TRUE, version = 2)
## Import to a ValueArray object:
h2o.randomForest.VA(x, y, data, ntree = 50, depth = 20, sample.rate = 2/3,
classwt = NULL, nbins = 100, seed = -1, use_non_local = TRUE)
## Import to a FluidVecs object:
h2o.randomForest.FV(x, y, data, classification = TRUE, ntree = 50, depth = 20,
sample.rate = 2/3, nbins = 100, seed = -1, importance = FALSE, validation,
nodesize = 1, balance.classes = FALSE, max.after.balance.size = 5)
## Arguments

data: An H2OParsedDataVA (version = 1) or H2OParsedData (version = 2) object containing the variables in the model.

seed: Seed for building the random forest. If seed = -1, one will automatically be generated by H2O.

importance: A logical value indicating whether variable importance should be calculated. Set to FALSE to speed up computations.

validation: An H2OParsedDataVA (version = 1) or H2OParsedData (version = 2) object indicating the validation dataset used to construct the confusion matrix.

version: If version = 1, this will run the single-node ValueArray implementation, while version = 2 selects the distributed, but still beta-stage, FluidVecs implementation.
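As an illustrative sketch of the optional arguments above, the call below requests variable importance and supplies an explicit validation frame; train.hex and test.hex are hypothetical parsed datasets standing in for any H2OParsedData objects.

# Hypothetical parsed frames; any H2OParsedData objects would work here
rf.fit = h2o.randomForest(x = 1:4, y = 5, data = train.hex, ntree = 50,
importance = TRUE, validation = test.hex)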
## Value

An object of class H2ORFModelVA (version = 1) or H2ODRFModel (version = 2) with slots key, data, and model, where the last is a list of the model's components.
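A quick look at the returned object, assuming standard S4 slot access (rf.fit is the hypothetical model from the sketch above):

rf.fit@key    # key slot: identifier of the model in H2O
rf.fit@data   # data slot: the dataset used to build the forest
rf.fit@model  # model slot: list of model components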
## Details

To run with version = 1, you must import data to a ValueArray object using h2o.importFile.VA, h2o.importFolder.VA or one of its variants. To run with version = 2, you must import data to a FluidVecs object using h2o.importFile.FV, h2o.importFolder.FV or one of its variants.

## Examples

# Run an RF model on iris data
library(h2o)
localH2O = h2o.init(ip = "localhost", port = 54321, startH2O = TRUE)
irisPath = system.file("extdata", "iris.csv", package = "h2o")
iris.hex = h2o.importFile(localH2O, path = irisPath, key = "iris.hex")
h2o.randomForest(y = 5, x = c(2,3,4), data = iris.hex, ntree = 50, depth = 100)
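The example above runs the default FluidVecs (version = 2) path. A hypothetical equivalent for version = 1 would import the data as a ValueArray object first, using the import functions named in Details:

iris.va = h2o.importFile.VA(localH2O, path = irisPath, key = "iris.va")
h2o.randomForest(y = 5, x = c(2,3,4), data = iris.va, ntree = 50, version = 1)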