if (FALSE) {
  library(sparklyr)

  # Connect to a YARN-managed cluster using a local Spark installation.
  sc <- spark_connect(
    master = "yarn",
    spark_home = "~/spark/spark-2.4.5-bin-hadoop2.7"
  )

  # Contrived example: each reader task runs `hostname` on whichever worker
  # executes it, demonstrating that reader tasks are distributed across all
  # Spark worker nodes. The ten dummy paths exist only to create ten tasks.
  input_paths <- rep("/dev/null", 10)
  worker_hostnames <- spark_read(
    sc,
    input_paths,
    reader = function(path) system("hostname", intern = TRUE),
    columns = c(hostname = "string")
  )
  sdf_collect(worker_hostnames)
}
Run the code above in your browser using DataLab