### Evaluate top-N list recommendations on a 0-1 (binary) data set
## Note: we sample only 100 users to make the example run faster
data("MSWeb")
MSWeb10 <- sample(MSWeb[rowCounts(MSWeb) > 10, ], 100)
## create an evaluation scheme (10-fold cross-validation, given-3 scheme)
es <- evaluationScheme(MSWeb10, method = "cross-validation",
                       k = 10, given = 3)
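## the scheme's splits can be inspected with getData(); for a
## cross-validation scheme this defaults to the first run
getData(es, "train")
getData(es, "known")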
## run evaluation
ev <- evaluate(es, "POPULAR", n=c(1,3,5,10))
ev
## look at the results (the length of the topNList is shown as column n)
getResults(ev)
## get the confusion matrix averaged over the 10 folds
avg(ev)
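## sanity check (a sketch, assuming avg() returns the usual confusion-matrix
## columns TP and FP): precision should equal TP / (TP + FP)
cm <- avg(ev)
cm[, "TP"] / (cm[, "TP"] + cm[, "FP"])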
plot(ev, annotate = TRUE)
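## plot() draws a ROC curve (TPR vs. FPR) by default; a
## precision-recall curve is available as a second plot type
plot(ev, "prec/rec", annotate = TRUE)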
## evaluate several algorithms (including a hybrid recommender) with a list
algorithms <- list(
  RANDOM  = list(name = "RANDOM", param = NULL),
  POPULAR = list(name = "POPULAR", param = NULL),
  HYBRID  = list(name = "HYBRID", param =
    list(recommenders = list(
      RANDOM  = list(name = "RANDOM", param = NULL),
      POPULAR = list(name = "POPULAR", param = NULL)
    ))
  )
)
evlist <- evaluate(es, algorithms, n = c(1, 3, 5, 10))
evlist
names(evlist)
## select the results of the first algorithm by index
evlist[[1]]
avg(evlist[[1]])
plot(evlist, legend = "topright")
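## results can also be selected by name (taken from the algorithms list);
## this assumes the usual list-style indexing of evaluationResultList
avg(evlist[["HYBRID"]])
plot(evlist, "prec/rec", legend = "bottomright")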
### Evaluate using a data set with real-valued ratings
## Note: we use only the first 100 users to make the example run faster
data("Jester5k")
es <- evaluationScheme(Jester5k[1:100], method = "split",
                       train = 0.9, given = 10, goodRating = 5)
## Note: goodRating sets the threshold at which a rating counts as
## a positive (relevant) item for top-N evaluation
## predict top-N recommendation lists
## (results in TPR/FPR and precision/recall)
ev <- evaluate(es, "RANDOM", type="topNList", n=10)
getResults(ev)
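## average the confusion matrix over the single split to see
## TPR/FPR and precision/recall directly
avg(ev)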
## predict missing ratings
## (results in RMSE, MSE and MAE)
ev <- evaluate(es, "RANDOM", type="ratings")
getResults(ev)
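## the same kind of numbers can be reproduced manually with the
## Recommender/predict/calcPredictionAccuracy workflow (a sketch; RANDOM
## predictions vary between runs, so values will not match exactly)
r <- Recommender(getData(es, "train"), method = "RANDOM")
p <- predict(r, getData(es, "known"), type = "ratings")
calcPredictionAccuracy(p, getData(es, "unknown"))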