data("Tiger")
sol <- solve_POMDP(model = Tiger)
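# optional sketch (assuming the pomdp package's policy() accessor): inspect
# the solution object and the solved policy, which pairs value-function
# segments with optimal actions.
sol
policy(sol)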
# if no belief is specified, the start belief of the model is used
# (uniform for the Tiger problem).
reward(sol)
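# optional sketch: the expected reward can also be estimated empirically by
# simulating trajectories from the start belief (assuming simulate_POMDP()
# from the pomdp package; see ?simulate_POMDP for the returned components).
simulate_POMDP(sol, n = 100)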
# we have additional information that makes us believe that the tiger
# is more likely to the left.
reward(sol, belief = c(0.85, 0.15))
# we start with certainty that the tiger is to the left.
reward(sol, belief = "tiger-left")
# Note that in this case the total discounted expected reward is greater
# than 10 because the tiger problem resets and another game, starting with
# a uniform belief, is played, which produces additional reward.
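# by symmetry, starting with certainty that the tiger is to the right
# should yield the same reward.
reward(sol, belief = "tiger-right")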
# return the reward, the initial node in the policy graph, and the optimal
# action for two beliefs.
reward_node_action(sol, belief = rbind(c(.5, .5), c(.9, .1)))
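# a quick sketch of the returned structure: the components belief, reward,
# and action are used below (exact components may vary by package version).
str(reward_node_action(sol, belief = c(.5, .5)))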
# manually combine reward_node_action() with belief space sampling to show
# the value function (color indicates the optimal action)
samp <- sample_belief_space(sol, n = 200)
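# samp should contain one sampled belief state per row, with the state
# probabilities in each row summing to 1; look at the first few samples.
head(samp)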
rew <- reward_node_action(sol, belief = samp)
plot(rew$belief[,"tiger-right"], rew$reward, col = rew$action, ylim = c(0, 15))
legend(x = "top", legend = levels(rew$action), title = "action", col = 1:3, pch = 1)
# this is the piecewise linear value function from the solution
plot_value_function(sol, ylim = c(0, 10))
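# related sketch (assuming plot_policy_graph() from the pomdp package):
# visualize the policy graph whose initial nodes reward_node_action() reports.
plot_policy_graph(sol)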