library(torch)

if (torch_is_installed()) {
  # In this example we create a custom optimizer that is just a
  # simplified version of the built-in `optim_sgd` function.
  optim_sgd2 <- optimizer(
    initialize = function(params, learning_rate) {
      # Values in `defaults` are attached to every parameter group,
      # so `step()` can read them back as `group$learning_rate`.
      defaults <- list(
        learning_rate = learning_rate
      )
      super$initialize(params, defaults)
    },
    step = function() {
      with_no_grad({
        for (g in seq_along(self$param_groups)) {
          group <- self$param_groups[[g]]
          for (p in seq_along(group$params)) {
            param <- group$params[[p]]
            # Skip parameters that don't have a gradient yet.
            if (is.null(param$grad) || is_undefined_tensor(param$grad)) {
              next
            }
            # In-place SGD update: param <- param - learning_rate * grad.
            param$add_(param$grad, alpha = -group$learning_rate)
          }
        }
      })
    }
  )
  # Minimize f(x) = x^2 from a random starting point.
  x <- torch_randn(1, requires_grad = TRUE)
  opt <- optim_sgd2(x, learning_rate = 0.1)
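  # The defaults supplied to `initialize()` are merged into each parameter
  # group, so the rate can be read back per group (here a single group):
  opt$param_groups[[1]]$learning_rate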
  for (i in 1:100) {
    opt$zero_grad()  # reset accumulated gradients
    y <- x^2         # forward pass
    y$backward()     # compute dy/dx
    opt$step()       # apply the update
  }

  # After 100 steps x should be very close to the minimum at 0.
  all.equal(x$item(), 0, tolerance = 1e-9)
}
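As a sanity check, the same training loop run with torch's built-in optim_sgd() (which names its learning-rate argument `lr` rather than `learning_rate`) should converge the same way; a minimal sketch:

if (torch_is_installed()) {
  x <- torch_randn(1, requires_grad = TRUE)
  opt <- optim_sgd(x, lr = 0.1)
  for (i in 1:100) {
    opt$zero_grad()
    y <- x^2
    y$backward()
    opt$step()
  }
  all.equal(x$item(), 0, tolerance = 1e-9)
}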