library(lavaan)
model <- '
  # latent variable definitions
  ind60 =~ x1 + x2 + x3
  dem60 =~ y1 + y2 + y3 + y4
  dem65 =~ y5 + a*y6 + y7 + y8
  # regressions
  dem60 ~ ind60
  dem65 ~ ind60 + dem60
'
fit <- sem(model,
           data = PoliticalDemocracy,
           do.fit = FALSE)
est <- lav_export_estimation(lavaan_model = fit)
# The starting values are:
est$starting_values
# Note that these do not have labels (and may also differ from coef(fit)
# in case of equality constraints):
coef(fit)
# To get the same labelled parameters as coef(fit), use:
est$get_coef(parameter_values = est$starting_values,
             lavaan_model = fit)
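# A quick sketch of that correspondence: this model has no equality
# constraints, so the labelled vector above should coincide with coef(fit);
# the maximum absolute difference should be (essentially) zero:
max(abs(est$get_coef(parameter_values = est$starting_values,
                     lavaan_model = fit) - coef(fit)))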
# The objective function can be used to compute the fit at the current estimates:
est$objective_function(parameter_values = est$starting_values,
                       lavaan_model = fit)
# The gradient function can be used to compute the gradients at the current estimates:
est$gradient_function(parameter_values = est$starting_values,
                      lavaan_model = fit)
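# A minimal sketch (assuming the numDeriv package is installed): the analytic
# gradient can be cross-checked against a numerical approximation of the
# objective function's gradient; the maximum discrepancy should be small.
if (requireNamespace("numDeriv", quietly = TRUE)) {
  num_grad <- numDeriv::grad(func = est$objective_function,
                             x = est$starting_values,
                             lavaan_model = fit)
  max(abs(num_grad -
            est$gradient_function(parameter_values = est$starting_values,
                                  lavaan_model = fit)))
}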
# Together, these elements make it possible to estimate the parameters with a
# wide range of general-purpose optimizers. For simplicity, here is an example
# using optim:
est_fit <- optim(par = est$starting_values,
                 fn = est$objective_function,
                 gr = est$gradient_function,
                 lavaan_model = fit,
                 method = "BFGS")
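# optim reports its own diagnostics: a convergence code of 0 indicates
# success, and value is the objective function at the solution found.
est_fit$convergence
est_fit$value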
est$get_coef(parameter_values = est_fit$par,
             lavaan_model = fit)
# Up to numerical tolerance, this matches the estimates lavaan itself produces:
coef(sem(model,
         data = PoliticalDemocracy))
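# A quick, hedged check of that claim (the two optimizers differ, so expect
# agreement up to numerical tolerance rather than exact equality):
max(abs(est$get_coef(parameter_values = est_fit$par,
                     lavaan_model = fit) -
          coef(sem(model, data = PoliticalDemocracy))))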
# Example using ridge regularization for parameter a (the loading of y6 on dem65)
fn_ridge <- function(parameter_values, lavaan_model, est, lambda){
  return(est$objective_function(parameter_values = parameter_values,
                                lavaan_model = lavaan_model) +
           lambda * parameter_values[6]^2)
}
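# A minimal sketch of a matching analytic gradient: the ridge penalty only
# affects the 6th free parameter (the loading labelled a, as used in fn_ridge
# above), adding 2 * lambda * a to its partial derivative. It is not used
# below, but could be supplied to optim via its gr argument:
gr_ridge <- function(parameter_values, lavaan_model, est, lambda){
  grad <- est$gradient_function(parameter_values = parameter_values,
                                lavaan_model = lavaan_model)
  grad[6] <- grad[6] + 2 * lambda * parameter_values[6]
  return(grad)
}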
ridge_fit <- optim(par = est$get_coef(est$starting_values,
                                      lavaan_model = fit),
                   fn = fn_ridge,
                   lavaan_model = fit,
                   est = est,
                   lambda = 10)
est$get_coef(parameter_values = ridge_fit$par,
             lavaan_model = fit)
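# Compared with the unpenalized solution, the ridge penalty should shrink the
# 6th parameter (the loading labelled a) towards zero; a quick side-by-side:
cbind(unpenalized = est$get_coef(parameter_values = est_fit$par,
                                 lavaan_model = fit),
      ridge = est$get_coef(parameter_values = ridge_fit$par,
                           lavaan_model = fit))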