## Not run:
# EXAMPLE 1 (INTERFACE=FORMULA): For this example, we simulate an
# example for a partially linear model and compare the coefficient
# estimates from the partially linear model with those from a correctly
# specified parametric model...
# Simulate data for a partially linear model: y depends linearly on
# x1 and x2, and nonparametrically on z1 and sin(z2).
# NOTE: the order of the random draws below must not change, so that
# set.seed(42) reproduces the same data set.
set.seed(42)
n <- 250
x1 <- rnorm(n)
x2 <- rbinom(n, 1, 0.5)
z1 <- rbinom(n, 1, 0.5)
z2 <- rnorm(n)
y <- 1 + x1 + x2 + z1 + sin(z2) + rnorm(n)

# Compute data-driven bandwidths via the formula interface: the linear
# component sits left of `|`, the nonparametric component to the right.
# This may take a few minutes depending on the speed of your computer...
bw <- npplregbw(formula = y ~ x1 + factor(x2) | factor(z1) + z2)

# Fit the partially linear model with the selected bandwidths
pl <- npplreg(bws = bw)

# Summarize the model, pausing so the output can be examined...
summary(pl)
Sys.sleep(5)

# Coefficient estimates for the linear component, then with standard errors
coef(pl)
coef(pl, errors = TRUE)

# Benchmark: a correctly specified parametric model. The intercept is
# coef()[1]; the x1 and x2 coefficients are elements 2 and 3, and their
# standard errors are the square roots of the matching diagonal entries
# of the variance-covariance matrix.
ols <- lm(y ~ x1 + factor(x2) + factor(z1) + I(sin(z2)))
coef(ols)[2:3]
sqrt(diag(vcov(ols)))[2:3]
Sys.sleep(5)

# Plot the `partial regression surface plots' via plot()
plot(bw)

# The same plot with variability bounds built from bootstrapped standard
# errors. The bootstrapping runs in real time and may take a minute or
# two; we override the default number of bootstrap replications (399),
# reducing it to 25 so the standard errors compute quickly for this
# illustration - don't of course do this in general.
plot(bw,
     plot.errors.boot.num = 25,
     plot.errors.method = "bootstrap")
# EXAMPLE 1 (INTERFACE=DATA FRAME): the same partially linear model as
# above, fitted via the data frame interface, again comparing the
# partially linear coefficient estimates with those from a correctly
# specified parametric model...
set.seed(42)
n <- 250
x1 <- rnorm(n)
x2 <- rbinom(n, 1, 0.5)
z1 <- rbinom(n, 1, 0.5)
z2 <- rnorm(n)
y <- 1 + x1 + x2 + z1 + sin(z2) + rnorm(n)

# X holds the linear-component regressors, Z the nonparametric ones.
# Name the columns explicitly so summary()/plot() report readable
# variable names; the unnamed form data.frame(x1, factor(x2)) would
# auto-generate names such as `factor.x2.`.
X <- data.frame(x1 = x1, x2 = factor(x2))
Z <- data.frame(z1 = factor(z1), z2 = z2)

# Compute data-driven bandwidths. This may take a few minutes depending
# on the speed of your computer...
bw <- npplregbw(xdat = X, zdat = Z, ydat = y)

# Fit the partially linear model with the selected bandwidths
pl <- npplreg(bws = bw)

# Summarize the model, pausing so the output can be examined...
summary(pl)
Sys.sleep(5)

# Coefficient estimates for the linear component, then with standard errors
coef(pl)
coef(pl, errors = TRUE)

# Benchmark: a correctly specified parametric model. The intercept is
# coef()[1]; the x1 and x2 coefficients are elements 2 and 3, and their
# standard errors are the square roots of the matching diagonal entries
# of the variance-covariance matrix.
ols <- lm(y ~ x1 + factor(x2) + factor(z1) + I(sin(z2)))
coef(ols)[2:3]
sqrt(diag(vcov(ols)))[2:3]
Sys.sleep(5)

# Plot the `partial regression surface plots' via plot()
plot(bw)

# The same plot with variability bounds built from bootstrapped standard
# errors. The bootstrapping runs in real time and may take a minute or
# two; we override the default number of bootstrap replications (399),
# reducing it to 25 so the standard errors compute quickly for this
# illustration - don't of course do this in general.
plot(bw,
     plot.errors.boot.num = 25,
     plot.errors.method = "bootstrap")
## End(Not run)
# Run the code above in your browser using DataLab.