# make some data: 20 well-behaved 2PL items plus one misfitting item whose
# trace line P() depends on Theta^2, giving a U-shaped (nonmonotonic) response curve
P <- function(Theta){exp(Theta^2 * 1.2 - 1) / (1 + exp(Theta^2 * 1.2 - 1))}
set.seed(1234)
a <- matrix(rlnorm(20, meanlog=0, sdlog = .1),ncol=1)
d <- matrix(rnorm(20),ncol=1)
Theta <- matrix(rnorm(2000))
items <- rep('2PL', 20)
ps <- P(Theta)
baditem <- numeric(2000)
for(i in 1:2000)
    baditem[i] <- sample(c(0,1), 1, prob = c(1-ps[i], ps[i]))
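# the loop above is equivalent to a single vectorized Bernoulli draw
# (left commented out so the seeded results above are not altered):
# baditem <- rbinom(2000, size = 1, prob = ps)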
data <- cbind(simdata(a,d, 2000, items, Theta=Theta), baditem=baditem)
x <- mirt(data, 1) # unidimensional model; 2PL is the default for dichotomous items
raschfit <- mirt(data, 1, itemtype='Rasch')
fit <- itemfit(x) # S_X2 is the default fit statistic
fit
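# items with small S_X2 p-values warrant a closer look; the p-value column name
# below is an assumption -- check names(fit) for the exact label in your version
# subset(fit, p.S_X2 < .05)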
# p-value adjustment
itemfit(x, p.adjust='fdr')
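# other adjustment methods from stats::p.adjust (e.g., 'bonferroni', 'holm')
# can be passed the same way
itemfit(x, p.adjust='bonferroni')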
# two different fit stats (with/without p-value adjustment)
itemfit(x, c('S_X2', 'X2'), p.adjust='fdr')
itemfit(x, c('S_X2', 'X2'))
# Conditional sum-score plot from S-X2 information
itemfit(x, S_X2.plot = 1) # good fit
itemfit(x, S_X2.plot = 2) # good fit
itemfit(x, S_X2.plot = 21) # bad fit
itemfit(x, 'X2') # just X2
itemfit(x, 'X2', method = 'ML') # X2 with maximum-likelihood estimates for traits
itemfit(x, group.bins=15, empirical.plot = 1, method = 'ML') # empirical item plot with 15 bins
itemfit(x, group.bins=15, empirical.plot = 21, method = 'ML') # same plot for the misfitting item
# PV and X2* statistics (parametric bootstrap stats not run to save time)
itemfit(x, 'PV_Q1')
# mirtCluster() # improve speed of bootstrap samples by running in parallel
# itemfit(x, 'PV_Q1*')
# itemfit(x, 'X2*') # Stone's 1993 statistic
# itemfit(x, 'X2*_df') # Stone's 2000 scaled statistic with df estimate
# empirical tables for the X2 statistic
itemfit(x, empirical.table=1)
itemfit(x, empirical.table=21)
# infit/outfit statistics; method='ML' agrees better with the eRm package
itemfit(raschfit, 'infit', method = 'ML') # infit and outfit stats
# same as above, but supplying ML estimates directly (saves time when re-using them)
Theta <- fscores(raschfit, method = 'ML')
itemfit(raschfit, 'infit', Theta=Theta)
itemfit(raschfit, empirical.plot=1, Theta=Theta)
itemfit(raschfit, empirical.table=1, Theta=Theta)
# fit a new, more flexible model for the misfitting item
itemtype <- c(rep('2PL', 20), 'spline')
x2 <- mirt(data, 1, itemtype=itemtype)
itemfit(x2)
itemplot(x2, 21)
anova(x2, x) # compare against the original all-2PL model
#------------------------------------------------------------
# similar example to Kang and Chen (2007)
a <- matrix(c(.8,.4,.7, .8, .4, .7, 1, 1, 1, 1))
d <- matrix(rep(c(2.0,0.0,-1,-1.5),10), ncol=4, byrow=TRUE)
dat <- simdata(a,d,2000, itemtype = rep('graded', 10))
head(dat)
mod <- mirt(dat, 1)
itemfit(mod)
itemfit(mod, 'X2') # pretty much useless given inflated Type I error rates
itemfit(mod, empirical.plot = 1)
itemfit(mod, empirical.plot = 1, empirical.poly.collapse=TRUE)
# collapsed tables (see mincell.X2) for X2 and G2
itemfit(mod, empirical.table = 1)
mod2 <- mirt(dat, 1, 'Rasch')
itemfit(mod2, 'infit', method = 'ML')
# full list of observed/expected tables used for S-X2
tables <- itemfit(mod, S_X2.tables = TRUE)
# observed and expected total-score tables for item 1 (after cell collapsing)
tables$O[[1]]
tables$E[[1]]
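# rough illustrative check (assuming O and E hold observed and expected counts,
# which is worth verifying against the printed tables): a Pearson-type value for
# item 1 could be recomputed as
# sum((tables$O[[1]] - tables$E[[1]])^2 / tables$E[[1]])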
# fit stats with missing data (optionally call mirtCluster() first to run in parallel)
dat[sample(1:prod(dim(dat)), 100)] <- NA
raschfit <- mirt(dat, 1, itemtype='Rasch')
# use only complete cases by removing rows with missing responses
itemfit(raschfit, c('S_X2', 'infit'), na.rm = TRUE)
# note that X2, G2, PV-Q1, and X2* do not require complete datasets
thetas <- fscores(raschfit, method = 'ML') # save scores for faster computations
itemfit(raschfit, c('X2', 'G2'), Theta=thetas)
itemfit(raschfit, empirical.plot=1, Theta=thetas)
itemfit(raschfit, empirical.table=1, Theta=thetas)