#not run in the interest of time, but worth doing
d9 <- sim.irt(9,1000,-2.5,2.5,mod="normal") #dichotomous items
test <- irt.fa(d9$items)
scores <- scoreIrt(test,d9$items)
scores.df <- data.frame(scores,true=d9$theta) #combine the estimates with the true thetas.
pairs.panels(scores.df,pch=".",
main="Comparing IRT and classical with complete data")
#now show how to do this with a quasi-Rasch model
tau <- irt.tau(d9$items)
scores.rasch <- scoreIrt(tau,d9$items,key=rep(1,9))
scores.dfr <- data.frame(scores.df,scores.rasch) #almost identical to the 2PL model!
pairs.panels(scores.dfr)
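#an added check: lowerCor (from psych) gives the same comparison numerically,
#confirming how close the quasi-Rasch thetas come to the 2PL thetas
lowerCor(scores.dfr)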
#with all the data, why bother?
#now delete some of the data
d9$items[1:333,1:3] <- NA
d9$items[334:666,4:6] <- NA
d9$items[667:1000,7:9] <- NA
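#an added sanity check: each item should now be missing for roughly
#a third of the 1000 simulated subjects
colSums(is.na(d9$items))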
scores <- scoreIrt(test,d9$items)
scores.df <- data.frame(scores,true=d9$theta) #combine the estimates with the true thetas.
pairs.panels(scores.df, pch=".",
main="Comparing IRT and classical with random missing data")
#with missing data, the theta estimates are noticeably better.
#now show how to do this with a quasi-Rasch model
tau <- irt.tau(d9$items)
scores.rasch <- scoreIrt(tau,d9$items,key=rep(1,9))
scores.dfr <- data.frame(scores.df,rasch = scores.rasch)
pairs.panels(scores.dfr) #rasch is actually better!
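#an added numeric check of the claim above: correlate all the estimates,
#including the quasi-Rasch scores, with the true thetas
round(cor(scores.dfr,use="pairwise"),2)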
v9 <- sim.irt(9,1000,-2.,2.,mod="normal") #dichotomous items
items <- v9$items
test <- irt.fa(items)
total <- rowSums(items)
ord <- order(total)
items <- items[ord,]
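#an added check: after reordering, the row totals should be non-decreasing
all(diff(rowSums(items)) >= 0) #should be TRUE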
#now delete some of the data - note that they are ordered by score
items[1:333,5:9] <- NA
items[334:666,3:7] <- NA
items[667:1000,1:4] <- NA
items[990:995,1:9] <- NA #the case of terrible data
items[996:998,] <- 0 #all wrong
items[999:1000,] <- 1 #all right
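#an added check on the manipulated rows: 990:995 are all missing (totals of 0
#with na.rm), 996:998 are all wrong (0), and 999:1000 are all right (9)
rowSums(items[990:1000,],na.rm=TRUE)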
scores <- scoreIrt(test,items)
unitweighted <- scoreIrt(items=items,keys=rep(1,9)) #each item has a discrimination of 1
#combine the estimates with the true thetas.
scores.df <- data.frame(v9$theta[ord],scores,unitweighted)
colnames(scores.df) <- c("True theta","irt theta","irt total","irt fit",
     "rasch theta","rasch total","rasch fit")
pairs.panels(scores.df,pch=".",main="Comparing IRT and classical with missing data")
#with missing data, the theta estimates are noticeably better estimates
#of the generating theta than those using the empirically derived factor loading weights
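#an added numeric version of that comparison: correlations of each
#estimate with the true (generating) theta
round(cor(scores.df,use="pairwise"),2)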
#now show the ability to score multiple scales using keys
if(require(psychTools)) {
  ab.tau <- irt.tau(psychTools::ability) #first find the tau values
  ab.keys <- make.keys(psychTools::ability,list(g=1:16,reason=1:4,
       letter=5:8,matrix=9:12,rotate=13:16))
  #ab.scores <- scoreIrt(stats=ab.tau, items = psychTools::ability, keys = ab.keys)
}
#and now do it for polytomous items using 2pl
bfi.scores <- scoreIrt.2pl(bfi.keys,bfi[1:25])
#compare with classical unit weighting by using scoreItems
#not run in the interests of time
#bfi.unit <- scoreItems(bfi.keys,bfi[1:25])
#bfi.df <- data.frame(bfi.scores,bfi.unit$scores)
#pairs.panels(bfi.df,pch=".")
bfi.irt <- scoreIrt(items=bfi[16:20]) #find irt based N scores
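#an added rough check (assuming, as is conventional for scoreIrt output, that
#the first column holds the theta estimates): the irt based N score should
#track a simple unit weighted mean of the five N items
cor(bfi.irt[,1],rowMeans(bfi[16:20],na.rm=TRUE),use="pairwise")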
#Specify item difficulties and discriminations from a different data set.
stats <- structure(list(MR1 = c(1.4, 1.3, 1.3, 0.8, 0.7),
    difficulty.1 = c(-1.2, -2, -1.5, -1.2, -0.9),
    difficulty.2 = c(-0.1, -0.8, -0.4, -0.3, -0.1),
    difficulty.3 = c(0.6, -0.2, 0.2, 0.2, 0.3),
    difficulty.4 = c(1.5, 0.9, 1.1, 1, 1),
    difficulty.5 = c(2.5, 2.1, 2.2, 1.7, 1.6)),
    row.names = c("N1", "N2", "N3", "N4", "N5"), class = "data.frame")
stats #show them
bfi.new <- scoreIrt(stats,bfi[16:20]) #score with the externally supplied statistics
#bfi.irt was already found above, so compare the two sets of estimates
cor2(bfi.new,bfi.irt)
newstats <- stats
newstats[2:6] <- stats[2:6] + 1 #change the difficulties
bfi.harder <- scoreIrt(newstats,bfi[16:20])
pooled <- cbind(bfi.irt,bfi.new,bfi.harder)
describe(pooled) #note that the mean scores have changed
lowerCor(pooled) #and although the unit weighted scales are identical,
# the irt scales differ as the difficulties change