## Not run:
# ## load one spike train data set of STAR
# data(e060824spont)
# ## Create the CountingProcessSamplePath object
# n1spt.cp <- as.CPSP(e060824spont[["neuron 1"]])
# ## print it
# n1spt.cp
# ## plot it
# plot(n1spt.cp)
# ## get the summary
# ## Notice the warning due to a few identical interspike intervals
# ## leading to an inaccurate Berman's test.
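# ## (Identical intervals produce ties, which conflict with the
# ## continuous-distribution assumption behind the test.)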
# summary(n1spt.cp)
#
# ## Simulate data corresponding to a renewal process with
# ## an inverse Gaussian ISI distribution in the spontaneous
# ## regime modulated by a multiplicative stimulus whose time
# ## course is a shifted and scaled chi2 density.
# ## Define the "stimulus" function
# stimulus <- function(t,
#                      df=5,
#                      tonset=5,
#                      timeFactor=5,
#                      peakFactor=10) {
#   dchisq((t-tonset)*timeFactor,df=df)*peakFactor
# }
# ## Define the conditional intensity / hazard function
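# ## The model is multiplicative: the conditional intensity is the
# ## inverse Gaussian hazard of the time elapsed since the last spike,
# ## hinvgauss(t-tlast), multiplied by exp(stimulus(t)).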
# hFct <- function(t,
#                  tlast,
#                  df=5,
#                  tonset=5,
#                  timeFactor=5,
#                  peakFactor=10,
#                  mu=0.075,
#                  sigma2=3
#                  ) {
#
#   hinvgauss(t-tlast,mu=mu,sigma2=sigma2)*exp(stimulus(t,df,tonset,timeFactor,peakFactor))
#
# }
# ## define the function simulating the train with the thinning method
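# ## The thinning (Ogata) method draws candidate times from a
# ## homogeneous Poisson process whose rate, peakCI, must dominate the
# ## conditional intensity, and accepts each candidate with probability
# ## hFct(candidate)/peakCI; the accepted candidates are the spike times.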
# makeTrain <- function(tstop=10,
#                       peakCI=200,
#                       preTime=5,
#                       df=5,
#                       tonset=5,
#                       timeFactor=5,
#                       peakFactor=10,
#                       mu=0.075,
#                       sigma2=3) {
#
#   ## pre-allocate the result vector; unused slots keep a negative
#   ## sentinel value and are dropped by the final subsetting
#   result <- numeric(500) - preTime - .Machine$double.eps
#   result.n <- 500
#   result[1] <- 0
#   idx <- 1
#   currentTime <- result[1]
#   while (currentTime < tstop+preTime) {
#     ## propose a candidate time from a homogeneous Poisson process
#     ## of rate peakCI (an upper bound of the conditional intensity)
#     currentTime <- currentTime+rexp(1,peakCI)
#     p <- hFct(currentTime,
#               result[idx],
#               df=df,
#               tonset=tonset+preTime,
#               timeFactor=timeFactor,
#               peakFactor=peakFactor,
#               mu=mu,
#               sigma2=sigma2)/peakCI
#     rthreshold <- runif(1)
#     if (p>1) stop("Wrong peakCI")
#     ## keep proposing until a candidate is accepted, each one with
#     ## probability p = hFct(candidate)/peakCI
#     while(p < rthreshold) {
#       currentTime <- currentTime+rexp(1,peakCI)
#       p <- hFct(currentTime,
#                 result[idx],
#                 df=df,
#                 tonset=tonset+preTime,
#                 timeFactor=timeFactor,
#                 peakFactor=peakFactor,
#                 mu=mu,
#                 sigma2=sigma2)/peakCI
#       if (p>1) stop("Wrong peakCI")
#       rthreshold <- runif(1)
#     }
#     idx <- idx+1
#     ## grow the result vector if needed, appending new sentinel values
#     if (idx > result.n) {
#       result <- c(result,numeric(500) - preTime - .Machine$double.eps)
#       result.n <- result.n + 500
#     }
#     result[idx] <- currentTime
#   }
#
#   ## keep only the spikes falling in the observation window and
#   ## shift the time origin back to 0
#   result[preTime < result & result <= tstop+preTime] - preTime
#
# }
# ## set the seed
# set.seed(20061001)
# ## "make" the train
# t1 <- makeTrain()
# ## create the corresponding CountingProcessSamplePath
# ## object
# cpsp1 <- mkCPSP(t1)
# ## print it
# cpsp1
# ## test it
# cpsp1.summary <- summary(cpsp1)
# cpsp1.summary
# plot(cpsp1.summary)
# ## Define a function returning the conditional intensity function (cif)
# ciFct <- function(t,
#                   tlast,
#                   df=5,
#                   tonset=5,
#                   timeFactor=5,
#                   peakFactor=10,
#                   mu=0.075,
#                   sigma2=3
#                   ) {
#
#   sapply(t, function(x) {
#     if (x <= tlast[1]) return(1/mu)
#     y <- x-max(tlast[tlast<x])
#     hinvgauss(y,mu=mu,sigma2=sigma2)*exp(stimulus(x,df,tonset,timeFactor,peakFactor))
#   }
#   )
#
# }
# ## Compute the cif of the train
# tt <- seq(0,10,0.001)
# lambda.true <- ciFct(tt,cpsp1$ppspFct())
# ## plot it together with the event times
# ## Notice that the representation is somewhat inaccurate: the cif
# ## is in fact a left-continuous function
# plot(tt,lambda.true,type="l",col=2)
# rug(cpsp1$ppspFct())
# ## plot the integrated intensity function and the counting process
# plot(tt,cumsum(lambda.true)*0.001,type="l",col=2)
# lines(cpsp1)
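# ## cumsum(lambda.true)*0.001 is a Riemann-sum approximation of the
# ## integrated intensity Lambda(t); if the model is right, the observed
# ## counting process should fluctuate around it.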
# ## define a function doing the time transformation / rescaling
# ## by integrating the cif and returning another CountingProcessSamplePath
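# ## By the time rescaling theorem, mapping each event time t to
# ## Lambda(t), the integrated conditional intensity, turns the original
# ## process into a homogeneous Poisson process with rate 1 when the cif
# ## is correct; this is what the tests applied below check.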
# transformCPSP <- function(cpsp,
#                           ciFct,
#                           CIFct,
#                           method=c("integrate","discrete"),
#                           subdivisions=100,
#                           ...
#                           ) {
#
#   if (!inherits(cpsp,"CountingProcessSamplePath"))
#     stop("cpsp should be a CountingProcessSamplePath object")
#   st <- cpsp$ppspFct()
#   n <- length(st)
#   from <- cpsp$from
#   to <- cpsp$to
#   if (missing(CIFct)) {
#     if (method[1] == "integrate") {
#       lwr <- c(from,st)
#       upr <- c(st,to)
#       Lambda <- sapply(1:(n+1),
#                        function(idx) integrate(ciFct,
#                                                lower=lwr[idx],
#                                                upper=upr[idx],
#                                                subdivisions=subdivisions,
#                                                ...)$value
#                        )
#       Lambda <- cumsum(Lambda)
#       st <- Lambda[1:n]
#       from <- 0
#       to <- Lambda[n+1]
#     } ## End of conditional on method[1] == "integrate"
#     if (method[1] == "discrete") {
#       lwr <- c(from,st)
#       upr <- c(st,to)
#       xx <- unlist(lapply(1:(n+1),
#                           function(idx) seq(lwr[idx],
#                                             upr[idx],
#                                             length.out=subdivisions)
#                           )
#                    )
#       Lambda <- cumsum(ciFct(xx[-length(xx)])*diff(xx))
#       Lambda <- Lambda - Lambda[1]
#       st <- Lambda[(1:n)*subdivisions]
#       from <- 0
#       to <- Lambda[length(Lambda)]
#     } ## End of conditional on method[1] == "discrete"
#   } else {
#     result <- CIFct(c(from,st,to))
#     result <- result-result[1]
#     from <- result[1]
#     to <- result[n+2]
#     st <- result[2:(n+1)]
#   } ## End of conditional on missing(CIFct)
#   mkCPSP(st,from,to)
# }
# ## transform cpsp1
# cpsp1t <- transformCPSP(cpsp1,function(t) ciFct(t,cpsp1$ppspFct()))
# ## test it
# cpsp1t.summary <- summary(cpsp1t)
# cpsp1t.summary
# plot(cpsp1t.summary)
# ## compare the finite sample performance of the
# ## Kolmogorov test (testing the uniformity of the
# ## jump times given the number of events) with
# ## that of the new "Wiener process test"
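# ## The Wiener process test uses w(t) = (N(t) - rate*t)/sqrt(rate),
# ## which behaves approximately like a standard Wiener process on [0,1]
# ## under the null hypothesis; b95 and b99 below are boundaries of the
# ## form a + b*sqrt(t) with nominal 95% and 99% probabilities of not
# ## being crossed by such a process.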
# empiricalCovProb <- function(myRates=c(10,(1:8)*25,(5:10)*50,(6:10)*100),
#                              nbRep=1000,
#                              exact=NULL
#                              ) {
#
#   b95 <- function(t) 0.299944595870772 + 2.34797018726827*sqrt(t)
#   b99 <- function(t) 0.313071417065285 + 2.88963206734397*sqrt(t)
#   result <- matrix(numeric(4*length(myRates)),nrow=4)
#   colnames(result) <- paste(myRates)
#   rownames(result) <- c("ks95","ks99","wp95","wp99")
#   for (i in 1:length(myRates)) {
#     rate <- myRates[i]
#     partial <- sapply(1:nbRep,
#                       function(repIdx) {
#                         st <- cumsum(rexp(5*rate,rate))
#                         while(max(st) < 1) st <- c(st,max(st)+cumsum(rexp(5*rate,rate)))
#                         st <- st[st<=1]
#                         ks <- ks.test(st,punif,exact=exact)$p.value
#                         w <- (st*rate-seq(st))/sqrt(rate)
#                         c(ks95=0.95 < ks,
#                           ks99=0.99 < ks,
#                           wp95=any(w < -b95(st) | b95(st) < w),
#                           wp99=any(w < -b99(st) | b99(st) < w)
#                           )
#                       }
#                       )
#
#     result[,i] <- apply(partial,1,sum)
#   }
#
#   attr(result,"nbRep") <- nbRep
#   attr(result,"myRates") <- myRates
#   attr(result,"call") <- match.call()
#   result/nbRep
#
# }
#
# plotCovProb <- function(covprob,ci=0.95) {
#
#   nbMax <- max(attr(covprob,"myRates"))
#   plot(c(0,nbMax),c(0.94,1),
#        type="n",
#        xlab="Expected number of Spikes",
#        ylab="Empirical cov. prob.",xaxs="i",yaxs="i")
#   nbRep <- attr(covprob,"nbRep")
#   polygon(c(0,nbMax,nbMax,0),
#           c(rep(qbinom((1-ci)/2,nbRep,0.95)/nbRep,2),rep(qbinom(1-(1-ci)/2,nbRep,0.95)/nbRep,2)),
#           col="grey50",border=NA)
#   polygon(c(0,nbMax,nbMax,0),
#           c(rep(qbinom((1-ci)/2,nbRep,0.99)/nbRep,2),rep(qbinom(1-(1-ci)/2,nbRep,0.99)/nbRep,2)),
#           col="grey50",border=NA)
#   nbS <- attr(covprob,"myRates")
#   points(nbS,1-covprob[1,],pch=3)
#   points(nbS,1-covprob[2,],pch=3)
#   points(nbS,1-covprob[3,],pch=1)
#   points(nbS,1-covprob[4,],pch=1)
#
# }
# system.time(covprobA <- empiricalCovProb())
# plotCovProb(covprobA)
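# ## In the resulting plot, plus signs (pch=3) show the Kolmogorov test
# ## and circles (pch=1) the Wiener process test; the grey bands are
# ## binomial confidence bands around the nominal 95% and 99% coverage
# ## probabilities, so well-calibrated tests should fall inside them.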
# ## End(Not run)