# synthesize a target sound with episodes of subharmonics, amplitude
# modulation / sidebands, and jitter
target = soundgen(sylLen = 2000, addSilence = 0, temperature = 1e-2,
                  pitch = c(380, 550, 500, 220),
                  subDep = c(0, 0, 40, 0, 0, 0, 0, 0),
                  amDep = c(0, 0, 0, 0, 80, 0, 0, 0), amFreq = 80,
                  noise = c(-10, rep(-40, 5)),
                  jitterDep = c(0, 0, 0, 0, 0, 3),
                  plot = TRUE, play = TRUE)
# classifier trained on manually annotated recordings of human nonverbal
# vocalizations
nlp = detectNLP(target, 16000,
                predictors = c('subDep', 'amEnvDep', 'amMsPurity', 'HNR', 'CPP'),
                plot = TRUE, ylim = c(0, 4))
# classifier trained on synthetic, soundgen()-generated sounds
nlp = detectNLP(target, 16000,
                train = soundgen::detectNLP_training_synth,
                predictors = c('subDep', 'amEnvDep', 'amMsPurity', 'HNR', 'CPP'),
                plot = TRUE, ylim = c(0, 4))
head(nlp[, c('time', 'pr')])  # frame-by-frame predicted class
table(nlp$pr)  # number of frames per predicted class
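# A sketch for summarizing the output as proportions of frames per class
# (class labels are whatever detectNLP() returns in the 'pr' column):
round(prop.table(table(nlp$pr)), 2)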
# inspect some of the acoustic predictors over time...
plot(nlp$amEnvDep, type = 'l')
plot(nlp$subDep, type = 'l')
plot(nlp$entropy, type = 'l')
# ...and the per-class columns (none, sidebands, subharmonics, chaos)
plot(nlp$none, type = 'l')
points(nlp$sb, type = 'l', col = 'blue')
points(nlp$sh, type = 'l', col = 'green')
points(nlp$chaos, type = 'l', col = 'red')
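# A compact alternative to the repeated plot()/points() calls above - a sketch
# assuming the per-class columns are named 'none', 'sb', 'sh', 'chaos' as
# above and share a common scale:
probCols = c('none', 'sb', 'sh', 'chaos')
myCols = c('black', 'blue', 'green', 'red')
matplot(nlp$time, nlp[, probCols], type = 'l', lty = 1, col = myCols,
        xlab = 'Time', ylab = 'Per-class value')
legend('topright', legend = probCols, lty = 1, col = myCols)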
# detection of pitch jumps
s1 = soundgen(sylLen = 1200, temperature = .001, pitch = list(
  time = c(0, 350, 351, 890, 891, 1200),
  value = c(140, 230, 460, 330, 220, 200)))
playme(s1, 16000)
nlp1 = detectNLP(s1, 16000, plot = TRUE, ylim = c(0, 3),
                 predictors = c('subDep', 'amEnvDep', 'amMsPurity', 'HNR', 'CPP'),
                 train = soundgen::detectNLP_training_synth)
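# A sketch for listing the frames flagged as nonlinear phenomena, assuming
# 'none' is the label used for frames without any NLP:
nlp1[which(nlp1$pr != 'none'), c('time', 'pr')]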
# process all files in a folder
nlp = detectNLP('/home/allgoodguys/Downloads/temp260/',
                pitchManual = soundgen::pitchContour, cores = 4, plot = TRUE,
                savePlots = '', ylim = c(0, 3))
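# A sketch for per-file summaries, assuming the folder result is a list with
# one data frame per audio file (an assumption - check str(nlp)):
if (is.list(nlp) && !is.data.frame(nlp)) {
  lapply(nlp, function(x) table(x$pr))
}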