# NOT RUN {
# NB: GUI for soundgen is available as a Shiny app.
# Type "soundgen_app()" to start it
playback = c(TRUE, FALSE)[2] # switch to [1] (TRUE) to play back the audio from the examples
sound = soundgen(play = playback)  # default samplingRate is 16000 Hz
# spectrogram(sound, samplingRate = 16000, osc = TRUE)
# playme(sound)
# Use the in-built collection of presets:
# names(presets) # speakers
# names(presets$Chimpanzee) # calls per speaker
s1 = eval(parse(text = presets$Chimpanzee$Scream_conflict)) # screaming chimp
# playme(s1)
s2 = eval(parse(text = presets$F1$Scream_conflict)) # the same call type by a human female speaker (F1)
# playme(s2)
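# Presets are stored as text, so the underlying soundgen() call can be inspected, e.g.:
# cat(presets$Chimpanzee$Scream_conflict)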
# }
# NOT RUN {
# unless temperature is 0, the sound is different every time
for (i in 1:3) sound = soundgen(play = playback, temperature = .2)
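# with temperature = 0 the same call always produces the same sound
sound = soundgen(temperature = 0, play = playback)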
# Bouts versus syllables. Compare:
sound = soundgen(formants = 'uai', repeatBout = 3, play = playback)  # the whole bout repeated 3 times
sound = soundgen(formants = 'uai', nSyl = 3, play = playback)  # one bout of 3 syllables
# Intonation contours per syllable and globally:
sound = soundgen(nSyl = 5, sylLen = 200, pauseLen = 140,
  play = playback,
  pitchAnchors = data.frame(time = c(0, 0.65, 1),
                            value = c(977, 1540, 826)),  # per-syllable contour, Hz
  pitchAnchorsGlobal = data.frame(time = c(0, .5, 1),
                                  value = c(-6, 7, 0)))  # global contour, semitones
# Subharmonics in sidebands (noisy scream)
sound = soundgen(nonlinBalance = 100, subFreq = 75, subDep = 130,
  sylLen = 800,
  pitchAnchors = data.frame(time = c(0, .3, .9, 1),
                            value = c(1200, 1547, 1487, 1154)),
  play = playback, plot = TRUE)
# Jitter and mouth opening (bark, dog-like)
sound = soundgen(repeatBout = 2, sylLen = 160, pauseLen = 100,
  nonlinBalance = 100, subFreq = 100, subDep = 60, jitterDep = 1,
  pitchAnchors = data.frame(time = c(0, 0.52, 1), value = c(559, 785, 557)),
  mouthAnchors = data.frame(time = c(0, 0.5, 1), value = c(0, 0.5, 0)),
  vocalTract = 5, play = playback)
# }