if (FALSE) {
# Recommended workflow for analyzing many short audio files
path_to_audio = '~/Downloads/temp' # our audio lives here
# STEP 1: extract manually corrected pitch contours
my_pitch = pitch_app() # opens in the system default browser, e.g. Firefox or Chrome
# To change the default browser, run something like:
options('browser' = '/usr/bin/firefox') # path to the executable on Linux
# You can pass presets with your preferred parameter values:
my_pitch = pitch_app(windowLength = 20, step = 10,
pitchMethods = c('dom', 'autocor', 'cep'), ylim = c(0.1, 3))
# full list of parameters that can be passed to pitch_app():
# paste0(c(names(input)[which(names(input) %in% rownames(defaults_analyze))],
# 'pitchMethods', 'summaryFun', 'summaryFun_text', 'pathfinding', 'spec_ylim',
# 'spec_colorTheme', 'osc', 'wn'), collapse = ', ')
# Object "my_pitch" contains the output, notably the time-pitch matrix
plot(my_pitch[[1]]$detailed$time, my_pitch[[1]]$detailed$pitch, type = 'b',
xlab = 'Time, ms', ylab = 'Pitch, Hz')
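# A quick numeric summary per file, e.g. the median pitch of the first file
# (this assumes the same $detailed structure used in the plot above):
median(my_pitch[[1]]$detailed$pitch, na.rm = TRUE)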
# Run the app with previously used settings
my_pitch2 = do.call(pitch_app, my_pitch$settings)
# save the complete output, including the settings used
saveRDS(my_pitch2, 'my_pitch_analysis.rds')
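# The saved analysis (including the settings) can be reloaded in a later
# session with readRDS(), e.g.:
my_pitch_reloaded = readRDS('my_pitch_analysis.rds')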
# STEP 2: run analyze() with the manually corrected pitch contours to obtain
# accurate descriptives such as the proportion of energy in harmonics above
# f0. This also gives you formant and loudness estimates (disabled in
# pitch_app() to speed things up)
df2 = analyze(path_to_audio,
pitchMethods = 'autocor', # only needed for HNR
nFormants = 5, # now we can measure formants as well
pitchManual = my_pitch
# or, if loading the output of pitch_app() from the disk:
# pitchManual = '~/Downloads/output.csv'
# pitchManual = '~/path_to_some_folder/my_pitch_contours.rds'
# etc
)
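# Before going further, check what analyze() returned - the exact structure
# and column names vary with the soundgen version, so inspect rather than
# assume:
str(df2, max.level = 1)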
# STEP 3: add other acoustic descriptors, for example syllable segmentation:
df3 = segment(path_to_audio)
# STEP 4: merge df2, df3, df4, ... in R or in a spreadsheet editor so that
# all acoustic descriptives end up in one table, for example:
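# A minimal merge sketch in R, assuming df2 and df3 are data frames with one
# row per audio file and a shared 'file' column (check the actual key column
# names in your output and adjust accordingly):
df_all = merge(df2, df3, by = 'file', all = TRUE)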
# To verify your pitch contours and/or edit them later, copy output.csv to
# the folder with your audio, run pitch_app(), and load the audio + csv
# together. The saved pitch contours are treated as manual anchors
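# A sketch of the copy step, assuming pitch_app() saved its results as
# 'output.csv' in the current working directory (adjust paths as needed):
file.copy('output.csv', file.path(path_to_audio, 'output.csv'))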
}