# read raw data from MED
data("fi60_raw_from_med")
# see first 10 lines
head(fi60_raw_from_med, 10)
# create a temporary file so the example only writes to tempdir() (avoids the non-staged installation warning)
temp_file <- tempfile(fileext = ".txt")
# write the data to the temporary file
writeLines(fi60_raw_from_med, temp_file)
# Use the temporary file for processing
fi60_processed <- read_med(fname = temp_file, save_file = FALSE,
                           col_r = "C:", out = TRUE,
                           col_names = c("time", "event"),
                           num_col = 6, time_dot_event = TRUE)
head(fi60_processed)
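
# A quick sanity check of the processed data (a sketch; it assumes read_med
# returned a data frame with the "time" and "event" columns requested above)
str(fi60_processed)           # column types and number of rows
table(fi60_processed$event)   # how often each event code occurred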
# __________________________________________________________________________
## To use in bulk
# 1) Generate a list of filenames of raw MED data
# 2) Loop over the list with the function, using each element
# of the list as the fname argument.
# __________________________________________________________________________
# Suppose all the raw MED files start with "2020" and live in the working
# directory (wd); then we can get their filenames directly, without
# specifying a path:
# filenames <- list.files(pattern = "^2020")
# The line above searches the wd for all files whose names start with "2020"
# and saves them as a character vector in "filenames".
# With that vector, make a for loop like the following:
# __________________________________________________________________________
# If you want to work with the processed data right away, first create an
# empty data frame to accumulate the data, file by file
# df_working <- data.frame()
# __________________________________________________________________________
# for (f in filenames) {
#   df_tmp <- read_med(fname = f,
#                      path_save = "data/processed/", # your path to save the csv
#                      col_r = "C:",  # if the time.event vector is saved in variable C
#                      out = TRUE)    # TRUE returns the processed data into df_tmp;
#                                     # set out = FALSE if you only want the csv files
#   # append the new data frame as rows
#   df_working <- rbind(df_working, df_tmp)
# }
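# __________________________________________________________________________
# An equivalent sketch without the explicit loop: read every file with
# lapply() and bind the results in one step (base R only; same read_med
# arguments as above, assuming "filenames" was created as shown earlier).
# df_list <- lapply(filenames, function(f) {
#   read_med(fname = f,
#            path_save = "data/processed/",
#            col_r = "C:",
#            out = TRUE)
# })
# df_working <- do.call(rbind, df_list)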
# That's all.