library(targets)

# running in one global workflow for now.
# clear the dropped-gages check files so they are regenerated fresh
# checks from SWIM prep
if (file.exists("temp/GagestoCheck_GAGESIII.csv"))
  file.remove("temp/GagestoCheck_GAGESIII.csv")
# checks from nhdplusv2 gageloc prep
if (file.exists("temp/GagestoCheck_GageLoc.csv"))
  file.remove("temp/GagestoCheck_GageLoc.csv")
Sys.setenv(TAR_PROJECT = "01_prep")
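# TAR_PROJECT selects one of the projects defined in _targets.yaml in a
# multi-project setup; to confirm which targets script is active, one can
# check (illustrative):
#   tar_config_get("script")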
# step 1: gage selection
# skip this step if gages are not of interest
tar_make(gage_selection_map_report)
# step 2: vpu prep
# the vpu_codes target controls how many VPUs worth of data are prepared
# set the "domain" variable to an HU02 in "R/user_vars.R" to restrict the run
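# e.g., in R/user_vars.R (illustrative value):
#   domain <- "01"  # prepare only HU02 region 01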
tar_make(vpu_codes)
tar_make(rpu_codes)
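# optionally inspect which VPUs and RPUs will be processed:
tar_read(vpu_codes)
tar_read(rpu_codes)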
########## Debug help ################
if(FALSE) { # this won't run if you just bang through this file
# To debug a given vpu, first run all to get the dynamic branches established:
tar_make(vpu_base)
# if any error, you can rerun them as shown below.
# Then find the branch in question and add it to tar_option_set() in the workflow file:
(branch <- tar_branch_names(vpu_base, which(vpu_codes == "01")))
vpu_codes[tar_branch_index(branch)]
cat('debug = "', branch, '", cue = tar_cue(mode = "never")', sep = "")
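# the output can be pasted into tar_option_set() in the workflow file, e.g.
# (illustrative branch name):
#   tar_option_set(debug = "vpu_base_06eeee2f", cue = tar_cue(mode = "never"))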
# now we run with callr_function = NULL to get the debug to hit on the desired branch.
# note that any other failed branches may try to run first -- it's best to debug the first
# errored branch, then work down the list.
tar_make(callr_function = NULL)
}
########## Debug help ################
# if running many processing units, parallelization is possible:
# run branches for a given target in parallel if you have enough memory.
# note this will only work for targets with 'deployment = "worker"'
workers <- 6 # number of parallel workers; adjust for your machine
tar_make_future(rpu_vpu_out_list, workers = workers)
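# tar_make_future() dispatches branches with the {future} framework; a
# parallel future::plan() is assumed to be registered somewhere (e.g. in the
# workflow file) -- illustrative:
#   future::plan(future::multisession)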
# make sure to run the rest of the project with tar_make() too!
# see what you've done with:
tar_visnetwork(targets_only = TRUE)
# and
View(tar_meta())
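# e.g., list targets with errors recorded in the metadata (the "name" column
# is always included in tar_meta() output; illustrative):
#   tar_meta(fields = "error")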
Sys.setenv(TAR_PROJECT = "02_POI_creation")
tar_make(vpu_gpkg)
# see which geopackages we are going to run through;
# the POI creation workflow loops over these
tar_read(vpu_gpkg)
# if running many VPUs, each POI creation step can be run independently:
tar_make_future(huc12_poi, workers = workers)
tar_make_future(gage_pois, workers = workers)
tar_make_future(te_pois, workers = workers)
tar_make_future(resops_pois, workers = workers)
tar_make_future(hilarri_pois, workers = workers)
tar_make_future(wb_outlet_pois, workers = workers)
tar_make_future(write_wb_flowline_mod, workers = workers)
tar_make_future(ar_event_pois, workers = workers)
tar_make_future(terminal_pois, workers = workers)
tar_make_future(updated_flowline_confluence, workers = workers)
tar_make_future(wb_inlet_pois, workers = workers)
tar_make_future(nid_pois, workers = workers)
tar_make_future(headwater_pois, workers = workers)
tar_make_future(elevation_break_pois, workers = workers)
tar_make_future(time_of_travel_pois, workers = workers)
tar_make_future(final_pois, workers = workers)
tar_make_future(draft_segments, workers = workers)
tar_make_future(collapsed_pois, workers = workers)
tar_make_future(poi_lookup, workers = workers)
# Otherwise, just run all with
tar_make()
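# switch to the third workflow project (assumed name -- the refactor step;
# check _targets.yaml for the exact project name)
Sys.setenv(TAR_PROJECT = "03_refactor")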
# iterate over "raster processing units"
tar_make(rpu_gpkg)
tar_make(vpu_gpkg) # need to track the vpu for each rpu
tar_read(rpu_gpkg)
tar_read(vpu_gpkg)
# now make all the rpus that we need
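# (illustrative, mirroring the later steps) run everything for this project:
#   tar_make_future(workers = workers)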
Sys.setenv(TAR_PROJECT = "04_aggregate")
# iterate over "raster processing units"
tar_make(rpu_gpkg)
tar_make(agg_gpkg) # need to track the vpu for each rpu
tar_read(rpu_gpkg)
tar_read(agg_gpkg) # outputs go to "aggregate" rpu gpkgs
workers <- 6
tar_make_future(workers = workers)
Sys.setenv(TAR_PROJECT = "05_non_dendritic")
# now we combine raster units back to vector processing units
tar_make(vpu_gpkg)
tar_read(vpu_gpkg)
# outputs will be written to a refactor and final "GF" gpkg
tar_make(rfc_gpkg)
tar_read(rfc_gpkg)
tar_make(gf_gpkg)
tar_read(gf_gpkg)
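# e.g., peek at the layers of an output geopackage (assuming these targets
# store file paths; illustrative):
#   sf::st_layers(tar_read(gf_gpkg)[1])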
tar_make()
# or run in parallel:
workers <- 6
tar_make_future(workers = workers)