Please consider responding to a brief user needs survey.

Update extractor, attribute list, functions to rerun volume 2.

parent 5879459d
......@@ -116,7 +116,7 @@ cell_size = 5 # Cell size for raster outputs
pt2trans_disttolerance = 25 # Maximum distance between transect and point for assigning values; originally 10 m
########### Field names ##########################
trans_flds = ['TRANSECTID', 'TRANSORDER', 'DD_ID', 'MHW']
trans_flds = ['TRANSECTID', 'DD_ID', 'MHW']
# trans_flds = ['sort_ID', 'TRANSECTID','Azimuth',
# 'LRR', 'SL_x', 'SL_y', 'Bslope',
# 'DL_x', 'DL_y', 'DL_z', 'DL_zmhw', 'DL_snapX','DL_snapY',
......@@ -185,7 +185,7 @@ field_defs = {
'defs':'Himmelstoss and others (2010)'},
'TRANSECTID':{'def':'NASC transect ID: "Permanent and unique identification number for each [NASC] transect in the output rates table calculated by DSAS.""',
'defs':'Himmelstoss and others (2010)'},
'DD_ID':{'def':'Identifier that orders transects sequentially along the shoreline and is unique across all sites analyzed for Zeigler and others, 2018.'},
'DD_ID':{'def':'Identifier that orders transects sequentially along the shoreline and is unique across all sites analyzed for Zeigler and others, 2019.'},
'Azimuth':{'def':'Bearing of the transect measured in degrees clockwise from North. NoData value of -99999 occurs where the bearing could not be calculated.',
'unit':'degrees'},
'LRR':{'def':'"A linear regression rate-of-change statistic was calculated by fitting a least-squares regression line to all shoreline points for a particular transect. The best-fit regression line is placed so that the sum of the squared residuals (determined by squaring the offset distance of each data point from the regression line and adding the squared residuals together) is minimized. The linear regression rate is the slope of the line. The rate is reported in meters per year with positive values indicating accretion and negative values indicating erosion." NoData value of -99999 indicates an NASC transect is not present or does not have an LRR value.',
......
......@@ -880,6 +880,7 @@ def ArmorLineToTrans_PD(in_trans, armorLines, sl2trans_df, tID_fld, proj_code, e
df = FCtoDF(arm2trans, xy=True, dffields=[tID_fld, 'Arm_z'])
df.index = df.pop(tID_fld)
df.rename(columns={'SHAPE@X':'Arm_x','SHAPE@Y':'Arm_y'}, inplace=True)
df = df.astype({'Arm_z':'float64'})
# Where multiple armor intersect points created along a transect, use the closest point to the shoreline.
if df.index.duplicated().any():
idx = df.index[df.index.duplicated()]
......@@ -1331,9 +1332,11 @@ def FCtoDF(fc, xy=False, dffields=[], fill=-99999, id_fld=False, extra_fields=[]
def JoinDFtoFC(df, in_fc, join_id, target_id=False, out_fc='', overwrite=True, fill=-99999, verbose=True):
"""Join pandas dataframe to feature class."""
# Get join/ID field
if not target_id:
target_id=join_id
# If out_fc specified, initialize output FC with a copy of input
fun.check_id_fld(df, target_id)
# If out_fc is specified, initialize output FC with a copy of input
if not len(out_fc):
out_fc = in_fc
else:
......@@ -1374,6 +1377,7 @@ def JoinDFtoFC_2(df, in_fc, join_id, target_id=False, out_fc='', overwrite=True,
# Convert NP array to Arc table
out_tbl = os.path.join(arcpy.env.scratchGDB, out_fc+'_tbl')
arcpy.Delete_management(out_tbl) # delete if already exists
arcpy.da.NumPyArrayToTable(arr, out_tbl)
# Wait 5 seconds. We've been having problems with the next step that may have been caused by not pausing here.
......@@ -1381,6 +1385,7 @@ def JoinDFtoFC_2(df, in_fc, join_id, target_id=False, out_fc='', overwrite=True,
# Join table from DF to the copy of the extended transects.
arcpy.JoinField_management(out_fc, target_id, out_tbl, target_id, colnames)
arcpy.Delete_management(out_tbl)
if verbose:
print("Created {} from input dataframe and {} file.".format(os.path.basename(out_fc), os.path.basename(in_fc)))
return(out_fc)
......@@ -1466,3 +1471,40 @@ def JoinDFtoRaster(df, rst_ID, out_rst='', fill=-99999, id_fld='sort_ID', val_fl
arcpy.management.JoinField(out_rst, "Value", tbl_path, id_fld, val_fld)
print('OUTPUT: {}. Field "Value" is ID and "uBW" is beachwidth.'.format(os.path.basename(out_rst)))
return(out_rst)
def pts_to_csv_and_eainfoxml(in_fc, suffix, scratch_dir, pts_name, field_defs, fill):
    """Export a points feature class to shapefile and CSV and write an eainfo XML report.

    Parameters
    ----------
    in_fc : str
        Path to the input points feature class.
    suffix : str
        Suffix appended to the site prefix of `pts_name` to name the outputs.
    scratch_dir : str
        Directory that receives the shapefile, CSV, and eainfo XML outputs.
    pts_name : str
        Dataset name; the text before the first '_' is used as the site prefix.
    field_defs : dict
        Field definitions passed through to fun.report_fc_values().
    fill : int or str
        Value written for NoData cells in the CSV (pandas `na_rep`).

    Returns
    -------
    str
        Path to the CSV file created in `scratch_dir`.
    """
    # Convert the feature class to a pandas dataframe.
    pts_df = FCtoDF(in_fc)
    # Best effort: coerce the confidence-interval fields to float64.
    # Narrowed from a bare except: KeyError if a column is absent,
    # ValueError/TypeError if a column cannot be cast.
    try:
        pts_df = pts_df.astype({'slope_ci': 'float64', 'ci95xsl': 'float64'})
    except (KeyError, ValueError, TypeError):
        pass
    new_shp = pts_name.split('_')[0] + suffix + '.shp'
    # Report field values and write the entity/attribute info XML.
    xmlfile = os.path.join(scratch_dir, os.path.splitext(new_shp)[0] + '_eainfo.xml')
    extra_flds = fun.report_fc_values(pts_df, field_defs, xmlfile)
    # Delete extra fields from the feature class; the dataframe copies are
    # dropped below before the CSV is written. (Iterating an empty list is a
    # no-op, so no explicit len() guard is needed.)
    for fld in extra_flds:
        try:
            arcpy.DeleteField_management(in_fc, fld)
            print('Deleted field "{}"'.format(fld))
        except Exception:
            # Best effort: warn and continue so one stubborn field does not
            # abort the whole export.
            print('WARNING: Failed to delete field "{}"'.format(fld))
    # Overwrite any pre-existing shapefile, then export the cleaned FC.
    arcpy.Delete_management(new_shp)
    arcpy.FeatureClassToFeatureClass_conversion(in_fc, scratch_dir, new_shp)
    print("\nOUTPUT: {} in specified scratch_dir.".format(os.path.basename(new_shp)))
    # Save the CSV (minus the extra fields) in scratch_dir.
    pts_df.drop(extra_flds, axis=1, inplace=True)
    csv_fname = os.path.join(scratch_dir, os.path.splitext(new_shp)[0] + '.csv')
    pts_df.to_csv(csv_fname, na_rep=fill, index=False)
    # Report the resulting file size in MB.
    sz_mb = os.stat(csv_fname).st_size / (1024.0 * 1024.0)
    print("\nOUTPUT: {} (size: {:.2f} MB) in specified scratch_dir.".format(os.path.basename(csv_fname), sz_mb))
    return(csv_fname)
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment