Initial commit

This commit is contained in:
2026-04-10 16:46:45 +08:00
commit 4fd1b0a203
165 changed files with 25698 additions and 0 deletions

View File

@ -0,0 +1,140 @@
import json
import os
import warnings
import sys
import numpy as np
import h5py
import hytools as ht
from hytools.io.envi import *
from hytools.brdf import calc_flex_single_post
from hytools.glint import set_glint_parameters
from hytools.masks import mask_create
warnings.filterwarnings("ignore")
np.seterr(divide='ignore', invalid='ignore')
def main():
    """Combine per-flightline BRDF sample H5 files and fit flex BRDF coefficients.

    CLI args:
        sys.argv[1]: JSON config file path.
        sys.argv[2]: folder holding *_prebrdf_sample.h5 files.
        sys.argv[3]: reflectance-loading mode (0 = whole array, 1 = band-by-band).
    """
    config_file = sys.argv[1]
    sample_folder = sys.argv[2]
    load_reflectance_mode = int(sys.argv[3])
    if not load_reflectance_mode in [1,0]:
        print("Please set the mode for loading H5 reflectance (0-Whole;1-By Band)")
        return
    with open(config_file, 'r') as outfile:
        config_dict = json.load(outfile)
    images = config_dict["input_files"]
    brdf_dict = config_dict['brdf']
    sample_h5_list = []
    # Keep only the sample files that were actually produced for the inputs.
    for image in images:
        tmp_file_name = f"{sample_folder}/{os.path.splitext(os.path.basename(image))[0]}_prebrdf_sample.h5"
        if os.path.exists(tmp_file_name):
            sample_h5_list+=[tmp_file_name]
    sample_dict = load_sample_h5(sample_h5_list,load_reflectance_mode)
    # Solar zenith normalization: 'scene' uses the mean over the sampled lines;
    # a float value is taken directly as the normalization angle (radians).
    if isinstance(brdf_dict['solar_zn_type'],str):
        if brdf_dict['solar_zn_type'] == 'scene':
            brdf_dict["solar_zn_norm_radians"]=float(sample_dict['mean_solar_zn'])
            print("Scene average solar zenith angle : %s degrees" % round(np.degrees(brdf_dict["solar_zn_norm_radians"]),3))
    elif isinstance(brdf_dict['solar_zn_type'],float):
        brdf_dict["solar_zn_norm_radians"]=brdf_dict['solar_zn_type']
    else:
        print('Unrecognized solar zenith angle normalization')
    calc_flex_single_post(sample_dict,brdf_dict,load_reflectance_mode)
    if config_dict['export']['coeffs'] and len(config_dict["corrections"]) > 0:
        print("Exporting correction coefficients.")
        # NOTE(review): export_coeffs_brdf reads sample_dict['brdf_dict'], which
        # is not set above -- presumably calc_flex_single_post stores it; confirm.
        export_coeffs_brdf(sample_dict,config_dict['export'],images)
def load_sample_h5(h5_file_list, load_reflectance_mode):
    """Read every sample H5 file and merge the contents into one dictionary.

    In mode 0 the full reflectance arrays are concatenated in memory; in
    mode 1 only the file names are kept so bands can be re-read on demand.
    """
    refl_parts = []
    kernel_parts = []
    solar_zeniths = []
    ndi_parts = []
    bad_bands = None  # taken from the first file; assumed identical across lines
    for file_index, h5_path in enumerate(h5_file_list):
        with h5py.File(h5_path, "r") as h5_obj:
            wavelist = h5_obj["wavelengths"][()]
            set_solar_zn = h5_obj["kernels_samples"].attrs['set_solar_zn']
            refl_samples = h5_obj["reflectance_samples"][()]
            kernel_samples = h5_obj["kernels_samples"][()]
            if file_index == 0:
                bad_bands = h5_obj["bad_bands"][()]
        # NDVI-style index from the bands nearest 850 nm and 660 nm.
        sample_nir = refl_samples[:, get_wave(850, wavelist)]
        sample_red = refl_samples[:, get_wave(660, wavelist)]
        ndi_parts.append((sample_nir - sample_red) / (sample_nir + sample_red))
        if load_reflectance_mode == 0:
            refl_parts.append(refl_samples)
            refl_samples = None
        else:
            refl_parts.append([h5_path])
        solar_zeniths.append(set_solar_zn)
        kernel_parts.append(kernel_samples)
    return {
        "kernels_samples": np.concatenate(kernel_parts, axis=0),
        "reflectance_samples": np.concatenate(refl_parts, axis=0),
        "ndi_samples": np.concatenate(ndi_parts),
        "mean_solar_zn": np.array(solar_zeniths).mean(),
        "bad_bands": bad_bands,
    }
def export_coeffs_brdf(data_dict, export_dict, images):
    """Write the shared BRDF coefficient dictionary to one JSON file per image."""
    corr_dict = data_dict['brdf_dict']  # group coefficients, shared by all lines
    for image in images:
        stem = os.path.splitext(os.path.basename(image))[0]
        coeff_file = "%s%s_brdf_coeffs_%s_chtc.json" % (
            export_dict['output_dir'], stem, export_dict["suffix"])
        with open(coeff_file, 'w') as outfile:
            json.dump(corr_dict, outfile)
def get_wave(wave, wavelengths):
    """Return the 0-based band index whose wavelength is closest to `wave`.

    Args:
        wave (float): Target wavelength, in the same units as `wavelengths`.
        wavelengths (np.ndarray): Array of band-center wavelengths.

    Returns:
        Index of the nearest band, or None (with a warning printed) when
        `wave` falls outside the covered range.
    """
    if (wave > wavelengths.max()) | (wave < wavelengths.min()):
        print("Input wavelength outside wavelength range!")
        return None
    return np.argmin(np.abs(wavelengths - wave))

if __name__== "__main__":
    main()

View File

@ -0,0 +1,238 @@
import json
import os
import warnings
import sys
import numpy as np
from scipy.optimize import nnls
import h5py
import hytools as ht
from hytools.io.envi import *
from hytools.masks import mask_create
from hytools.topo.c import calc_c
warnings.filterwarnings("ignore")
np.seterr(divide='ignore', invalid='ignore')
def main():
    """Fit group topographic coefficients for one subgroup of flightlines.

    CLI args:
        sys.argv[1]: JSON config file path.
        sys.argv[2]: folder holding *_pretopo_sample.h5 files.
        sys.argv[3]: subgroup id whose member lines are processed together.
    """
    config_file = sys.argv[1]
    sample_folder = sys.argv[2]
    topo_subgroup_id = sys.argv[3]
    with open(config_file, 'r') as outfile:
        config_dict = json.load(outfile)
    images = []
    if config_dict["topo"]["subgrouped"]:
        topo_dict = config_dict['topo']
        subgroup = topo_dict["subgroup"]  # maps image name -> subgroup id
        sample_h5_list=[]
        for each_img_name in subgroup.keys():
            each_h5_name = f"{sample_folder}/{os.path.splitext(os.path.basename(each_img_name))[0]}_pretopo_sample.h5"
            if not os.path.exists(each_h5_name):
                print(f"File:'{each_h5_name}' is not found, skip this...")
                continue
            if subgroup[each_img_name]==topo_subgroup_id:
                sample_h5_list+=[each_h5_name]
                images+=[each_img_name]
        if len(sample_h5_list)==0:
            print(f"Cannot find subgroup '{topo_subgroup_id}', exit.")
            return
        sample_dict = load_sample_h5(sample_h5_list)
        calc_topo_single_post(sample_dict,topo_dict) #,update topo coeffs
        apply_topo_scsc(sample_dict) # update reflectance
        if config_dict['export']['coeffs'] and len(config_dict["corrections"]) > 0:
            print("Exporting correction coefficients.")
            export_coeffs_topo(sample_dict,config_dict['export'],images,sample_h5_list)
        # Write the corrected samples back out as *_prebrdf_sample.h5 so the
        # BRDF stage can consume them.
        for image_order, imagename in enumerate(images):
            export_h5(imagename,config_dict['export'],sample_dict[sample_h5_list[image_order]])
    else:
        print("No subgroup is defined, exit.")
def load_sample_h5(h5_file_list):
    """Read each pretopo sample H5 file into a per-file info dictionary."""
    samples_by_file = {}
    for h5_path in h5_file_list:
        with h5py.File(h5_path, "r") as h5_obj:
            wavelist = h5_obj["wavelengths"][()]
            full_image_wavelist = h5_obj["image_wavelengths"][()]
            set_solar_zn = h5_obj["kernels_samples"].attrs['set_solar_zn']
            refl_samples = h5_obj["reflectance_samples"][()]
            kernel_samples = h5_obj["kernels_samples"][()]
            slope_samples = h5_obj["slope_samples"][()]
            cosine_i_samples = h5_obj["cosine_i_samples"][()]
            bad_bands = h5_obj["bad_bands"][()]
        # NDVI-style index from the bands nearest 850 nm and 660 nm.
        sample_nir = refl_samples[:, get_wave(850, wavelist)]
        sample_red = refl_samples[:, get_wave(660, wavelist)]
        samples_by_file[h5_path] = {
            "kernels_samples": kernel_samples,
            "reflectance_samples": refl_samples,
            "ndi_samples": (sample_nir - sample_red) / (sample_nir + sample_red),
            "bad_bands": bad_bands,
            "full_image_wavelist": full_image_wavelist,
            "set_solar_zn": set_solar_zn,
            "wavelist": wavelist,
            "slope_samples": slope_samples,
            "cosine_i_samples": cosine_i_samples,
            "topo_dict": None,  # filled in later by calc_topo_single_post
        }
    return samples_by_file
def export_coeffs_topo(data_dict, export_dict, images, h5_list):
    """Write each image's topographic coefficient dictionary to a JSON file."""
    for img_order, image in enumerate(images):
        stem = os.path.splitext(os.path.basename(image))[0]
        coeff_file = "%s%s_topo_coeffs_%s_chtc.json" % (
            export_dict['output_dir'], stem, export_dict["suffix"])
        corr_dict = data_dict[h5_list[img_order]]['topo_dict']
        with open(coeff_file, 'w') as outfile:
            json.dump(corr_dict, outfile)
def get_wave(wave, wavelengths):
    """Return the 0-based band index whose wavelength is closest to `wave`.

    Args:
        wave (float): Target wavelength, in the same units as `wavelengths`.
        wavelengths (np.ndarray): Array of band-center wavelengths.

    Returns:
        Index of the nearest band, or None (with a warning printed) when
        `wave` falls outside the covered range.
    """
    if (wave > wavelengths.max()) | (wave < wavelengths.min()):
        print("Input wavelength outside wavelength range!")
        return None
    return np.argmin(np.abs(wavelengths - wave))
def export_h5(imagename, export_dict, obj_dict):
    """Save topo-corrected samples as *_prebrdf_sample.h5 for the BRDF stage."""
    stem = os.path.splitext(os.path.basename(imagename))[0]
    out_filename = f"{export_dict['output_dir']}{stem}_prebrdf_sample.h5"
    with h5py.File(out_filename, "w") as h5_obj:
        h5_obj.attrs['Image Name'] = f"{stem}"
        kernels = h5_obj.create_dataset("kernels_samples", data=obj_dict["kernels_samples"])
        h5_obj.create_dataset("reflectance_samples", data=obj_dict["reflectance_samples"])
        h5_obj.create_dataset("wavelengths", data=obj_dict["wavelist"])
        kernels.attrs['set_solar_zn'] = obj_dict['set_solar_zn']
        kernels.attrs['kernels_names'] = '["Volume","Geometry"]'
        kernels.attrs['Solar Zenith Unit'] = "Radians"
        h5_obj.create_dataset("image_wavelengths", data=obj_dict["full_image_wavelist"])
        h5_obj.create_dataset("bad_bands", data=obj_dict["bad_bands"])
    print(f"{out_filename} saved.")
def calc_topo_single_post(sample_dict,topo_dict):
    """Fit SCS+C topographic coefficients from the pooled samples of all lines.

    Mutates `topo_dict` in place: adds 'coeffs' keyed by full-image band
    number, then stores a reference to `topo_dict` in every per-file
    sub-dictionary of `sample_dict`.
    """
    combine_refl = []
    combine_cos_i = []
    combine_slope = []
    ndi_list = []
    for h5_name in sample_dict.keys():
        sub_dict = sample_dict[h5_name]
        ndi_list+=[sub_dict["ndi_samples"]]
        combine_refl+=[sub_dict["reflectance_samples"]]
        combine_cos_i+=[sub_dict["cosine_i_samples"]]
        combine_slope+=[sub_dict["slope_samples"]]
        bad_bands = sub_dict["bad_bands"]  # assumed identical across files
    combine_refl=np.concatenate(combine_refl,axis=0)
    combine_cos_i=np.concatenate(combine_cos_i,axis=0)
    combine_slope=np.concatenate(combine_slope,axis=0)
    ndi_list=np.concatenate(ndi_list,axis=0)
    # Sample-selection mask; calc_mask order in the config must be
    # NDI, slope, cosine(i) -- the entries are read positionally.
    mask = np.ones(ndi_list.shape).astype(bool)
    mask &= (ndi_list >= float(topo_dict['calc_mask'][0][1]['min'])) & (ndi_list <= float(topo_dict['calc_mask'][0][1]['max']))
    mask &= (combine_slope >= float(topo_dict['calc_mask'][1][1]['min'])) & (combine_slope <= float(topo_dict['calc_mask'][1][1]['max']))
    mask &= (combine_cos_i >= float(topo_dict['calc_mask'][2][1]['min'])) & (combine_cos_i <= float(topo_dict['calc_mask'][2][1]['max']))
    feasible_sample_count=np.count_nonzero(mask)
    if feasible_sample_count>10:
        used_reflectance_samples = combine_refl[mask==1,:]
        used_cos_i = combine_cos_i[mask==1]
        topo_dict['coeffs'] = {}
        band_cursor=0
        # Reflectance columns hold good bands only, so band_cursor tracks the
        # column while band_num walks the full band list.
        # NOTE(review): '~band' assumes numpy bool_ elements (logical not);
        # with plain Python bools, ~True == -2 (truthy) -- confirm dtype.
        for band_num,band in enumerate(bad_bands):
            if ~band:
                topo_dict['coeffs'][band_num] = calc_c(used_reflectance_samples[:,band_cursor],used_cos_i,
                                                       fit_type=topo_dict['c_fit_type'])
                band_cursor+=1
    else:
        # Too few usable samples: a huge C makes the SCS+C correction factor
        # approach 1, i.e. effectively a no-op correction.
        topo_dict['coeffs'] = {}
        band_cursor=0
        for band_num,band in enumerate(bad_bands):
            if ~band:
                topo_dict['coeffs'][band_num] = 100000.0
                band_cursor+=1
    # Share the fitted coefficients with every file's sub-dictionary.
    for h5_name in sample_dict.keys():
        sub_dict = sample_dict[h5_name]
        sub_dict["topo_dict"] = topo_dict
def apply_topo_scsc(sample_dict):
    """Apply the SCS+C correction to each file's reflectance samples in place.

    For each band: refl *= (cos(slope)*cos(solar_zn) + C) / (cos(i) + C),
    applied only to pixels that pass the apply_mask thresholds.
    """
    for h5_name in sample_dict.keys():
        data_dict = sample_dict[h5_name]
        topo_dict = data_dict["topo_dict"]
        slope_samples = data_dict['slope_samples']
        c1 = np.cos(slope_samples) * np.cos(data_dict['set_solar_zn'])
        cosine_i = data_dict['cosine_i_samples']
        ndi_list = data_dict['ndi_samples']
        refl_samples = data_dict['reflectance_samples']
        # One C per good band; relies on dict insertion order matching the
        # reflectance column order established when the coeffs were fitted.
        C_arr = np.array(list(topo_dict['coeffs'].values()))
        mask = np.ones(ndi_list.shape).astype(bool)
        # mask order in the config matters here: NDI, slope, cosine(i).
        mask &= (ndi_list >= float(topo_dict['apply_mask'][0][1]['min'])) & (ndi_list <= float(topo_dict['apply_mask'][0][1]['max']))
        mask &= (slope_samples >= float(topo_dict['apply_mask'][1][1]['min'])) & (slope_samples <= float(topo_dict['apply_mask'][1][1]['max']))
        mask &= (cosine_i >= float(topo_dict['apply_mask'][2][1]['min'])) & (cosine_i <= float(topo_dict['apply_mask'][2][1]['max']))
        for band_order in range(refl_samples.shape[1]):
            band = np.copy(refl_samples[:,band_order])
            correction_factor = (c1 + C_arr[band_order])/(cosine_i + C_arr[band_order])
            band[mask] = band[mask]*correction_factor[mask]
            refl_samples[:,band_order]=band
if __name__== "__main__":
    main()

View File

@ -0,0 +1,150 @@
import json
import os
import warnings
import sys
import numpy as np
import hytools as ht
from hytools.io.envi import *
from hytools.topo import load_topo_precomputed
from hytools.brdf import load_brdf_precomputed
from hytools.glint import set_glint_parameters_single
from hytools.masks import mask_create
warnings.filterwarnings("ignore")
np.seterr(divide='ignore', invalid='ignore')
def main():
    """Apply precomputed topo/BRDF/glint corrections to one image and export it.

    CLI args:
        sys.argv[1]: JSON config file path.
        sys.argv[2]: index of the image in config_dict['input_files'].
    """
    config_file = sys.argv[1]
    image_order = int(sys.argv[2])
    with open(config_file, 'r') as outfile:
        config_dict = json.load(outfile)
    image = config_dict["input_files"][image_order]
    actor = ht.HyTools()
    if config_dict['file_type'] == 'envi':
        anc_file = config_dict["anc_files"][image]
        actor.read_file(image,config_dict['file_type'],anc_file)
    elif config_dict['file_type'] == 'neon':
        actor.read_file(image,config_dict['file_type'])
    actor.create_bad_bands(config_dict['bad_bands'])
    # Only precomputed coefficients are supported in this script; anything
    # else aborts before any export happens.
    for correction in config_dict["corrections"]:
        if correction =='topo':
            if config_dict['topo']['type'] == 'precomputed':
                print("Using precomputed topographic coefficients.")
                load_topo_precomputed(actor,config_dict['topo'])
                actor.corrections.append('topo')
            else:
                print('Only precomputed topographic coefficients are accepted. Quit.')
                return
        elif correction == 'brdf':
            if config_dict['brdf']['type'] == 'precomputed':
                print("Using precomputed BRDF coefficients.")
                load_brdf_precomputed(actor,config_dict['brdf'])
                actor.corrections.append('brdf')
            else:
                print('Only precomputed BRDF coefficients are accepted. Quit.')
                return
        elif correction == 'glint':
            set_glint_parameters_single(actor, config_dict)
    if config_dict['export']['image']:
        print("Exporting corrected image.")
        apply_corrections_single(actor,config_dict)
def apply_corrections_single(hy_obj,config_dict):
    '''Apply the loaded corrections to the image and export it as ENVI.

    Exports either all wavelengths (optionally spectrally resampled) or the
    configured wavelength subset, then optionally writes a per-correction
    mask image alongside.
    '''
    header_dict = hy_obj.get_header()
    header_dict['data ignore value'] = hy_obj.no_data
    header_dict['data type'] = 4
    output_name = config_dict['export']['output_dir']
    output_name += os.path.splitext(os.path.basename(hy_obj.file_name))[0]
    output_name += "_%s" % config_dict['export']["suffix"]
    #Export all wavelengths
    if len(config_dict['export']['subset_waves']) == 0:
        if config_dict["resample"] == True:
            hy_obj.resampler = config_dict['resampler']
            waves= hy_obj.resampler['out_waves']
        else:
            waves = hy_obj.wavelengths
        header_dict['bands'] = len(waves)
        header_dict['wavelength'] = waves
        writer = WriteENVI(output_name,header_dict)
        # Line-by-line streaming keeps memory bounded for large images.
        iterator = hy_obj.iterate(by='line', corrections=hy_obj.corrections,
                                  resample=config_dict['resample'])
        while not iterator.complete:
            line = iterator.read_next()
            writer.write_line(line,iterator.current_line)
        writer.close()
    #Export subset of wavelengths
    else:
        waves = config_dict['export']['subset_waves']
        bands = [hy_obj.wave_to_band(x) for x in waves]
        # Replace requested wavelengths with the actual band centers used.
        waves = [round(hy_obj.wavelengths[x],2) for x in bands]
        header_dict['bands'] = len(bands)
        header_dict['wavelength'] = waves
        writer = WriteENVI(output_name,header_dict)
        for b,band_num in enumerate(bands):
            band = hy_obj.get_band(band_num,
                                   corrections=hy_obj.corrections)
            writer.write_band(band, b)
        writer.close()
    #Export masks
    if (config_dict['export']['masks']) and (len(config_dict["corrections"]) > 0):
        masks = []
        mask_names = []
        # One mask band per (correction, apply_mask entry) pair.
        for correction in config_dict["corrections"]:
            for mask_type in getattr(hy_obj,correction)['apply_mask']:
                mask_names.append(correction + '_' + mask_type[0])
                masks.append(mask_create(hy_obj, [mask_type]))
        header_dict['data type'] = 1
        header_dict['bands'] = len(masks)
        header_dict['band names'] = mask_names
        header_dict['samples'] = hy_obj.columns
        header_dict['lines'] = hy_obj.lines
        header_dict['wavelength'] = []
        header_dict['fwhm'] = []
        header_dict['wavelength units'] = ''
        header_dict['data ignore value'] = 255
        output_name = config_dict['export']['output_dir']
        output_name += os.path.splitext(os.path.basename(hy_obj.file_name))[0]
        output_name += "_%s_mask" % config_dict['export']["suffix"]
        writer = WriteENVI(output_name,header_dict)
        for band_num,mask in enumerate(masks):
            mask = mask.astype(int)
            # 255 marks no-data pixels, matching 'data ignore value' above.
            mask[~hy_obj.mask['no_data']] = 255
            writer.write_band(mask,band_num)
        del masks
if __name__== "__main__":
    main()

View File

@ -0,0 +1,116 @@
import json
import os
import warnings
import sys
import numpy as np
import h5py
import hytools as ht
from hytools.io.envi import *
from hytools.topo import calc_topo_coeffs_single
from hytools.brdf import calc_brdf_coeffs_pre
from hytools.glint import set_glint_parameters
from hytools.masks import mask_create
warnings.filterwarnings("ignore")
np.seterr(divide='ignore', invalid='ignore')
def main():
    """Extract BRDF/topo sample pixels from one flightline and save them to H5.

    CLI args:
        sys.argv[1]: JSON config file path.
        sys.argv[2]: index of the flightline in config_dict['input_files'].
        sys.argv[3]: 1 = group-topo mode (no per-line topo correction applied
            to the samples), 0 = single-line mode.
    """
    config_file = sys.argv[1]
    flightline_index = int(sys.argv[2])
    group_topo_bool = bool(int(sys.argv[3]))
    print("For Group Topo (no correction applied to samples)",group_topo_bool)
    with open(config_file, 'r') as outfile:
        config_dict = json.load(outfile)
    image = config_dict["input_files"][flightline_index]
    Ht_Obj = ht.HyTools()
    if config_dict['file_type'] == 'envi':
        anc_files = config_dict["anc_files"]
        Ht_Obj.read_file(image,config_dict['file_type'],anc_files[image])
    elif config_dict['file_type'] == 'neon':
        Ht_Obj.read_file(image,config_dict['file_type'])
    elif config_dict['file_type'] == 'ncav' or config_dict['file_type'] == 'emit':
        anc_files = config_dict["anc_files"]
        Ht_Obj.read_file(image,config_dict['file_type'],anc_files[image])
    Ht_Obj.create_bad_bands(config_dict['bad_bands'])
    # Skip mostly-water lines: require enough valid pixels with NDI > 0.001.
    non_water_count = np.count_nonzero(Ht_Obj.ndi()[Ht_Obj.mask['no_data']]>0.001)
    if non_water_count<50:
        print("Not enough ground pixels... exit")
        return
    for correction in config_dict["corrections"]:
        if correction =='topo':
            if group_topo_bool is False: # get single line topo coeffs
                calc_topo_coeffs_single(Ht_Obj,config_dict['topo'])
        elif correction == 'brdf':
            data_dict=calc_brdf_coeffs_pre(Ht_Obj,config_dict)
            print(f"{data_dict['kernel_samples'].shape[0]} pixels extracted.")
            export_h5(Ht_Obj,config_dict['export'],data_dict,group_topo_bool)
    # Per-line topo coefficients are only exported in single-line mode.
    if (config_dict['export']['coeffs'] and len(config_dict["corrections"]) > 0) and group_topo_bool is False:
        print("Exporting correction coefficients.")
        export_coeffs_topo(Ht_Obj,config_dict['export'])
def export_h5(hy_obj, export_dict, obj_dict, topo_bool):
    """Save sampled kernels, reflectance and terrain data to an H5 file.

    The file is named *_pretopo_sample.h5 in group-topo mode and
    *_prebrdf_sample.h5 otherwise.
    """
    stem = os.path.splitext(os.path.basename(hy_obj.file_name))[0]
    stage = "pretopo" if topo_bool else "prebrdf"
    out_filename = f"{export_dict['output_dir']}{stem}_{stage}_sample.h5"
    with h5py.File(out_filename, "w") as h5_obj:
        kernels = h5_obj.create_dataset("kernels_samples", data=obj_dict["kernel_samples"]) #chunks=(50, 50), compression="gzip"
        h5_obj.create_dataset("reflectance_samples", data=obj_dict["reflectance_samples"])
        h5_obj.create_dataset("wavelengths", data=obj_dict["used_band"])
        kernels.attrs['set_solar_zn'] = obj_dict['set_solar_zn']
        kernels.attrs['kernels_names'] = '["Volume","Geometry"]'
        kernels.attrs['Solar Zenith Unit'] = "Radians"
        h5_obj.create_dataset("image_wavelengths", data=np.array(hy_obj.wavelengths))
        h5_obj.create_dataset("bad_bands", data=np.array(hy_obj.bad_bands))
        slope = h5_obj.create_dataset("slope_samples", data=obj_dict["slope_samples"])
        slope.attrs['Slope Unit'] = "Radians"
        h5_obj.create_dataset("cosine_i_samples", data=obj_dict["cos_i_samples"])
    print(f"{out_filename} saved.")
def export_coeffs_topo(hy_obj,export_dict):
    '''Export topographic correction coefficients to file.

    Writes hy_obj.topo as JSON to
    <output_dir><image stem>_topo_coeffs_<suffix>_chtc.json.

    Args:
        hy_obj: object with `corrections`, `file_name` and `topo` attributes.
        export_dict (dict): export config with 'output_dir' and 'suffix'.
    '''
    for correction in hy_obj.corrections:
        # Only 'topo' passes this guard, so the coefficient source is always
        # hy_obj.topo; the old glint/brdf branches below were unreachable and
        # have been removed.
        if not correction == 'topo':
            continue
        coeff_file = export_dict['output_dir']
        coeff_file += os.path.splitext(os.path.basename(hy_obj.file_name))[0]
        coeff_file += "_%s_coeffs_%s_chtc.json" % (correction,export_dict["suffix"])
        with open(coeff_file, 'w') as outfile:
            json.dump(hy_obj.topo,outfile)
if __name__== "__main__":
    main()

View File

@ -0,0 +1,100 @@
import json
import os
import warnings
import sys
import numpy as np
import h5py
import hytools as ht
from hytools.io.envi import *
from hytools.topo import calc_topo_coeffs_single
from hytools.brdf import calc_brdf_coeffs_pre
from hytools.glint import set_glint_parameters
from hytools.masks import mask_create
warnings.filterwarnings("ignore")
np.seterr(divide='ignore', invalid='ignore')
def main():
    """Extract BRDF sample pixels from one flightline and save them to H5.

    CLI args:
        sys.argv[1]: JSON config file path.
        sys.argv[2]: index of the flightline in config_dict['input_files'].
    """
    config_file = sys.argv[1]
    flightline_index = int(sys.argv[2])
    with open(config_file, 'r') as outfile:
        config_dict = json.load(outfile)
    image = config_dict["input_files"][flightline_index]
    Ht_Obj = ht.HyTools()
    if config_dict['file_type'] == 'envi':
        anc_files = config_dict["anc_files"]
        Ht_Obj.read_file(image,config_dict['file_type'],anc_files[image])
    elif config_dict['file_type'] == 'neon':
        Ht_Obj.read_file(image,config_dict['file_type'])
    Ht_Obj.create_bad_bands(config_dict['bad_bands'])
    for correction in config_dict["corrections"]:
        if correction =='topo':
            # Per-line topographic coefficients, stored on Ht_Obj.
            calc_topo_coeffs_single(Ht_Obj,config_dict['topo'])
        elif correction == 'brdf':
            data_dict=calc_brdf_coeffs_pre(Ht_Obj,config_dict)
            print(f"{data_dict['kernel_samples'].shape[0]} pixels extracted.")
            export_h5(Ht_Obj,config_dict['export'],data_dict)
    if config_dict['export']['coeffs'] and len(config_dict["corrections"]) > 0:
        print("Exporting correction coefficients.")
        export_coeffs_topo(Ht_Obj,config_dict['export'])
def export_h5(hy_obj, export_dict, obj_dict):
    """Save sampled kernels, reflectance and wavelength info to a prebrdf H5."""
    stem = os.path.splitext(os.path.basename(hy_obj.file_name))[0]
    out_filename = f"{export_dict['output_dir']}{stem}_prebrdf_sample.h5"
    with h5py.File(out_filename, "w") as h5_obj:
        kernels = h5_obj.create_dataset("kernels_samples", data=obj_dict["kernel_samples"]) #compression="gzip"
        h5_obj.create_dataset("reflectance_samples", data=obj_dict["reflectance_samples"])
        h5_obj.create_dataset("wavelengths", data=obj_dict["used_band"])
        kernels.attrs['set_solar_zn'] = obj_dict['set_solar_zn']
        kernels.attrs['kernels_names'] = '["Volume","Geometry"]'
        h5_obj.create_dataset("image_wavelengths", data=np.array(hy_obj.wavelengths))
        h5_obj.create_dataset("bad_bands", data=np.array(hy_obj.bad_bands))
    print(f"{out_filename} saved.")
def export_coeffs_topo(hy_obj,export_dict):
    '''Export topographic correction coefficients to file.

    Writes hy_obj.topo as JSON to
    <output_dir><image stem>_topo_coeffs_<suffix>_chtc.json.

    Args:
        hy_obj: object with `corrections`, `file_name` and `topo` attributes.
        export_dict (dict): export config with 'output_dir' and 'suffix'.
    '''
    for correction in hy_obj.corrections:
        # Only 'topo' passes this guard, so the coefficient source is always
        # hy_obj.topo; the old glint/brdf branches below were unreachable and
        # have been removed.
        if not (correction == 'topo'):
            continue
        coeff_file = export_dict['output_dir']
        coeff_file += os.path.splitext(os.path.basename(hy_obj.file_name))[0]
        coeff_file += "_%s_coeffs_%s_chtc.json" % (correction,export_dict["suffix"])
        with open(coeff_file, 'w') as outfile:
            json.dump(hy_obj.topo,outfile)
if __name__== "__main__":
    main()

View File

@ -0,0 +1,35 @@
import sys, os
import multiprocessing
import subprocess, json
exec_str="python ../no_ray/image_correct_export_image.py "
def run_command(command):
    """Echo the command line, then execute it through the shell."""
    print(command)
    subprocess.run(command, shell=True)
def main():
    """Run image export for the first N input files in parallel.

    CLI args:
        sys.argv[1]: JSON config file path.
        sys.argv[2]: number of images (prefix of input_files) to export.
    """
    config_file = sys.argv[1]
    total_count = int(sys.argv[2])
    with open(config_file, 'r') as outfile:
        config_dict = json.load(outfile)
    if total_count > len(config_dict["input_files"]):
        print("Out of upper bound")
        return
    # Leave one core free, but never request fewer than one worker:
    # os.cpu_count()-1 is 0 on single-core hosts (and min(...,0) when
    # total_count is 0), which multiprocessing.Pool rejects with ValueError.
    worker_count = max(1, min(os.cpu_count() - 1, total_count))
    commands = [f"{exec_str} {config_file} {order}" for order in range(total_count)]
    pool = multiprocessing.Pool(processes=worker_count)
    pool.map(run_command, commands)
    pool.close()
    pool.join() # Wait for all subprocesses to finish
    print('All image export is done.')
if __name__== "__main__":
    main()

View File

@ -0,0 +1,42 @@
import sys, os
import multiprocessing
import subprocess, json
exec_str="python ../no_ray/image_correct_get_sample_chtc.py "
merge_str="python ../no_ray/image_correct_combine_sample_chtc.py {} {}"
def run_command(command):
    """Echo the command line, then execute it through the shell."""
    print(command)
    subprocess.run(command, shell=True)
def main():
    """Run raw-sample extraction for N input files in parallel, then merge.

    CLI args:
        sys.argv[1]: JSON config file path.
        sys.argv[2]: number of images (prefix of input_files) to process.
    """
    config_file = sys.argv[1]
    total_count = int(sys.argv[2])
    with open(config_file, 'r') as outfile:
        config_dict = json.load(outfile)
    h5_folder=config_dict["export"]["output_dir"]
    if total_count > len(config_dict["input_files"]):
        print("Out of upper bound")
        return
    # Leave one core free, but never request fewer than one worker:
    # os.cpu_count()-1 is 0 on single-core hosts (and min(...,0) when
    # total_count is 0), which multiprocessing.Pool rejects with ValueError.
    worker_count = max(1, min(os.cpu_count() - 1, total_count))
    pool = multiprocessing.Pool(processes=worker_count)
    commands = [f"{exec_str} {config_file} {order}" for order in range(total_count)]
    pool.map(run_command, commands)
    pool.close()
    pool.join() # Wait for all subprocesses to finish
    print('All extractions are done.')
    # Combine the per-line sample H5 files once every extraction finished.
    subprocess.run(merge_str.format(config_file,h5_folder),shell=True)
if __name__== "__main__":
    main()

View File

@ -0,0 +1,86 @@
import sys, os, time
import multiprocessing
import subprocess, json
exec_str="python ../noray/image_correct_get_raw_sample_chtc.py "
topo_group_str = "python ../noray/image_correct_combine_topo_sample_chtc.py {} {} {}"
merge_str="python ../noray/image_correct_combine_sample_chtc.py {} {}"
def parse_group_info(group_dict, full_img_list):
    """Invert an image->subgroup mapping into subgroup->images lookups.

    Args:
        group_dict (dict): maps image name -> subgroup name.
        full_img_list (list): ordered list of image names.

    Returns:
        tuple: (subgroup -> list of image names,
                subgroup -> list of each image's index in full_img_list).
    """
    names_by_group = {}
    indices_by_group = {}
    for img_name in full_img_list:
        subgroup_name = group_dict[img_name]
        names_by_group.setdefault(subgroup_name, []).append(img_name)
        indices_by_group.setdefault(subgroup_name, []).append(full_img_list.index(img_name))
    return names_by_group, indices_by_group
def run_command(command):
    """Execute a shell command line and wait for it to finish."""
    subprocess.run(command, shell=True)
def run_step2(params):
    """Launch the group-TOPO combine script for one finished subgroup."""
    config_file, h5_folder, current_group_name = params
    command = topo_group_str.format(config_file, h5_folder, current_group_name)
    subprocess.run(command, shell=True)
def main():
    """Extract raw topo samples per flightline, then run group TOPO per subgroup.

    Step 1 extracts raw samples for every image in parallel. As soon as every
    image of a subgroup has finished, step 2 (the group-TOPO combine script)
    is launched for that subgroup. Finally the merge script is run.

    CLI args:
        sys.argv[1]: JSON config file path.
        sys.argv[2]: folder holding the sample H5 files.
    """
    config_file = sys.argv[1]
    h5_folder = sys.argv[2]
    with open(config_file, 'r') as outfile:
        config_dict = json.load(outfile)
    image_list = config_dict["input_files"]
    total_count = int(len(image_list))
    subgroup=config_dict['topo']['subgroup']
    group_meta_dict, worker_unfinished=parse_group_info(subgroup,image_list)
    # NOTE(review): os.cpu_count()-1 is 0 on single-core hosts, which
    # multiprocessing.Pool rejects -- confirm the deployment always has >1 core.
    level1_worker_count = min(os.cpu_count()-1,total_count)
    print(level1_worker_count)
    with multiprocessing.Pool(processes=level1_worker_count) as pool:
        subgroup_status_dict={}
        # Submit all step-1 extraction jobs, tracked per subgroup.
        for sub_group_name in group_meta_dict:
            step1_status=[]
            for order_in_full_list in worker_unfinished[sub_group_name]:
                command = f"{exec_str} {config_file} {order_in_full_list} 1"
                step1_status += [pool.apply_async(run_command, args=(command,))]
            subgroup_status_dict[sub_group_name] = step1_status
        level1_total_unfinished = total_count
        while level1_total_unfinished:
            # check for finished subgroup every 10 sec. If the raw pixel extraction is done for a subgroup, start step2-group TOPO.
            temp_unfinished_count=0
            subgroup_list = list(subgroup_status_dict.keys())
            for gp_name in subgroup_list:
                sub_count=sum([not r.ready() for r in subgroup_status_dict[gp_name]])
                if sub_count==0:
                    # Fire-and-forget: step-2 results are never awaited.
                    pool.apply_async(run_step2, args=((config_file,h5_folder,gp_name),))
                    del subgroup_status_dict[gp_name]
                temp_unfinished_count += sub_count
            level1_total_unfinished=temp_unfinished_count
            #print(f"=={level1_total_unfinished}/{total_count} tasks Level 1 pretopo")
            time.sleep(10.0)
        # NOTE(review): the polling loop only tracks step-1 jobs, so step-2
        # tasks may still be running here; leaving the 'with' block calls
        # pool.terminate(), which would kill them -- confirm step-2 jobs are
        # guaranteed to finish before the merge matters.
        print('All extraction is done.')
        subprocess.run(merge_str.format(config_file,h5_folder),shell=True)
if __name__== "__main__":
    main()

View File

@ -0,0 +1,45 @@
import sys, os
import multiprocessing
import subprocess, json
exec_str="python ../no_ray/trait_estimate_inde.py "
def run_command(command):
    """Echo the command line, then execute it through the shell."""
    print(command)
    subprocess.run(command, shell=True)
def main():
    """Run trait estimation for every (image, trait) pair in parallel.

    CLI args:
        sys.argv[1]: JSON config file path.
        sys.argv[2]: number of images (prefix of input_files) to process.
        sys.argv[3]: number of trait models (prefix of trait_models) to apply.
    """
    config_file = sys.argv[1]
    total_img_count = int(sys.argv[2])
    total_trait_count = int(sys.argv[3])
    total_count = total_img_count*total_trait_count
    with open(config_file, 'r') as outfile:
        config_dict = json.load(outfile)
    # BUG FIX: the trait count must be validated against trait_models --
    # it was previously compared against input_files, so an out-of-range
    # trait index slipped through whenever there were enough images.
    if (total_img_count > len(config_dict["input_files"])) or (total_trait_count > len(config_dict["trait_models"])):
        print("Out of upper bound")
        return
    # Leave one core free, but never request fewer than one worker
    # (Pool rejects processes=0). Created only after validation so an
    # early return does not leave an idle pool behind.
    worker_count = max(1, min(os.cpu_count()-1, total_count))
    pool = multiprocessing.Pool(processes=worker_count)
    param_list = []
    for img_i in range(total_img_count):
        for trait_j in range(total_trait_count):
            param_list+=[[img_i,trait_j]]
    commands=[f"{exec_str} {config_file} {param_order[0]} {param_order[1]}" for param_order in param_list]
    pool.map(run_command, commands)
    pool.close()
    pool.join() # Wait for all subprocesses to finish
    print('All traits are done.')
if __name__== "__main__":
    main()

View File

@ -0,0 +1,165 @@
import json
import os
import warnings
import sys
import numpy as np
import hytools as ht
from hytools.io.envi import *
from hytools.masks import mask_dict
warnings.filterwarnings("ignore")
def main():
    """Estimate one trait for one image as a standalone CLI step.

    CLI args:
        sys.argv[1]: JSON config file path.
        sys.argv[2]: index of the image in config_dict['input_files'].
        sys.argv[3]: index of the trait model in config_dict['trait_models'].
    """
    config_file = sys.argv[1]
    image_order = int(sys.argv[2])
    trait_order = int(sys.argv[3])
    with open(config_file, 'r') as outfile:
        config_dict = json.load(outfile)
    image= config_dict["input_files"][image_order]
    actor = ht.HyTools()
    # Load data
    if config_dict['file_type'] in ('envi','emit','ncav'):
        anc_file = config_dict["anc_files"][image]
        if "glt_files" in config_dict:
            if bool(config_dict["glt_files"]):
                actor.read_file(image,config_dict['file_type'],anc_path=anc_file,glt_path=config_dict["glt_files"][image]) # chunk_glt writing is not supported
            else:
                actor.read_file(image,config_dict['file_type'],anc_path=anc_file)
        else:
            actor.read_file(image,config_dict['file_type'],anc_path=anc_file)
    elif config_dict['file_type'] == 'neon':
        actor.read_file(image,config_dict['file_type'])
    # Loaded here only to print the trait name; the model is re-read inside
    # apply_single_trait_models.
    trait = config_dict['trait_models'][trait_order]
    with open(trait, 'r') as json_file:
        trait_model = json.load(json_file)
    print("\t %s" % trait_model["name"])
    apply_single_trait_models(actor,config_dict,trait_order)
def apply_single_trait_models(hy_obj,config_dict,trait_order):
    '''Apply one trait model to the image and export the result to file.

    Output bands: trait mean, trait std, range_mask, then one band per
    configured custom mask. No-data pixels are set to -9999.
    '''
    hy_obj.create_bad_bands(config_dict['bad_bands'])
    hy_obj.corrections = config_dict['corrections']
    # Load correction coefficients
    if 'topo' in hy_obj.corrections:
        hy_obj.load_coeffs(config_dict['topo'][hy_obj.file_name],'topo')
    if 'brdf' in hy_obj.corrections:
        hy_obj.load_coeffs(config_dict['brdf'][hy_obj.file_name],'brdf')
    hy_obj.resampler['type'] = config_dict["resampling"]['type']
    # Single-element list keeps the structure of the multi-trait original.
    for trait in [config_dict['trait_models'][trait_order]]:
        with open(trait, 'r') as json_file:
            trait_model = json.load(json_file)
            coeffs = np.array(trait_model['model']['coefficients'])
            intercept = np.array(trait_model['model']['intercepts'])
            model_waves = np.array(trait_model['wavelengths'])
        #Check if wavelengths match; resample only when they differ.
        resample = not all(x in hy_obj.wavelengths for x in model_waves)
        if resample:
            hy_obj.resampler['out_waves'] = model_waves
            hy_obj.resampler['out_fwhm'] = trait_model['fwhm']
        else:
            # Column indices of the model wavelengths in the image.
            wave_mask = [np.argwhere(x==hy_obj.wavelengths)[0][0] for x in model_waves]
        # Build trait image file
        header_dict = hy_obj.get_header()
        header_dict['wavelength'] = []
        header_dict['data ignore value'] = -9999
        header_dict['data type'] = 4
        header_dict['trait unit'] = trait_model['units']
        header_dict['band names'] = ["%s_mean" % trait_model["name"],
                                     "%s_std" % trait_model["name"],
                                     'range_mask'] + [mask[0] for mask in config_dict['masks']]
        header_dict['bands'] = len(header_dict['band names'] )
        #Generate masks
        for mask,args in config_dict['masks']:
            mask_function = mask_dict[mask]
            hy_obj.gen_mask(mask_function,mask,args)
        output_name = config_dict['output_dir']
        output_name += os.path.splitext(os.path.basename(hy_obj.file_name))[0] + "_%s" % trait_model["name"]
        writer = WriteENVI(output_name,header_dict)
        # Chunk shape varies by format; NEON uses a 32x32 tiling of the image.
        if config_dict['file_type'] == 'envi' or config_dict['file_type'] == 'emit':
            iterator = hy_obj.iterate(by = 'chunk',
                                      chunk_size = (2,hy_obj.columns),
                                      corrections = hy_obj.corrections,
                                      resample=resample)
        elif config_dict['file_type'] == 'neon':
            iterator = hy_obj.iterate(by = 'chunk',
                                      chunk_size = (int(np.ceil(hy_obj.lines/32)),int(np.ceil(hy_obj.columns/32))),
                                      corrections = hy_obj.corrections,
                                      resample=resample)
        elif config_dict['file_type'] == 'ncav':
            iterator = hy_obj.iterate(by = 'chunk',
                                      chunk_size = (512,512),
                                      corrections = hy_obj.corrections,
                                      resample=resample)
        while not iterator.complete:
            chunk = iterator.read_next()
            if not resample:
                chunk = chunk[:,:,wave_mask]
            trait_est = np.zeros((chunk.shape[0],
                                  chunk.shape[1],
                                  header_dict['bands']))
            # Apply spectrum transforms
            for transform in trait_model['model']["transform"]:
                if transform== "vector": #vnorm
                    norm = np.linalg.norm(chunk,axis=2)
                    chunk = chunk/norm[:,:,np.newaxis]
                if transform == "absorb":
                    chunk = np.log(1/chunk)
                if transform == "mean":
                    mean = chunk.mean(axis=2)
                    chunk = chunk/mean[:,:,np.newaxis]
            # Ensemble prediction: one trait value per coefficient row,
            # then mean/std across the ensemble dimension.
            trait_pred = np.einsum('jkl,ml->jkm',chunk,coeffs, optimize='optimal')
            trait_pred = trait_pred + intercept
            trait_est[:,:,0] = trait_pred.mean(axis=2)
            trait_est[:,:,1] = trait_pred.std(ddof=1,axis=2)
            # Flag predictions outside the model's diagnostic range.
            range_mask = (trait_est[:,:,0] > trait_model["model_diagnostics"]['min']) & \
                         (trait_est[:,:,0] < trait_model["model_diagnostics"]['max'])
            trait_est[:,:,2] = range_mask.astype(int)
            # Subset and assign custom masks
            for i,(mask,args) in enumerate(config_dict['masks']):
                mask = hy_obj.mask[mask][iterator.current_line:iterator.current_line+chunk.shape[0],
                                         iterator.current_column:iterator.current_column+chunk.shape[1]]
                trait_est[:,:,3+i] = mask.astype(int)
            nd_mask = hy_obj.mask['no_data'][iterator.current_line:iterator.current_line+chunk.shape[0],
                                             iterator.current_column:iterator.current_column+chunk.shape[1]]
            trait_est[~nd_mask] = -9999
            writer.write_chunk(trait_est,
                               iterator.current_line,
                               iterator.current_column)
        writer.close()
if __name__== "__main__":
    main()