Initial commit
This commit is contained in:
22
Flexbrdf/hytools/__init__.py
Normal file
22
Flexbrdf/hytools/__init__.py
Normal file
@ -0,0 +1,22 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Initialize hytools
|
||||
"""
|
||||
from .base import HyTools
|
||||
791
Flexbrdf/hytools/base.py
Normal file
791
Flexbrdf/hytools/base.py
Normal file
@ -0,0 +1,791 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Base
|
||||
|
||||
TODO: Add corrections to ndi()
|
||||
|
||||
"""
|
||||
import os
|
||||
import json
|
||||
import numpy as np
|
||||
import h5py
|
||||
import warnings
|
||||
import sys
|
||||
from .io.envi import envi_read_band,envi_read_pixels
|
||||
from .io.envi import envi_read_line,envi_read_column,envi_read_chunk
|
||||
from .io.envi import open_envi,parse_envi_header,envi_header_from_neon,envi_header_from_nc
|
||||
from .io.neon import open_neon
|
||||
from .io.netcdf import open_netcdf
|
||||
from .brdf import apply_brdf_correct
|
||||
from .glint import apply_glint_correct
|
||||
from .brdf.kernels import calc_volume_kernel,calc_geom_kernel
|
||||
from .topo import calc_cosine_i,apply_topo_correct
|
||||
from .transform.resampling import *
|
||||
|
||||
warnings.filterwarnings("ignore")
|
||||
|
||||
class HyTools:
    """HyTools file object.

    Holds a hyperspectral image plus the paths/handles for its ancillary
    datasets, masks and correction coefficients.  Every attribute starts
    as an empty placeholder and is populated later by ``read_file`` and
    ``load_data``.
    """

    def __init__(self):
        """Initialize all attributes to empty placeholder values."""
        # Ancillary dataset paths and loaded ancillary arrays.
        self.anc_path = {}
        self.ancillary = {}
        # Band bookkeeping.
        self.bad_bands = []
        self.bands = None
        self.base_key = None
        self.base_name = None
        # Correction state; a 'type' of None means "not configured".
        self.brdf = {'type': None}
        self.glint = {'type': None}
        self.byte_order = None
        self.columns = None
        self.columns_glt = None
        self.corrections = []
        self.crs = None
        self.data = None
        self.dtype = None
        self.endianness = None
        self.file_name = None
        self.file_type = None
        self.fill_mask = None
        self.fwhm = []
        # Geometry lookup table (GLT) members.
        self.glt_path = {}
        self.glt_x = None
        self.glt_y = None
        self.glt_projection = None
        self.glt_map_info = None
        self.glt_transform = None
        self.hdf_obj = None
        self.interleave = None
        self.lines = None
        self.lines_glt = None
        self.map_info = None
        self.mask = {}
        self.nc4_obj = None
        self.no_data = None
        self.offset = 0
        self.projection = None
        self.transform = None
        self.resampler = {'type': None}
        self.shape = None
        self.topo = {'type': None}
        self.ulx = None
        self.uly = None
        self.wavelength_units = None
        self.wavelengths = []
||||
def read_file(self, file_name, file_type='envi', anc_path=None, ext=False, glt_path=None):
    """Open an image file and populate the object metadata.

    Args:
        file_name (str): Path to the image.
        file_type (str): One of 'envi', 'neon', 'emit' or 'ncav'.
        anc_path (dict, optional): Paths to ancillary datasets.
        ext (bool, optional): Extension flag forwarded to ``open_envi``.
        glt_path (dict, optional): Paths to geometry lookup tables.
    """
    self.file_name = file_name
    self.file_type = file_type

    # Dispatch to the format-specific reader.
    if file_type == 'envi':
        open_envi(self, anc_path, ext, glt_path)
    elif file_type == "neon":
        open_neon(self)
    elif file_type == "emit":
        open_netcdf(self, 'EMIT', anc_path, glt_path)
    elif file_type == "ncav":
        open_netcdf(self, 'AV', anc_path, glt_path)
    else:
        # NOTE(review): execution falls through after this message; the
        # mask creation below will then fail because self.bands is None.
        print("Unrecognized file type.")

    # Create a no data mask: use a red band when enough bands exist,
    # otherwise the first band.
    if self.bands > 11:
        self.mask['no_data'] = self.get_wave(660) > 0.5 * self.no_data
    else:
        self.mask['no_data'] = self.get_band(0) > 0.5 * self.no_data

    # Match the image no-data mask with the ancillary no-data mask.
    if anc_path:
        if file_type == 'envi':
            anc_img = HyTools()
            anc_img.read_file(self.anc_path['solar_zn'][0], 'envi')
            if not np.array_equal(self.mask['no_data'], anc_img.mask['no_data']):
                print('Reflectance and ancillary no data extents do not match, combining no data masks.')
                self.mask['no_data'] &= anc_img.mask['no_data']
            anc_img.close_data()
            del anc_img
        elif file_type == 'emit' and not self.anc_path['slope'][0].endswith('.nc'):
            anc_img = HyTools()
            anc_img.read_file(self.anc_path['slope'][0], 'envi')
            if not np.array_equal(self.mask['no_data'], anc_img.mask['no_data']):
                print('Reflectance and ancillary no data extents do not match, combining no data masks.')
                self.mask['no_data'] &= anc_img.mask['no_data']
            anc_img.close_data()
            del anc_img

    self.base_name = os.path.basename(os.path.splitext(self.file_name)[0])
||||
def create_bad_bands(self, bad_regions):
    """Create a bad-bands mask from wavelength regions.

    NOTE(review): despite the original docstring ("Good: True, bad: False"),
    the stored mask is True for wavelengths INSIDE a bad region — callers
    use ``~self.bad_bands`` to select good bands.

    Args:
        bad_regions (list of lists): Start/end wavelengths of regions
            considered bad, in data units. ex: [[350,400], ..., [2450,2500]].

    Returns:
        None.
    """
    self.bad_bands = np.array(
        [any(start <= wavelength <= end for start, end in bad_regions)
         for wavelength in self.wavelengths])
||||
def load_data(self, mode='r'):
    """Open the underlying data object for access.

    Args:
        mode (str, optional): File read mode (ENVI memmap only). Defaults to 'r'.

    Returns:
        None.
    """
    if self.file_type == "envi":
        self.data = np.memmap(self.file_name, dtype=self.dtype, mode=mode,
                              shape=self.shape, offset=self.offset)
        if bool(self.glt_path):
            self.glt_x = self.load_glt('glt_x')
            self.glt_y = self.load_glt('glt_y')
            if self.glt_x is not None:
                # GLT entries <= 0 mark fill pixels.
                self.fill_mask = self.glt_x > 0
                self.glt_x = self.glt_x.astype(np.int16)
                self.glt_y = self.glt_y.astype(np.int16)

    elif self.file_type == "neon":
        self.hdf_obj = h5py.File(self.file_name, 'r')
        self.data = self.hdf_obj[self.base_key]["Reflectance"]["Reflectance_Data"]

    elif self.file_type == "emit":
        # EMIT netCDF4 files are readable through the h5py interface.
        self.nc4_obj = h5py.File(self.file_name, 'r')
        self.data = self.nc4_obj[self.base_key]
        self.glt_x = self.load_glt('glt_x').astype(np.int16)
        self.glt_y = self.load_glt('glt_y').astype(np.int16)
        self.fill_mask = self.glt_x > 0

    elif self.file_type == "ncav":
        self.nc4_obj = h5py.File(self.file_name, 'r')
        self.data = self.nc4_obj[self.base_key][self.base_key]
        self.glt_x = self.load_glt('glt_x')
        self.glt_y = self.load_glt('glt_y')
        if self.glt_x is not None:
            self.fill_mask = self.glt_x > 0
            self.glt_x = self.glt_x.astype(np.int16)
            self.glt_y = self.glt_y.astype(np.int16)
||||
def close_data(self):
    """Close the data object and release file handles."""
    if self.file_type == "envi":
        # Dropping the last reference closes the memmap.
        del self.data
    elif self.file_type == "neon":
        self.hdf_obj.close()
        self.hdf_obj = None
    elif self.file_type in ("emit", "ncav"):
        self.nc4_obj.close()
        self.nc4_obj = None
    self.data = None
||||
def iterate(self, by, chunk_size=(100, 100), corrections=[], resample=False):
    """Create a data iterator over this image.

    Args:
        by (str): Dimension along which to iterate:
            "line", "column", "band" or "chunk".
        chunk_size (tuple, optional): Two dimensional chunk size (Y, X);
            applies only when "chunk" is selected. Defaults to (100, 100).
        corrections (list): Corrections applied to each subset, in order.
        resample (bool): Spectrally resample each subset.

    Returns:
        Iterator: Data iterator object.
    """
    return Iterator(self, by, chunk_size,
                    corrections=corrections, resample=resample)
||||
def wave_to_band(self, wave):
    """Return the index of the band closest to *wave*.

    Args:
        wave (float): Wavelength in image wavelength units.

    Returns:
        int or None: Band index, or None when *wave* lies outside the
        image wavelength range.
    """
    if (wave > self.wavelengths.max()) | (wave < self.wavelengths.min()):
        print("Input wavelength outside image range!")
        return None
    return np.argmin(np.abs(self.wavelengths - wave))
||||
def get_band(self, index, corrections=[], mask=None):
    """Return a single band image.

    Args:
        index (int): Zero-indexed band index.
        corrections (list): Corrections to apply, applied in order listed.
        mask (str): Named mask; when given, only masked values are returned.

    Returns:
        numpy.ndarray: A 2D (lines x columns) array, or 1D if masked.
    """
    self.load_data()
    if self.file_type in ("neon", "emit"):
        band = self.data[:, :, index]
    elif self.file_type == "ncav":
        # NetCDF AVIRIS stores the band axis first.
        band = self.data[index, :, :]
    elif self.file_type == "envi":
        band = envi_read_band(self.data, index, self.interleave)
        if self.endianness != sys.byteorder:
            # Normalize to host byte order.
            band = band.byteswap()
    self.close_data()

    band = self.correct(band, 'band', index, corrections)

    if mask:
        band = band[self.mask[mask]]
    return band
||||
def get_wave(self, wave, corrections=[], mask=None):
    """Return the band image closest to the input wavelength.

    If not an exact match, the nearest wavelength's band is returned.

    Args:
        wave (float): Wavelength in image units.
        corrections (list): Corrections to apply, in order listed.
        mask (str): Named mask; when given, masked values are returned.

    Returns:
        numpy.ndarray or None: Band image (lines, columns), or None when
        *wave* is outside the wavelength range.
    """
    if (wave > self.wavelengths.max()) | (wave < self.wavelengths.min()):
        print("Input wavelength outside wavelength range!")
        return None
    band_num = np.argmin(np.abs(self.wavelengths - wave))
    return self.get_band(band_num, corrections=corrections, mask=mask)
||||
def get_pixels(self, lines, columns, corrections=[], resample=False):
    """Return the spectra of a set of pixels.

    Args:
        lines (list): Zero-indexed line indices.
        columns (list): Zero-indexed column indices.
        corrections (list): Corrections to apply, in order listed.
        resample (bool): Spectrally resample the output.

    Returns:
        numpy.ndarray: Pixel array (pixels, bands).
    """
    self.load_data()
    if self.file_type in ["neon", "emit"]:
        pixels = np.array([self.data[line, column, :]
                           for line, column in zip(lines, columns)])
    elif self.file_type == "ncav":
        # Band axis first for ncav; collect per-pixel spectra.
        pixels = np.array([self.data[:, line, column]
                           for line, column in zip(lines, columns)])
    elif self.file_type == "envi":
        pixels = envi_read_pixels(self.data, lines, columns, self.interleave)
        if self.endianness != sys.byteorder:
            pixels = pixels.byteswap()
    self.close_data()

    pixels = self.correct(pixels, 'pixels', [lines, columns], corrections)

    if resample:
        # Drop bad bands and run the resampler on a temporary 3D view.
        pixels = pixels[np.newaxis, :, ~self.bad_bands]
        pixels = apply_resampler(self, pixels)[0, :, :]
    return pixels
||||
def get_line(self, index, corrections=[], resample=False):
    """Return a single image line.

    Args:
        index (int): Zero-indexed line index.
        corrections (list): Corrections to apply, in order listed.
        resample (bool): Spectrally resample the output.

    Returns:
        numpy.ndarray: Line array (columns, bands).
    """
    self.load_data()
    if self.file_type in ("neon", "emit"):
        line = self.data[index, :, :]
    elif self.file_type == "ncav":
        # Reorder (bands, columns) -> (columns, bands).
        line = np.moveaxis(self.data[:, index, :], 0, 1)
    elif self.file_type == "envi":
        line = envi_read_line(self.data, index, self.interleave)
        if self.endianness != sys.byteorder:
            line = line.byteswap()
    self.close_data()

    line = self.correct(line, 'line', index, corrections)

    if resample:
        line = line[np.newaxis, :, ~self.bad_bands]
        line = apply_resampler(self, line)[0, :, :]
    return line
||||
def get_column(self, index, corrections=[], resample=False):
    """Return a single image column.

    Args:
        index (int): Zero-indexed column index.
        corrections (list): Corrections to apply, in order listed.
        resample (bool): Spectrally resample the output.

    Returns:
        numpy.ndarray: Column array (lines, bands).
    """
    self.load_data()
    if self.file_type in ("neon", "emit"):
        column = self.data[:, index, :]
    elif self.file_type == "ncav":
        # Reorder (bands, lines) -> (lines, bands).
        column = np.moveaxis(self.data[:, :, index], 0, 1)
    elif self.file_type == "envi":
        column = envi_read_column(self.data, index, self.interleave)
        if self.endianness != sys.byteorder:
            column = column.byteswap()
    self.close_data()

    column = self.correct(column, 'column', index, corrections)

    if resample:
        column = column[:, np.newaxis, ~self.bad_bands]
        column = apply_resampler(self, column)[:, 0, :]
    return column
||||
def get_chunk(self, col_start, col_end, line_start, line_end, corrections=[], resample=False):
    """Return a rectangular spatial subset of the image.

    Args:
        col_start (int): Chunk starting column.
        col_end (int): Noninclusive chunk ending column index.
        line_start (int): Chunk starting line.
        line_end (int): Noninclusive chunk ending line index.
        corrections (list): Corrections to apply, in order listed.
        resample (bool): Resample wavelengths. Defaults to False.

    Returns:
        numpy.ndarray: Chunk array (line_end-line_start, col_end-col_start, bands).
    """
    self.load_data()
    if self.file_type in ("neon", "emit"):
        chunk = self.data[line_start:line_end, col_start:col_end, :]
    elif self.file_type == "ncav":
        # Move the leading band axis to the end.
        chunk = np.moveaxis(self.data[:, line_start:line_end, col_start:col_end], 0, -1)
    elif self.file_type == "envi":
        chunk = envi_read_chunk(self.data, col_start, col_end,
                                line_start, line_end, self.interleave)
        if self.endianness != sys.byteorder:
            chunk = chunk.byteswap()
    self.close_data()

    chunk = self.correct(chunk, 'chunk',
                         [col_start, col_end, line_start, line_end],
                         corrections)
    if resample:
        chunk = apply_resampler(self, chunk[:, :, ~self.bad_bands])
    return chunk
||||
def correct(self, data, dimension, index, corrections):
    """Apply the listed corrections to *data*, in order.

    Args:
        data (numpy.ndarray): Data subset to correct.
        dimension (str): Subset kind ('band', 'line', 'column', 'chunk', 'pixels').
        index: Index of the subset along *dimension*.
        corrections (list): Names from {'topo', 'brdf', 'glint'};
            unrecognized names are silently skipped.

    Returns:
        numpy.ndarray: Corrected data.
    """
    for name in corrections:
        if name == 'topo':
            data = apply_topo_correct(self, data, dimension, index)
        elif name == 'brdf':
            data = apply_brdf_correct(self, data, dimension, index)
        elif name == 'glint':
            data = apply_glint_correct(self, data, dimension, index)
    return data
||||
def get_anc(self, anc, radians=True, mask=None):
    """Read an ancillary dataset to memory.

    Improvement over the original: the identical ENVI-ancillary read
    sequence was duplicated three times; it is factored into one nested
    helper (_read_envi_anc) with no behavior change.

    Args:
        anc (str): Ancillary dataset name.
        radians (bool, optional): Convert angular measures to radians. Defaults to True.
        mask (str, optional): Named mask; when given, masked values are returned.

    Returns:
        numpy.ndarray or None: Ancillary data; None when no ancillary
        paths were supplied for netCDF imagery.
    """
    angular_anc = ['slope', 'sensor_az', 'sensor_zn', 'aspect', 'solar_zn', 'solar_az']

    def _read_envi_anc(path, band):
        # Open an external ENVI ancillary image, pull one band and
        # normalize its endianness to the host byte order.
        anc_img = HyTools()
        anc_img.read_file(path, 'envi')
        anc_img.load_data()
        data = np.copy(anc_img.get_band(band))
        if anc_img.endianness != sys.byteorder:
            data = data.byteswap()
        anc_img.close_data()
        return data

    if self.file_type == "envi":
        anc_data = _read_envi_anc(self.anc_path[anc][0], self.anc_path[anc][1])

    elif self.file_type == "neon":
        keys = self.anc_path[anc]
        if len(keys) == 2 and isinstance(keys[-1], int):
            # Path points at an external ENVI ancillary file.
            anc_data = _read_envi_anc(keys[0], keys[1])
        else:
            # Walk the HDF5 metadata tree down the key list.
            hdf_obj = h5py.File(self.file_name, 'r')
            metadata = hdf_obj[self.base_key]["Reflectance"]["Metadata"]
            for key in keys:
                metadata = metadata[key]
            anc_data = metadata[()]
            hdf_obj.close()

        # Make scalar solar geometry into a 2D array.
        if anc in ['solar_zn', 'solar_az']:
            anc_data = np.ones((self.lines, self.columns)) * anc_data

    elif self.file_type in ["emit", "ncav"]:
        if bool(self.anc_path) == False:
            return None
        if (self.anc_path[anc][0]).endswith('nc'):
            nc4_anc_obj = h5py.File(self.anc_path[anc][0], 'r')
            if self.file_type == "emit":
                anc_data = nc4_anc_obj['obs'][()][:, :, self.anc_path[anc][1]]
            elif self.file_type == "ncav":
                anc_data_raw = nc4_anc_obj['observation_parameters'][self.anc_path[anc][1]][()]
                # some values in the GLT are negative for unknown reason
                obs_glt_x = np.abs(nc4_anc_obj['geolocation_lookup_table']['sample'][()])
                obs_glt_y = np.abs(nc4_anc_obj['geolocation_lookup_table']['line'][()])
                anc_data = np.zeros(obs_glt_x.shape)
                # Pixels without a source location get the fill value (-9999).
                anc_data[obs_glt_x <= 0] = nc4_anc_obj['observation_parameters'][self.anc_path[anc][1]].attrs['_FillValue'][0]
                data_mask_to_fill = obs_glt_x > 0
                anc_data[data_mask_to_fill] = anc_data_raw[
                    obs_glt_y[data_mask_to_fill].astype(int) - 1,
                    obs_glt_x[data_mask_to_fill].astype(int) - 1]
            nc4_anc_obj.close()
        else:
            anc_data = _read_envi_anc(self.anc_path[anc][0], self.anc_path[anc][1])

    if radians and (anc in angular_anc):
        anc_data = np.radians(anc_data)

    if mask:
        anc_data = anc_data[self.mask[mask]]

    return anc_data
||||
def load_anc(self, anc, radians=True):
    """Read an ancillary dataset and cache it in ``self.ancillary``.

    Bug fix: the original called ``self.get_anc(self, anc, radians)``,
    passing the instance twice through a bound method, which raised a
    TypeError on every call.

    Args:
        anc (str): Ancillary dataset name.
        radians (bool, optional): Convert angular measures to radians.
    """
    self.ancillary[anc] = self.get_anc(anc, radians)
||||
def load_glt(self, glt):
    """Load one geometry lookup table (GLT) band.

    Args:
        glt (str): GLT name, 'glt_x' or 'glt_y'.

    Returns:
        numpy.ndarray or None: GLT data, or None when no path is set.
    """
    path = self.glt_path[glt][0]
    band = self.glt_path[glt][1]

    # GLT stored inside the already-open netCDF file.
    if path in ['location', 'geolocation_lookup_table']:
        return self.nc4_obj[path][band][()]
    if path is None:
        return None

    # GLT stored as a separate ENVI image.
    glt_img = HyTools()
    glt_img.read_file(path, 'envi')
    glt_img.load_data()
    glt_data = np.copy(glt_img.get_band(band))
    if glt_img.endianness != sys.byteorder:
        glt_data = glt_data.byteswap()
    glt_img.close_data()
    return glt_data
||||
def volume_kernel(self, kernel):
    """Calculate the volume scattering kernel for the image geometry.

    Args:
        kernel (str): Volume kernel name.

    Returns:
        numpy.ndarray: Volume kernel image.
    """
    return calc_volume_kernel(self.get_anc('solar_az'), self.get_anc('solar_zn'),
                              self.get_anc('sensor_az'), self.get_anc('sensor_zn'),
                              kernel)
||||
def geom_kernel(self, kernel, b_r=1., h_b=2.):
    """Calculate the geometric scattering kernel for the image geometry.

    (The original docstring said "volume scattering"; this method
    computes the geometric kernel.)

    Args:
        kernel (str): Geometric kernel name.
        b_r (float): Object height-to-radius shape ratio. Defaults to 1.
        h_b (float): Object height-to-center ratio. Defaults to 2.

    Returns:
        numpy.ndarray: Geometric kernel image.
    """
    return calc_geom_kernel(self.get_anc('solar_az'), self.get_anc('solar_zn'),
                            self.get_anc('sensor_az'), self.get_anc('sensor_zn'),
                            kernel, b_r=b_r, h_b=h_b)
||||
def cosine_i(self):
    """Calculate the cosine of the solar incidence angle.

    Assumes paths to the required ancillary datasets (solar geometry,
    slope and aspect) have been specified.

    Returns:
        numpy.ndarray: Cosine of the solar incidence angle.
    """
    return calc_cosine_i(self.get_anc('solar_zn'), self.get_anc('solar_az'),
                         self.get_anc('aspect'), self.get_anc('slope'))
||||
def ndi(self, wave1=850, wave2=660, mask=None):
    """Calculate a normalized difference index (defaults to NDVI).

    Assumes input wavelengths are in nanometers.

    Args:
        wave1 (int, float): Wavelength of first band. Defaults to 850.
        wave2 (int, float): Wavelength of second band. Defaults to 660.
        mask (str): Named mask; when given, masked values are returned.

    Returns:
        numpy.ndarray: Index image, or masked 1D values.
    """
    band1 = self.get_wave(wave1)
    band2 = self.get_wave(wave2)
    ndi = (band1 - band2) / (band1 + band2)

    if mask:
        ndi = ndi[self.mask[mask]]
    return ndi
||||
def set_mask(self, mask, name):
    """Store a precomputed mask under *name*.

    (The original docstring was copied from ``gen_mask``; this method
    simply stores an already-built mask array.)

    Args:
        mask (numpy.ndarray): Boolean mask array.
        name (str): Mask name.
    """
    self.mask[name] = mask
||||
def gen_mask(self, masker, name, args=None):
    """Generate and store a mask using a masking function.

    Args:
        masker (callable): Function taking a HyTools object (and
            optionally *args*) and returning a mask.
        name (str): Mask name.
        args: Optional extra argument forwarded to *masker*; note a
            falsy value selects the single-argument call form.
    """
    self.mask[name] = masker(self, args) if args else masker(self)
||||
def do(self, function, args=None):
    """Apply *function* to this object and return the result.

    Args:
        function (callable): Function taking the object (and optionally *args*).
        args: Optional extra argument; a falsy value (None, 0, '')
            selects the single-argument call form.

    Returns:
        The return value of *function*.
    """
    if args:
        return function(self, args)
    return function(self)
||||
def get_header(self, warp_glt=False):
    """Return an ENVI-style header dictionary for the image.

    Args:
        warp_glt (bool): Describe the GLT-warped geometry instead of the
            native geometry.

    Returns:
        dict: Header dictionary.
    """
    if self.file_type == "neon":
        header_dict = envi_header_from_neon(self)
    elif self.file_type in ("emit", "ncav"):
        header_dict = envi_header_from_nc(self, warp_glt=warp_glt)
    elif self.file_type == "envi":
        # NOTE(review): relies on self.header_file being set by the ENVI
        # reader — confirm open_envi assigns it.
        header_dict = parse_envi_header(self.header_file)
        header_dict["projection"] = self.projection
        # Prefer the explicit coordinate system string when present.
        if "coordinate system string" in header_dict.keys():
            header_dict["projection"] = header_dict["coordinate system string"]
        header_dict['transform'] = self.transform
    if warp_glt:
        # Overwrite geometry fields with the GLT-warped values.
        header_dict["samples"] = self.columns_glt
        header_dict["lines"] = self.lines_glt
        header_dict["map info"] = self.glt_map_info
        header_dict["projection"] = self.glt_projection
        header_dict['transform'] = self.glt_transform
    return header_dict
||||
def load_coeffs(self, coeff_file, kind):
    """Load precomputed correction coefficients from a JSON file.

    Args:
        coeff_file (str): Path to the JSON coefficient file.
        kind (str): Coefficient kind, 'brdf' or 'topo'.
    """
    with open(coeff_file, 'r') as infile:
        if kind == 'brdf':
            self.brdf = json.load(infile, cls=Decoder)
        elif kind == 'topo':
            self.topo = json.load(infile, cls=Decoder)
||||
class Iterator:
    """Iterator over image subsets (lines, columns, bands or chunks).

    Note: this iterator cannot be pickled while reading HDF files.
    """

    def __init__(self, hy_obj, by, chunk_size=None, corrections=[], resample=False):
        """
        Args:
            hy_obj (HyTools): Populated HyTools file object.
            by (str): Slice dimension: "line", "column", "band", "chunk"
                or "glt_line".
            chunk_size (tuple, optional): (Y, X) chunk size. Defaults to None.
            corrections (list): Corrections applied to each subset.
            resample (bool): Spectrally resample each subset.
        """
        self.chunk_size = chunk_size
        self.by = by
        # Counters start at -1 so the first read_next() yields index 0.
        self.current_column = -1
        self.current_line = -1
        self.current_band = -1
        self.complete = False
        self.hy_obj = hy_obj
        self.resample = resample
        self.corrections = corrections

    def read_next(self):
        """Return the next line/column/band/chunk.

        Sets ``self.complete`` to True when the final subset is returned.
        """
        if self.by == "line":
            self.current_line += 1
            if self.current_line == self.hy_obj.lines - 1:
                self.complete = True
            subset = self.hy_obj.get_line(self.current_line,
                                          corrections=self.corrections,
                                          resample=self.resample)

        elif self.by == "column":
            self.current_column += 1
            if self.current_column == self.hy_obj.columns - 1:
                self.complete = True
            subset = self.hy_obj.get_column(self.current_column,
                                            corrections=self.corrections,
                                            resample=self.resample)

        elif self.by == "band":
            self.current_band += 1
            if self.current_band == self.hy_obj.bands - 1:
                self.complete = True
            subset = self.hy_obj.get_band(self.current_band,
                                          corrections=self.corrections)

        elif self.by == "chunk":
            # Advance in column-major chunk order, wrapping to the next
            # row of chunks at the right edge of the image.
            if self.current_column == -1:
                self.current_column += 1
                self.current_line += 1
            else:
                self.current_column += self.chunk_size[1]
                if self.current_column >= self.hy_obj.columns:
                    self.current_column = 0
                    self.current_line += self.chunk_size[0]

            # Clip the chunk window to the image extent.
            y_start = self.current_line
            y_end = min(self.current_line + self.chunk_size[0], self.hy_obj.lines)
            x_start = self.current_column
            x_end = min(self.current_column + self.chunk_size[1], self.hy_obj.columns)

            if (y_end == self.hy_obj.lines) and (x_end == self.hy_obj.columns):
                self.complete = True

            subset = self.hy_obj.get_chunk(x_start, x_end, y_start, y_end,
                                           corrections=self.corrections,
                                           resample=self.resample)

        elif self.by == "glt_line":
            self.current_line += 1
            if self.current_line == self.hy_obj.lines - 1:
                self.complete = True
            valid_mask = self.hy_obj.fill_mask[self.current_line, :]

            # Fetch only the valid pixels; GLT indices are one-based.
            valid_subset = self.hy_obj.get_pixels(
                self.hy_obj.glt_y[self.current_line, valid_mask] - 1,
                self.hy_obj.glt_x[self.current_line, valid_mask] - 1,
                corrections=self.corrections,
                resample=self.resample)

            # Fill pixels get -9999.
            subset = np.full((self.hy_obj.columns_glt, valid_subset.shape[1]), -9999).astype(np.float32)
            subset[valid_mask, :] = valid_subset
        return subset

    def reset(self):
        """Reset all counters so iteration can restart."""
        self.current_column = -1
        self.current_line = -1
        self.current_band = -1
        self.complete = False
||||
class Decoder(json.JSONDecoder):
|
||||
def decode(self, s):
|
||||
result = super().decode(s) # result = super(Decoder, self).decode(s) for Python 2.x
|
||||
return self._decode(result)
|
||||
|
||||
def _decode(self, o):
|
||||
if isinstance(o, str):
|
||||
try:
|
||||
return int(o)
|
||||
except ValueError:
|
||||
return o
|
||||
elif isinstance(o, dict):
|
||||
return {k: self._decode(v) for k, v in o.items()}
|
||||
elif isinstance(o, list):
|
||||
return [self._decode(v) for v in o]
|
||||
else:
|
||||
return o
|
||||
19
Flexbrdf/hytools/brdf/__init__.py
Normal file
19
Flexbrdf/hytools/brdf/__init__.py
Normal file
@ -0,0 +1,19 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: 高光谱图像处理库
|
||||
版权所有 (C) 2021 威斯康星大学
|
||||
|
||||
作者:Adam Chlus, Zhiwei Ye, Philip Townsend。
|
||||
|
||||
本程序是自由软件:您可以根据自由软件基金会发布的 GNU 通用公共许可证第 3 版条款重新分发和/或修改它。
|
||||
|
||||
本程序的分发是希望它会有用,但没有任何保证;甚至没有对适销性或特定用途适用性的暗示保证。有关更多详细信息,请参阅 GNU 通用公共许可证。
|
||||
|
||||
您应该已经随本程序收到了 GNU 通用公共许可证副本。如果没有,请参见 <https://www.gnu.org/licenses/>。
|
||||
|
||||
:mod:`hytools.correction` 模块包含用于图像校正的函数。
|
||||
"""
|
||||
from .brdf import *
|
||||
from .kernels import *
|
||||
from .universal import *
|
||||
from .flex import *
|
||||
246
Flexbrdf/hytools/brdf/brdf.py
Normal file
246
Flexbrdf/hytools/brdf/brdf.py
Normal file
@ -0,0 +1,246 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
BRDF correction
|
||||
"""
|
||||
import json
|
||||
import ray
|
||||
import numpy as np
|
||||
import h5py
|
||||
from .universal import universal_brdf,apply_universal
|
||||
from .flex import flex_brdf,apply_flex,ndvi_stratify, get_kernel_samples, ndvi_bins, get_band_samples
|
||||
from ..masks import mask_create
|
||||
from ..misc import set_brdf, update_brdf, progbar
|
||||
|
||||
def apply_brdf_correct(hy_obj, data, dimension, index):
    '''Apply the configured BRDF correction in memory.

    Args:
        hy_obj (HyTools): Image object; ``hy_obj.brdf['type']`` selects
            the correction method.
        data (numpy.ndarray): Data subset to correct.
        dimension (str): Subset kind ('band', 'line', 'column', 'chunk', 'pixels').
        index: Index of the subset along *dimension*.

    Returns:
        numpy.ndarray: Corrected data; unchanged for unrecognized types.
    '''
    brdf_type = hy_obj.brdf['type']
    if brdf_type == 'universal':
        data = apply_universal(hy_obj, data, dimension, index)
    elif brdf_type == 'flex':
        data = apply_flex(hy_obj, data, dimension, index)
    elif brdf_type == 'local':
        print('Local/class BRDF correction....under development')
    return data
|
||||
def load_brdf_precomputed(hy_obj, brdf_dict):
    """Load precomputed BRDF coefficients for this image from JSON.

    Args:
        hy_obj (HyTools): Image object; its file_name keys the coeff map.
        brdf_dict (dict): Contains 'coeff_files', a mapping of image
            file names to JSON coefficient file paths.
    """
    coeff_path = brdf_dict['coeff_files'][hy_obj.file_name]
    with open(coeff_path, 'r') as infile:
        hy_obj.brdf = json.load(infile)
||||
def set_solar_zn(hy_obj):
    """Store the scene-mean solar zenith as the BRDF normalization angle.

    The mean is taken over pixels in the 'no_data' mask (valid pixels)
    and written to ``hy_obj.brdf['solar_zn_norm_radians']``.

    Returns:
        Mean solar zenith angle (radians) over valid pixels.
    """
    solar_zn = hy_obj.get_anc('solar_zn')
    mean_zn = np.mean(solar_zn[hy_obj.mask['no_data']])
    hy_obj.brdf['solar_zn_norm_radians'] = float(mean_zn)
    return mean_zn
|
||||
def ndvi_stratify_samples(combine_dict):
    '''Create an NDVI-bin stratification mask for pooled samples.

    Samples outside every bin keep class 0.  The int8 class mask is
    stored in ``combine_dict['ndvi_classes']``.

    Args:
        combine_dict (dict): Must contain 'ndi_samples' (NDVI values)
            and 'brdf_dict' with a 'bins' mapping of
            bin number -> (start, end].
    '''
    ndvi = combine_dict["ndi_samples"]
    classes = np.zeros(ndvi.shape)

    # Bins are half-open on the left: start < ndvi <= end.
    for bin_num in combine_dict['brdf_dict']['bins']:
        start, end = combine_dict['brdf_dict']['bins'][bin_num]
        classes[(ndvi > start) & (ndvi <= end)] = bin_num

    combine_dict['ndvi_classes'] = classes.astype(np.int8)
||||
def get_topo_var_samples_pre(hy_obj):
    '''Return grouped topographic-correction variables for sample pixels.

    Run after ndvi_stratify(): samples are the pixels assigned to any
    NDVI class (class != 0).

    Returns:
        tuple: (slope samples, cosine-of-incidence samples).
    '''
    slope = hy_obj.get_anc('slope')
    cosine_i = hy_obj.cosine_i()
    sample_ind = hy_obj.ancillary['ndvi_classes'] != 0
    return slope[sample_ind], cosine_i[sample_ind]
|
||||
def calc_flex_single_post(combine_data_dict, brdf_dict, load_reflectance_mode):
    """Fit flex-BRDF kernel coefficients per NDVI bin from pooled samples.

    Args:
        combine_data_dict (dict): Pooled samples; expects keys
            'ndi_samples', 'kernels_samples', 'reflectance_samples'
            and 'bad_bands'.
        brdf_dict (dict): BRDF configuration ('bin_type', 'bins', ...).
        load_reflectance_mode (int): 0 when reflectance samples are held
            in memory; otherwise 'reflectance_samples' lists HDF5 files
            to read per band.

    The fitted coefficients are written to
    ``combine_data_dict['brdf_dict']['coeffs']``.
    """
    combine_data_dict["brdf_dict"] = brdf_dict
    bad_bands = combine_data_dict['bad_bands']

    # Determine binning dimensions and create the class mask.
    if brdf_dict['bin_type'] == 'dynamic':
        bins = ndvi_bins(combine_data_dict["ndi_samples"], brdf_dict)
        # Update the number of bins after dynamic binning.
        combine_data_dict["brdf_dict"]['num_bins'] = len(bins)
    else:
        bins = brdf_dict['bins']

    # Bin numbers are one-based.
    combine_data_dict['brdf_dict']['bins'] = {k: v for (k, v) in enumerate(bins, start=1)}

    ndvi_stratify_samples(combine_data_dict)

    coeffs = {}
    good_band_count = 0
    for band_num, band in enumerate(bad_bands):
        if ~band:
            coeffs[band_num] = {}

            if load_reflectance_mode == 0:
                # Reflectance samples already pooled in memory.
                band_samples = combine_data_dict["reflectance_samples"][:, good_band_count]
            else:
                # Samples spread across per-image HDF5 files; concatenate.
                combine_refl = []
                for h5name in combine_data_dict["reflectance_samples"]:
                    h5_obj = h5py.File(h5name, "r")
                    combine_refl += [h5_obj["reflectance_samples"][()][:, good_band_count]]
                    h5_obj.close()
                band_samples = np.concatenate(combine_refl, axis=0)

            band_coeffs = []
            for bin_num in combine_data_dict['brdf_dict']['bins']:
                bin_mask = (combine_data_dict["ndvi_classes"] == bin_num)
                # Design matrix: kernel samples plus an intercept column.
                X = np.concatenate([combine_data_dict["kernels_samples"],
                                    np.ones((bin_mask.shape[0], 1))], axis=1)[bin_mask]
                y = band_samples[bin_mask]
                band_coeffs.append(np.linalg.lstsq(X, y, rcond=-1)[0].flatten().tolist())
            coeffs[band_num] = band_coeffs

            progbar(np.sum(~bad_bands[:band_num + 1]), np.sum(~bad_bands))
            good_band_count += 1

    print('\n')

    combine_data_dict["brdf_dict"]['coeffs'] = coeffs
|
||||
def calc_flex_single_pre(hy_obj,brdf_dict):
    ''' Gather samples from a single image for later BRDF coefficient
    estimation.

    Returns:
        tuple: (kernel samples [volume, geometric columns only],
        reflectance samples (pixels x good bands), wavelengths of the
        sampled good bands, slope samples, cosine(i) samples).
    '''
    hy_obj.brdf['coeffs'] ={}

    # Determine bin dimensions and create the class mask
    if hy_obj.brdf['bin_type'] == 'dynamic':
        bins = ndvi_bins(hy_obj.ndi()[hy_obj.mask['no_data']],brdf_dict)
        # Update the number of bins
        hy_obj.brdf['num_bins'] = len(bins)
    else:
        bins = brdf_dict['bins']

    # Bin numbers are 1-based; 0 marks unbinned pixels
    hy_obj.brdf['bins'] = {k:v for (k,v) in enumerate(bins,start=1)}
    ndvi_stratify(hy_obj)
    kernel_samples= get_kernel_samples(hy_obj)

    # Sample every good band at the stratified pixels
    refl_samples_list = []
    used_band = []
    for band_num,band in enumerate(hy_obj.bad_bands):
        if ~band:
            band_samples = hy_obj.do(get_band_samples, {'band_num':band_num})
            refl_samples_list+=[band_samples[:,None]]
            used_band+=[hy_obj.wavelengths[band_num]]

    refl_samples = np.concatenate(refl_samples_list,axis=1)

    slope_samples, cos_i_samples = get_topo_var_samples_pre(hy_obj) # slope and cosine(i)

    # Only the volume and geometric kernel columns are returned
    return kernel_samples[:,:2], refl_samples, used_band, slope_samples, cos_i_samples
|
||||
|
||||
|
||||
def calc_brdf_coeffs(actors,config_dict):
    """Coordinate BRDF coefficient calculation across a set of ray actors.

    Either loads precomputed coefficients or configures every actor
    (BRDF dictionary, calc mask, solar zenith normalisation) and runs the
    configured correction type ('universal' or 'flex').  Always appends
    'brdf' to each actor's correction list.
    """

    brdf_dict = config_dict['brdf']

    if brdf_dict['type'] == 'precomputed':
        print("使用预计算的 BRDF 系数")
        _ = ray.get([a.do.remote(load_brdf_precomputed,
                                 config_dict['brdf']) for a in actors])
    else:
        # Set the BRDF configuration dictionary on every actor
        _ = ray.get([a.do.remote(set_brdf,brdf_dict) for a in actors])

        # Create the mask of pixels used for coefficient calculation
        _ = ray.get([a.gen_mask.remote(mask_create,'calc_brdf',
                                       brdf_dict['calc_mask']) for a in actors])

        # Determine the normalisation solar zenith angle
        if isinstance(brdf_dict['solar_zn_type'],str):

            # Assign each line its mean solar zenith angle
            solar_zn_samples = ray.get([a.do.remote(set_solar_zn) for a in actors])
            # Calculate and assign the scene-wide mean solar zenith angle
            if brdf_dict['solar_zn_type'] == 'scene':
                scene_mean = float(np.mean(solar_zn_samples))
                _ = ray.get([a.do.remote(update_brdf,{'key':'solar_zn_norm_radians',
                                                      'value': scene_mean }) for a in actors])
                print("场景平均太阳天顶角 : %s 度" % round(np.degrees(scene_mean),3))

        elif isinstance(brdf_dict['solar_zn_type'],float):
            # A fixed normalisation angle (radians) was supplied directly
            _ = ray.get([a.do.remote(update_brdf,{'key':'solar_zn_norm_radians',
                                                  'value': brdf_dict['solar_zn_type']}) for a in actors])
        else:
            print('无法识别的太阳天顶角归一化')

        print("计算 BRDF 系数")
        if brdf_dict['type']== 'universal':
            universal_brdf(actors,config_dict)
        elif brdf_dict['type'] == 'flex':
            flex_brdf(actors,config_dict)
        elif brdf_dict['type'] == 'local':
            print('本地/类别 BRDF 校正....开发中')

    # Record that the BRDF correction is active on every actor
    _ = ray.get([a.do.remote(lambda x: x.corrections.append('brdf')) for a in actors])
|
||||
|
||||
def calc_brdf_coeffs_pre(hy_obj,config_dict):
    """Configure a single image and gather BRDF fitting samples.

    Serial (non-ray) counterpart of calc_brdf_coeffs: sets the BRDF
    dictionary, the solar zenith normalisation and the calc mask, then
    collects the per-image samples needed for a later grouped fit.

    Returns:
        dict: sampled inputs for coefficient estimation.
        NOTE(review): implicitly returns None in the 'precomputed'
        branch — confirm callers handle this.
    """

    brdf_dict = config_dict['brdf']

    if brdf_dict['type'] == 'precomputed':
        print("使用预计算的 BRDF 系数")
        load_brdf_precomputed(hy_obj,config_dict['brdf'])

    else:
        # Set the BRDF configuration dictionary

        set_brdf(hy_obj,brdf_dict)
        set_solar_zn_0 = set_solar_zn(hy_obj)

        # Create the mask of pixels used for coefficient calculation
        hy_obj.gen_mask(mask_create,'calc_brdf',brdf_dict['calc_mask'])

        kernel_samples, reflectance_samples, used_band, slope_samples, cos_i_samples = calc_flex_single_pre(hy_obj,brdf_dict)

        hy_obj.corrections.append('brdf')

        return {
            "set_solar_zn":set_solar_zn_0,
            #"ndvi":hy_obj.ndi(),
            "kernel_samples":kernel_samples,
            "reflectance_samples":reflectance_samples,
            "used_band":used_band,
            "slope_samples":slope_samples,
            "cos_i_samples":cos_i_samples,
        }
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
363
Flexbrdf/hytools/brdf/flex.py
Normal file
363
Flexbrdf/hytools/brdf/flex.py
Normal file
@ -0,0 +1,363 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
本模块包含应用经验性 BRDF 校正的函数,如下论文所述:
|
||||
|
||||
方程和常数可在以下论文中找到:
|
||||
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
import ray
|
||||
from scipy.interpolate import interp1d
|
||||
from .kernels import calc_volume_kernel,calc_geom_kernel
|
||||
from ..masks import mask_create
|
||||
from ..misc import progbar, pairwise
|
||||
from ..misc import update_brdf
|
||||
from ..plotting import flex_diagno_plot
|
||||
|
||||
def flex_brdf(actors,config_dict):
    """Estimate flex BRDF coefficients for a set of image actors.

    Coefficients are fit jointly across flightlines when
    ``brdf['grouped']`` is set, otherwise per flightline.  Diagnostic
    plots are exported when requested in the configuration.
    """
    settings = config_dict['brdf']

    if settings['grouped']:
        calc_flex_group(actors, settings)
    else:
        _ = ray.get([actor.do.remote(calc_flex_single, settings)
                     for actor in actors])

    if settings.get("diagnostic_plots"):
        print('Exporting diagnostic plots.')
        _ = ray.get([actor.do.remote(flex_diagno_plot, config_dict)
                     for actor in actors])
|
||||
|
||||
|
||||
def ndvi_stratify(hy_obj):
    '''Create an NDVI-binned stratification mask for one image.

    Pixels are labelled with the NDVI bin (from ``hy_obj.brdf['bins']``)
    they fall into, pixels outside the calc mask are zeroed, and the
    labelled pixels are randomly thinned down to ``brdf['sample_perc']``
    of their original count.  The int8 result is stored in
    ``hy_obj.ancillary['ndvi_classes']``.
    '''
    ndvi = hy_obj.ndi()
    labels = np.zeros((hy_obj.lines, hy_obj.columns))

    for bin_num, (low, high) in hy_obj.brdf['bins'].items():
        labels[(ndvi > low) & (ndvi <= high)] = bin_num

    # Pixels excluded from coefficient calculation carry no label
    labels[~hy_obj.mask['calc_brdf']] = 0

    # Randomly drop a (1 - sample_perc) share of the labelled pixels
    labelled_idx = np.array(np.where(labels != 0)).T
    drop_count = int(len(labelled_idx) * (1 - hy_obj.brdf['sample_perc']))
    dropped = labelled_idx[np.random.choice(range(len(labelled_idx)),
                                            drop_count, replace=False)].T
    labels[dropped[0], dropped[1]] = 0

    hy_obj.ancillary['ndvi_classes'] = labels.astype(np.int8)
|
||||
|
||||
|
||||
def ndvi_2nd_split(ndvi_bins_dynamic, all_ndvi_array, ndvi_bin_range_thres=0.15):
    ''' Split overly wide NDVI bins at their median NDVI value.

    NOTE(review): the ``ndvi_bin_range_thres`` argument is immediately
    overwritten by a width threshold derived from the bin count, so any
    passed-in value has no effect — confirm whether this is intended.

    Returns:
        list: sorted bin break points, including any new medians.
    '''
    # Width threshold shrinks linearly as the number of bins grows
    ndvi_bin_range_thres = -0.015625 * (len(ndvi_bins_dynamic) - 1) + 0.43125

    edges = np.array(ndvi_bins_dynamic)
    widths = edges[1:] - edges[:-1]
    wide_bins = np.argwhere(widths >= ndvi_bin_range_thres).ravel()

    extra_breaks = []
    if wide_bins.shape[0] > 0:
        for bin_id in wide_bins:
            # The bin's median NDVI becomes the new break point
            in_bin = (all_ndvi_array > ndvi_bins_dynamic[bin_id]) & \
                     (all_ndvi_array < ndvi_bins_dynamic[bin_id + 1])
            extra_breaks += [np.median(all_ndvi_array[in_bin]).astype(np.float64)]

    # New, sorted list of bin break points
    return sorted(ndvi_bins_dynamic + extra_breaks)
|
||||
|
||||
def ndvi_bins(ndvi,brdf_dict):
    '''Compute dynamic NDVI bin ranges from the NDVI distribution.

    Interior break points are taken at evenly spaced percentiles of the
    positive NDVI values, bounded by the configured min/max edges, then
    overly wide bins are split a second time at their median NDVI.

    Returns:
        list: [start, end] pairs, one per bin.
    '''
    perc_min = brdf_dict['ndvi_perc_min']
    perc_max = brdf_dict['ndvi_perc_max']
    perc_step = (perc_max - perc_min + 1) / (brdf_dict['num_bins'] - 1)

    percentiles = np.arange(perc_min, perc_max + 1, perc_step)
    dynamic_breaks = np.percentile(ndvi[ndvi > 0], percentiles)

    edges = [brdf_dict['ndvi_bin_min']]
    edges += dynamic_breaks.tolist()
    edges += [brdf_dict['ndvi_bin_max']]
    edges = sorted(set(edges))

    # Second pass: split bins that are too wide
    edges = ndvi_2nd_split(edges, ndvi)

    return [[start, end] for start, end in pairwise(edges)]
|
||||
|
||||
def get_kernel_samples(hy_obj):
    '''Compute BRDF kernels and sample them at the stratified pixels.

    Returns:
        np.ndarray: rows are sample pixels (non-zero NDVI class),
        columns are [volume kernel, geometric kernel, 1, NDVI class].
    '''
    geom_kernel = hy_obj.geom_kernel(hy_obj.brdf['geometric'],
                                     b_r=hy_obj.brdf["b/r"],
                                     h_b=hy_obj.brdf["h/b"])
    vol_kernel = hy_obj.volume_kernel(hy_obj.brdf['volume'])

    sampled = hy_obj.ancillary['ndvi_classes'] != 0
    classes = hy_obj.ancillary['ndvi_classes'][sampled]

    return np.vstack([vol_kernel[sampled], geom_kernel[sampled],
                      np.ones(classes.shape), classes]).T
|
||||
|
||||
def get_band_samples(hy_obj,args):
    '''Return corrected band values at the NDVI-stratified sample pixels.

    Args:
        args (dict): must contain 'band_num', the band index to sample.
    '''
    band_values = hy_obj.get_band(args['band_num'],
                                  corrections=hy_obj.corrections)
    sampled = hy_obj.ancillary['ndvi_classes'] != 0
    return band_values[sampled]
|
||||
|
||||
def calc_flex_single(hy_obj,brdf_dict):
    ''' Calculate flex BRDF coefficients for a single image.

    For each good band and each NDVI bin, fits reflectance against the
    [volume, geometric, intercept] kernel design matrix; the per-bin
    coefficient lists are stored per band in hy_obj.brdf['coeffs'].
    '''
    hy_obj.brdf['coeffs'] ={}

    # Determine bin dimensions and create the class mask
    if hy_obj.brdf['bin_type'] == 'dynamic':
        bins = ndvi_bins(hy_obj.ndi()[hy_obj.mask['no_data']],brdf_dict)
        # Update the number of bins
        hy_obj.brdf['num_bins'] = len(bins)
    else:
        bins = brdf_dict['bins']

    # Bin numbers are 1-based; 0 marks unbinned pixels
    hy_obj.brdf['bins'] = {k:v for (k,v) in enumerate(bins,start=1)}
    ndvi_stratify(hy_obj)
    kernel_samples= get_kernel_samples(hy_obj)

    # Calculate coefficients for each band and class
    for band_num,band in enumerate(hy_obj.bad_bands):
        if ~band:
            hy_obj.brdf['coeffs'][band_num] = {}
            band_samples = hy_obj.do(get_band_samples, {'band_num':band_num})
            coeffs= []

            for bin_num in hy_obj.brdf['bins']:
                # Column 3 of the kernel samples holds the NDVI class label
                bin_mask = (kernel_samples[:,3] == bin_num)
                X = kernel_samples[:,:3][bin_mask]
                y = band_samples[bin_mask]
                coeffs.append(np.linalg.lstsq(X, y,rcond=-1)[0].flatten().tolist())
            hy_obj.brdf['coeffs'][band_num] = coeffs
|
||||
|
||||
def calc_flex_group(actors,brdf_dict):
    ''' Calculate flex BRDF coefficients for a group of images.

    NDVI values, kernel samples and band samples are pooled across all
    ray actors; one set of per-band, per-bin coefficients is fit and
    pushed back to every actor.
    '''
    # Aggregate NDVI values from the images
    ndvi = ray.get([a.ndi.remote(mask = 'no_data') for a in actors])
    ndvi = np.concatenate([n.flatten() for n in ndvi])

    # Determine bin dimensions
    if brdf_dict['bin_type'] == 'dynamic':
        bins = ndvi_bins(ndvi,brdf_dict)
        # Update the number of bins
        _ = ray.get([a.do.remote(update_brdf,{'key':'num_bins',
                                              'value': len(bins)}) for a in actors])
    else:
        bins = brdf_dict['bins']

    # Bin numbers are 1-based; 0 marks unbinned pixels
    bins = {k:v for (k,v) in enumerate(bins,start=1)}

    # Push the bin definitions to every actor
    _ = ray.get([a.do.remote(update_brdf,{'key':'bins',
                                          'value': bins}) for a in actors])

    # Create NDVI class masks and sample the kernels
    _ = ray.get([a.do.remote(ndvi_stratify) for a in actors])
    kernel_samples = ray.get([a.do.remote(get_kernel_samples) for a in actors])
    kernel_samples = np.concatenate(kernel_samples)

    bad_bands = ray.get(actors[0].do.remote(lambda x: x.bad_bands))
    coeffs = {}

    for band_num,band in enumerate(bad_bands):
        if ~band:
            coeffs[band_num] = {}
            band_samples = ray.get([a.do.remote(get_band_samples,
                                                {'band_num':band_num}) for a in actors])
            band_samples = np.concatenate(band_samples)
            band_coeffs= []
            for bin_num in bins:
                # Column 3 of the kernel samples holds the NDVI class label
                bin_mask = (kernel_samples[:,3] == bin_num)
                X = kernel_samples[:,:3][bin_mask]
                y = band_samples[bin_mask]
                band_coeffs.append(np.linalg.lstsq(X, y,rcond=-1)[0].flatten().tolist())
            coeffs[band_num] = band_coeffs
            progbar(np.sum(~bad_bands[:band_num+1]),np.sum(~bad_bands))

    print('\n')

    # Push the fitted coefficients to every actor
    _ = ray.get([a.do.remote(update_brdf,{'key':'coeffs',
                                          'value': coeffs}) for a in actors])
|
||||
|
||||
def apply_flex(hy_obj,data,dimension,index):
    ''' Apply flex BRDF correction to a slice of the data.

    Per-band BRDF coefficients are interpolated over NDVI (from the
    per-bin fits), a modelled BRDF and its nadir-normalised counterpart
    are built from the cached kernels, and the data is multiplied by
    their ratio.  Pixels outside the apply mask get a factor of 1.

    Args:
        hy_obj : HyTools class object.
        data (np.ndarray): Data slice.
        dimension (str): One of 'line', 'column', 'band', 'chunk', 'pixels'.
        index (int,list): Data index into the chosen dimension.

    Returns:
        data (np.ndarray): BRDF corrected data slice (float32).
    '''

    # Lazily compute and cache the kernels, masks, NDVI map and per-band
    # coefficient interpolators in hy_obj.ancillary on first use.
    if 'k_vol' not in hy_obj.ancillary:
        hy_obj.ancillary['k_vol'] = hy_obj.volume_kernel(hy_obj.brdf['volume'])
    if 'k_geom' not in hy_obj.ancillary:
        hy_obj.ancillary['k_geom'] = hy_obj.geom_kernel(hy_obj.brdf['geometric'],
                                                        b_r=hy_obj.brdf["b/r"],
                                                        h_b =hy_obj.brdf["h/b"])
    if ('k_vol_nadir' not in hy_obj.ancillary) or ('k_geom_nadir' not in hy_obj.ancillary):
        # Nadir kernels: zero view geometry, normalised solar zenith angle
        solar_zn = hy_obj.brdf['solar_zn_norm_radians'] * np.ones((hy_obj.lines,hy_obj.columns))
        hy_obj.ancillary['k_vol_nadir'] = calc_volume_kernel(0,solar_zn,
                                                             0,0,hy_obj.brdf['volume'])
        hy_obj.ancillary['k_geom_nadir'] = calc_geom_kernel(0,solar_zn,
                                                            0,0,hy_obj.brdf['geometric'],
                                                            b_r=hy_obj.brdf["b/r"],
                                                            h_b =hy_obj.brdf["h/b"])
    if 'apply_brdf' not in hy_obj.mask:
        hy_obj.gen_mask(mask_create,'apply_brdf',hy_obj.brdf['apply_mask'])

    if 'ndvi' not in hy_obj.ancillary:
        hy_obj.ancillary['ndvi'] = hy_obj.ndi()

    if 'interpolators' not in hy_obj.ancillary:
        # Interpolate each band's per-bin coefficients over bin-center NDVI
        bin_centers = np.mean(list(hy_obj.brdf['bins'].values()),axis=1)
        hy_obj.ancillary['interpolators'] ={}

        # Generate the interpolators
        for i in hy_obj.brdf['coeffs']:
            coeffs= np.array(hy_obj.brdf['coeffs'][i])
            interpolator = interp1d(bin_centers, coeffs, kind = hy_obj.brdf['interp_kind'],
                                    axis=0,fill_value="extrapolate")
            hy_obj.ancillary['interpolators'][int(i)] = interpolator

    # Convert to float
    data = data.astype(np.float32)
    brdf_bands = [int(x) for x in hy_obj.ancillary['interpolators']]
    if dimension == 'line':
        # index= 3000
        # data = hy_obj.get_line(3000)

        # Per band: coefficients interpolated at each pixel's NDVI;
        # columns of interpolated_f are [fvol, fgeo, fiso]
        interpolated_f = [hy_obj.ancillary['interpolators'][band](hy_obj.ancillary['ndvi'][index,:]) for band in brdf_bands]
        interpolated_f = np.array(interpolated_f)
        fvol, fgeo, fiso = interpolated_f[:,:,0], interpolated_f[:,:,1], interpolated_f[:,:,2]

        brdf = fvol*hy_obj.ancillary['k_vol'][index,:]
        brdf+= fgeo*hy_obj.ancillary['k_geom'][index,:]
        brdf+= fiso

        brdf_nadir = fvol*hy_obj.ancillary['k_vol_nadir'][index,:]
        brdf_nadir+= fgeo*hy_obj.ancillary['k_geom_nadir'][index,:]
        brdf_nadir+= fiso

        correction_factor = brdf_nadir/brdf
        # Pixels outside the apply mask are left uncorrected
        correction_factor[:,~hy_obj.mask['apply_brdf'][index]] = 1

        data[:,brdf_bands] = data[:,brdf_bands]*correction_factor.T

    elif dimension == 'column':
        #index= 300
        #data = hy_obj.get_column(index)

        interpolated_f = [hy_obj.ancillary['interpolators'][band](hy_obj.ancillary['ndvi'][:,index]) for band in brdf_bands]
        interpolated_f = np.array(interpolated_f)
        fvol, fgeo, fiso = interpolated_f[:,:,0], interpolated_f[:,:,1], interpolated_f[:,:,2]

        brdf = fvol*hy_obj.ancillary['k_vol'][:,index]
        brdf+= fgeo*hy_obj.ancillary['k_geom'][:,index]
        brdf+= fiso

        brdf_nadir = fvol*hy_obj.ancillary['k_vol_nadir'][:,index]
        brdf_nadir+= fgeo*hy_obj.ancillary['k_geom_nadir'][:,index]
        brdf_nadir+= fiso

        correction_factor = brdf_nadir/brdf
        correction_factor = np.moveaxis(correction_factor,0,1)
        correction_factor[:,~hy_obj.mask['apply_brdf'][index]] = 1
        data[:,brdf_bands] = data[:,brdf_bands]*correction_factor.T

    elif (dimension == 'band') & (index in brdf_bands):
        # index= 8
        # data = hy_obj.get_band(index)

        # Only bands with fitted coefficients can be corrected
        interpolated_f = hy_obj.ancillary['interpolators'][index](hy_obj.ancillary['ndvi'])
        fvol, fgeo, fiso = interpolated_f[:,:,0], interpolated_f[:,:,1], interpolated_f[:,:,2]

        brdf = fvol*hy_obj.ancillary['k_vol']
        brdf += fgeo*hy_obj.ancillary['k_geom']
        brdf += fiso

        brdf_nadir = fvol*hy_obj.ancillary['k_vol_nadir']
        brdf_nadir += fgeo*hy_obj.ancillary['k_geom_nadir']
        brdf_nadir += fiso

        correction_factor = brdf_nadir/brdf
        correction_factor[~hy_obj.mask['apply_brdf']] = 1
        data= data* correction_factor

    elif dimension == 'chunk':
        # index = 200,501,3000,3501
        x1,x2,y1,y2 = index
        # data = hy_obj.get_chunk(x1,x2,y1,y2)

        interpolated_f = [hy_obj.ancillary['interpolators'][band](hy_obj.ancillary['ndvi'][y1:y2,x1:x2]) for band in brdf_bands]
        interpolated_f = np.array(interpolated_f)
        interpolated_f = np.swapaxes(interpolated_f,0,-1)
        fvol, fgeo, fiso = interpolated_f[0,:,:,:], interpolated_f[1,:,:,:], interpolated_f[2,:,:,:]

        brdf = fvol*hy_obj.ancillary['k_vol'][y1:y2,x1:x2,np.newaxis]
        brdf+= fgeo*hy_obj.ancillary['k_geom'][y1:y2,x1:x2,np.newaxis]
        brdf+= fiso

        brdf_nadir = fvol*hy_obj.ancillary['k_vol_nadir'][y1:y2,x1:x2,np.newaxis]
        brdf_nadir+= fgeo*hy_obj.ancillary['k_geom_nadir'][y1:y2,x1:x2,np.newaxis]
        brdf_nadir+= fiso

        correction_factor = brdf_nadir/brdf
        correction_factor[~hy_obj.mask['apply_brdf'][y1:y2,x1:x2]] = 1
        data[:,:,brdf_bands] = data[:,:,brdf_bands]*correction_factor

    elif dimension == 'pixels':
        # index = [[2000,2001],[200,501]]
        y,x = index
        # data = hy_obj.get_pixels(y,x)

        interpolated_f = [hy_obj.ancillary['interpolators'][band](hy_obj.ancillary['ndvi'][y,x]) for band in brdf_bands]
        interpolated_f = np.array(interpolated_f)
        interpolated_f = np.swapaxes(interpolated_f,0,1)
        fvol, fgeo, fiso = interpolated_f[:,:,0], interpolated_f[:,:,1], interpolated_f[:,:,2]

        brdf = fvol*hy_obj.ancillary['k_vol'][y,x,np.newaxis]
        brdf+= fgeo*hy_obj.ancillary['k_geom'][y,x,np.newaxis]
        brdf+= fiso

        brdf_nadir = fvol*hy_obj.ancillary['k_vol_nadir'][y,x,np.newaxis]
        brdf_nadir+= fgeo*hy_obj.ancillary['k_geom_nadir'][y,x,np.newaxis]
        brdf_nadir+= fiso

        correction_factor = brdf_nadir/brdf
        correction_factor[~hy_obj.mask['apply_brdf'][y,x]] = 1
        data[:,brdf_bands] = data[:,brdf_bands]*correction_factor

    return data
|
||||
167
Flexbrdf/hytools/brdf/kernels.py
Normal file
167
Flexbrdf/hytools/brdf/kernels.py
Normal file
@ -0,0 +1,167 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
本模块包含计算 BRDF 散射核函数的函数。
|
||||
|
||||
方程和常数可在以下论文中找到:
|
||||
|
||||
Colgan, M. S., Baldeck, C. A., Feret, J. B., & Asner, G. P. (2012).
|
||||
Mapping savanna tree species at ecosystem scales using support vector machine classification
|
||||
and BRDF correction on airborne hyperspectral and LiDAR data.
|
||||
Remote Sensing, 4(11), 3462-3480.
|
||||
https://doi.org/10.3390/rs4113462
|
||||
|
||||
Lucht, W., Schaaf, C. B., & Strahler, A. H. (2000).
|
||||
An algorithm for the retrieval of albedo from space using semiempirical BRDF models.
|
||||
IEEE Transactions on Geoscience and Remote sensing, 38(2), 977-998.
|
||||
https://doi.org/10.1109/36.841980
|
||||
|
||||
Maignan, F., Bréon, F. M., & Lacaze, R. (2004).
|
||||
Bidirectional reflectance of Earth targets: Evaluation of analytical
|
||||
models using a large set of spaceborne measurements with emphasis on the Hot Spot.
|
||||
Remote Sensing of Environment, 90(2), 210-220.
|
||||
https://doi.org/10.1016/j.rse.2003.12.006
|
||||
|
||||
Roujean, J. L., Leroy, M., & Deschamps, P. Y. (1992).
|
||||
A bidirectional reflectance model of the Earth's surface for the correction
|
||||
of remote sensing data.
|
||||
Journal of Geophysical Research: Atmospheres, 97(D18), 20455-20468.
|
||||
https://doi.org/10.1029/92JD01411
|
||||
|
||||
Schlapfer, D., Richter, R., & Feingersh, T. (2015).
|
||||
Operational BRDF effects correction for wide-field-of-view optical scanners (BREFCOR).
|
||||
IEEE Transactions on Geoscience and Remote Sensing, 53(4), 1855-1864.
|
||||
https://doi.org/10.1109/TGRS.2014.2349946
|
||||
|
||||
Wanner, W., Li, X., & Strahler, A. H. (1995).
|
||||
On the derivation of kernels for kernel-driven models of bidirectional reflectance.
|
||||
Journal of Geophysical Research: Atmospheres, 100(D10), 21077-21089.
|
||||
https://doi.org/10.1029/95JD02371
|
||||
|
||||
Zhang, X., Jiao, Z., Dong, Y., Zhang, H., Li, Y., He, D., ... & Chang, Y. (2018).
|
||||
Potential investigation of linking PROSAIL with the ross-li BRDF model for
|
||||
vegetation characterization.
|
||||
Remote Sensing, 10(3), 437.
|
||||
https://doi.org/10.3390/rs10030437SSS
|
||||
|
||||
"""
|
||||
import numpy as np
|
||||
|
||||
def calc_geom_kernel(solar_az,solar_zn,sensor_az,sensor_zn,kernel,b_r=1.,h_b =2.):
    """Calculate the geometric scattering kernel.

    Constants b_r (b/r) and h_b (h/b) from Colgan et al. RS 2012.
    Alternatives include the MODIS specification:
    b/r : sparse: 1, dense: 2.5
    h/b : sparse, dense : 2

    All input geometry units must be in radians.

    Args:
        solar_az (numpy.ndarray): Solar azimuth angle.
        solar_zn (numpy.ndarray): Solar zenith angle.
        sensor_az (numpy.ndarray): Sensor view azimuth angle.
        sensor_zn (numpy.ndarray): Sensor view zenith angle.
        kernel (str): Li geometric scattering kernel type
            [li_dense, li_sparse, li_dense_r, li_sparse_r, roujean].
        b_r (float, optional): Object shape ratio b/r. Defaults to 1.0.
        h_b (float, optional): Object height ratio h/b. Defaults to 2.0.

    Returns:
        numpy.ndarray: Geometric scattering kernel, or None for an
        unrecognized kernel type.

    """

    relative_az = sensor_az - solar_az

    # Eq. 37,52. Wanner et al. JGRA 1995
    solar_zn_p = np.arctan(b_r * np.tan(solar_zn))
    sensor_zn_p = np.arctan(b_r * np.tan(sensor_zn))
    # Eq 50. Wanner et al. JGRA 1995
    D = np.sqrt((np.tan(solar_zn_p)**2) + (np.tan(sensor_zn_p)**2) - 2*np.tan(solar_zn_p)*np.tan(sensor_zn_p)*np.cos(relative_az))
    # Eq 49. Wanner et al. JGRA 1995
    t_num = h_b * np.sqrt(D**2 + (np.tan(solar_zn_p)*np.tan(sensor_zn_p)*np.sin(relative_az))**2)
    t_denom = (1/np.cos(solar_zn_p)) + (1/np.cos(sensor_zn_p))
    # Clip keeps arccos in its valid domain despite float error
    t = np.arccos(np.clip(t_num/t_denom,-1,1))
    # Eq 33,48. Wanner et al. JGRA 1995
    O = (1/np.pi) * (t - np.sin(t)*np.cos(t)) * t_denom
    # Eq 51. Wanner et al. JGRA 1995
    cos_phase_p = np.cos(solar_zn_p)*np.cos(sensor_zn_p) + np.sin(solar_zn_p)*np.sin(sensor_zn_p)*np.cos(relative_az)

    if kernel == 'li_sparse':
        # Eq 32. Wanner et al. JGRA 1995
        k_geom = O - (1/np.cos(solar_zn_p)) - (1/np.cos(sensor_zn_p)) + .5*(1+ cos_phase_p) * (1/np.cos(sensor_zn_p))
    elif kernel == 'li_dense':
        # Eq 47. Wanner et al. JGRA 1995
        k_geom = (((1+cos_phase_p) * (1/np.cos(sensor_zn_p)))/ (t_denom - O)) - 2
    elif kernel == 'li_sparse_r':
        # Eq 39. Lucht et al. TGRS 2000
        k_geom = O - (1/np.cos(solar_zn_p)) - (1/np.cos(sensor_zn_p)) + .5*(1+ cos_phase_p) * (1/np.cos(sensor_zn_p)) * (1/np.cos(solar_zn_p))
    elif kernel == 'li_dense_r':
        # Eq 5. Zhang et al. RS 2018 <-- Find a more original reference
        k_geom = (((1+cos_phase_p) * (1/np.cos(sensor_zn_p)) * (1/np.cos(solar_zn_p)))/ (t_denom - O)) - 2
    elif kernel == 'roujean':
        # Eq 2 Roujean et al. JGR 1992
        k_geom1 = (1/(2*np.pi)) * ((np.pi - relative_az)*np.cos(relative_az)+np.sin(relative_az)) *np.tan(solar_zn)*np.tan(sensor_zn)
        k_geom2 = (1/np.pi) * (np.tan(solar_zn) + np.tan(sensor_zn) + np.sqrt(np.tan(solar_zn)**2 + np.tan(sensor_zn)**2 - 2*np.tan(solar_zn)*np.tan(sensor_zn)*np.cos(relative_az)))
        k_geom = k_geom1 - k_geom2
    else:
        print("Unrecognized kernel type: %s" % kernel)
        k_geom = None
    return k_geom
|
||||
|
||||
|
||||
def calc_volume_kernel(solar_az,solar_zn,sensor_az,sensor_zn,kernel):
    """Calculate the volume scattering kernel.

    All input geometry units must be in radians.

    Args:
        solar_az (numpy.ndarray): Solar azimuth angle.
        solar_zn (numpy.ndarray): Solar zenith angle.
        sensor_az (numpy.ndarray): Sensor view azimuth angle.
        sensor_zn (numpy.ndarray): Sensor view zenith angle.
        kernel (str): Volume scattering kernel type
            [ross_thick, ross_thin, hotspot, roujean].

    Returns:
        numpy.ndarray: Volume scattering kernel, or None for an
        unrecognized kernel type.

    """

    relative_az = sensor_az - solar_az

    # Phase angle between the solar and view directions
    # Eq 2. Schlapfer et al. IEEE-TGARS 2015
    phase = np.arccos(np.cos(solar_zn)*np.cos(sensor_zn) + np.sin(solar_zn)*np.sin(sensor_zn)* np.cos(relative_az))

    if kernel == 'ross_thin':
        # Eq 13. Wanner et al. JGRA 1995
        k_vol = ((np.pi/2 - phase)*np.cos(phase) + np.sin(phase))/ (np.cos(sensor_zn)*np.cos(solar_zn)) - (np.pi/2)
    elif kernel == 'ross_thick':
        # Eq 7. Wanner et al. JGRA 1995
        k_vol = ((np.pi/2 - phase)*np.cos(phase) + np.sin(phase))/ (np.cos(sensor_zn)+np.cos(solar_zn)) - (np.pi/4)
    elif kernel in ('hotspot','roujean'):
        # Eq 8 Roujean et al. JGR 1992
        k_vol1 = (4/(3*np.pi)) * (1/(np.cos(solar_zn) + np.cos(sensor_zn)))
        k_vol2 = (((np.pi/2) - phase) * np.cos(phase) + np.sin(phase))
        # Bug fix: in Roujean Eq 8 the -1/3 constant is additive, outside
        # the bracketed term (was k_vol1*(k_vol2 - 1/3)); this now matches
        # the hotspot branch below, where -1/3 is also applied outside.
        k_vol = k_vol1 * k_vol2 - (1/3)
        if kernel == 'hotspot':
            # Eq. 12 Maignan et al. RSE 2004
            k_vol = k_vol1* k_vol2 * (1 + (1 + (phase/np.radians(1.5)))**-1) - (1/3)
    else:
        print("Unrecognized kernel type: %s" % kernel)
        k_vol = None
    return k_vol
|
||||
21
Flexbrdf/hytools/brdf/local.py
Normal file
21
Flexbrdf/hytools/brdf/local.py
Normal file
@ -0,0 +1,21 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
本地/类别 BRDF 校正占位符....开发中。
|
||||
'''
|
||||
220
Flexbrdf/hytools/brdf/universal.py
Normal file
220
Flexbrdf/hytools/brdf/universal.py
Normal file
@ -0,0 +1,220 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
本模块包含用于计算和应用单一(“通用”)组乘法 BRDF 校正系数的函数。系数可以按飞行线计算,也可以跨多个飞行线计算。
|
||||
|
||||
"""
|
||||
from itertools import product
|
||||
from copy import deepcopy
|
||||
import numpy as np
|
||||
import ray
|
||||
from scipy.optimize import minimize
|
||||
from .kernels import calc_volume_kernel,calc_geom_kernel
|
||||
from ..misc import progbar
|
||||
from ..misc import update_brdf
|
||||
from ..masks import mask_create
|
||||
from ..plotting import universal_diagno_plot
|
||||
|
||||
def universal_brdf(actors,config_dict):
    """Estimate universal BRDF coefficients for a set of image actors.

    Coefficients are fit jointly across flightlines when
    ``brdf['grouped']`` is set, otherwise per flightline.  Diagnostic
    plots are exported when requested in the configuration.
    """
    brdf_dict = config_dict['brdf']

    if brdf_dict['grouped']:
        actors = calc_universal_group(actors)
    else:
        _ = ray.get([a.do.remote(calc_universal_single) for a in actors])

    # Robustness/consistency fix: guard the optional 'diagnostic_plots'
    # key the same way flex_brdf does, instead of raising KeyError when
    # it is absent from the configuration.
    if brdf_dict.get('diagnostic_plots'):
        print('Exporting diagnostic plots.')
        _ = ray.get([a.do.remote(universal_diagno_plot,config_dict) for a in actors])
|
||||
|
||||
def sample_kernels(hy_obj):
    '''Compute BRDF kernels and sample them at the calc-mask pixels.

    Returns:
        np.ndarray: design matrix with columns
        [volume kernel, geometric kernel, 1].
    '''
    calc_mask = hy_obj.mask['calc_brdf']

    geom_kernel = hy_obj.geom_kernel(hy_obj.brdf['geometric'],
                                     b_r=hy_obj.brdf["b/r"],
                                     h_b=hy_obj.brdf["h/b"])[calc_mask]
    vol_kernel = hy_obj.volume_kernel(hy_obj.brdf['volume'])[calc_mask]

    intercept = np.ones(vol_kernel.shape)
    return np.vstack([vol_kernel, geom_kernel, intercept]).T
|
||||
|
||||
def subsample_mask(hy_obj):
    '''Randomly thin the calc mask down to ``brdf['sample_perc']``.

    When the sample percentage is >= 1 the mask is left untouched;
    otherwise a random (1 - sample_perc) share of the True pixels is
    flipped to False in place.
    '''
    sample_perc = hy_obj.brdf['sample_perc']
    if sample_perc >= 1:
        return

    true_idx = np.array(np.where(hy_obj.mask['calc_brdf'])).T
    drop_count = int(len(true_idx) * (1 - sample_perc))
    dropped = true_idx[np.random.choice(range(len(true_idx)),
                                        drop_count,
                                        replace=False)].T
    hy_obj.mask['calc_brdf'][dropped[0], dropped[1]] = False
|
||||
|
||||
def calc_universal_single(hy_obj):
    '''Calculate universal BRDF coefficients for a single flightline.

    Fits, per good band, an ordinary least-squares model of reflectance
    against the [volume, geometric, intercept] kernel design matrix and
    stores the coefficient lists in ``hy_obj.brdf['coeffs']``.
    '''
    subsample_mask(hy_obj)
    X = sample_kernels(hy_obj)

    hy_obj.brdf['coeffs'] = {}
    for band_num,bad in enumerate(hy_obj.bad_bands):
        if ~bad:
            # Fix: the original reassigned the loop variable `band` (the
            # bad-band flag) to the reflectance array, shadowing it.
            band_data = hy_obj.get_band(band_num,
                                        corrections = hy_obj.corrections, mask='calc_brdf')
            brdf_coeff = np.linalg.lstsq(X, band_data,rcond=None)[0].flatten().tolist()
            hy_obj.brdf['coeffs'][band_num] = brdf_coeff
|
||||
|
||||
def calc_universal_group(actors):
    """Fit universal BRDF coefficients using pooled data from all flight lines.

    Kernel samples and band reflectances from every actor are concatenated
    and a single set of [vol, geom, iso] coefficients per good band is fit,
    then broadcast back to every actor.

    Args:
        actors (list): Ray actor handles wrapping HyTools objects.

    Returns:
        list: The input actors, each with ``brdf['coeffs']`` updated.
    """
    _ = ray.get([a.do.remote(subsample_mask) for a in actors])
    X = ray.get([a.do.remote(sample_kernels) for a in actors])
    X = np.concatenate(X)

    # Band metadata is identical across flight lines; query the first actor.
    bad_bands = ray.get(actors[0].do.remote(lambda x: x.bad_bands))
    corrections = ray.get(actors[0].do.remote(lambda x: x.corrections))

    coeffs = {}

    for band_num, band in enumerate(bad_bands):
        if ~band:
            y = ray.get([a.get_band.remote(band_num, mask='calc_brdf',
                                           corrections=corrections) for a in actors])
            y = np.concatenate(y)
            # rcond=None silences numpy's FutureWarning and matches the
            # per-line fit in calc_universal_single.
            coeffs[band_num] = np.linalg.lstsq(X, y, rcond=None)[0].flatten().tolist()
            progbar(np.sum(~bad_bands[:band_num+1]), np.sum(~bad_bands))
    print('\n')

    # Push the fitted coefficients back to every actor.
    _ = ray.get([a.do.remote(update_brdf, {'key': 'coeffs',
                                           'value': coeffs}) for a in actors])

    return actors
|
||||
|
||||
|
||||
def apply_universal(hy_obj,data,dimension,index):
    ''' Apply universal (image-wide) BRDF correction to a data slice.

    Multiplies each pixel/band by the ratio of the modeled nadir-view
    BRDF to the modeled observed-geometry BRDF.

    Args:
        hy_obj : HyTools class object.
        data (np.ndarray): Data slice.
        dimension (str): One of 'line', 'column', 'band', 'chunk', 'pixels'.
        index (int,list): Data index along the given dimension.

    Returns:
        data (np.ndarray): BRDF-corrected data slice (float32).
    '''

    # Lazily compute and cache the kernels for the observed geometry.
    if 'k_vol' not in hy_obj.ancillary:
        hy_obj.ancillary['k_vol'] = hy_obj.volume_kernel(hy_obj.brdf['volume'])
    if 'k_geom' not in hy_obj.ancillary:
        hy_obj.ancillary['k_geom'] = hy_obj.geom_kernel(hy_obj.brdf['geometric'],
                                                        b_r=hy_obj.brdf["b/r"],
                                                        h_b =hy_obj.brdf["h/b"])
    # Nadir-view kernels: zero view zenith/azimuth, constant normalized
    # solar zenith across the whole image.
    if ('k_vol_nadir' not in hy_obj.ancillary) or ('k_geom_nadir' not in hy_obj.ancillary):
        solar_zn = hy_obj.brdf['solar_zn_norm_radians'] * np.ones((hy_obj.lines,hy_obj.columns))
        hy_obj.ancillary['k_vol_nadir'] = calc_volume_kernel(0,solar_zn,
                                                             0,0,hy_obj.brdf['volume'])
        hy_obj.ancillary['k_geom_nadir'] = calc_geom_kernel(0,solar_zn,
                                                            0,0,hy_obj.brdf['geometric'],
                                                            b_r=hy_obj.brdf["b/r"],
                                                            h_b =hy_obj.brdf["h/b"])
    if 'apply_brdf' not in hy_obj.mask:
        hy_obj.gen_mask(mask_create,'apply_brdf',hy_obj.brdf['apply_mask'])

    # Band numbers with fitted coefficients and their [vol, geom, iso] terms.
    brdf_bands = [int(x) for x in hy_obj.brdf['coeffs'].keys()]
    fvol, fgeo, fiso = np.array([hy_obj.brdf['coeffs'][band] for band in hy_obj.brdf['coeffs'].keys()]).T

    # Convert to float
    data = data.astype(np.float32)

    if dimension == 'line':
        # assumes data is (columns, bands) for a single line — TODO confirm
        brdf = fvol[:,np.newaxis]*hy_obj.ancillary['k_vol'][[index],:]
        brdf+= fgeo[:,np.newaxis]*hy_obj.ancillary['k_geom'][[index],:]
        brdf+= fiso[:,np.newaxis]

        brdf_nadir = fvol[:,np.newaxis]*hy_obj.ancillary['k_vol_nadir'][[index],:]
        brdf_nadir+= fgeo[:,np.newaxis]*hy_obj.ancillary['k_geom_nadir'][[index],:]
        brdf_nadir+= fiso[:,np.newaxis]

        # Masked-out pixels get a factor of 1 (left uncorrected).
        correction_factor = brdf_nadir/brdf
        correction_factor[:,~hy_obj.mask['apply_brdf'][index,:]] = 1
        data[:,brdf_bands] = data[:,brdf_bands]*correction_factor.T

    elif dimension == 'column':

        brdf = fvol[np.newaxis,:]*hy_obj.ancillary['k_vol'][:,[index]]
        brdf+= fgeo[np.newaxis,:]*hy_obj.ancillary['k_geom'][:,[index]]
        brdf+= fiso[np.newaxis,:]

        brdf_nadir = fvol[np.newaxis,:]*hy_obj.ancillary['k_vol_nadir'][:,[index]]
        brdf_nadir+= fgeo[np.newaxis,:]*hy_obj.ancillary['k_geom_nadir'][:,[index]]
        brdf_nadir+= fiso[np.newaxis,:]

        correction_factor = brdf_nadir/brdf
        correction_factor[~hy_obj.mask['apply_brdf'][:,index],:] = 1

        data[:,brdf_bands] = data[:,brdf_bands]*correction_factor.T

    elif dimension == 'band':
        # Single band: scalar coefficients for this band index.
        fvol, fgeo, fiso = hy_obj.brdf['coeffs'][index]
        brdf = fvol*hy_obj.ancillary['k_vol']
        brdf += fgeo*hy_obj.ancillary['k_geom']
        brdf+=fiso

        brdf_nadir = fvol*hy_obj.ancillary['k_vol_nadir']
        brdf_nadir+= fgeo*hy_obj.ancillary['k_geom_nadir']
        brdf_nadir+= fiso

        correction_factor = brdf_nadir/brdf
        correction_factor[~hy_obj.mask['apply_brdf']] = 1
        data= data* correction_factor

    elif dimension == 'chunk':
        # index is (x1, x2, y1, y2) — column range then line range.
        x1,x2,y1,y2 = index

        brdf = fvol[np.newaxis,np.newaxis,:]*hy_obj.ancillary['k_vol'][y1:y2,x1:x2,np.newaxis]
        brdf+= fgeo[np.newaxis,np.newaxis,:]*hy_obj.ancillary['k_geom'][y1:y2,x1:x2,np.newaxis]
        brdf+= fiso[np.newaxis,np.newaxis,:]

        brdf_nadir = fvol[np.newaxis,np.newaxis,:]*hy_obj.ancillary['k_vol_nadir'][y1:y2,x1:x2,np.newaxis]
        brdf_nadir+= fgeo[np.newaxis,np.newaxis,:]*hy_obj.ancillary['k_geom_nadir'][y1:y2,x1:x2,np.newaxis]
        brdf_nadir+= fiso[np.newaxis,np.newaxis,:]

        correction_factor = brdf_nadir/brdf
        correction_factor[~hy_obj.mask['apply_brdf'][y1:y2,x1:x2]] = 1

        data[:,:,brdf_bands] = data[:,:,brdf_bands]*correction_factor

    elif dimension == 'pixels':
        # index is (y, x) arrays of pixel coordinates.
        y,x = index

        brdf = fvol[np.newaxis,:]*hy_obj.ancillary['k_vol'][y,x,np.newaxis]
        brdf+= fgeo[np.newaxis,:]*hy_obj.ancillary['k_geom'][y,x,np.newaxis]
        brdf+= fiso[np.newaxis,:]

        brdf_nadir = fvol[np.newaxis,:]*hy_obj.ancillary['k_vol_nadir'][y,x,np.newaxis]
        brdf_nadir+= fgeo[np.newaxis,:]*hy_obj.ancillary['k_geom_nadir'][y,x,np.newaxis]
        brdf_nadir+= fiso[np.newaxis,:]

        correction_factor = brdf_nadir/brdf
        correction_factor[~hy_obj.mask['apply_brdf'][y,x]] = 1

        data[:,brdf_bands] = data[:,brdf_bands]*correction_factor

    return data
|
||||
25
Flexbrdf/hytools/glint/__init__.py
Normal file
25
Flexbrdf/hytools/glint/__init__.py
Normal file
@ -0,0 +1,25 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Evan Greenberg.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
The :mod:`hytools.correction` module include functions image correction.
|
||||
"""
|
||||
from .glint import *
|
||||
from .gao_2021 import *
|
||||
from .hedley_2005 import *
|
||||
from .hochberg_2003 import *
|
||||
216
Flexbrdf/hytools/glint/gao_2021.py
Normal file
216
Flexbrdf/hytools/glint/gao_2021.py
Normal file
@ -0,0 +1,216 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Evan Greenberg.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
"""
|
||||
import numpy as np
|
||||
from ..masks import mask_create
|
||||
|
||||
|
||||
# Refractive index of water as a function of wavelength, used to model
# the spectral variation of the air/water Fresnel reflectance.
# Columns: [wavelength (nm), refractive index].
REFRACTIVE_INDICES = np.array([
    [200, 1.396],
    [225, 1.373],
    [250, 1.362],
    [275, 1.354],
    [300, 1.349],
    [325, 1.346],
    [350, 1.343],
    [375, 1.341],
    [400, 1.339],
    [425, 1.338],
    [450, 1.337],
    [475, 1.336],
    [500, 1.335],
    [525, 1.334],
    [550, 1.333],
    [575, 1.333],
    [600, 1.332],
    [625, 1.332],
    [650, 1.331],
    [675, 1.331],
    [700, 1.331],
    [725, 1.33],
    [750, 1.33],
    [775, 1.33],
    [800, 1.329],
    [825, 1.329],
    [850, 1.329],
    [875, 1.328],
    [900, 1.328],
    [925, 1.328],
    [950, 1.327],
    [975, 1.327],
    [1000, 1.327],
    [1200, 1.324],
    [1400, 1.321],
    [1600, 1.317],
    [1800, 1.312],
    [2000, 1.306],
    [2200, 1.296],
    [2400, 1.279],
    [2600, 1.242],
    [2650, 1.219],
    [2700, 1.188],
    [2750, 1.157],
    [2800, 1.142],
    [2850, 1.149],
    [2900, 1.201],
    [2950, 1.292],
    [3000, 1.371]
])
|
||||
|
||||
|
||||
def apply_gao_2021_correction(hy_obj, data, dimension, index):
    """
    Glint correction algorithm following:

    Gao BC, Li RR.
    Correction of Sunglint Effects in High Spatial Resolution
    Hyperspectral Imagery Using SWIR or NIR Bands and Taking Account of
    Spectral Variation of Refractive Index of Water.
    Adv Environ Eng Res 2021;2(3):16; doi:10.21926/aeer.2103017.

    Args:
        hy_obj: HyTools object with glint settings.
        data (np.ndarray): Data slice to correct.
        dimension (str): One of 'line', 'column', 'band', 'chunk', 'pixels'.
        index: Index of the slice along the given dimension.

    Returns:
        np.ndarray: Glint-corrected data slice.
    """

    # Build the glint (water) mask on first use.
    if 'apply_glint' not in hy_obj.mask:
        hy_obj.gen_mask(mask_create,'apply_glint',hy_obj.glint['apply_mask'])

    # Nothing to correct when no pixels are flagged.
    if hy_obj.mask['apply_glint'].sum() == 0:
        return data

    hy_obj.glint['correction_band'] = hy_obj.wave_to_band(hy_obj.glint['correction_wave'])

    # Cache the simulated Fresnel glint spectrum (1, bands) ...
    if 'gao_b_simu' not in hy_obj.ancillary:
        hy_obj.ancillary['gao_b_simu'] = get_b_simu(hy_obj)

    # ... and the per-pixel glint scaling ratio (lines, columns).
    if 'gao_rto' not in hy_obj.ancillary:
        hy_obj.ancillary['gao_rto'] = get_rto(hy_obj)

    if dimension == 'line':
        rto_line = hy_obj.ancillary['gao_rto'][index, :]
        rto_line = np.reshape(rto_line, (len(rto_line), 1))
        correction = rto_line * hy_obj.ancillary['gao_b_simu']

    elif dimension == 'column':
        rto_col = hy_obj.ancillary['gao_rto'][:, index]
        rto_col = np.reshape(rto_col, (len(rto_col), 1))
        correction = rto_col * hy_obj.ancillary['gao_b_simu']

    elif (dimension == 'band'):
        correction = (
            hy_obj.ancillary['gao_b_simu'][0, :][index]
            * hy_obj.ancillary['gao_rto']
        )

    elif dimension == 'chunk':
        # index is (x1, x2, y1, y2) — column range then line range.
        x1, x2, y1, y2 = index
        rto_chunk = hy_obj.ancillary['gao_rto'][y1:y2, x1:x2]
        rto_chunk = np.reshape(
            rto_chunk,
            (
                rto_chunk.shape[0],
                rto_chunk.shape[1],
                1
            )
        )
        correction = rto_chunk * hy_obj.ancillary['gao_b_simu']

    elif dimension == 'pixels':
        # index is (y, x) arrays of pixel coordinates.
        y, x = index
        rto_pixels = hy_obj.ancillary['gao_rto'][y, x]
        rto_pixels = np.reshape(rto_pixels, (len(rto_pixels), 1))
        correction = rto_pixels * hy_obj.ancillary['gao_b_simu']

    # NOTE(review): an unrecognized `dimension` leaves `correction`
    # unbound and raises UnboundLocalError here — confirm callers only
    # pass the five supported values.
    return data - correction
|
||||
|
||||
|
||||
def zenith_refracted(theta, n):
    """Return the Snell-refracted zenith angle, in degrees.

    Args:
        theta: Incident zenith angle in degrees.
        n: Refractive index of water at the wavelength of interest.
    """
    sin_refracted = np.sin(np.radians(theta)) / n
    return np.degrees(np.arcsin(sin_refracted))
|
||||
|
||||
|
||||
def fresnel_reflectence(theta, theta_p):
    """Unpolarized Fresnel reflectance from incident/refracted zeniths.

    Averages the s-polarized (sine) and p-polarized (tangent) Fresnel
    terms. Both angles are in degrees.
    """
    incident = np.radians(theta)
    refracted = np.radians(theta_p)
    diff = incident - refracted
    total = incident + refracted

    r_s = np.sin(diff)**2 / np.sin(total)**2
    r_p = np.tan(diff)**2 / np.tan(total)**2
    return (r_s + r_p) / 2
|
||||
|
||||
|
||||
def fresnel_spectra(theta, xs, ns):
    """Fresnel reflectance spectrum at incidence *theta* (degrees).

    For each wavelength in *xs* the refractive index is interpolated
    from the lookup table *ns* (columns: wavelength, index) and the
    unpolarized reflectance is evaluated.
    """
    reflectances = []
    for wavelength in xs:
        index = np.interp(wavelength, ns[:, 0], ns[:, 1])
        refracted = zenith_refracted(theta, index)
        reflectances.append(fresnel_reflectence(theta, refracted))

    return np.array(reflectances)
|
||||
|
||||
|
||||
def get_b_simu(hy_obj):
    """Simulated glint (Fresnel) spectrum at the image wavelengths.

    Evaluated at a near-zero incidence angle; returned with shape
    (1, bands) so it broadcasts against per-pixel ratios.
    """
    spectrum = fresnel_spectra(
        10**-5,
        hy_obj.wavelengths,
        REFRACTIVE_INDICES
    )
    return spectrum[np.newaxis, :]
|
||||
|
||||
|
||||
def get_rto(hy_obj):
    """Per-pixel glint scaling ratio for the Gao 2021 correction.

    The reference-band reflectance, offset by its near-minimum over
    glinted pixels, is divided by the simulated Fresnel reflectance at
    the reference band. Non-glint pixels are zeroed.
    """
    glint_mask = hy_obj.mask['apply_glint']

    reference = hy_obj.get_wave(hy_obj.glint['correction_wave'])
    floor = np.percentile(
        reference[glint_mask & (reference > 0)],
        .0001
    )

    simulated = hy_obj.ancillary['gao_b_simu'][0, :][hy_obj.glint['correction_band']]
    ratio = (reference - floor) / simulated
    ratio[~glint_mask] = 0

    return ratio
|
||||
75
Flexbrdf/hytools/glint/glint.py
Normal file
75
Flexbrdf/hytools/glint/glint.py
Normal file
@ -0,0 +1,75 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Evan Greenberg.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
"""
|
||||
import ray
|
||||
from ..misc import set_glint
|
||||
from .hochberg_2003 import apply_hochberg_2003_correction
|
||||
from .gao_2021 import apply_gao_2021_correction
|
||||
from .hedley_2005 import apply_hedley_2005_correction
|
||||
|
||||
|
||||
def set_glint_parameters(actors, config_dict):
    """Attach glint settings to every actor and register the correction."""
    glint_dict = config_dict['glint']

    # Push the glint settings to each worker.
    ray.get([actor.do.remote(set_glint, glint_dict) for actor in actors])

    # Register 'glint' in each worker's correction pipeline.
    ray.get([actor.do.remote(lambda x: x.corrections.append('glint'))
             for actor in actors])
|
||||
|
||||
def set_glint_parameters_single(hy_obj, config_dict):
    """Attach glint settings to one HyTools object and register the correction."""
    # Push the glint settings onto the object ...
    set_glint(hy_obj, config_dict['glint'])
    # ... and register 'glint' in its correction pipeline.
    hy_obj.corrections.append('glint')
|
||||
|
||||
|
||||
def apply_glint_correct(hy_obj, data, dimension, index):
    ''' Corrects glint based on the specified algorithm in the config.
    Options include:
        Hochberg et al., 2003: hochberg
        Gao et al., 2021: gao
        Hedley et al. 2005: hedley
    Optionally truncates negative reflectances to zero afterwards.
    '''
    algorithm = hy_obj.glint['type']

    if algorithm == 'hochberg':
        data = apply_hochberg_2003_correction(hy_obj, data, dimension, index)
    elif algorithm == 'gao':
        data = apply_gao_2021_correction(hy_obj, data, dimension, index)
    elif algorithm == 'hedley':
        data = apply_hedley_2005_correction(hy_obj, data, dimension, index)

    # Truncate reflectance values below 0, leaving no-data pixels untouched.
    if hy_obj.glint['truncate']:
        data[(data < 0) & (data != hy_obj.no_data)] = 0

    return data
|
||||
140
Flexbrdf/hytools/glint/hedley_2005.py
Normal file
140
Flexbrdf/hytools/glint/hedley_2005.py
Normal file
@ -0,0 +1,140 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Evan Greenberg.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
"""
|
||||
import numpy as np
|
||||
from scipy import stats
|
||||
from ..masks import mask_create
|
||||
|
||||
|
||||
def apply_hedley_2005_correction(hy_obj, data, dimension, index):
    """
    Glint correction algorithm following:

    Hedley, J. D., Harborne, A. R., & Mumby, P. J. (2005).
    Simple and robust removal of sun glint for mapping shallow‐water benthos.
    International Journal of Remote Sensing, 26(10), 2107-2112.

    Args:
        hy_obj: HyTools object with glint settings.
        data (np.ndarray): Data slice to correct.
        dimension (str): One of 'line', 'column', 'band', 'chunk', 'pixels'.
        index: Index of the slice along the given dimension.

    Returns:
        np.ndarray: Glint-corrected data slice.

    Raises:
        KeyError: If no deep water sample is provided in the glint settings.
    """
    # Raise an exception if there is no deep water sample provided
    if isinstance(hy_obj.glint.get('deep_water_sample'), type(None)):
        raise KeyError("No Deep Water Sample Provided")

    # Build the glint (water) mask on first use.
    if 'apply_glint' not in hy_obj.mask:
        hy_obj.gen_mask(mask_create,'apply_glint',hy_obj.glint['apply_mask'])

    # Nothing to correct when no pixels are flagged.
    if hy_obj.mask['apply_glint'].sum() == 0:
        return data

    hy_obj.glint['correction_band'] = hy_obj.wave_to_band(
        hy_obj.glint['correction_wave']
    )

    # Cache the per-band regression slopes from the deep-water sample ...
    if 'hedley_slopes' not in hy_obj.ancillary:
        hy_obj.ancillary['hedley_slopes'] = optimize_slopes(hy_obj)

    # ... and the reference-band glint signal.
    if 'hedley_nir_swir_diff' not in hy_obj.ancillary:
        hy_obj.ancillary['hedley_nir_swir_diff'] = nir_swir_diff(hy_obj)

    if dimension == 'line':
        correction = (
            hy_obj.ancillary['hedley_nir_swir_diff'][index, :].reshape(-1, 1)
            * hy_obj.ancillary['hedley_slopes']
        )
        correction[~hy_obj.mask['apply_glint'][index, :], :] = 0

    elif dimension == 'column':
        correction = (
            hy_obj.ancillary['hedley_nir_swir_diff'][:, index].reshape(-1, 1)
            * hy_obj.ancillary['hedley_slopes']
        )
        correction[~hy_obj.mask['apply_glint'][:, index], :] = 0

    elif (dimension == 'band'):
        correction = (
            hy_obj.ancillary['hedley_nir_swir_diff']
            * hy_obj.ancillary['hedley_slopes'][0, index]
        )
        correction[~hy_obj.mask['apply_glint']] = 0

    elif dimension == 'chunk':
        # index is (x1, x2, y1, y2) — column range then line range.
        x1, x2, y1, y2 = index
        corr_diff = hy_obj.ancillary['hedley_nir_swir_diff'][y1:y2, x1:x2]
        bandnums = data.shape[2]
        # Broadcast the 2D reference signal across all bands.
        corr_diff = np.repeat(
            corr_diff[:, :, np.newaxis],
            bandnums,
            axis=2
        )

        correction = corr_diff * hy_obj.ancillary['hedley_slopes']
        correction[~hy_obj.mask['apply_glint'][y1:y2, x1:x2], :] = 0

    elif dimension == 'pixels':
        # index is (y, x) arrays of pixel coordinates.
        y, x = index

        correction = (
            hy_obj.ancillary['hedley_nir_swir_diff'][y, x].reshape(-1, 1)
            * hy_obj.ancillary['hedley_slopes']
        )
        correction[~hy_obj.mask['apply_glint'][y, x], :] = 0

    # NOTE(review): an unrecognized `dimension` leaves `correction`
    # unbound and raises UnboundLocalError here — confirm callers only
    # pass the five supported values.
    return data - correction
|
||||
|
||||
|
||||
def optimize_slopes(hy_obj):
    """Regress each band of a deep-water sample against the glint reference band.

    Following Hedley et al. (2005), the slope of each band's reflectance
    versus the NIR/SWIR reference band over an optically deep water
    region gives the per-band glint sensitivity.

    Args:
        hy_obj: HyTools object; ``glint['deep_water_sample']`` holds chunk
            indices keyed by file name, ``glint['correction_band']`` the
            reference band index.

    Returns:
        numpy.ndarray: (1, bands) array of regression slopes.
    """
    deep_water = hy_obj.get_chunk(
        *hy_obj.glint['deep_water_sample'][hy_obj.file_name]
    )

    deep_correction = (
        deep_water[:, :, hy_obj.glint['correction_band']].flatten()
    )

    # Regress each band against the reference band. The band index is
    # used directly; the original argmin over wavelengths always
    # resolved to the loop index and was redundant O(n^2) work.
    slopes = np.empty([1, len(hy_obj.wavelengths)])
    for i in range(len(hy_obj.wavelengths)):
        band_sample = deep_water[:, :, i].flatten()
        slopes[0, i] = stats.linregress(deep_correction, band_sample)[0]

    return slopes
|
||||
|
||||
|
||||
def nir_swir_diff(hy_obj):
    """Reference-band glint signal: masked NIR/SWIR band minus its near-minimum.

    Non-glint pixels are zeroed before the minimum is taken, so they end
    up at ``-min`` in the returned array; callers mask them out again.
    """
    reference = np.copy(
        hy_obj.get_wave(hy_obj.glint['correction_wave'])
    )
    reference[~hy_obj.mask['apply_glint']] = 0
    floor = np.percentile(reference[reference > 0], .0001)

    return reference - floor
|
||||
88
Flexbrdf/hytools/glint/hochberg_2003.py
Normal file
88
Flexbrdf/hytools/glint/hochberg_2003.py
Normal file
@ -0,0 +1,88 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Evan Greenberg.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
"""
|
||||
import numpy as np
|
||||
from ..masks import mask_create
|
||||
|
||||
|
||||
def apply_hochberg_2003_correction(hy_obj, data, dimension, index):
    """
    Glint correction algorithm following:

    Hochberg, EJ, Andréfouët, S and Tyler, MR. 2003.
    Sea surface correction of high spatial resolution Ikonos images to
    improve bottom mapping in near‐shore environments..
    IEEE Transactions on Geoscience and Remote Sensing, 41: 1724–1729.

    Args:
        hy_obj: HyTools object with glint settings.
        data (np.ndarray): Data slice to correct.
        dimension (str): One of 'line', 'column', 'band', 'chunk', 'pixels'.
        index: Index of the slice along the given dimension.

    Returns:
        np.ndarray: Glint-corrected data slice.
    """

    # Build the glint (water) mask on first use.
    if 'apply_glint' not in hy_obj.mask:
        hy_obj.gen_mask(mask_create,'apply_glint',hy_obj.glint['apply_mask'])

    # Nothing to correct when no pixels are flagged.
    if hy_obj.mask['apply_glint'].sum() == 0:
        return data

    # Cache the image-wide per-pixel glint estimate.
    if 'hochberg_correction' not in hy_obj.ancillary:
        hy_obj.ancillary['hochberg_correction'] = (
            get_hochberg_correction(hy_obj)
        )

    if dimension == 'line':
        correction = hy_obj.ancillary['hochberg_correction'][index, :][:,np.newaxis]

    elif dimension == 'column':
        correction = hy_obj.ancillary['hochberg_correction'][:, index][np.newaxis,:]

    elif dimension == 'band':
        correction = hy_obj.ancillary['hochberg_correction']

    elif dimension == 'chunk':
        # index is (x1, x2, y1, y2) — column range then line range.
        x1, x2, y1, y2 = index
        correction = hy_obj.ancillary['hochberg_correction'][y1:y2, x1:x2]

    elif dimension == 'pixels':
        # index is (y, x) arrays of pixel coordinates.
        y, x = index
        correction = hy_obj.ancillary['hochberg_correction'][y, x]

    # NOTE(review): an unrecognized `dimension` leaves `correction`
    # unbound and raises UnboundLocalError here — confirm callers only
    # pass the five supported values.
    return data - correction
|
||||
|
||||
def get_hochberg_correction(hy_obj):
    """
    Per-pixel glint magnitude for the whole image (Hochberg 2003).

    The NIR/SWIR reference band (or the mean of several, when a list of
    wavelengths is configured) minus its near-minimum over positive
    pixels estimates the glint signal. Non-water pixels are zeroed.
    """
    waves = hy_obj.glint['correction_wave']

    if isinstance(waves, list):
        # Average several reference wavelengths.
        reference = np.zeros((hy_obj.lines, hy_obj.columns))
        for wave in waves:
            reference += hy_obj.get_wave(wave)
        reference /= len(waves)
    else:
        reference = np.copy(hy_obj.get_wave(waves))

    water = hy_obj.mask['apply_glint']
    reference[~water] = 0

    floor = np.percentile(reference[reference > 0], .001)

    correction = reference - floor
    correction[~water] = 0
    return correction
|
||||
23
Flexbrdf/hytools/io/__init__.py
Normal file
23
Flexbrdf/hytools/io/__init__.py
Normal file
@ -0,0 +1,23 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
The :mod:`hytools.io` module includes functions for reading
|
||||
from multiple file formats and writing to ENVI formatted binary files.
|
||||
"""
|
||||
from .envi import *
|
||||
697
Flexbrdf/hytools/io/envi.py
Normal file
697
Flexbrdf/hytools/io/envi.py
Normal file
@ -0,0 +1,697 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Functions for reading and writing ENVI formatted binary files
|
||||
|
||||
Todo:
|
||||
* Implement opening of ENVI files with different byte order
|
||||
|
||||
"""
|
||||
import os
|
||||
import sys
|
||||
from collections import Counter
|
||||
import numpy as np
|
||||
|
||||
# ENVI datatype conversion dictionary: maps the ENVI header "data type"
# code to the corresponding numpy dtype.
dtype_dict = {1:np.uint8,
              2:np.int16,
              3:np.int32,
              4:np.float32,
              5:np.float64,
              12:np.uint16,
              13:np.uint32,
              14:np.int64,
              15:np.uint64}

# Dictionary of all ENVI header fields: maps each field name to how its
# value should be parsed ("str", "int", "float", "list_str", "list_float").
field_dict = {"acquisition time": "str",
              "band names":"list_str",
              "bands": "int",
              "bbl": "list_float",
              "byte order": "int",
              "class lookup": "str",
              "class names": "str",
              "classes": "int",
              "cloud cover": "float",
              "complex function": "str",
              "coordinate system string": "str",
              "correction factors": "list_float",
              "data gain values": "list_float",
              "data ignore value": "float",
              "data offset values": "list_float",
              "data reflectance gain values": "list_float",
              "data reflectance offset values": "list_float",
              "data type": "int",
              "default bands": "list_float",
              "default stretch": "str",
              "dem band": "int",
              "dem file": "str",
              "description": "str",
              "envi description":"str",
              "file type": "str",
              "fwhm": "list_float",
              "geo points": "list_float",
              "header offset": "int",
              "interleave": "str",
              "lines": "int",
              "map info": "list_str",
              "pixel size": "list_str",
              "projection info": "str",
              "read procedures": "str",
              "reflectance scale factor": "float",
              "rpc info": "str",
              "samples":"int",
              "security tag": "str",
              "sensor type": "str",
              "smoothing factors": "list_float",
              "solar irradiance": "float",
              "spectra names": "list_str",
              "sun azimuth": "float",
              "sun elevation": "float",
              "wavelength": "list_float",
              "wavelength units": "str",
              "x start": "float",
              "y start": "float",
              "z plot average": "str",
              "z plot range": "str",
              "z plot titles": "str"}
|
||||
|
||||
|
||||
def open_envi(hy_obj, anc_path=None, ext=False, glt_path=None):
    """Open ENVI formatted image file and populate Hytools object.

    Args:
        hy_obj: HyTools object whose ``file_name`` points to an ENVI image;
            the header is assumed to be located in the same directory.
        anc_path (dict, optional): Dictionary with pathnames and band numbers
            of ancillary datasets.
        ext (bool): Input ENVI file has a file extension.
        glt_path (str, optional): Pathname of a GLT (geometry lookup) header.

    Returns:
        HyTools file object: Populated HyTools file object, or None when the
        header file is missing or the interleave type is unrecognized.
    """
    # Avoid the shared-mutable-default-argument pitfall.
    if anc_path is None:
        anc_path = {}

    header_file = os.path.splitext(hy_obj.file_name)[0] + ".hdr"

    if not os.path.isfile(header_file):
        print("ERROR: Header file not found.")
        return None

    header_dict = parse_envi_header(header_file)
    hy_obj.lines = header_dict["lines"]
    hy_obj.columns = header_dict["samples"]
    hy_obj.bands = header_dict["bands"]
    hy_obj.bad_bands = np.array([False for band in range(hy_obj.bands)])
    hy_obj.interleave = header_dict["interleave"]
    hy_obj.fwhm = header_dict["fwhm"]
    hy_obj.wavelengths = header_dict["wavelength"]
    hy_obj.wavelength_units = header_dict["wavelength units"]
    hy_obj.dtype = dtype_dict[header_dict["data type"]]
    hy_obj.no_data = header_dict['data ignore value']
    hy_obj.map_info = header_dict['map info']
    hy_obj.byte_order = header_dict['byte order']
    hy_obj.anc_path = anc_path
    hy_obj.header_file = header_file
    hy_obj.transform = calc_geotransform(header_dict['map info'])
    if bool(header_dict['coordinate system string']):
        hy_obj.projection = header_dict['coordinate system string']
    else:
        hy_obj.projection = ''

    if hy_obj.byte_order == 1:
        hy_obj.endianness = 'big'
    else:
        hy_obj.endianness = 'little'

    if isinstance(header_dict['bbl'], np.ndarray):
        hy_obj.bad_bands = np.array([x == 1 for x in header_dict['bbl']])

    if header_dict["interleave"] == 'bip':
        hy_obj.shape = (hy_obj.lines, hy_obj.columns, hy_obj.bands)
    elif header_dict["interleave"] == 'bil':
        hy_obj.shape = (hy_obj.lines, hy_obj.bands, hy_obj.columns)
    elif header_dict["interleave"] == 'bsq':
        hy_obj.shape = (hy_obj.bands, hy_obj.lines, hy_obj.columns)
    else:
        # Return immediately: the original code set hy_obj to None and then
        # dereferenced hy_obj.no_data below, raising AttributeError.
        print("ERROR: Unrecognized interleave type.")
        return None

    # If no_data value is not specified guess using image corners.
    if hy_obj.no_data is None:
        hy_obj.load_data()
        band_ind = 5 if hy_obj.bands > 10 else 0
        if header_dict["interleave"] == 'bip':
            up_l = hy_obj.data[0, 0, band_ind]
            up_r = hy_obj.data[0, -1, band_ind]
            low_l = hy_obj.data[-1, 0, band_ind]
            low_r = hy_obj.data[-1, -1, band_ind]
        elif header_dict["interleave"] == 'bil':
            up_l = hy_obj.data[0, band_ind, 0]
            up_r = hy_obj.data[0, band_ind, -1]
            low_l = hy_obj.data[-1, band_ind, 0]
            low_r = hy_obj.data[-1, band_ind, -1]
        elif header_dict["interleave"] == 'bsq':
            up_l = hy_obj.data[band_ind, 0, 0]
            up_r = hy_obj.data[band_ind, 0, -1]
            low_l = hy_obj.data[band_ind, -1, 0]
            low_r = hy_obj.data[band_ind, -1, -1]

        if hy_obj.endianness != sys.byteorder:
            up_l = up_l.byteswap()
            up_r = up_r.byteswap()
            low_l = low_l.byteswap()
            low_r = low_r.byteswap()

        # Take the most frequent corner value as the no-data value.
        counts = {v: k for k, v in Counter([up_l, up_r, low_l, low_r]).items()}
        hy_obj.no_data = counts[max(counts.keys())]
        hy_obj.close_data()

    # Optionally attach geometry-lookup-table (GLT) metadata.
    if bool(glt_path):
        glt_meta_dict = parse_glt_envi(glt_path)

        hy_obj.glt_path = glt_meta_dict["glt_path"]
        hy_obj.glt_map_info = glt_meta_dict["map_info"]
        hy_obj.lines_glt = glt_meta_dict["lines_glt"]
        hy_obj.columns_glt = glt_meta_dict["columns_glt"]
        hy_obj.glt_transform = glt_meta_dict["transform"]
        hy_obj.glt_projection = glt_meta_dict["projection"]

        del glt_meta_dict

    del header_dict
    return hy_obj
|
||||
|
||||
|
||||
class WriteENVI:
    """Iterator class for writing to an ENVI data file.

    Creates a numpy memory map of the output file on construction and
    exposes write methods (line, column, band, chunk, pixel, and
    GLT-indexed variants) that translate (line, column, band) addressing
    into the file's on-disk interleave order ('bip', 'bil' or 'bsq').
    """
    def __init__(self,output_name,header_dict):
        """
        Args:
            output_name (str): Pathname of output ENVI data file.
            header_dict (dict): Dictionary containing ENVI header information.

        Returns:
            None.

        """

        self.interleave = header_dict['interleave']
        self.header_dict = header_dict
        self.output_name =output_name
        # dtype_dict maps the ENVI integer 'data type' code to a numpy dtype.
        dtype = dtype_dict[header_dict["data type"]]
        lines = header_dict['lines']
        columns = header_dict['samples']
        bands = header_dict['bands']

        # Memory-map the output file with the axis order implied by the
        # interleave; mode 'w+' creates (or truncates) the file on disk.
        if self.interleave == "bip":
            self.data = np.memmap(output_name,dtype = dtype,
                                  mode='w+', shape = (lines,columns,bands))
        elif self.interleave == "bil":
            self.data = np.memmap(output_name,dtype = dtype,
                                  mode='w+', shape =(lines,bands,columns))
        elif self.interleave == "bsq":
            self.data = np.memmap(output_name,dtype = dtype,
                                  mode='w+',shape =(bands,lines,columns))
        # Write the matching .hdr file alongside the data file.
        write_envi_header(self.output_name,self.header_dict)

    def write_line(self,line,index):
        """Write a single line of data.

        Args:
            line (numpy.ndarray): Line array (columns,bands).
            index (int): Zero-based line index.

        Returns:
            None.

        """

        if self.interleave == "bip":
            self.data[index,:,:] = line

        elif self.interleave == "bil":
            # On-disk bil line order is (bands,columns): swap axes.
            self.data[index,:,:] = np.moveaxis(line,0,1)

        elif self.interleave == "bsq":
            self.data[:,index,:] = np.moveaxis(line,0,1)

    def write_line_glt(self,arr,glt_indices_y,glt_indices_x):
        """Write a set of pixels to GLT-indexed output positions.

        Args:
            arr (numpy.ndarray): Pixel array (pixels,bands).
            glt_indices_y (numpy.ndarray): Zero-based output line indices.
            glt_indices_x (numpy.ndarray): Zero-based output column indices.

        Returns:
            None.

        """

        if self.interleave == "bip":
            self.data[glt_indices_y,glt_indices_x,:] = arr

        elif self.interleave == "bil":
            # Fancy indexing on the first and last axes yields a
            # (pixels,bands) destination, matching arr directly.
            self.data[glt_indices_y,:,glt_indices_x] = arr

        elif self.interleave == "bsq":
            self.data[:,glt_indices_y,glt_indices_x] = np.moveaxis(arr,0,1)

    def write_column(self,column,index):
        """Write a single column of data.

        Args:
            column (numpy.ndarray): Column array (lines,bands).
            index (int): Zero-based column index.

        Returns:
            None.

        """

        if self.interleave == "bip":
            self.data[:,index,:] = column
        elif self.interleave == "bil":
            self.data[:,:,index] = column
        elif self.interleave == "bsq":
            self.data[:,:,index] = np.moveaxis(column,0,1)

    def write_band(self,band,index):
        """Write a single band of data.

        Args:
            band (numpy.ndarray): Band array (lines,columns).
            index (int): Zero-based band index.

        Returns:
            None.

        """

        if self.interleave == "bip":
            self.data[:,:,index] = band
        elif self.interleave == "bil":
            self.data[:,index,:] = band
        elif self.interleave == "bsq":
            self.data[index,:,:]= band

    def write_band_glt(self,band,index,glt_indices,fill_mask):
        """Write a single band through a GLT, filling unmapped pixels.

        Args:
            band (numpy.ndarray): Band array (lines,columns).
            index (int): Zero-based band index.
            glt_indices (numpy.ndarray,numpy.ndarray): Zero-based tuple indices.
            fill_mask (numpy.ndarray): Boolean output-grid mask; True marks
                pixels covered by the GLT.

        Returns:
            None.

        """

        if self.interleave == "bip":
            self.data[:,:,index][fill_mask] = band[glt_indices]
            # Pixels outside the GLT receive the header's ignore value.
            self.data[:,:,index][~fill_mask] = self.header_dict['data ignore value']
        elif self.interleave == "bil":
            self.data[:,index,:][fill_mask] = band[glt_indices]
            self.data[:,index,:][~fill_mask] = self.header_dict['data ignore value']
        elif self.interleave == "bsq":
            self.data[index,:,:][fill_mask] = band[glt_indices]
            self.data[index,:,:][~fill_mask] = self.header_dict['data ignore value']

    def write_chunk(self,chunk,line_index,column_index):
        """Write a rectangular spatial chunk of data.

        Args:
            chunk (numpy.ndarray): Chunk array (chunk lines,chunk columns,bands).
            line_index (int): Zero-based upper line index.
            column_index (int): Zero-based left column index.

        Returns:
            None.

        """

        x_start = column_index
        x_end = column_index + chunk.shape[1]
        y_start = line_index
        y_end = line_index + chunk.shape[0]

        if self.interleave == "bip":
            self.data[y_start:y_end,x_start:x_end,:] = chunk
        elif self.interleave == "bil":
            self.data[y_start:y_end,:,x_start:x_end] = np.moveaxis(chunk,-1,-2)
        elif self.interleave == "bsq":
            self.data[:,y_start:y_end,x_start:x_end] = np.moveaxis(chunk,-1,0)

    def write_pixel(self,pixel,line_index,column_index):
        """Write a single pixel's spectrum.

        Args:
            pixel (numpy.ndarray): Pixel array (bands).
            line_index (int): Zero-based line index.
            column_index (int): Zero-based column index.

        Returns:
            None.

        """

        if self.interleave == "bip":
            self.data[line_index,column_index,:] = pixel
        elif self.interleave == "bil":
            self.data[line_index,:,column_index] = pixel
        elif self.interleave == "bsq":
            self.data[:,line_index,column_index] = pixel

    def close(self):
        """Delete numpy memmap.

        Dropping the last reference flushes pending writes and closes the
        underlying file handle.
        """
        del self.data
||||
def envi_header_from_neon(hy_obj, interleave = 'bsq'):
    """Create an ENVI header dictionary from NEON metadata.

    Args:
        hy_obj (HyTools object): Populated HyTools file object.
        interleave (str, optional): Data interleave type. Defaults to 'bsq'.

    Returns:
        dict: Populated ENVI header dictionary.

    """
    header_dict = {}
    header_dict["ENVI description"] = "{}"
    header_dict["samples"] = hy_obj.columns
    header_dict["lines"] = hy_obj.lines
    header_dict["bands"] = hy_obj.bands
    header_dict["header offset"] = 0
    header_dict["file type"] = "ENVI Standard"
    # ENVI data type 2 = 16-bit signed integer.
    header_dict["data type"] = 2
    header_dict["interleave"] = interleave
    header_dict["sensor type"] = ""
    header_dict["byte order"] = 0
    header_dict["map info"] = hy_obj.map_info
    header_dict["coordinate system string"] = hy_obj.projection
    header_dict["wavelength units"] = hy_obj.wavelength_units
    header_dict["data ignore value"] = hy_obj.no_data
    header_dict["wavelength"] = hy_obj.wavelengths
    return header_dict
|
||||
def envi_header_from_nc(hy_obj, interleave = 'bsq', warp_glt = False):
    """Create an ENVI header dictionary from NetCDF metadata.

    Args:
        hy_obj (HyTools object): Populated HyTools file object.
        interleave (str, optional): Data interleave type. Defaults to 'bsq'.
        warp_glt (bool, optional): If True, use the GLT (warped) grid
            dimensions and geometry instead of the native ones.

    Returns:
        dict: Populated ENVI header dictionary.

    """
    header_dict = {}
    header_dict["ENVI description"] = "{}"

    if warp_glt == False:
        # Native (unwarped) grid geometry.
        header_dict["samples"] = hy_obj.columns
        header_dict["lines"] = hy_obj.lines
        header_dict["map info"] = hy_obj.map_info
        header_dict["coordinate system string"] = ("{%s}" % hy_obj.projection) if hy_obj.projection else "{}"
        header_dict["projection"] = hy_obj.projection
        header_dict["transform"] = hy_obj.transform

    else:
        # GLT (warped) output grid geometry.
        header_dict["samples"] = hy_obj.columns_glt
        header_dict["lines"] = hy_obj.lines_glt
        header_dict["map info"] = hy_obj.glt_map_info
        header_dict["coordinate system string"] = ("{%s}" % hy_obj.glt_projection) if hy_obj.glt_projection else "{}"
        header_dict["projection"] = hy_obj.glt_projection
        header_dict["transform"] = hy_obj.glt_transform

    # NOTE(review): bands is hard-coded to 2 (not hy_obj.bands) — this header
    # appears to be used for two-band (e.g. GLT x/y) outputs; confirm callers.
    header_dict["bands"] = 2 #hy_obj.bands
    header_dict["header offset"] = 0
    header_dict["file type"] = "ENVI Standard"
    # ENVI data type 4 = 32-bit float.
    header_dict["data type"] = 4
    header_dict["interleave"] = interleave
    header_dict["sensor type"] = ""
    header_dict["byte order"] = 0

    header_dict["wavelength units"] = hy_obj.wavelength_units
    header_dict["data ignore value"] = hy_obj.no_data
    header_dict["wavelength"] = hy_obj.wavelengths
    return header_dict
|
||||
|
||||
def write_envi_header(output_name,header_dict,mode = 'w'):
    """Write ENVI header file to disk.

    Args:
        output_name (str): Header file pathname (its extension is replaced
            with '.hdr').
        header_dict (dict): Populated ENVI header dictionary.
        mode (str): File open mode. default: w

    Returns:
        None.

    """
    base_name = os.path.splitext(output_name)[0]
    # Context manager guarantees the header file is closed even if
    # formatting a value raises part-way through.
    with open(base_name + ".hdr", mode) as header_file:
        header_file.write("ENVI\n")

        for key, value in header_dict.items():
            # Convert list to comma separated strings
            if isinstance(value,(list,np.ndarray)):
                value = "{%s}" % ",".join(map(str, value))
            elif key == "coordinate system string" and value and isinstance(value, str):
                # Ensure the coordinate system string is wrapped in braces.
                if not value.startswith("{"):
                    value = "{%s}" % value
            else:
                value = str(value)
            # Skip entries with None as value
            if value != 'None':
                header_file.write("%s = %s\n" % (key,value))
|
||||
|
||||
|
||||
def envi_header_dict():
    """
    Returns:
        dict: Empty ENVI header dictionary with every known field set to None.

    """
    # dict.fromkeys avoids iterating values that were never used in the
    # original comprehension.
    return dict.fromkeys(field_dict)
||||
def envi_read_line(data,index,interleave):
    """Read a single line from an ENVI memory map.

    Args:
        data (numpy.memmap): Numpy memory-map.
        index (int): Zero-based line index.
        interleave (str): Data interleave type.

    Returns:
        numpy.ndarray: Line array (columns, bands).

    """
    if interleave == "bip":
        # Already stored as (lines, columns, bands).
        line = data[index]
    elif interleave == "bil":
        # (bands, columns) -> (columns, bands)
        line = data[index].T
    elif interleave == "bsq":
        # (bands, columns) -> (columns, bands)
        line = data[:, index].T
    return line
|
||||
def envi_read_column(data,index,interleave):
    """Read a single column from an ENVI memory map.

    Args:
        data (numpy.memmap): Numpy memory-map.
        index (int): Zero-based column index.
        interleave (str): Data interleave type.

    Returns:
        numpy.ndarray: Column array (lines, bands).

    """
    if interleave == "bip":
        column = data[:, index]
    elif interleave == "bil":
        column = data[..., index]
    elif interleave == "bsq":
        # (bands, lines) -> (lines, bands)
        column = data[..., index].T
    return column
|
||||
def envi_read_band(data,index,interleave):
    """Read a single band from an ENVI memory map.

    Args:
        data (numpy.memmap): Numpy memory-map.
        index (int): Zero-based band index.
        interleave (str): Data interleave type.

    Returns:
        numpy.ndarray: Band array (lines, columns).

    """
    if interleave == "bip":
        band = data[..., index]
    elif interleave == "bil":
        band = data[:, index]
    elif interleave == "bsq":
        band = data[index]
    return band
|
||||
def envi_read_pixels(data,lines,columns,interleave):
    """Read a set of individual pixels from an ENVI memory map.

    Args:
        data (numpy.memmap): Numpy memory-map.
        lines (list): List of zero-indexed line indices.
        columns (list): List of zero-indexed column indices.
        interleave (str): Data interleave type.

    Returns:
        numpy.ndarray: Pixel array (pixels, bands).

    """
    if interleave == "bip":
        pixels = data[lines, columns]
    elif interleave == "bil":
        pixels = data[lines, :, columns]
    elif interleave == "bsq":
        # NOTE(review): this path yields a (bands, pixels) orientation,
        # unlike the (pixels, bands) result of the other interleaves —
        # confirm callers expect this before changing.
        pixels = data[:, lines, columns]
    return pixels
|
||||
|
||||
def envi_read_chunk(data,col_start,col_end,line_start,line_end,interleave):
    """Read a rectangular spatial chunk from an ENVI memory map.

    Args:
        data (numpy.memmap): Numpy memory-map.
        col_start (int): Zero-based left column index.
        col_end (int): Non-inclusive zero-based right column index.
        line_start (int): Zero-based top line index.
        line_end (int): Non-inclusive zero-based bottom line index.
        interleave (str): Data interleave type.

    Returns:
        numpy.ndarray: Chunk array (line_end-line_start, col_end-col_start, bands).

    """
    rows = slice(line_start, line_end)
    cols = slice(col_start, col_end)

    if interleave == "bip":
        chunk = data[rows, cols]
    elif interleave == "bil":
        # (lines, bands, cols) -> (lines, cols, bands)
        chunk = data[rows, :, cols].swapaxes(-1, -2)
    elif interleave == "bsq":
        # (bands, lines, cols) -> (lines, cols, bands)
        chunk = np.moveaxis(data[:, rows, cols], 0, -1)
    return chunk
|
||||
def calc_geotransform(mapinfo):
    """Build a GDAL-style geotransform from an ENVI 'map info' list.

    Args:
        mapinfo (list): ENVI 'map info' field split into a list of strings.

    Returns:
        tuple: Six-element geotransform (x origin, x terms, y origin, y terms).
    """
    if mapinfo[-1].startswith('rotation'):
        # Rotated grid: scale a 2D rotation matrix by the pixel size and
        # flip the y axis (image rows increase downward).
        angle = np.radians(float(mapinfo[-1].split('=')[1]))
        pixel_size = float(mapinfo[5])
        rot = pixel_size * np.array([[np.cos(angle), -np.sin(angle)],
                                     [np.sin(angle),  np.cos(angle)]]) @ np.array([[1, 0], [0, -1]])
        geotransform = (float(mapinfo[3]), rot[0, 0], rot[0, 1],
                        float(mapinfo[4]), rot[1, 0], rot[1, 1])
    else:
        # Unrotated grid (equivalent to a rotation of zero).
        geotransform = (float(mapinfo[3]), float(mapinfo[5]), 0,
                        float(mapinfo[4]), 0, -float(mapinfo[6]))
    return geotransform
|
||||
def parse_glt_envi(glt_path):
    """Parse the header of an external GLT ENVI file into a metadata dict.

    Args:
        glt_path (dict): Mapping of GLT band name to [file path, band number].

    Returns:
        dict: GLT path, map info, dimensions, geotransform and projection.
    """
    glt_meta_dict = {"glt_path": glt_path}

    # All GLT bands live in one file; read the header of the first entry.
    first_file = next(iter(glt_path.values()))[0]
    glt_header = parse_envi_header(os.path.splitext(first_file)[0] + ".hdr")

    glt_meta_dict["map_info"] = glt_header["map info"]
    glt_meta_dict["lines_glt"] = glt_header["lines"]
    glt_meta_dict["columns_glt"] = glt_header["samples"]

    glt_meta_dict["transform"] = calc_geotransform(glt_header["map info"])

    glt_meta_dict["projection"] = glt_header.get("coordinate system string", '')

    return glt_meta_dict
|
||||
|
||||
def parse_envi_header(header_file):
    """Parse an ENVI header file into a dictionary.

    Args:
        header_file (str): Header file pathname.

    Returns:
        dict: Populated header dictionary; fields absent from the file are None.

    """
    header_dict = envi_header_dict()
    # Context manager ensures the file handle is released even if parsing
    # a malformed value raises.
    with open(header_file, 'r') as hdr:
        line = hdr.readline()

        while line:
            if "=" in line:
                key, value = line.rstrip().split("=", 1)
                key = key.strip()
                # Add fields not in ENVI default list.
                # NOTE(review): this mutates the module-level field_dict, so
                # unknown fields seen in one header affect later parses.
                if key not in field_dict.keys():
                    field_dict[key] = "str"
                val_type = field_dict[key]

                # Multi-line bracketed value: keep reading until it closes.
                if "{" in value and not "}" in value:
                    while "}" not in line:
                        line = hdr.readline()
                        value += line

                if '{}' in value:
                    value = None
                elif val_type == "list_float":
                    value = np.array([float(x) for x in value.translate(str.maketrans("\n{}","   ")).split(",")])
                elif val_type == "list_int":
                    value = np.array([int(x) for x in value.translate(str.maketrans("\n{}","   ")).split(",")])
                elif val_type == "list_str":
                    value = [x.strip() for x in value.translate(str.maketrans("\n{}","   ")).split(",")]
                elif val_type == "int":
                    value = int(value.translate(str.maketrans("\n{}","   ")))
                elif val_type == "float":
                    value = float(value.translate(str.maketrans("\n{}","   ")))
                elif val_type == "str":
                    value = value.translate(str.maketrans("\n{}","   ")).strip().lower()

                header_dict[key] = value
            line = hdr.readline()

    # Fill unused fields with None
    for key in field_dict:
        if key not in header_dict.keys():
            header_dict[key] = None

    return header_dict
|
||||
71
Flexbrdf/hytools/io/neon.py
Normal file
71
Flexbrdf/hytools/io/neon.py
Normal file
@ -0,0 +1,71 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
NEON AOP HDF opener
|
||||
"""
|
||||
import h5py
|
||||
import numpy as np
|
||||
|
||||
|
||||
def open_neon(hy_obj, no_data = -9999):
    """Load and parse NEON formatted HDF image into a HyTools file object.

    Args:
        hy_obj (HyTools file object): File object with file_name set.
        no_data (float, optional): No data value. Defaults to -9999.

    Returns:
        HyTools file object: Populated HyTools file object.

    """
    hdf_obj = h5py.File(hy_obj.file_name,'r')
    hy_obj.base_key = list(hdf_obj.keys())[0]
    metadata = hdf_obj[hy_obj.base_key]["Reflectance"]["Metadata"]
    data = hdf_obj[hy_obj.base_key]["Reflectance"]["Reflectance_Data"]

    hy_obj.projection = metadata['Coordinate_System']['Coordinate_System_String'][()].decode("utf-8")
    hy_obj.map_info = metadata['Coordinate_System']['Map_Info'][()].decode("utf-8").split(',')
    # GDAL-style geotransform: (x origin, pixel width, 0, y origin, 0, -pixel height).
    hy_obj.transform = (float(hy_obj.map_info[3]), float(hy_obj.map_info[1]), 0,
                        float(hy_obj.map_info[4]), 0, -float(hy_obj.map_info[2]))
    hy_obj.fwhm = metadata['Spectral_Data']['FWHM'][()]
    hy_obj.wavelengths = metadata['Spectral_Data']['Wavelength'][()]
    hy_obj.wavelength_units = metadata['Spectral_Data']['Wavelength'].attrs['Units']
    hy_obj.lines = data.shape[0]
    hy_obj.columns = data.shape[1]
    hy_obj.bands = data.shape[2]
    # All bands start out usable (no bad-band mask in NEON metadata).
    hy_obj.bad_bands = np.zeros(hy_obj.bands, dtype=bool)
    hy_obj.no_data = no_data
    # HDF group paths of the ancillary datasets.
    hy_obj.anc_path = {'path_length': ['Ancillary_Imagery','Path_Length'],
                       'sensor_az': ['to-sensor_Azimuth_Angle'],
                       'sensor_zn': ['to-sensor_Zenith_Angle'],
                       'solar_az': ['Logs','Solar_Azimuth_Angle'],
                       'solar_zn': ['Logs','Solar_Zenith_Angle'],
                       'slope': ['Ancillary_Imagery','Slope'],
                       'aspect':['Ancillary_Imagery','Aspect'],
                       'aod': ['Ancillary_Imagery','Aerosol_Optical_Depth'],
                       'sky_view': ['Ancillary_Imagery','Sky_View_Factor'],
                       'illum_factor': ['Ancillary_Imagery','Illumination_Factor'],
                       # Fixed typo: key was 'elevation;' (stray semicolon),
                       # which made 'elevation' lookups fail.
                       'elevation': ['Ancillary_Imagery','Smooth_Surface_Elevation'],
                       'cast_shadow': ['Ancillary_Imagery','Cast_Shadow'],
                       'dense_veg': ['Ancillary_Imagery','Dark_Dense_Vegetation_Classification'],
                       'visibility_index': ['Ancillary_Imagery','Visibility_Index_Map'],
                       'haze_water_cloud': ['Ancillary_Imagery','Haze_Water_Cloud_Map'],
                       'water_vapor': ['Ancillary_Imagery','Water_Vapor_Column']}

    # All metadata has been copied out; release the HDF file handle so the
    # descriptor is not leaked.
    hdf_obj.close()
    return hy_obj
|
||||
426
Flexbrdf/hytools/io/netcdf.py
Normal file
426
Flexbrdf/hytools/io/netcdf.py
Normal file
@ -0,0 +1,426 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
NASA NetCDF opener
|
||||
"""
|
||||
import os
|
||||
import h5py
|
||||
import h5netcdf
|
||||
import numpy as np
|
||||
from .envi import parse_envi_header, WriteENVI, parse_glt_envi
|
||||
|
||||
# Map NetCDF wavelength unit abbreviations to the full names used in ENVI headers.
unit_dict = {'nm':'nanometers'}
# Map UTM hemisphere letters to the spelled-out form used in ENVI 'map info'.
utm_zone_dict = {'N':'North','S':'South'}
|
||||
|
||||
def open_netcdf(hy_obj, sensor, anc_path=None, glt_path=None):
    """Load and parse NASA formatted NetCDF AVIRIS/EMIT image into a HyTools file object.

    Args:
        hy_obj (HyTools file object): HyTools file object to populate.
        sensor (str): Sensor name for reading, either 'EMIT' (EMIT) or 'AV' (AVIRIS).
        anc_path (dict, optional): Dictionary with pathnames and band numbers
            of ancillary datasets. Defaults to an empty dict.
        glt_path (dict, optional): Dictionary with pathnames and band numbers
            of external GLT datasets. Defaults to an empty dict.

    Returns:
        HyTools file object: Populated HyTools file object.

    """
    # Avoid shared mutable default arguments.
    if anc_path is None:
        anc_path = {}
    if glt_path is None:
        glt_path = {}

    nc4_obj = h5py.File(hy_obj.file_name,'r')

    # Primary data variable: radiance takes precedence, else reflectance.
    if "radiance" in list(nc4_obj.keys()):
        data_var_name = "radiance"
    else:
        data_var_name = "reflectance"
    hy_obj.base_key = data_var_name

    # Locate the built-in geolocation lookup table group, if present.
    if "geolocation_lookup_table" in list(nc4_obj.keys()):
        glt_var_name = "geolocation_lookup_table"
    elif "location" in list(nc4_obj.keys()):
        glt_var_name = "location"
    else:
        glt_var_name = None

    metadata = nc4_obj.attrs
    if sensor=='AV':
        # AVIRIS NetCDF stores the cube as (bands, lines, columns).
        data = nc4_obj[data_var_name][data_var_name]
        hy_obj.fwhm = nc4_obj[data_var_name]['fwhm'][()]
        hy_obj.wavelengths = nc4_obj[data_var_name]['wavelength'][()]

        # Unit attribute name varies between products ('units' vs 'unit').
        if 'units' in nc4_obj[data_var_name]['wavelength'].attrs.keys():
            hy_obj.wavelength_units = unit_dict[get_attr_string(nc4_obj[data_var_name]['wavelength'].attrs['units'])]
        elif 'unit' in nc4_obj[data_var_name]['wavelength'].attrs.keys():
            hy_obj.wavelength_units = get_attr_string(nc4_obj[data_var_name]['wavelength'].attrs['unit'])

        hy_obj.lines = data.shape[1]
        hy_obj.columns = data.shape[2]
        hy_obj.bands = data.shape[0]

    elif sensor == 'EMIT':
        # EMIT stores the cube as (lines, columns, bands).
        data = nc4_obj[data_var_name]
        hy_obj.fwhm = nc4_obj['sensor_band_parameters']['fwhm'][()]
        hy_obj.wavelengths = nc4_obj['sensor_band_parameters']['wavelengths'][()]
        hy_obj.wavelength_units = unit_dict[get_attr_string(nc4_obj['sensor_band_parameters']['wavelengths'].attrs['units'])]
        hy_obj.lines = data.shape[0]
        hy_obj.columns = data.shape[1]
        hy_obj.bands = data.shape[2]

        # np.bool was removed in NumPy 1.24; use the builtin bool instead.
        hy_obj.bad_bands = np.array(1-nc4_obj['sensor_band_parameters']['good_wavelengths'][()]).astype(bool)

    if isinstance(data.attrs['_FillValue'],np.ndarray):
        hy_obj.no_data = data.attrs['_FillValue'][0]
    else:
        hy_obj.no_data = data.attrs['_FillValue']
    hy_obj.anc_path = anc_path

    if bool(glt_path):
        # External GLT supplied: read its geometry metadata from disk.
        glt_meta_dict = parse_glt_envi(glt_path)

        hy_obj.glt_path = glt_meta_dict["glt_path"]
        hy_obj.glt_map_info = glt_meta_dict["map_info"]
        hy_obj.lines_glt = glt_meta_dict["lines_glt"]
        hy_obj.columns_glt = glt_meta_dict["columns_glt"]
        hy_obj.glt_transform = glt_meta_dict["transform"]
        hy_obj.glt_projection = glt_meta_dict["projection"]
        del glt_meta_dict

        if sensor == "EMIT":
            # EMIT can only have one set of geotransform / GLT; the external
            # GLT overrides the built-in one.
            hy_obj.projection = hy_obj.glt_projection
            hy_obj.map_info = hy_obj.glt_map_info
            hy_obj.transform = hy_obj.glt_transform

    else:
        if sensor == 'EMIT':

            hy_obj.glt_path = { "glt_x": ["location","glt_x"],
                                "glt_y": ["location","glt_y"]}
            hy_obj.projection = get_attr_string(metadata['spatial_ref'])
            geotransform = nc4_obj.attrs['geotransform'][()]
            hy_obj.map_info = ['Geographic Lat/Lon','1','1',
                               str(geotransform[0]),str(geotransform[3]),
                               str(geotransform[1]),str(-geotransform[5]),
                               'WGS-84']
            hy_obj.transform = tuple(metadata['geotransform'][()])
            glt_x = nc4_obj['location']['glt_x']

            hy_obj.lines_glt = glt_x.shape[0]
            hy_obj.columns_glt = glt_x.shape[1]

            hy_obj.glt_projection = hy_obj.projection
            hy_obj.glt_transform = hy_obj.transform
            hy_obj.glt_map_info = hy_obj.map_info

        elif sensor == 'AV':
            # Projection group name varies between products.
            if "transverse_mercator" in nc4_obj.keys():
                spatial_ref_name_tag = "transverse_mercator"
            elif "projection" in nc4_obj.keys():
                spatial_ref_name_tag = "projection"
            else:
                spatial_ref_name_tag = None

            hy_obj.projection = get_attr_string(nc4_obj[spatial_ref_name_tag].attrs['spatial_ref'])
            geotransform = [float(x) for x in get_attr_string(nc4_obj[spatial_ref_name_tag].attrs['GeoTransform']).split(' ')]

            # Pull the UTM zone (e.g. '11N') out of the WKT projection string.
            utm_zone_tag=((hy_obj.projection).split('UTM zone ')[1]).split('",GEOGCS')[0]
            hy_obj.map_info = ['UTM','1','1',
                               str(geotransform[0]),str(geotransform[3]),
                               str(geotransform[1]),str(-geotransform[5]),
                               utm_zone_tag[:-1],utm_zone_dict[utm_zone_tag[-1]],'WGS-84']
            hy_obj.transform = tuple(geotransform)

            hy_obj.glt_path = { "glt_x": [glt_var_name,"sample"],
                                "glt_y": [glt_var_name,"line"]}

            if glt_var_name is None:
                hy_obj.lines_glt = hy_obj.lines
                hy_obj.columns_glt = hy_obj.columns
            else:
                glt_x = nc4_obj[glt_var_name]['sample']
                hy_obj.lines_glt = glt_x.shape[0]
                hy_obj.columns_glt = glt_x.shape[1]

            if hy_obj.base_key=="radiance":
                hy_obj.glt_projection = hy_obj.projection
                hy_obj.glt_transform = hy_obj.transform
                hy_obj.glt_map_info = hy_obj.map_info

    return hy_obj
|
||||
|
||||
def get_attr_string(attr):
    """Return *attr* as-is, decoding UTF-8 first if it is a bytes object."""
    return attr.decode("utf-8") if isinstance(attr, bytes) else attr
|
||||
|
||||
def set_wavelength_meta(nc4_obj,header_dict,glt_bool):
    """Create wavelength and FWHM variables in an output NetCDF object.

    For ENVI/AVIRIS outputs — and EMIT outputs warped with a GLT — the
    variables go in a '/reflectance' group; unwarped EMIT outputs use the
    native '/sensor_band_parameters' layout instead.
    """
    file_type = (header_dict['file_type']).lower()

    if file_type in ["envi","ncav"] or (file_type=="emit" and glt_bool is True):
        nc4_obj.create_group("reflectance")
        nc4_obj.create_variable("/reflectance/wavelength",("wavelength",),
                                data=header_dict['wavelength'],
                                dtype=np.float32)
        nc4_obj.create_variable("/reflectance/fwhm",("wavelength",),
                                data=header_dict['fwhm'],
                                dtype=np.float32)
    elif file_type=="emit":
        # Reached only when glt_bool is falsy (the warped EMIT case is
        # handled by the branch above): write the native EMIT layout.
        nc4_obj.dimensions["bands"]=header_dict['bands']
        nc4_obj.create_variable("/sensor_band_parameters/wavelengths",("bands",),
                                data=np.array(header_dict['wavelength']),
                                dtype=np.float32)
        nc4_obj.create_variable("/sensor_band_parameters/fwhm", ("bands",),
                                data=header_dict['fwhm'],
                                dtype=np.float32)
||||
|
||||
def write_netcdf_refl_meta(nc4_obj,header_dict,glt_bool):
    """Write reflectance metadata to an output NetCDF object.

    Creates the wavelength/FWHM variables, then writes the remaining
    file-level metadata via write_netcdf_meta (defined elsewhere in this
    module).
    """
    set_wavelength_meta(nc4_obj,header_dict,glt_bool)
    write_netcdf_meta(nc4_obj,header_dict,glt_bool)
||||
|
||||
class WriteNetCDF(WriteENVI):
|
||||
"""Iterator class for writing to a NetCDF data file.
|
||||
The class inherites all the write functionss from WriteENVI: write pixel, line, band, chunk, etc.
|
||||
"""
|
||||
    def __init__(self,output_name, header_dict, attr_dict, glt_bool, type_tag, band_name=None):
        """
        Args:
            output_name (str): Pathname of the output NetCDF data file.
            header_dict (dict): Dictionary containing ENVI-style header information.
            attr_dict (dict or None): Extra NetCDF attributes to attach to the file.
            glt_bool (bool): Whether the output is warped with a GLT.
            type_tag (str): Output kind: "reflectance", "mask" or "trait".
            band_name (str, optional): Variable name used for mask/trait outputs.

        Returns:
            None.

        """

        # Chunk sizes: largest power of two not exceeding each spatial
        # dimension, capped at 256 (2**8).
        dim1_chunk_size = 2**(min(int(np.log2(header_dict['lines'])),8))
        dim2_chunk_size = 2**(min(int(np.log2(header_dict['samples'])),8))

        if type_tag=="reflectance": # for reflectance
            self.header_dict = header_dict
            self.output_name = output_name
            self.file_type = header_dict['file_type'].lower()

            # Reflectance output creates a new file.
            self.nc4_obj = h5netcdf.File(output_name, "w")

            write_netcdf_refl_meta(self.nc4_obj,header_dict,glt_bool)
            if self.file_type in ["ncav","envi"]:
                self.interleave = "bsq"
                self.data = self.nc4_obj.create_variable("/reflectance/reflectance",
                                                         ("wavelength","northing","easting"),
                                                         np.float32,
                                                         chunks=(2,dim1_chunk_size,dim2_chunk_size),
                                                         compression='gzip')
                self.data.attrs["grid_mapping"] = "projection"
            elif self.file_type == "emit":
                if glt_bool:
                    self.interleave = "bsq"
                    self.data = self.nc4_obj.create_variable("/reflectance/reflectance",
                                                             ("wavelength","northing","easting"),
                                                             np.float32,
                                                             chunks=(1,dim1_chunk_size,dim2_chunk_size),
                                                             compression='gzip')
                    self.data.attrs["grid_mapping"] = "projection"
                else:
                    # Native (unwarped) EMIT layout is band-interleaved-by-pixel.
                    self.interleave = "bip"
                    self.data = self.nc4_obj.create_variable("reflectance",
                                                             ("downtrack","crosstrack","bands"),
                                                             np.float32,
                                                             chunks=(dim1_chunk_size,dim2_chunk_size,2),
                                                             compression='gzip')

            self.data.attrs["_FillValue"]=np.array([-9999.0],dtype=np.float32)
            self.external_nc_attrs(attr_dict)
        elif type_tag=="mask": # for masks
            self.interleave = "bsq"
            self.header_dict = header_dict
            self.file_type = header_dict['file_type'].lower()
            # Masks are appended to an existing output file ("r+").
            self.nc4_obj = h5netcdf.File(output_name, "r+")

            if self.file_type in ["ncav","envi"]:
                self.data = self.nc4_obj.create_variable(f"/masks/{band_name}",
                                                         ("northing","easting"),
                                                         np.uint8,
                                                         chunks=(dim1_chunk_size,dim2_chunk_size),
                                                         compression='gzip')
                self.data.attrs["grid_mapping"] = "projection"
            elif self.file_type == "emit":
                if glt_bool:
                    self.data = self.nc4_obj.create_variable(f"/masks/{band_name}",
                                                             ("northing","easting"),
                                                             np.uint8,
                                                             chunks=(dim1_chunk_size,dim2_chunk_size),
                                                             compression='gzip')
                    self.data.attrs["grid_mapping"] = "projection"
                else:
                    self.data = self.nc4_obj.create_variable(f"/masks/{band_name}",
                                                             ("downtrack","crosstrack"),
                                                             np.uint8,
                                                             chunks=(dim1_chunk_size,dim2_chunk_size),
                                                             compression='gzip')
            # 255 marks unmapped/no-data pixels in the uint8 mask.
            self.data.attrs["_FillValue"]=np.array([255],dtype=np.uint8)
            self.external_nc_attrs(attr_dict)
        elif type_tag=="trait":
            self.interleave = "bsq"
            self.file_type = header_dict['file_type'].lower()
            # NOTE(review): self.header_dict is not set in this branch —
            # confirm the header-dependent writers are not used for traits.
            self.nc4_obj = h5netcdf.File(output_name, "w")

            # Trait outputs carry two bands (first two 'band names' entries).
            self.nc4_obj.dimensions["bands"]=2

            self.interleave = "bsq"

            write_netcdf_meta(self.nc4_obj,header_dict,glt_bool)
            if self.file_type in ["ncav","envi"]:
                self.data = self.nc4_obj.create_variable(f"/{band_name}/stack",
                                                         ("bands","northing","easting"),
                                                         np.float32,
                                                         chunks=(1,dim1_chunk_size,dim2_chunk_size),
                                                         compression='gzip')
                self.data.attrs["grid_mapping"] = "projection"

            elif self.file_type == "emit":
                if glt_bool:
                    self.data = self.nc4_obj.create_variable(f"/{band_name}/stack",
                                                             ("bands","northing","easting"),
                                                             np.float32,
                                                             chunks=(1,dim1_chunk_size,dim2_chunk_size),
                                                             compression='gzip')
                    self.data.attrs["grid_mapping"] = "projection"
                else:
                    self.data = self.nc4_obj.create_variable(f"/{band_name}/stack",
                                                             ("bands","downtrack","crosstrack"),
                                                             np.float32,
                                                             chunks=(1,dim1_chunk_size,dim2_chunk_size),
                                                             compression='gzip')

            self.data.attrs["band_names"] = header_dict["band names"][:2]
            self.data.attrs["_FillValue"] = np.array([-9999.0],dtype=np.float32)
||||
def write_mask_band(self,band):
    """Write a full 2D mask band to the open NetCDF mask dataset.

    Args:
        band (numpy.ndarray): Mask array of shape (lines, columns).

    Returns:
        None.
    """
    self.data[:,:] = band
|
||||
|
||||
def write_mask_band_glt(self,band,glt_indices,fill_mask):
    """Orthorectify a mask band through the GLT and write it to the dataset.

    Args:
        band (numpy.ndarray): Un-orthorectified mask band (lines, columns).
        glt_indices (tuple of numpy.ndarray): Zero-based (row, col) source
            indices, one pair per valid orthorectified pixel.
        fill_mask (numpy.ndarray): Boolean array in the orthorectified grid;
            True where a valid source pixel exists.

    Returns:
        None.
    """
    # Start from the header's data-ignore value, then overwrite valid pixels
    # with values pulled from the source band via the GLT indices.
    tmp_band = np.ones(fill_mask.shape)*self.header_dict['data ignore value']
    tmp_band[fill_mask] = band[glt_indices]
    # Off-swath pixels get 255, the uint8 fill value of the mask dataset.
    tmp_band[~fill_mask] = 255

    self.data[:,:] = tmp_band
|
||||
|
||||
|
||||
def write_glt_dataset(self,glt_x_arr,glt_y_arr,dim_x_name="ortho_x",dim_y_name="ortho_y"):
    """Write the geographic lookup table (GLT) arrays to the /location group.

    Creates the gzip-compressed int32 variables /location/glt_x and
    /location/glt_y, tags each with the CF grid mapping, and marks 0 as
    the fill value.

    Args:
        glt_x_arr (numpy.ndarray): GLT sample (x) index array.
        glt_y_arr (numpy.ndarray): GLT line (y) index array.
        dim_x_name (str): Name of the x dimension. Defaults to "ortho_x".
        dim_y_name (str): Name of the y dimension. Defaults to "ortho_y".

    Returns:
        None.
    """
    # Both GLT layers share identical dimensions, dtype, chunking and
    # attributes, so create them in a single loop.
    for glt_name, glt_arr in (("glt_x", glt_x_arr), ("glt_y", glt_y_arr)):
        glt_var = self.nc4_obj.create_variable(f"/location/{glt_name}",
                                               (dim_y_name, dim_x_name),
                                               data=glt_arr,
                                               dtype=np.int32,
                                               chunks=(256, 256),
                                               compression='gzip')
        glt_var.attrs["grid_mapping"] = "projection"
        glt_var.attrs["_FillValue"] = np.array([0], dtype=np.int32)
|
||||
|
||||
|
||||
def write_netcdf_band_glt(self,band,index,glt_indices,fill_mask):
    """Orthorectify a single band through the GLT and write it to the dataset.

    Args:
        band (numpy.ndarray): Un-orthorectified band array (lines, columns).
        index (int): Zero-based band index in the output dataset.
        glt_indices (tuple of numpy.ndarray): Zero-based (row, col) source
            indices, one pair per valid orthorectified pixel.
        fill_mask (numpy.ndarray): Boolean array in the orthorectified grid;
            True where a valid source pixel exists.

    Returns:
        None.
    """
    # Initialize the whole grid to the -9999 no-data value, then fill valid
    # pixels from the source band. (The original also re-assigned -9999 to
    # ~fill_mask afterwards -- redundant, since those pixels already hold
    # the fill value from initialization.)
    tmp_band = np.full(fill_mask.shape, -9999.0)
    tmp_band[fill_mask] = band[glt_indices]

    # Place the band slice according to the dataset interleave.
    if self.interleave == "bip":
        self.data[:,:,index]=tmp_band
    elif self.interleave == "bil":
        self.data[:,index,:]=tmp_band
    elif self.interleave == "bsq":
        self.data[index,:,:]=tmp_band
|
||||
|
||||
def external_nc_attrs(self,attr_dict):
    """Write user-supplied attributes into the open NetCDF file.

    Keys containing '/' are treated as group paths: everything before the
    last separator addresses a group, and the final component becomes the
    attribute name. Keys without a separator become global attributes.
    All values are stringified and UTF-8 encoded.

    Args:
        attr_dict (dict or None): Mapping of attribute keys to values.
            When None, nothing is written.

    Returns:
        None.
    """
    if attr_dict is None:
        return

    for key, value in attr_dict.items():
        encoded = str(value).encode("utf-8")
        parts = key.split('/')
        # A leading '/' yields an empty first component -- drop it.
        if len(parts[0]) == 0:
            parts = parts[1:]
        if len(parts) > 1:
            group_path = '/' + '/'.join(parts[:-1])
            self.nc4_obj[group_path].attrs[parts[-1]] = encoded
        else:
            # Note: the original, unsplit key is used as the attribute name.
            self.nc4_obj.attrs[key] = encoded
|
||||
|
||||
|
||||
def close(self):
    """Close the underlying NetCDF4 file handle, flushing pending writes.
    """
    self.nc4_obj.close()
|
||||
|
||||
def write_netcdf_meta(nc4_obj,header_dict,glt_bool):
    """Write spatial dimensions and projection metadata to an open NetCDF4 file.

    Args:
        nc4_obj: Open h5netcdf.File output object.
        header_dict (dict): Image header metadata; reads 'file_type', 'lines',
            'samples', 'transform', 'projection', and for un-warped EMIT
            inputs 'lines_glt'/'samples_glt'.
        glt_bool (bool): True when the output is orthorectified with a GLT.

    Returns:
        None.
    """

    file_type = (header_dict['file_type']).lower()

    # Map-projected outputs: ENVI/NCAV inputs, or EMIT inputs warped with a GLT.
    if file_type in ["envi","ncav"] or (file_type=="emit" and glt_bool is True):

        # NOTE(review): this local is never used below -- TODO confirm it can go.
        transform=header_dict['transform']

        nc4_obj.dimensions["northing"]=header_dict['lines'] #dim0
        nc4_obj.dimensions["easting"]=header_dict['samples'] #dim1

        # Dummy scalar variable carrying the CF grid-mapping attributes.
        tm_var = nc4_obj.create_variable("/projection",data=np.array([0]),dtype=np.uint8)
        tm_var.attrs["GeoTransform"]=' '.join([str(x) for x in header_dict['transform']]).encode("utf-8")
        tm_var.attrs["crs_wkt"]=header_dict['projection'].encode("utf-8")
        tm_var.attrs["spatial_ref"]=header_dict['projection'].encode("utf-8")

    elif file_type=="emit":
        # Dead branch: glt_bool True for EMIT is caught by the first condition.
        if glt_bool: # handled in above codes
            pass
        else: # do not warp with GLT
            # NOTE(review): group handle is unused here; the GLT arrays are
            # written into /location later by write_glt_dataset.
            loc_gp=nc4_obj.create_group("location")
            nc4_obj.dimensions["downtrack"]=header_dict['lines'] #dim0
            nc4_obj.dimensions["crosstrack"]=header_dict['samples'] #dim1

            # Orthorectified grid dimensions for the GLT arrays.
            nc4_obj.dimensions["ortho_y"]=header_dict['lines_glt']
            nc4_obj.dimensions["ortho_x"]=header_dict['samples_glt']

            nc4_obj.attrs["geotransform"]=' '.join([str(x) for x in header_dict['transform']]).encode("utf-8")
            nc4_obj.attrs["spatial_ref"]=header_dict['projection'].encode("utf-8")
            # Pixel size from the (possibly rotated) geotransform terms.
            nc4_obj.attrs["spatialResolution"]=np.sqrt(header_dict['transform'][1]**2+header_dict['transform'][2]**2)

            tm_var = nc4_obj.create_variable("/projection",data=np.array([0]),dtype=np.uint8)
            tm_var.attrs["GeoTransform"]=' '.join([str(x) for x in header_dict['transform']]).encode("utf-8")
            tm_var.attrs["crs_wkt"]=header_dict['projection'].encode("utf-8")
            tm_var.attrs["spatial_ref"]=header_dict['projection'].encode("utf-8")
|
||||
27
Flexbrdf/hytools/masks/__init__.py
Normal file
27
Flexbrdf/hytools/masks/__init__.py
Normal file
@ -0,0 +1,27 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
The :mod:`hytools.masks` module include functions image correction.
|
||||
"""
|
||||
from .masks import *
|
||||
from .cloud import *
|
||||
from .calc_apply import *
|
||||
|
||||
|
||||
|
||||
109
Flexbrdf/hytools/masks/calc_apply.py
Normal file
109
Flexbrdf/hytools/masks/calc_apply.py
Normal file
@ -0,0 +1,109 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
This module contain functions for generating boolean masks specific to apply image corrections and
|
||||
models.
|
||||
"""
|
||||
|
||||
from scipy.ndimage.morphology import binary_erosion
|
||||
import numpy as np
|
||||
from .cloud import zhai_cloud
|
||||
|
||||
|
||||
def ndi(hy_obj, args):
    """Threshold a normalized difference index of two bands.

    Args:
        hy_obj: HyTools image object (provides the ndi method).
        args (dict): Requires 'band_1', 'band_2', 'min', 'max'.

    Returns:
        numpy.ndarray: Boolean mask, True where the index lies in
        [min, max] inclusive.
    """
    index = hy_obj.ndi(args['band_1'], args['band_2'])
    lower = float(args['min'])
    upper = float(args['max'])
    return (index >= lower) & (index <= upper)
|
||||
|
||||
|
||||
def ancillary(hy_obj, args):
    """Mask an ancillary dataset with min and max thresholds.

    The 'cosine_i' dataset is computed on the fly; any other name is read
    from the image's ancillary datasets.

    Args:
        hy_obj: HyTools image object.
        args (dict): Requires 'name', 'min', 'max'.

    Returns:
        numpy.ndarray: Boolean mask, True where the value lies in
        [min, max] inclusive.
    """
    if args['name'] == 'cosine_i':
        values = hy_obj.cosine_i()
    else:
        values = hy_obj.get_anc(args['name'])
    lower = float(args['min'])
    upper = float(args['max'])
    return (values >= lower) & (values <= upper)
|
||||
|
||||
|
||||
def neon_edge(hy_obj, args):
    '''
    Mask artifacts in NEON images around edges.

    Erodes the no-data mask with a circular window of the given radius,
    trimming a buffer of pixels along the image edges.

    Args:
        hy_obj: HyTools image object with a boolean mask['no_data'] array.
        args (dict): Requires 'radius', the buffer radius in pixels.

    Returns:
        numpy.ndarray: Boolean mask, True for valid pixels outside the
        edge buffer.
    '''
    # Import from the supported location: scipy.ndimage.morphology (used by
    # the module-level import) is deprecated and removed in recent SciPy.
    from scipy.ndimage import binary_erosion

    radius = args['radius']
    y_grid, x_grid = np.ogrid[-radius: radius + 1, -radius: radius + 1]
    # Circular (disk) structuring element of the given radius.
    window = (x_grid**2 + y_grid**2 <= radius**2).astype(np.float32)
    buffer_edge = binary_erosion(hy_obj.mask['no_data'], window).astype(bool)
    return buffer_edge
|
||||
|
||||
|
||||
def kernel_finite(hy_obj, args):
    '''
    Mask pixels whose BRDF volume or geometric scattering kernel is
    non-finite (NaN or infinite).

    Args:
        hy_obj: HyTools image object with a configured brdf dictionary.
        args (dict): Unused; present for mask-factory signature consistency.

    Returns:
        numpy.ndarray: Boolean mask, True where both kernels are finite.
    '''
    volume = hy_obj.volume_kernel(hy_obj.brdf['volume'])
    geometric = hy_obj.geom_kernel(hy_obj.brdf['geometric'],
                                   b_r=hy_obj.brdf["b/r"],
                                   h_b=hy_obj.brdf["h/b"])
    return np.isfinite(volume) & np.isfinite(geometric)
|
||||
|
||||
|
||||
def cloud(hy_obj, args):
    """Generate a combined cloud/shadow clear-sky mask.

    Args:
        hy_obj: HyTools image object.
        args (dict): Requires 'method' ('zhai_2018' is the only supported
            value) plus the zhai_cloud flags and coefficients: 'cloud',
            'shadow', 'T1', 't2', 't3', 't4', 'T7', 'T8'.

    Returns:
        numpy.ndarray: Boolean mask, True for clear (non-cloud, non-shadow)
        pixels.

    Raises:
        ValueError: If args['method'] is not a recognized method.
    """
    if args['method'] == 'zhai_2018':
        # zhai_cloud flags clouds/shadows True; invert for a clear-sky mask.
        return ~zhai_cloud(hy_obj, args['cloud'], args['shadow'],
                           args['T1'], args['t2'], args['t3'],
                           args['t4'], args['T7'], args['T8'])
    # Previously an unknown method fell through and raised NameError on an
    # undefined variable; fail explicitly instead.
    raise ValueError(f"Unrecognized cloud masking method: {args['method']}")
|
||||
|
||||
|
||||
def water(hy_obj, args):
    '''
    Create water mask using NDWI threshold

    Pixels whose normalized difference index of the two configured bands
    is at or below the threshold are flagged as water; the result is then
    eroded by one pixel to trim mixed shoreline pixels.

    Args:
        hy_obj: HyTools image object (provides the ndi method).
        args (dict): Requires 'band_1', 'band_2', 'threshold'.

    Returns:
        numpy.ndarray: Boolean mask, True for water pixels.
    '''
    ndwi = hy_obj.ndi(args['band_1'], args['band_2'])
    water_mask = ndwi <= float(args['threshold'])
    # One-pixel erosion drops isolated and boundary detections.
    return binary_erosion(water_mask)
|
||||
|
||||
|
||||
def external(hy_obj, args):
    '''Load a mask from an external dataset

    Registers the per-image external mask file on the image object, reads
    it as an ancillary dataset, and returns True where the mask equals the
    requested class value.

    Args:
        hy_obj: HyTools image object.
        args (dict): Requires 'files' (mapping of image file name to mask
            path) and 'class' (the class value to select).

    Returns:
        numpy.ndarray: Boolean mask, True for pixels of the requested class.
    '''
    # Band index 0 of the external file is used as the mask layer.
    hy_obj.anc_path['external_mask'] = [args['files'][hy_obj.file_name], 0]
    external_arr = hy_obj.get_anc('external_mask')
    return external_arr == args['class']
|
||||
|
||||
|
||||
def band(hy_obj, args):
    '''
    Create mask using band thresholds

    Returns True where the reflectance of the configured wavelength falls
    within [min, max] inclusive.

    Args:
        hy_obj: HyTools image object (provides the get_wave method).
        args (dict): Requires 'band' (wavelength), 'min', 'max'.

    Returns:
        numpy.ndarray: Boolean mask.
    '''
    wave = hy_obj.get_wave(args['band'])
    lower = float(args['min'])
    upper = float(args['max'])
    return (wave >= lower) & (wave <= upper)
|
||||
119
Flexbrdf/hytools/masks/cloud.py
Normal file
119
Flexbrdf/hytools/masks/cloud.py
Normal file
@ -0,0 +1,119 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Cloud masks
|
||||
|
||||
'''
|
||||
from scipy.ndimage import median_filter
|
||||
import numpy as np
|
||||
|
||||
|
||||
def zhai_cloud(hy_obj,cloud,shadow,T1=0.01,t2=.1,t3=.25,t4=.5,T7= 9,T8= 9):
    '''This function replicates the method of Zhai et al. (2018) for detecting clouds and shadows in
    multispectral and hyperspectral imagery but does not apply shadow spatial refinement.

    Suggested values for coefficients and params:
        T1 : 0.01, 0.1, 1, 10, 100
        t2 : 1/10, 1/9, 1/8, 1/7, 1/6, 1/5, 1/4, 1/3, 1/2
        t3 : 1/4, 1/3, 1/2, 2/3, 3/4
        t4 : 1/2, 2/3, 3/4, 4/5, 5/6
        T7 : 3, 5, 7, 9, 11
        T8 : 3, 5, 7, 9, 11

    Zhai, H., Zhang, H., Zhang, L., & Li, P. (2018).
    Cloud/shadow detection based on spectral indices for multi/hyperspectral optical remote sensing imagery.
    ISPRS journal of photogrammetry and remote sensing, 144, 235-253.
    https://doi.org/10.1016/j.isprsjprs.2018.07.006

    Args:
        hy_obj : HyTools data container object.
        cloud (bool): Detect clouds.
        shadow (bool): Detect shadows.
        T1 (float): Threshold T1.
        t2 (float): Adjusting coefficient t2.
        t3 (float): Adjusting coefficient t3.
        t4 (float): Adjusting coefficient t4.
        T7 (float): Parameter T7, cloud median-filter window size.
        T8 (float): Parameter T8, shadow median-filter window size.

    Returns:
        mask (nd.array): Boolean array where detected clouds and/or shadows = True.

    '''

    blue= hy_obj.get_wave(440)
    green= hy_obj.get_wave(550)
    red= hy_obj.get_wave(660)
    nir = hy_obj.get_wave(850)

    #If SWIR not available
    if hy_obj.wavelengths.max() < 1570:
        # Zhai et al. 2018 Eq. 1a,b
        CI_1 = (3*nir)/(blue+green+red)
        CI_2 = (blue+green+red+nir)/4
        # Zhai et al. 2018 Eq. 3
        CSI = nir

    else:
        swir1 = hy_obj.get_wave(1570)
        swir2= hy_obj.get_wave(2110)
        # Zhai et al. 2018 Eq. 1a,b
        CI_1 = (nir+ 2*swir1)/(blue+green+red)
        CI_2 = (blue+green+red+nir+swir1+swir2)/6
        # Zhai et al. 2018 Eq. 3
        CSI = (nir + swir1)/2

    # Scene-adaptive thresholds, computed over valid (no_data == True) pixels only.
    # Zhai et al. 2018 Eq.5
    T2 = np.mean(CI_2[hy_obj.mask['no_data']]) + t2*(np.max(CI_2[hy_obj.mask['no_data']])-np.mean(CI_2[hy_obj.mask['no_data']]))
    # Zhai et al. 2018 Eq.6
    T3 = np.min(CSI[hy_obj.mask['no_data']]) + t3*(np.mean(CSI[hy_obj.mask['no_data']])-np.min(CSI[hy_obj.mask['no_data']]))
    # Zhai et al. 2018 Eq.7
    T4 = np.min(blue[hy_obj.mask['no_data']]) + t4*(np.mean(blue[hy_obj.mask['no_data']])-np.min(blue[hy_obj.mask['no_data']]))

    mask = np.zeros((hy_obj.lines,hy_obj.columns)).astype(bool)

    if cloud:
        # Cloud test followed by a median filter to remove speckle.
        clouds = (np.abs(CI_1) < T1) | (CI_2 > T2)
        clouds = median_filter(clouds, T7)
        mask[clouds] = True

    if shadow:
        # Shadow test followed by a median filter to remove speckle.
        shadows = (CSI<T3) & (blue<T4)
        shadows = median_filter(shadows,T8)
        mask[shadows] = True

    return mask
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
42
Flexbrdf/hytools/masks/masks.py
Normal file
42
Flexbrdf/hytools/masks/masks.py
Normal file
@ -0,0 +1,42 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
from .calc_apply import *
|
||||
from .cloud import *
|
||||
|
||||
# Registry mapping mask names, as used in configuration dictionaries,
# to the factory functions that build each boolean mask.
mask_dict = {'ndi' : ndi,
             'neon_edge' : neon_edge,
             'kernel_finite': kernel_finite,
             'ancillary': ancillary,
             'cloud': cloud,
             'water': water,
             'band': band,
             'external' : external}
|
||||
|
||||
def mask_create(hy_obj, masks):
    ''' Combine a series of boolean masks using an
    and operator

    Starts from the image no-data mask and ANDs in each named mask built
    by its mask_dict factory, so the result is True only where every mask
    is True.

    Args:
        hy_obj: HyTools image object.
        masks: Iterable of (mask_name, args) pairs; mask_name must be a key
            of mask_dict.

    Returns:
        numpy.ndarray: Combined boolean mask.
    '''
    combined = np.copy(hy_obj.mask['no_data'])
    for mask_name, mask_args in masks:
        combined = combined & mask_dict[mask_name](hy_obj, mask_args)
    return combined
|
||||
23
Flexbrdf/hytools/misc/__init__.py
Normal file
23
Flexbrdf/hytools/misc/__init__.py
Normal file
@ -0,0 +1,23 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
"""
|
||||
from .misc import *
|
||||
from .geog_utm import *
|
||||
from .point import *
|
||||
258
Flexbrdf/hytools/misc/geog_utm.py
Normal file
258
Flexbrdf/hytools/misc/geog_utm.py
Normal file
@ -0,0 +1,258 @@
|
||||
|
||||
import numpy as np
|
||||
from types import SimpleNamespace
|
||||
|
||||
# WGS84/NAD83 (GRS80-equivalent) reference ellipsoid parameters used by
# the Transverse Mercator conversions below.
NAD83_WGS84_dict = {
    "a":6378137,
    "b":6356752.3142,
    "flat":1/298.257223563,
    "a_dscp":"Equatorial Radius, meters",
    "b_dscp":"Polar Radius, meters",
    "flat_dscp":"Flattening (a-b)/a",
}

# Namespace wrapper so the parameters can be read as attributes (e.g. obj.a).
NAD83_WGS84_obj = SimpleNamespace(**NAD83_WGS84_dict)
|
||||
|
||||
class BasicMapObj:
    """Minimal Transverse Mercator (UTM) projection helper.

    Converts between geographic coordinates (lat/lon, degrees) and projected
    UTM coordinates (easting/northing, meters) using series expansions,
    without external projection libraries.

    Reference:
        Snyder J.P. (1987) Map Projections: A Working Manual,
        U.S. Geological Survey Professional Paper 1395.
    """

    def __init__(self,ellipsoid=NAD83_WGS84_obj,zone=None):
        """
        Args:
            ellipsoid: Namespace with ellipsoid semi-axes `a` (equatorial)
                and `b` (polar) in meters. Defaults to WGS84/NAD83.
            zone (str or None): UTM zone such as '11N' or '18S', or an
                EPSG-style code starting with '326' (north) / '327' (south).
                When None, the zone, central meridian, and false northing
                are inferred later from the data by the estimate_* methods.
        """
        b=ellipsoid.b
        a=ellipsoid.a
        e=np.sqrt(1-b**2/a**2)  # first eccentricity

        self.b=b
        self.a=a
        self.e=e
        self.ep2=(e*a/b)**2  # second eccentricity squared (e'^2)
        self.n=(a-b)/(a+b)   # third flattening
        self.k0=0.9996       # UTM scale factor at the central meridian
        self.easting = 500000  # UTM false easting, meters
        self.zone = zone
        #self.northing = None

        if zone is None:
            self.lon0=None
            self.northing = None
        else:
            # Accept EPSG-style codes: '326xx' -> 'xxN', '327xx' -> 'xxS'.
            if zone.startswith('326'):
                zone = zone[3:5] + 'N'
                self.zone = zone
            elif zone.startswith('327'):
                zone = zone[3:5] + 'S'
                self.zone = zone

            if str(zone)[-1:].isnumeric(): # default is N, not S
                zone_number = int(zone)
                self.northing = 0
            else:
                zone_number = int(zone[:-1])
                if zone[-1] in ('N','n'):
                    self.northing = 0
                elif zone[-1] in ('S','s'):
                    self.northing = 1e7  # southern-hemisphere false northing

            self.lon0 = (zone_number - 1)*6 -180 +3 # in Degrees

    def calc_rho(self,lat_rad):
        # Meridional radius of curvature at latitude lat_rad (radians).
        a=self.a
        #b=self.b
        e=self.e

        return a*(1-e**2)/((1-e**2*(np.sin(lat_rad))**2)**(3/2))

    def calc_nu(self,lat_rad):
        # Radius of curvature in the prime vertical at lat_rad (radians).
        a=self.a
        e=self.e
        return a / (1-(e*np.sin(lat_rad))**2)**0.5

    def calc_p(self,lon_rad):
        # Longitude offset (radians) from the zone's central meridian.
        return lon_rad - np.radians(self.lon0)

    def calc_S(self,lat_rad):
        #S is the meridional arc
        a=self.a
        n=self.n
        # Series coefficients expressed in the third flattening n.
        a_p = 1 * a * (1 - n + 5/4*(n**2-n**3) + 81/64*(n**4-n**5))
        b_p = 3/2 * a * n * (1 - n + 7/8*(n**2-n**3) + 55/64*(n**4))
        c_p = 15/16 * a * (n**2) * (1 - n + 3/4*(n**2-n**3))
        d_p = 35/48 * a * (n**3) * (1 - n + 11/16*(n**2))
        e_p = 315/512*a * (n**4) * (1 - n)

        s = a_p*lat_rad \
            - b_p*np.sin(2*lat_rad) \
            + c_p*np.sin(4*lat_rad) \
            - d_p*np.sin(6*lat_rad) \
            + e_p*np.sin(8*lat_rad)

        return s

    def calc_K3(self,nu,lat_rad):
        # Series term K3 of the forward Transverse Mercator northing expansion.
        k0 = self.k0
        ep2 = self.ep2

        k_3 = k0*nu*np.sin(lat_rad)* (np.cos(lat_rad))**3 / 24
        k_3 *= 5 - (np.tan(lat_rad))**2 + 9 * ep2 * (np.cos(lat_rad))**2 + 4 * (ep2**2) * (np.cos(lat_rad))**4

        return k_3

    def calc_K5(self,nu,lat_rad):
        # Series term K5 of the forward Transverse Mercator easting expansion.
        k0 = self.k0
        ep2 = self.ep2

        k_5 = k0 * nu * (np.cos(lat_rad))**3 /6
        k_5 *= 1 - (np.tan(lat_rad))**2 + ep2 * (np.cos(lat_rad))**2

        return k_5

    def estimate_lon0(self, lon_deg):
        # Infer the central meridian (and zone number) from the data when the
        # zone was not supplied at construction time.
        if self.lon0 is None:
            major_lon = np.median(lon_deg)
            central_meridians = np.arange(0,60,1)*6 - 180 +3
            close_meridian = central_meridians[np.argmin(np.abs(major_lon-central_meridians))]
            self.lon0 = close_meridian
            # NOTE(review): stores an int zone number here, whereas zones
            # passed to __init__ are strings like '11N' -- confirm downstream.
            self.zone = int((close_meridian-3 +180)/6)+1 #(zone_number - 1)*6 -180 +3
        else:
            #use lon0 during initialization
            pass

    def estimate_northing(self,lat_deg):
        # Infer the false northing (hemisphere) from the median latitude.
        if self.northing is None:
            major_lat = np.median(lat_deg)
            if major_lat>0:
                self.northing=0
            else:
                self.northing=1e7

    def convert_xycoord(self,lat_deg,lon_deg):
        """Forward conversion: lat/lon (degrees) -> UTM (easting, northing)."""
        lat_rad = np.radians(lat_deg)
        lon_rad = np.radians(lon_deg)

        self.estimate_lon0(lon_deg)
        #print(self.lon0)

        self.estimate_northing(lat_deg)

        s = self.calc_S(lat_rad)
        k0 = self.k0
        nu = self.calc_nu(lat_rad)
        p = self.calc_p(lon_rad)

        k_1 = s*k0
        k_2 = k0*nu*np.sin(2*lat_rad)/4
        k_3 = self.calc_K3(nu,lat_rad)

        y = k_1 + k_2 * (p**2) + k_3 * (p**4) + self.northing

        k_5 = self.calc_K5(nu,lat_rad)
        k_4 = k0 * nu * np.cos(lat_rad)

        x = k_4*p + k_5*(p**3)+ self.easting

        return x,y

    ########################
    #https://gdal.org/en/stable/proj_list/transverse_mercator.html
    # ref: Snyder J.P. (1987) Map projections a working manual, U.S. Geological Survey Professional Paper 1395, 1987. page.61
    def convert_xycoord_gdal(self, lat_deg,lon_deg):
        """Forward conversion using the Snyder (1987) eccentricity-series form."""
        lat_rad = np.radians(lat_deg)
        lon_rad = np.radians(lon_deg)

        self.estimate_lon0(lon_deg)
        self.estimate_northing(lat_deg)

        k0 = self.k0
        E = (self.e)**2  # first eccentricity squared
        p = self.calc_p(lon_rad)
        cos_lat = np.cos(lat_rad)
        sin_lat = np.sin(lat_rad)
        tan_lat = sin_lat / cos_lat
        tan2_lat = tan_lat**2

        e_p2 = self.ep2

        nu = self.calc_nu(lat_rad)
        #nu = self.a / np.sqrt(1 - E * sin_lat**2)
        C = e_p2 * cos_lat**2
        A = cos_lat * p

        E2=E**2
        E3=E**3

        # Meridional arc length M (Snyder Eq. 3-21).
        M1 = 1 - E / 4 - 3 * E2 / 64 - 5 * E3 / 256
        M2 = 3 * E / 8 + 3 * E2 / 32 + 45 * E3 / 1024
        M3 = 15 * E2 / 256 + 45 * E3 / 1024
        M4 = 35 * E3 / 3072
        M = self.a * (M1 * lat_rad -
                      M2 * np.sin(2 * lat_rad) +
                      M3 * np.sin(4 * lat_rad) -
                      M4 * np.sin(6 * lat_rad))

        #M = a[(1 - e2/4 - 3e4/64 - 5e6/256 -....)* - (3e2/8 + 3e4/32 + 45e6/1024+....)sin2*
        #+ (15e4/256 + 45e6/1024 +.....)sin4* - (35e6/3072 + ....)sin6* + .....]

        # Snyder Eqs. 8-9 / 8-10 with false easting/northing applied.
        x = k0 * nu * (A +
                       A**3 / 6 * (1 - tan2_lat + C) +
                       A**5 / 120 * (5 - 18 * tan2_lat + tan2_lat**2 + 72 * C - 58 * e_p2))+ self.easting

        y = k0 * (M + nu * tan_lat * (A**2 / 2 +
                                      A**4 / 24 * (5 - tan2_lat + 9 * C + 4 * C**2) +
                                      A**6 / 720 * (61 - 58 * tan2_lat + tan2_lat**2 + 600 * C - 330 * e_p2)))+ self.northing

        return x,y

    ########################

    def calc_mu(self): #calc_e1_mu(self):
        # Reciprocal factor used to form the footpoint-latitude parameter mu.
        e=self.e
        a=self.a

        mu_recip = a * (1-0.25*(e**2) -3/64*(e**4) -5/256 * (e**6))
        #e1 = (1 - eee) / (1 + eee) # same as self.n
        return mu_recip

    # ref : Snyder J.P. (1987) Map projections a working manual, U.S. Geological Survey Professional Paper 1395, 1987. page.63
    # https://pubs.usgs.gov/pp/1395/report.pdf
    def convert_latlon(self,x,y):
        """Inverse conversion: UTM (easting, northing) -> lat/lon (degrees)."""
        x_in = x - self.easting
        y_in = y - self.northing

        ep2 = self.ep2
        a = self.a
        e =self.e

        k0 = self.k0

        M = y_in / k0

        mu_recip = self.calc_mu() #self.calc_e1_mu()
        e1=self.n
        mu = M / mu_recip

        # Footpoint latitude series coefficients (Snyder Eq. 3-26).
        J1 = 3/2 * e1 - 27/32 * (e1**3)
        J2 = 21/16*(e1**2) -55/32*(e1**4)
        J3 = 151/96 * (e1**3)
        J4 = 1097/512 * (e1**4)

        fp = mu + J1*np.sin(2*mu) + J2*np.sin(4*mu) + J3*np.sin(6*mu) + J4*np.sin(8*mu)

        C1 = ep2*(np.cos(fp))**2
        T1 = (np.tan(fp))**2
        R1 = a*(1-e**2) / (1-(e*np.sin(fp))**2)**1.5  # meridional radius of curvature
        N1 = a / (1-(e*np.sin(fp))**2)**0.5  # prime-vertical radius of curvature
        D = x_in / N1 / k0

        Q1 = N1*np.tan(fp)/R1
        Q2 = D**2 / 2
        Q3 = (5 + 3*T1 + 10*C1 - 4*C1**2 -9*ep2) * D**4 / 24
        Q4 = (61 + 90*T1 + 298*C1 +45*T1**2 - 3*C1**2 -252*ep2) * D**6 /720

        lat_out = fp - Q1*(Q2-Q3+Q4)

        Q5 = D
        Q6 = (1 + 2*T1 + C1) * D**3 / 6
        Q7 = (5 - 2*C1 + 28*T1 -3*C1**2 + 8*ep2 +24*T1**2) * D**5 / 120

        lon_out = np.radians(self.lon0) + (Q5-Q6+Q7) / np.cos(fp)

        return np.degrees(lat_out), np.degrees(lon_out)
|
||||
86
Flexbrdf/hytools/misc/misc.py
Normal file
86
Flexbrdf/hytools/misc/misc.py
Normal file
@ -0,0 +1,86 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
"""
|
||||
from itertools import tee
|
||||
|
||||
def progbar(curr, total, full_progbar = 100):
    '''Display progress bar.

    Gist from:

    https://gist.github.com/marzukr/3ca9e0a1b5881597ce0bcb7fb0adc549

    Args:
        curr (int, float): Current task level.
        total (int, float): Task level at completion.
        full_progbar (int): Total character width of the bar. Defaults to 100.

    Returns:
        None.

    '''
    frac = curr/total
    n_filled = round(frac*full_progbar)
    # Carriage return (no newline) so successive calls redraw in place.
    bar = '#'*n_filled + '-'*(full_progbar-n_filled)
    print('\r', bar, '[{:>7.2%}]'.format(frac), end='')
|
||||
|
||||
|
||||
def pairwise(iterable):
    """Yield consecutive overlapping pairs: s -> (s0,s1), (s1,s2), ...

    Works on any iterable, including one-shot generators, by duplicating
    the iterator with itertools.tee and advancing one copy by a single step.
    """
    first, second = tee(iterable)
    next(second, None)
    return zip(first, second)
|
||||
|
||||
def set_brdf(hy_obj,brdf_dict):
    """Attach a BRDF configuration dictionary to the image object."""
    hy_obj.brdf = brdf_dict
|
||||
|
||||
def set_topo(hy_obj,topo_dict):
    """Attach a topographic-correction configuration dictionary to the image object."""
    hy_obj.topo = topo_dict
|
||||
|
||||
def update_brdf(hy_obj,args):
    """Update one entry of the image's BRDF configuration (args: 'key', 'value')."""
    hy_obj.brdf[args['key']] = args['value']
|
||||
|
||||
def update_topo(hy_obj,args):
    """Update one entry of the image's topographic configuration (args: 'key', 'value')."""
    hy_obj.topo[args['key']] = args['value']
|
||||
|
||||
def set_glint(hy_obj,glint_dict):
    """Attach a glint-correction configuration dictionary to the image object.

    For the 'Hedley' correction type, the per-image mapping in
    'deep_water_sample' is replaced in place by this image's own entry.
    Note: glint_dict is mutated, not copied.
    """
    # If the type is hedley, need to specify deep water area
    if glint_dict['type'] == 'Hedley':
        glint_dict['deep_water_sample'] = glint_dict['deep_water_sample'][hy_obj.file_name]

    hy_obj.glint = glint_dict
|
||||
|
||||
def update_topo_group(subgroup_dict_in):
    """Invert a {file_name: group_tag} mapping into grouped file-name lists.

    Args:
        subgroup_dict_in (dict): Maps each file name to its group tag.

    Returns:
        tuple:
            list of lists: File names grouped by tag, in first-seen tag order.
            list: The group tags, in first-seen order.
    """
    grouped = {}
    tag_order = []

    for file_name, group_tag in subgroup_dict_in.items():
        if group_tag not in grouped:
            grouped[group_tag] = []
            tag_order.append(group_tag)
        grouped[group_tag].append(file_name)

    # dict insertion order matches first-seen tag order, so index by tag_order.
    name_groups = [grouped[tag] for tag in tag_order]

    return name_groups, tag_order
|
||||
258
Flexbrdf/hytools/misc/point.py
Normal file
258
Flexbrdf/hytools/misc/point.py
Normal file
@ -0,0 +1,258 @@
|
||||
|
||||
import pandas as pd
|
||||
from .geog_utm import *
|
||||
|
||||
def local_transform_all_point(mapobj, point_df, uid, xcoord, ycoord, point_epsg_code):
    ''' Create a dataframe with image georeferenced coordinates of all points of interest

    When point_epsg_code is None the coordinates are assumed to already be
    geographic and are only renamed; otherwise they are treated as projected
    and converted to lat/lon with mapobj.

    Args:
        mapobj (BasicMapObj): Projection helper (unused when epsg is None).
        point_df (pandas.DataFrame): Input points.
        uid (str): Name of the unique-ID column.
        xcoord (str): Name of the x-coordinate column.
        ycoord (str): Name of the y-coordinate column.
        point_epsg_code: EPSG code of the coordinates, or None for lat/lon.

    Returns:
        pandas.DataFrame: Columns [uid, img_x, img_y] plus [lat, lon] when
        the input was projected.
    '''
    if point_epsg_code is None:
        print("Default latlon")
        out_df = pd.DataFrame(point_df[[uid, xcoord, ycoord]])
        out_df.columns = [uid, 'img_x', 'img_y']
        return out_df

    # Projected input: convert easting/northing back to lat/lon and append.
    lat_arr, lon_arr = mapobj.convert_latlon(point_df[xcoord], point_df[ycoord])
    out_df = point_df[[uid, xcoord, ycoord]].join(pd.DataFrame(np.array((lat_arr, lon_arr)).T))
    out_df.columns = [uid, 'img_x', 'img_y', 'lat', 'lon']
    return out_df
|
||||
|
||||
def get_neighbor(hyObj, point_coord_df, n_neighbor, uid, point_epsg_code,mapobj,use_glt_bool):
    ''' Create a dataframe with columns and lines of all image space neighbors of points of interest

    Args:
        hyObj: HyTools image object providing transform/map_info metadata
            (glt_* equivalents when use_glt_bool is True).
        point_coord_df (pandas.DataFrame): Points as produced by
            local_transform_all_point (img_x/img_y, plus lat/lon when projected).
        n_neighbor (int): Neighborhood size; supported values are 0 (point
            only), 4 (rook neighbors), and 8 (queen neighbors).
            NOTE(review): any other positive value leaves the offset arrays
            undefined and raises NameError -- confirm intended input domain.
        uid (str): Name of the unique-ID column in point_coord_df.
        point_epsg_code: EPSG code of the point coordinates, or None when
            the points are geographic lat/lon.
        mapobj (BasicMapObj): Projection helper for the point coordinates.
        use_glt_bool (bool): Index into the GLT (orthorectified) grid.

    Returns:
        pandas.DataFrame: One row per point-neighbor combination with GLT and
        raw image row/column indices (zero-based), restricted to in-bounds
        pixels. Empty when no GLT location is valid.
    '''
    # Select the affine geotransform of the grid being indexed.
    if use_glt_bool:
        ul_x, new_x_resolution, new_x_rot, ul_y, new_y_rot, new_y_resolution = hyObj.glt_transform
        print(hyObj.glt_projection,hyObj.glt_map_info)
    else:
        ul_x, new_x_resolution, new_x_rot, ul_y, new_y_rot, new_y_resolution = hyObj.transform
        print(hyObj.projection,hyObj.map_info)

    # 2x2 linear part of the geotransform (resolution + rotation terms).
    transform_matrix = np.array([[new_x_resolution, new_x_rot],[new_y_rot, new_y_resolution]])

    # Express point coordinates as offsets from the image upper-left corner,
    # converting between coordinate systems where the image and points differ.
    if hyObj.map_info[0].startswith("Geographic"):

        if mapobj.zone is None: # Not defined, assume to be geographic from csv / point_df
            xy_coord_array = point_coord_df[['img_x','img_y']].values-np.array([[ul_x,ul_y]])
        else: # assume to has both UTM and coord in point_df
            xy_coord_array = point_coord_df[['lon','lat']].values-np.array([[ul_x,ul_y]])
    elif hyObj.map_info[0].startswith("UTM"):

        if point_epsg_code is None: # latlon in point, but utm in image
            # Build a projection object for the image's own UTM zone.
            img_zone = hyObj.map_info[7]+hyObj.map_info[8][0]
            img_mapobj = BasicMapObj(zone=img_zone) #NAD83_WGS84_obj,

            x_coord, y_coord = img_mapobj.convert_xycoord_gdal(point_coord_df['img_y'].values, point_coord_df['img_x'].values)
            xy_coord_array = np.stack((x_coord, y_coord)).T -np.array([[ul_x,ul_y]])
        else:
            xy_coord_array = point_coord_df[['img_x','img_y']].values-np.array([[ul_x,ul_y]])


    # Invert the affine transform to get integer (col, row) pixel locations.
    img_loc_array = (xy_coord_array@(np.linalg.inv(transform_matrix).T)).astype(np.int32) # zero-based

    n_neighbor = max(0,n_neighbor)
    if n_neighbor>=0:  # always True after the clamp above

        # Offset matrices are [identity, offset] pairs: column 0 keeps the
        # original index, column 1 adds the neighbor offset.
        if n_neighbor==0:
            offset_arr_col = np.array([[1,0]])
            offset_arr_row = np.array([[1,0]])
            uid_list = np.repeat(point_coord_df[uid].values,1)
            new_uid_list = np.tile([f'_{x}' for x in range(1)],img_loc_array.shape[0])

        if n_neighbor== 4:
            # Center pixel plus up/left/right/down neighbors.
            offset_arr_col = np.array([[1,0],
                                       [1,0],
                                       [1,-1],
                                       [1,1],
                                       [1,0]])
            offset_arr_row = np.array([[1,0],
                                       [1,-1],
                                       [1,0],
                                       [1,0],
                                       [1,1]])

            uid_list = np.repeat(point_coord_df[uid].values,5)
            new_uid_list = np.tile([f'_{x}' for x in range(5)],img_loc_array.shape[0])

        if n_neighbor== 8:
            # Center pixel plus all eight surrounding neighbors.
            offset_arr_col = np.array([[1,0],
                                       [1,0],
                                       [1,-1],
                                       [1,1],
                                       [1,0],
                                       [1,-1],
                                       [1,1],
                                       [1,-1],
                                       [1,1]])
            offset_arr_row = np.array([[1,0],
                                       [1,-1],
                                       [1,0],
                                       [1,0],
                                       [1,1],
                                       [1,-1],
                                       [1,-1],
                                       [1,1],
                                       [1,1]])

            uid_list = np.repeat(point_coord_df[uid].values,9)
            new_uid_list = np.tile([f'_{x}' for x in range(9)],img_loc_array.shape[0])

        # Expand each point into its neighborhood via the offset matrices.
        img_loc_array_with_nb_col = offset_arr_col@np.vstack([img_loc_array[:,0],np.ones(img_loc_array.shape[0])])
        img_loc_array_with_nb_row = offset_arr_row@np.vstack([img_loc_array[:,1],np.ones(img_loc_array.shape[0])])
        new_uid_list = uid_list+new_uid_list  # per-neighbor IDs, e.g. 'plot7' + '_3'

        img_loc_array_with_nb_col = img_loc_array_with_nb_col.T.ravel().astype(np.int32)
        img_loc_array_with_nb_row = img_loc_array_with_nb_row.T.ravel().astype(np.int32) # zero-based

    return_df = pd.DataFrame({'new_uid':new_uid_list,uid:uid_list,'img_col_glt':img_loc_array_with_nb_col,'img_row_glt':img_loc_array_with_nb_row})

    print('use_glt_bool',use_glt_bool)
    if use_glt_bool:
        # Keep neighbors inside the GLT grid, then map them through the GLT
        # to raw (un-orthorectified) image indices.
        valid_mask = (img_loc_array_with_nb_col>=0) & (img_loc_array_with_nb_col< hyObj.columns_glt) & (img_loc_array_with_nb_row>=0) & (img_loc_array_with_nb_row< hyObj.lines_glt)

        if valid_mask.sum()==0:
            print("No valid GLT locations.")
            return pd.DataFrame()

        return_df = return_df[valid_mask]

        post_glt_col_ind = hyObj.glt_x[(img_loc_array_with_nb_row[valid_mask],img_loc_array_with_nb_col[valid_mask])]-1
        post_glt_row_ind = hyObj.glt_y[(img_loc_array_with_nb_row[valid_mask],img_loc_array_with_nb_col[valid_mask])]-1 # one-based to zero-based

        return_df["img_col_raw"] = post_glt_col_ind.astype(np.int32)
        return_df["img_row_raw"] = post_glt_row_ind.astype(np.int32) # zero-based
    else:
        # No GLT: GLT-grid indices and raw indices are the same grid.
        return_df["img_col_raw"] = return_df['img_col_glt']
        return_df["img_row_raw"] = return_df['img_row_glt']

    # check whether points are within the boundary of the image or not
    return_df = return_df[(return_df['img_col_raw']>=0) & (return_df['img_col_raw']< hyObj.columns) & (return_df['img_row_raw']>=0) & (return_df['img_row_raw']< hyObj.lines)]
    return return_df
|
||||
|
||||
def add_df_lat_lon(point_coord_neighbor_df, hyObj, mapobj, offset=0.5, use_glt_bool = False):
    '''Append 'lat' and 'lon' columns to the point dataframe, in place.

    Pixel centers ('img_col_glt'/'img_row_glt' plus `offset`) are pushed
    through the image's affine geotransform. If the image is already
    geographic the map coordinates are the lat/lon directly; for UTM
    images they are converted through `mapobj.convert_latlon`.

    Args:
        point_coord_neighbor_df (pandas.DataFrame): Must contain zero-based
            'img_col_glt' and 'img_row_glt' columns; modified in place.
        hyObj: HyTools file object providing `transform`/`glt_transform`
            and `map_info`.
        mapobj: Projection helper with a `convert_latlon` method (only
            used for UTM images).
        offset (float): Sub-pixel offset, 0.5 = pixel center.
        use_glt_bool (bool): Use the GLT geotransform instead of the raw one.

    Returns:
        None. Columns are added to the dataframe in place.
    '''
    geotransform = hyObj.glt_transform if use_glt_bool else hyObj.transform
    ul_x, x_res, x_rot, ul_y, y_rot, y_res = geotransform

    # 2x2 affine (resolution + rotation terms); origin added afterwards.
    affine = np.array([[x_res, x_rot], [y_rot, y_res]])

    # Zero-based pixel coordinates, one point per column.
    pixel_xy = point_coord_neighbor_df[['img_col_glt', 'img_row_glt']].values.transpose()

    map_xy = np.dot(affine, pixel_xy + offset) + np.array([[ul_x], [ul_y]])

    projection_name = hyObj.map_info[0]
    if projection_name.startswith("Geographic"):
        # Map coordinates are already lon/lat.
        point_coord_neighbor_df['lat'] = map_xy[1, :]
        point_coord_neighbor_df['lon'] = map_xy[0, :]
    elif projection_name.startswith("UTM"):
        lats, lons = mapobj.convert_latlon(map_xy[0, :], map_xy[1, :])
        point_coord_neighbor_df['lat'] = lats
        point_coord_neighbor_df['lon'] = lons
|
||||
|
||||
|
||||
def subset_band_list(hyObj, spec_df, use_band_list, band_list):
    """Subset the spectra dataframe columns to a band selection.

    Args:
        hyObj: HyTools file object; ``hyObj.bad_bands`` (a boolean numpy
            array) is the fallback selection when no band list is given.
        spec_df (pandas.DataFrame): Spectra, one column per band.
        use_band_list (bool): If False, return ``spec_df`` unchanged.
        band_list (list or numpy.ndarray): Explicit selection — either a
            list of column indices or a boolean mask the same length as
            ``hyObj.bad_bands``. An empty list means "use the fallback".

    Returns:
        pandas.DataFrame: The (possibly column-subset) spectra.
    """
    # Band subsetting disabled: pass the spectra through untouched.
    if not use_band_list:
        return spec_df

    if len(band_list) == 0:
        # No explicit list; fall back to the image's band mask.
        if not isinstance(hyObj.bad_bands, np.ndarray):
            # The file carries no band mask either — nothing to subset by.
            return spec_df
        return spec_df.iloc[:, hyObj.bad_bands]

    # Explicit user-provided selection (indices or boolean mask).
    return spec_df.iloc[:, band_list]
|
||||
|
||||
def local_point2spec(hyObj, point_csv, uid, xcoord, ycoord, point_epsg_code, n_neighbor=4, use_band_list=True, band_list=[],use_glt_bool=False):
    """Extract spectra with points in a CSV from the hyperspectral image

    Parameters
    ----------
    hyObj : HyTools file object
    point_csv: str
        full filename of the point CSV
    uid: str
        the user specified unique point ID in the CSV
    xcoord: str
        the column name in CSV for X coordinate of the points
    ycoord: str
        the column name in CSV for Y coordinate of the points
    point_epsg_code: int
        EPSG code for the projection of the points, XY coordinates are based on this projection
    n_neighbor: int
        default is 4, other options are 0, 8
        how many neighbors in the image should be sampled from the center
    use_band_list: boolean
        default True; whether to use a subset of bands
    band_list: list or numpy array
        default is a blank list
        if it is a list, it should be one like [5,6,7,8,9, 12]
        if it is a numpy array, it should be the same size as hyObj.bad_bands with only True or False in the array

    use_glt_bool: boolean
        default False; whether to use geo-lookup table for pixel indexing

    Returns
    -------
    point_coord_neighbor_df: pandas dataframe
        includes all the location and spectra information for all points
        from the CSV, or None if no point falls inside the image.
    """
    # NOTE(review): mutable default `band_list=[]` — never mutated below,
    # but a None sentinel would be safer.

    point_df = pd.read_csv(point_csv, sep=',')
    # Build the projection helper: prefer the caller's EPSG code, otherwise
    # derive the zone from the image header when it is a UTM image.
    if point_epsg_code is None:
        if hyObj.map_info[0].startswith("UTM"):
            # presumably zone number + hemisphere initial (e.g. '16' + 'N')
            # — TODO confirm against the ENVI map-info layout.
            img_zone = hyObj.map_info[7]+hyObj.map_info[8][0]
            parameter_obj = BasicMapObj(zone=img_zone) #NAD83_WGS84_obj,
        else:
            parameter_obj = BasicMapObj() #NAD83_WGS84_obj
    else:
        parameter_obj = BasicMapObj(zone=point_epsg_code) #NAD83_WGS84_obj,

    # create a dataframe with image georeferenced coordinates of all points of interest
    point_coord_df = local_transform_all_point(parameter_obj, point_df, uid, xcoord, ycoord,point_epsg_code)

    # create a dataframe with columns and lines of all image space neighbors of points of interest
    point_coord_neighbor_df = get_neighbor(hyObj, point_coord_df, n_neighbor, uid,point_epsg_code,parameter_obj,use_glt_bool)

    if point_coord_neighbor_df.shape[0]==0:
        # Nothing landed inside the image boundary.
        print("0 point within boundary!\n\n")
        return None
    else:
        # add LAT LON of the points in the dataframe (in place)
        add_df_lat_lon(point_coord_neighbor_df, hyObj,parameter_obj,use_glt_bool=use_glt_bool)

        spec_data = hyObj.get_pixels(point_coord_neighbor_df['img_row_raw'].values,point_coord_neighbor_df['img_col_raw'].values) # zero-based

        # determine the column names of the spectra dataframe based on wavelengths
        if hyObj.wavelength_units.lower()[:4]=='micr':
            # micrometers: keep 3 decimals, e.g. B0.450
            new_band_name = ['B{:0.3f}'.format(x) for x in hyObj.wavelengths]
        elif hyObj.wavelength_units.lower()[:4]=='nano' :
            # nanometers: zero-padded integers, e.g. B0450
            new_band_name = ['B{:04d}'.format(int(x)) for x in hyObj.wavelengths]
        else:
            # unknown units: fall back to 1-based band numbers
            new_band_name = ['B{:d}'.format(x+1) for x in range(hyObj.bands)]

        spec_df = pd.DataFrame(spec_data, columns=new_band_name)

        # perform the subsetting of the columns in the dataframe according to the band_list or hyObj.bad_bands
        spec_df = subset_band_list(hyObj,spec_df,use_band_list, band_list)

        # Reset the index so the positional concat below lines rows up.
        point_coord_neighbor_df=point_coord_neighbor_df.reset_index(drop=True)
        point_coord_neighbor_df = pd.concat([point_coord_neighbor_df,spec_df], axis=1, join='inner')

        return point_coord_neighbor_df
|
||||
22
Flexbrdf/hytools/plotting/__init__.py
Normal file
22
Flexbrdf/hytools/plotting/__init__.py
Normal file
@ -0,0 +1,22 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
The :mod:`hytools.plotting` module includes functions for plotting.
|
||||
"""
|
||||
from .brdf_diagnostics import *
|
||||
143
Flexbrdf/hytools/plotting/brdf_diagnostics.py
Normal file
143
Flexbrdf/hytools/plotting/brdf_diagnostics.py
Normal file
@ -0,0 +1,143 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Plotting functions for BRDF
|
||||
"""
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
from matplotlib.lines import Line2D
|
||||
|
||||
def universal_diagno_plot(hy_obj,config_dict):
    ''' Generate a diagnostic plot of BRDF correction results.

    For each diagnostic wavelength, band means binned by 2 degrees of view
    zenith angle are plotted: uncorrected (open circles), BRDF-corrected
    (filled circles) and the modeled BRDF (dashed line). The figure is
    saved to '<output_dir><base_name>_brdf_plot.png' and closed.

    Args:
        hy_obj: HyTools file object.
        config_dict (dict): Uses config_dict['brdf']['diagnostic_waves']
            (plotted on a 2x2 grid, so 4 wavelengths are expected —
            TODO confirm) and config_dict['export']['output_dir'].

    Returns:
        None.
    '''
    #Flip sign of zenith angle at minimum
    sensor_zn = hy_obj.get_anc('sensor_zn',radians =False)
    sensor_zn[~hy_obj.mask['no_data']] = np.nan
    # Pixels before the per-line minimum zenith get negative angles so the
    # x-axis spans the full signed view-angle range.
    for i,line in enumerate(sensor_zn):
        line[:np.nanargmin(line)] *= -1
        sensor_zn[i] = line  # `line` is a view, so this is a no-op kept for clarity
    # Keep only BRDF-sample pixels and floor to 2-degree bins.
    sensor_zn = (sensor_zn[hy_obj.mask['calc_brdf']]//2)*2

    diagno_df = pd.DataFrame()
    diagno_df['sensor_zn'] =sensor_zn

    bands = [hy_obj.wave_to_band(wave) for wave in config_dict['brdf']['diagnostic_waves']]
    for band_num in bands:
        # Uncorrected reflectance over the BRDF-sample mask.
        band = hy_obj.get_band(band_num,mask='calc_brdf')
        diagno_df['uncorr_%s' % band_num] = band

        # Same band with the BRDF correction appended to existing corrections.
        band = hy_obj.get_band(band_num,
                               corrections = hy_obj.corrections + ['brdf'],
                               mask='calc_brdf')

        diagno_df['corr_%s' % band_num] = band
        fvol, fgeo, fiso = hy_obj.brdf['coeffs'][band_num]

        # Modeled BRDF: fiso + fvol*k_vol + fgeo*k_geom.
        brdf = fvol*hy_obj.ancillary['k_vol']
        brdf += fgeo*hy_obj.ancillary['k_geom']
        brdf+=fiso
        brdf = brdf[hy_obj.mask['calc_brdf']]
        diagno_df['brdf_%s' % band_num] = brdf

    # Average every 2 degrees of zenith angle
    diagno_df= diagno_df.groupby(by= 'sensor_zn').mean()

    fig = plt.figure(figsize= (8,6))
    fig.suptitle(hy_obj.base_name)
    for a,band_num in enumerate(bands,start=1):
        ax = fig.add_subplot(2,2,a)
        ax.plot(diagno_df.index,diagno_df['brdf_%s' % band_num],c='k',ls ='--')
        ax.scatter(diagno_df.index,diagno_df['uncorr_%s' % band_num],marker ='o',fc='w',ec='k')
        ax.scatter(diagno_df.index,diagno_df['corr_%s' % band_num],marker ='o',fc='k',ec='k')
        ax.text(.85,.9, "%s nm" % int(hy_obj.wavelengths[band_num]), transform=ax.transAxes,
                ha = 'center', fontsize = 12)
        # Axis labels only on the outer edges of the 2x2 grid.
        if a > 2:
            ax.set_xlabel('View zenith angle')
        if a in [1,3]:
            ax.set_ylabel('Reflectance')

    #Create legend (attached to the last axes, centered below the grid)
    custom_points = []
    custom_points.append(Line2D([0],[0], marker = 'o',label='Uncorrected',
                                markerfacecolor='w', markersize=10,lw=0,markeredgecolor='k'))
    custom_points.append(Line2D([0],[0], marker = 'o',label='Corrected',
                                markerfacecolor='k', markersize=10,lw=0,markeredgecolor='k'))
    custom_points.append(Line2D([0],[1],label='Modeled BRDF',c='k', ls ='--'))
    ax.legend(handles=custom_points, loc='center',frameon=False,
              bbox_to_anchor=(-.15, -.3), ncol =3,columnspacing = 1.5,labelspacing=.25)

    plt.savefig("%s%s_brdf_plot.png" % (config_dict['export']['output_dir'],hy_obj.base_name),
                bbox_inches = 'tight')
    plt.close()
|
||||
|
||||
|
||||
def flex_diagno_plot(hy_obj,config_dict):
    ''' Generate a diagnostic plot of BRDF correction results.

    Like universal_diagno_plot but without the modeled-BRDF curve: for each
    diagnostic wavelength, band means binned by 2 degrees of view zenith
    angle are plotted uncorrected (open circles) and corrected (filled
    circles). The figure is saved to
    '<output_dir><base_name>_flexbrdf_plot.png' and closed.

    Args:
        hy_obj: HyTools file object.
        config_dict (dict): Uses config_dict['brdf']['diagnostic_waves']
            (2x2 grid of subplots) and config_dict['export']['output_dir'].

    Returns:
        None.
    '''
    #Flip sign of zenith angle at minimum
    sensor_zn = hy_obj.get_anc('sensor_zn',radians =False)
    sensor_zn[~hy_obj.mask['no_data']] = np.nan
    # Pixels before the per-line minimum zenith get negative angles so the
    # x-axis spans the full signed view-angle range.
    for i,line in enumerate(sensor_zn):
        line[:np.nanargmin(line)] *= -1
        sensor_zn[i] = line  # `line` is a view, so this is a no-op kept for clarity
    # Keep only BRDF-sample pixels and floor to 2-degree bins.
    sensor_zn = (sensor_zn[hy_obj.mask['calc_brdf']]//2)*2

    diagno_df = pd.DataFrame()
    diagno_df['sensor_zn'] =sensor_zn

    bands = [hy_obj.wave_to_band(wave) for wave in config_dict['brdf']['diagnostic_waves']]

    for band_num in bands:
        # Uncorrected reflectance over the BRDF-sample mask.
        band = hy_obj.get_band(band_num,mask='calc_brdf')
        diagno_df['uncorr_%s' % band_num] = band

        # Same band with the BRDF correction appended to existing corrections.
        band = hy_obj.get_band(band_num,
                               corrections = hy_obj.corrections + ['brdf'],
                               mask='calc_brdf')

        diagno_df['corr_%s' % band_num] = band

    # Average every 2 degrees of zenith angle
    diagno_df= diagno_df.groupby(by= 'sensor_zn').mean()

    fig = plt.figure(figsize= (8,6))
    fig.suptitle(hy_obj.base_name)
    for a,band_num in enumerate(bands,start=1):
        ax = fig.add_subplot(2,2,a)
        ax.scatter(diagno_df.index,diagno_df['uncorr_%s' % band_num],marker ='o',fc='w',ec='k')
        ax.scatter(diagno_df.index,diagno_df['corr_%s' % band_num],marker ='o',fc='k',ec='k')
        ax.text(.85,.9, "%s nm" % int(hy_obj.wavelengths[band_num]), transform=ax.transAxes,
                ha = 'center', fontsize = 12)
        # Axis labels only on the outer edges of the 2x2 grid.
        if a > 2:
            ax.set_xlabel('View zenith angle')
        if a in [1,3]:
            ax.set_ylabel('Reflectance')

    #Create legend (attached to the last axes, centered below the grid)
    custom_points = []
    custom_points.append(Line2D([0],[0], marker = 'o',label='Uncorrected',
                                markerfacecolor='w', markersize=10,lw=0,markeredgecolor='k'))
    custom_points.append(Line2D([0],[0], marker = 'o',label='Corrected',
                                markerfacecolor='k', markersize=10,lw=0,markeredgecolor='k'))
    ax.legend(handles=custom_points, loc='center',frameon=False,
              bbox_to_anchor=(-.15, -.3), ncol =2,columnspacing = 1.5,labelspacing=.25)
    plt.savefig("%s%s_flexbrdf_plot.png" % (config_dict['export']['output_dir'],hy_obj.base_name),
                bbox_inches = 'tight')
    plt.close()
|
||||
27
Flexbrdf/hytools/topo/__init__.py
Normal file
27
Flexbrdf/hytools/topo/__init__.py
Normal file
@ -0,0 +1,27 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
The :mod:`hytools.topo` module includes functions for topographic image correction.
|
||||
"""
|
||||
from .topo import *
|
||||
from .scsc import *
|
||||
from .c import *
|
||||
from .cosine import *
|
||||
from .scs import *
|
||||
from .modminn import *
|
||||
222
Flexbrdf/hytools/topo/c.py
Normal file
222
Flexbrdf/hytools/topo/c.py
Normal file
@ -0,0 +1,222 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
This module contains functions to apply a topographic correction (SCS+C)
|
||||
described in the following papers:
|
||||
|
||||
Scott A. Soenen, Derek R. Peddle, & Craig A. Coburn (2005).
|
||||
SCS+C: A Modified Sun-Canopy-Sensor Topographic Correction in Forested Terrain.
|
||||
IEEE Transactions on Geoscience and Remote Sensing, 43(9), 2148-2159.
|
||||
https://doi.org/10.1109/TGRS.2005.852480
|
||||
|
||||
Topographic correction consists of the following steps:
|
||||
|
||||
1. calculate incidence angle if it is not provided
|
||||
2. estimate C-Correction value
|
||||
3. apply C-Correction value to the image data
|
||||
|
||||
TODO: Rationale/ examples for using different fitting algorithms
|
||||
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
from scipy.optimize import nnls
|
||||
from ..io.envi import WriteENVI
|
||||
|
||||
def calc_c(data, cosine_i, fit_type='ols'):
    """Calculate the topographic correction coefficient (c) for the input data.

    Used for both the C and SCS+C topographic corrections: a line is fit
    through (cosine_i, data) and c is the intercept/slope ratio
    (Eq. 7-8, Soenen et al. 2005).

    Args:
        data (numpy.ndarray): Band reflectance samples, 1-D.
        cosine_i (numpy.ndarray): Cosine of the incidence angle, same
            length as `data`.
        fit_type (str): Linear model fitting type, 'ols' or 'nnls'.

    Returns:
        float: Topographic correction coefficient. 100000.0 (a large
        value that makes the correction factor ~1) is returned when
        there are no samples or the fitted slope is degenerate.

    Raises:
        ValueError: If `fit_type` is not 'ols' or 'nnls'. (The original
        code fell through to a NameError in this case.)
    """
    # Reshape for regression: one predictor column, intercept added below.
    cosine_i = np.expand_dims(cosine_i, axis=1)

    # No samples to fit: neutralize the correction.
    if cosine_i.shape[0] == 0:
        return 100000.0

    X = np.concatenate([cosine_i, np.ones(cosine_i.shape)], axis=1)

    # Eq 7. Soenen et al. 2005
    if fit_type == 'ols':
        slope, intercept = np.linalg.lstsq(X, data, rcond=-1)[0].flatten()
    elif fit_type == 'nnls':
        slope, intercept = nnls(X, data)[0].flatten()
    else:
        raise ValueError("Unknown fit_type: %r (expected 'ols' or 'nnls')" % fit_type)

    # Eq 8. Soenen et al. 2005
    c = intercept / slope

    # Set a large number if the slope is zero/degenerate (inf or nan).
    if not np.isfinite(c):
        c = 100000.0
    return c
|
||||
|
||||
def calc_c_coeffs(hy_obj, topo_dict):
    """Fit a C coefficient for every good band and attach them to the object.

    Args:
        hy_obj: HyTools file object.
        topo_dict (dict): Topographic-correction settings; must contain
            'c_fit_type' ('ols' or 'nnls'). 'coeffs' ({band_num: c}) is
            written into it, and the dict is assigned to hy_obj.topo.

    Returns:
        None.
    """
    topo_dict['coeffs'] = {}
    cosine_i = hy_obj.cosine_i()

    for band_num, bad in enumerate(hy_obj.bad_bands):
        # Use `not bad`: the original `~bad` is only correct for numpy
        # booleans — for plain Python bools, ~True == -2 is truthy.
        if not bad:
            band = hy_obj.get_band(band_num, mask='calc_topo')
            topo_dict['coeffs'][band_num] = calc_c(band, cosine_i[hy_obj.mask['calc_topo']],
                                                   fit_type=topo_dict['c_fit_type'])
    hy_obj.topo = topo_dict
|
||||
|
||||
def get_band_samples(hy_obj, args):
    """Return the corrected pixels of one band inside the sample mask.

    Args:
        hy_obj: HyTools file object.
        args (dict): Must contain 'band_num', the band index to read.

    Returns:
        numpy.ndarray: 1-D array of band values where
        hy_obj.ancillary['sample_mask'] is non-zero.
    """
    band_index = args['band_num']
    values = hy_obj.get_band(band_index, corrections=hy_obj.corrections)
    keep = hy_obj.ancillary['sample_mask'] != 0
    return values[keep]
|
||||
|
||||
def get_cosine_i_samples(hy_obj):
    """Calculate cosine of the incidence angle and subset it to the sample mask.

    Args:
        hy_obj: HyTools file object.

    Returns:
        numpy.ndarray: 1-D cosine(i) values where
        hy_obj.ancillary['sample_mask'] is non-zero.
    """
    keep = hy_obj.ancillary['sample_mask'] != 0
    return hy_obj.cosine_i()[keep]
|
||||
|
||||
def calc_c_coeffs_group(actors,topo_dict,group_tag):
    '''Fit group-wise C coefficients from samples pooled across Ray actors.

    Args:
        actors (list): Ray actor handles wrapping HyTools objects.
        topo_dict (dict): Topographic-correction settings; must contain
            'c_fit_type' ('ols' or 'nnls').
        group_tag: Identifier of the sub-group being fitted; stored on
            each actor under the topo key 'subgroup'.

    Returns:
        None. Every actor's topo dict receives the shared per-band
        'coeffs' and the 'subgroup' tag.

    NOTE(review): `ray`, `progbar` and `update_topo` are not imported in
    this module's visible import block — confirm they are provided
    elsewhere, otherwise this function raises NameError when called.
    '''

    # Pool cosine-of-incidence samples from every flight line in the group.
    cosine_i_samples = ray.get([a.do.remote(get_cosine_i_samples) for a in actors])
    cosine_i_samples = np.concatenate(cosine_i_samples)

    print(f'Topo Subgroup {group_tag}')

    # Band mask is assumed identical across actors; read it from the first.
    bad_bands = ray.get(actors[0].do.remote(lambda x: x.bad_bands))
    coeffs = {}

    for band_num,band in enumerate(bad_bands):
        if ~band:
            coeffs[band_num] = {}
            # Pool this band's samples across all actors.
            band_samples = ray.get([a.do.remote(get_band_samples,
                                                {'band_num':band_num}) for a in actors])
            band_samples = np.concatenate(band_samples)

            # Fit one shared coefficient for the whole group.
            coeffs[band_num] = calc_c(band_samples,cosine_i_samples,fit_type=topo_dict['c_fit_type'])
            progbar(np.sum(~bad_bands[:band_num+1]),np.sum(~bad_bands))

    print('\n')

    #Update TOPO coeffs on every actor
    _ = ray.get([a.do.remote(update_topo,{'key':'coeffs',
                                          'value': coeffs}) for a in actors])
    _ = ray.get([a.do.remote(update_topo,{'key':'subgroup',
                                          'value': group_tag}) for a in actors])
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def apply_c(hy_obj,data,dimension,index):
    ''' Apply the SCS+C correction to a slice of the data.

    Args:
        hy_obj: HyTools file object; per-band C coefficients are read from
            hy_obj.topo['coeffs'] and angle cosines are cached in
            hy_obj.ancillary on first use.
        data (numpy.ndarray): Data slice to correct.
        dimension (str): One of 'line', 'column', 'band', 'chunk', 'pixels';
            see the inline examples for the matching form of `index`.
        index: Slice index (line/column/band number, chunk bounds, or
            pixel row/column arrays).

    Returns:
        numpy.ndarray: Corrected slice as float32. Only pixels where
        hy_obj.mask['apply_topo'] is True are modified. NOTE: for every
        dimension except 'band' the returned slice is also subset to the
        bands listed in hy_obj.topo['coeffs'].
    '''

    # Lazily cache the solar-zenith cosine and the incidence-angle cosine.
    if 'cos_sz' not in hy_obj.ancillary.keys():
        cos_sz = np.cos(hy_obj.get_anc('solar_zn'))
        hy_obj.ancillary['cos_sz'] = cos_sz
    if 'cosine_i' not in hy_obj.ancillary.keys():
        cosine_i = hy_obj.cosine_i()
        hy_obj.ancillary['cosine_i'] = cosine_i

    # Bands that have a fitted coefficient; C is aligned with C_bands.
    C_bands = list(hy_obj.topo['coeffs'].keys())
    C = np.array(list(hy_obj.topo['coeffs'].values()))

    #Convert to float
    data = data.astype(np.float32)

    if dimension == 'line':
        #index= 3000
        #data = hy_obj.get_line(3000)
        data = data[:,C_bands]
        mask = hy_obj.mask['apply_topo'][index,:]
        # Column vectors so the factor broadcasts across the band axis.
        cosine_i = hy_obj.ancillary['cosine_i'][[index],:].T
        cos_sz = hy_obj.ancillary['cos_sz'][[index],:].T
        # Multiplicative SCS+C factor: (cos(solar_zn)+C)/(cos(i)+C).
        correction_factor = (cos_sz + C)/(cosine_i + C)
        data[mask,:] = data[mask,:]*correction_factor[mask,:]

    elif dimension == 'column':
        # index= 300
        # data = hy_obj.get_column(index)
        data = data[:,C_bands]
        mask = hy_obj.mask['apply_topo'][:,index]
        cosine_i = hy_obj.ancillary['cosine_i'][:,[index]]
        cos_sz = hy_obj.ancillary['cos_sz'][:,[index]]
        correction_factor = (cos_sz + C)/(cosine_i + C)
        data[mask,:] = data[mask,:]*correction_factor[mask,:]

    elif dimension == 'band':
        #index= 8
        #data = hy_obj.get_band(index)
        # Scalar coefficient for a single band.
        C = hy_obj.topo['coeffs'][index]
        correction_factor = (hy_obj.ancillary['cos_sz'] + C)/(hy_obj.ancillary['cosine_i'] + C)
        data[hy_obj.mask['apply_topo']] = data[hy_obj.mask['apply_topo']] * correction_factor[hy_obj.mask['apply_topo']]

    elif dimension == 'chunk':
        # index = 200,501,3000,3501
        x1,x2,y1,y2 = index
        # data = hy_obj.get_chunk(x1,x2,y1,y2)
        data = data[:,:,C_bands]
        mask = hy_obj.mask['apply_topo'][y1:y2,x1:x2]
        cosine_i = hy_obj.ancillary['cosine_i'][y1:y2,x1:x2][:,:,np.newaxis]
        cos_sz = hy_obj.ancillary['cos_sz'][y1:y2,x1:x2][:,:,np.newaxis]
        correction_factor = (cos_sz + C)/(cosine_i + C)
        data[mask,:] = data[mask,:]*correction_factor[mask,:]

    elif dimension == 'pixels':
        # index = [[2000,2001],[200,501]]
        y,x = index
        # data = hy_obj.get_pixels(y,x)
        data = data[:,C_bands]
        mask = hy_obj.mask['apply_topo'][y,x]
        cosine_i = hy_obj.ancillary['cosine_i'][[y],[x]].T
        cos_sz = hy_obj.ancillary['cos_sz'][[y],[x]].T
        correction_factor = (cos_sz + C)/(cosine_i + C)
        data[mask,:] = data[mask,:]*correction_factor[mask,:]

    return data
|
||||
|
||||
|
||||
|
||||
99
Flexbrdf/hytools/topo/cosine.py
Normal file
99
Flexbrdf/hytools/topo/cosine.py
Normal file
@ -0,0 +1,99 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
This module contains functions to apply the cosine topographic correction
|
||||
described in the following paper:
|
||||
|
||||
Richter, R., Kellenberger, T., & Kaufmann, H. (2009).
|
||||
Comparison of topographic correction methods.
|
||||
Remote Sensing, 1(3), 184-196.
|
||||
https://doi.org/10.3390/rs1030184
|
||||
|
||||
Topographic correction consists of the following steps:
|
||||
|
||||
|
||||
"""
|
||||
import numpy as np
|
||||
|
||||
def calc_cosine_coeffs(hy_obj, topo_dict):
    """Compute the per-pixel cosine correction factor and cache it.

    Args:
        hy_obj: HyTools file object.
        topo_dict (dict): Topographic-correction settings; assigned to
            hy_obj.topo.

    Returns:
        None. The factor cos(solar_zn)/cos(i) is stored in
        hy_obj.ancillary['cosine_factor']; no-data pixels get a factor
        of exactly 1.
    """
    hy_obj.topo = topo_dict
    hy_obj.anc_data = {}

    incidence_cos = hy_obj.cosine_i()
    solar_cos = np.cos(hy_obj.get_anc('solar_zn'))

    factor = solar_cos / incidence_cos
    # Leave no-data pixels uncorrected.
    factor[~hy_obj.mask['no_data']] = 1.
    hy_obj.ancillary['cosine_factor'] = factor
|
||||
|
||||
def apply_cosine(hy_obj,data,dimension,index):
    ''' Apply the cosine topographic correction to a slice of the data.

    Args:
        hy_obj: HyTools file object; the per-pixel factor is read from
            hy_obj.ancillary['cosine_factor'] (computed on first use).
        data (numpy.ndarray): Data slice to correct.
        dimension (str): One of 'line', 'column', 'band', 'chunk', 'pixels'.
        index: Slice index; its form depends on `dimension` (line/column/
            band number, (x1,x2,y1,y2) chunk bounds, or (rows, cols)
            pixel arrays).

    Returns:
        numpy.ndarray: Corrected data slice as float32.
    '''

    if 'cosine_factor' not in hy_obj.ancillary.keys():
        # Bug fix: the original called calc_cosine_coeffs(hy_obj) without
        # the required topo_dict argument (TypeError). Reuse the topo
        # settings already attached to the object, if any.
        calc_cosine_coeffs(hy_obj, getattr(hy_obj, 'topo', {}))

    #Convert to float
    data = data.astype(np.float32)

    if dimension == 'line':
        #index= 3000
        # data is (columns, bands); broadcast the per-column factor across
        # the band axis. The original factor[np.newaxis,index,:] had shape
        # (1, columns) and could not broadcast against (columns, bands).
        data = data*hy_obj.ancillary['cosine_factor'][index,:][:,np.newaxis]

    elif dimension == 'column':
        #index= 300
        # Bug fix: the original assigned the factor itself to `data`
        # instead of multiplying the data by it.
        data = data*hy_obj.ancillary['cosine_factor'][:,index,np.newaxis]

    elif dimension == 'band':
        #index= 8
        data = data * hy_obj.ancillary['cosine_factor']

    elif dimension == 'chunk':
        #index = 200,501,3000,3501
        x1,x2,y1,y2 = index
        data = data*hy_obj.ancillary['cosine_factor'][y1:y2,x1:x2][:,:,np.newaxis]

    elif dimension == 'pixels':
        #index = [[2000,2001],[200,501]]
        y,x = index
        data = data*hy_obj.ancillary['cosine_factor'][y,x][:, np.newaxis]
    return data
|
||||
140
Flexbrdf/hytools/topo/modminn.py
Normal file
140
Flexbrdf/hytools/topo/modminn.py
Normal file
@ -0,0 +1,140 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
This module contains functions to apply the Modified Minnaert topographic correction
|
||||
described in the following paper:
|
||||
|
||||
Richter, R., Kellenberger, T., & Kaufmann, H. (2009).
|
||||
Comparison of topographic correction methods.
|
||||
Remote Sensing, 1(3), 184-196.
|
||||
https://doi.org/10.3390/rs1030184
|
||||
|
||||
Topographic correction consists of the following steps:
|
||||
|
||||
|
||||
"""
|
||||
import numpy as np
|
||||
|
||||
def calc_modminn_coeffs(hy_obj,topo_dict):
    '''Compute Modified Minnaert correction factors and cache them.

    Args:
        hy_obj: HyTools file object.
        topo_dict (dict): Topographic-correction settings; assigned to
            hy_obj.topo.

    Returns:
        None. A (2, lines, columns) array is stored in
        hy_obj.ancillary['mm_c_factor']: layer 0 is meant for bands below
        720 nm, layer 1 for bands at or above 720 nm (see apply_modminn).
    '''
    hy_obj.topo =topo_dict

    cos_i = hy_obj.cosine_i()
    # Incidence angle in degrees, for comparison against the threshold.
    i = np.rad2deg(np.arccos(cos_i))
    solar_zn = hy_obj.get_anc('solar_zn',radians=False)

    # Threshold angle: solar zenith plus an offset that shrinks as the
    # sun gets lower (+20 below 45 deg, +15 for 45-55, +10 above 55).
    solar_zn_t = np.zeros(solar_zn.shape)
    solar_zn_t[solar_zn < 45] = solar_zn[solar_zn < 45] +20
    solar_zn_t[(solar_zn >= 45) & (solar_zn <= 55)] = solar_zn[(solar_zn >= 45) & (solar_zn <= 55)] +15
    solar_zn_t[solar_zn > 55] = solar_zn[solar_zn > 55] +10

    #Create NDVI mask to separate vegetation
    ir = hy_obj.get_wave(850)
    red = hy_obj.get_wave(660)
    ndvi = (ir-red)/(ir+red)
    veg_mask = ndvi > 0.2

    # Base factor cos(i)/cos(threshold), duplicated into the two layers.
    c_factors = np.ones((2,hy_obj.lines,hy_obj.columns))
    c_factors[:] = cos_i/np.cos(np.radians(solar_zn_t))

    # Non vegetation correction factor (same exponent for both layers).
    c_factors[0][~veg_mask] = c_factors[0][~veg_mask]**(1/2)
    c_factors[1][~veg_mask] = c_factors[1][~veg_mask]**(1/2)

    # Vegetation correction factor (wavelength-dependent exponents).
    c_factors[0][veg_mask] = c_factors[0][veg_mask]**(3/4)
    c_factors[1][veg_mask] = c_factors[1][veg_mask]**(1/3)

    #Adjust correction factors to prevent too strong correction
    c_factors[c_factors <.25] = .25
    c_factors[c_factors > 1] = 1

    #Correct pixels only where i > threshold
    c_factors[0][i < solar_zn_t] = 1
    c_factors[1][i < solar_zn_t] = 1

    # No-data pixels (flagged via the 850 nm band) stay uncorrected.
    c_factors[0][ir == hy_obj.no_data] = 1
    c_factors[1][ir == hy_obj.no_data] = 1

    hy_obj.ancillary['mm_c_factor'] = c_factors
|
||||
|
||||
def apply_modminn(hy_obj,data,dimension,index):
    ''' Apply the Modified Minnaert correction to a slice of the data.

    Args:
        hy_obj: HyTools file object; correction factors are read from
            hy_obj.ancillary['mm_c_factor'] (computed on first use).
            Factor layer 1 is applied to wavelengths >= 720 nm, layer 0
            to shorter wavelengths.
        data (numpy.ndarray): Data slice to correct.
        dimension (str): One of 'line', 'column', 'band', 'chunk', 'pixels'.
        index: Slice index; its form depends on `dimension`.

    Returns:
        numpy.ndarray: Corrected data slice as float32.
    '''

    if 'mm_c_factor' not in hy_obj.ancillary.keys():
        # Bug fix: the original called calc_modminn_coeffs(hy_obj) without
        # the required topo_dict argument (TypeError). Reuse the topo
        # settings already attached to the object, if any.
        calc_modminn_coeffs(hy_obj, getattr(hy_obj, 'topo', {}))

    #Convert to float
    data = data.astype(np.float32)

    # Bands at or beyond 720 nm use factor layer 1, the rest layer 0.
    wave_mask =hy_obj.wavelengths >=720

    if dimension == 'line':
        #index= 3000
        # data is (columns, bands)
        data[:,wave_mask] = data[:,wave_mask]*hy_obj.ancillary['mm_c_factor'][1,index,:][:,np.newaxis]
        data[:,~wave_mask] = data[:,~wave_mask]*hy_obj.ancillary['mm_c_factor'][0,index,:][:,np.newaxis]

    elif dimension == 'column':
        #index= 300
        data[:,wave_mask] = data[:,wave_mask]*hy_obj.ancillary['mm_c_factor'][1,:,index][:,np.newaxis]
        data[:,~wave_mask] = data[:,~wave_mask]*hy_obj.ancillary['mm_c_factor'][0,:,index][:,np.newaxis]

    elif dimension == 'band':
        #index= 50
        if hy_obj.wavelengths[index] >=720:
            cf_index = 1
        else:
            cf_index = 0
        data = data * hy_obj.ancillary['mm_c_factor'][cf_index]

    elif dimension == 'chunk':
        #index = 200,501,3000,3501
        x1,x2,y1,y2 = index
        data[:,:,wave_mask] = data[:,:,wave_mask]*hy_obj.ancillary['mm_c_factor'][1,y1:y2,x1:x2][:,:,np.newaxis]
        data[:,:,~wave_mask] = data[:,:,~wave_mask]*hy_obj.ancillary['mm_c_factor'][0,y1:y2,x1:x2][:,:,np.newaxis]

    elif dimension == 'pixels':
        #index = [[2000,2001],[200,501]]
        y,x = index
        data[:,wave_mask] = data[:,wave_mask]*hy_obj.ancillary['mm_c_factor'][1,y,x][:, np.newaxis]
        data[:,~wave_mask] = data[:,~wave_mask]*hy_obj.ancillary['mm_c_factor'][0,y,x][:, np.newaxis]
    return data
|
||||
100
Flexbrdf/hytools/topo/scs.py
Normal file
100
Flexbrdf/hytools/topo/scs.py
Normal file
@ -0,0 +1,100 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
This module contains functions to apply the SCS (Sun-Canopy-Sensor) topographic correction
|
||||
described in the following paper:
|
||||
|
||||
Richter, R., Kellenberger, T., & Kaufmann, H. (2009).
|
||||
Comparison of topographic correction methods.
|
||||
Remote Sensing, 1(3), 184-196.
|
||||
https://doi.org/10.3390/rs1030184
|
||||
|
||||
Topographic correction consists of the following steps:
|
||||
|
||||
|
||||
"""
|
||||
import numpy as np
|
||||
|
||||
def calc_scs_coeffs(hy_obj,topo_dict):
    '''Calculate the per-pixel SCS (Sun-Canopy-Sensor) correction factor.

    The factor is (cos(slope) * cos(solar_zenith)) / cos(i), where i is
    the solar incidence angle. Pixels outside the valid data footprint
    get a neutral factor of 1 so the correction leaves them unchanged.

    Args:
        hy_obj (HyTools): Image object with ancillary geometry layers.
        topo_dict (dict): Topographic correction parameters; stored on hy_obj.

    Returns:
        None.
    '''
    hy_obj.topo = topo_dict
    hy_obj.anc_data = {}

    incidence_cos = hy_obj.cosine_i()
    zenith_cos = np.cos(hy_obj.get_anc('solar_zn'))
    slope_cos = np.cos(hy_obj.get_anc('slope'))

    factor = (slope_cos * zenith_cos)/incidence_cos
    # Neutral factor outside the valid data mask
    factor[~hy_obj.mask['no_data']] = 1.
    hy_obj.ancillary['scs_factor'] = factor
|
||||
|
||||
def apply_scs(hy_obj,data,dimension,index):
    ''' Apply the SCS topographic correction to a slice of the data.

    Args:
        hy_obj (HyTools): Image object with ancillary geometry layers.
        data (numpy.ndarray): Data slice to be corrected.
        dimension (str): One of 'line', 'column', 'band', 'chunk' or 'pixels'.
        index (int, tuple or list): Index of the slice along the given dimension.

    Returns:
        numpy.ndarray: Corrected data slice as float32.
    '''

    if 'scs_factor' not in hy_obj.ancillary.keys():
        # Bug fix: original called calc_scs_coeffs(hy_obj) without the
        # required topo_dict argument, which always raised TypeError.
        calc_scs_coeffs(hy_obj, getattr(hy_obj, 'topo', {}))

    #Convert to float
    data = data.astype(np.float32)

    if dimension == 'line':
        #index= 3000
        #data = hy_obj.get_line(3000)
        data = data*hy_obj.ancillary['scs_factor'][np.newaxis,index,:]

    elif dimension == 'column':
        #index= 300
        #data = hy_obj.get_column(index)
        # Bug fix: original assigned the factor itself, discarding the data.
        data = data*hy_obj.ancillary['scs_factor'][:,index,np.newaxis]

    elif dimension == 'band':
        #index= 8
        #data = hy_obj.get_band(index)
        data = data * hy_obj.ancillary['scs_factor']

    elif dimension == 'chunk':
        #index = 200,501,3000,3501
        x1,x2,y1,y2 = index
        #data = hy_obj.get_chunk(x1,x2,y1,y2)
        data = data*hy_obj.ancillary['scs_factor'][y1:y2,x1:x2][:,:,np.newaxis]

    elif dimension == 'pixels':
        #index = [[2000,2001],[200,501]]
        y,x = index
        #data = hy_obj.get_pixels(y,x)
        data = data*hy_obj.ancillary['scs_factor'][y,x][:, np.newaxis]
    return data
|
||||
204
Flexbrdf/hytools/topo/scsc.py
Normal file
204
Flexbrdf/hytools/topo/scsc.py
Normal file
@ -0,0 +1,204 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
This module contains functions to apply a topographic correction (SCS+C)
|
||||
described in the following papers:
|
||||
|
||||
Scott A. Soenen, Derek R. Peddle, & Craig A. Coburn (2005).
|
||||
SCS+C: A Modified Sun-Canopy-Sensor Topographic Correction in Forested Terrain.
|
||||
IEEE Transactions on Geoscience and Remote Sensing, 43(9), 2148-2159.
|
||||
https://doi.org/10.1109/TGRS.2005.852480
|
||||
|
||||
Topographic correction consists of the following steps:
|
||||
|
||||
1. calculate incidence angle if it is not provided
|
||||
2. estimate C-Correction value
|
||||
3. apply C-Correction value to the image data
|
||||
|
||||
TODO: Rationale/ examples for using different fitting algorithms
|
||||
|
||||
"""
|
||||
import numpy as np
|
||||
from .c import calc_c, get_band_samples, get_cosine_i_samples
|
||||
import ray
|
||||
from ..misc import update_topo
|
||||
from ..misc import progbar
|
||||
|
||||
def calc_scsc_c1(solar_zn,slope):
    """Return the SCS+C "c1" term: cos(solar zenith) * cos(slope).

    All input geometry units must be in radians.

    Args:
        solar_zn (numpy.ndarray): Solar zenith angle.
        slope (numpy.ndarray): Ground slope.

    Returns:
        numpy.ndarray: C1.

    """
    # Eq 11. Soenen et al. 2005
    return np.cos(solar_zn) * np.cos(slope)
|
||||
|
||||
def calc_scsc_coeffs(hy_obj,topo_dict):
    '''Fit per-band SCS+C "C" coefficients for a single image.

    Args:
        hy_obj (HyTools): Image object.
        topo_dict (dict): Topographic correction parameters; the fitted
            coefficients are stored under its 'coeffs' key.

    Returns:
        None.

    '''
    topo_dict['coeffs'] = {}
    cosine_i = hy_obj.cosine_i()

    for band_num,is_bad in enumerate(hy_obj.bad_bands):
        if ~is_bad:
            # Fit C from the masked band vs. cosine(i) samples
            band = hy_obj.get_band(band_num,mask='calc_topo')
            topo_dict['coeffs'][band_num] = calc_c(band,cosine_i[hy_obj.mask['calc_topo']],
                                                   fit_type=topo_dict['c_fit_type'])
    hy_obj.topo = topo_dict
|
||||
|
||||
def calc_scsc_coeffs_group(actors,topo_dict,group_tag):
    """Fit SCS+C coefficients from samples pooled across a group of actors.

    Band and cosine(i) samples are gathered from every ray actor in the
    group, concatenated, and one set of per-band C coefficients is fit for
    the whole group. The fitted coefficients and the group tag are then
    pushed back to each actor's topo dictionary.

    Args:
        actors (list): Ray actor handles for the images in the group.
        topo_dict (dict): Topographic correction parameters ('c_fit_type').
        group_tag: Identifier of the subgroup, stored on each actor.

    Returns:
        None.
    """
    # Pool cosine(i) samples from every actor in the group
    cosine_i_samples = ray.get([a.do.remote(get_cosine_i_samples) for a in actors])
    cosine_i_samples = np.concatenate(cosine_i_samples)

    print(f'Topo Subgroup {group_tag}')

    # Assumes every actor in the group shares the same bad-bands list
    bad_bands = ray.get(actors[0].do.remote(lambda x: x.bad_bands))
    coeffs = {}

    for band_num,band in enumerate(bad_bands):
        if ~band:
            coeffs[band_num] = {}
            # Pool this band's samples across the group and fit a single C
            band_samples = ray.get([a.do.remote(get_band_samples,
                                                {'band_num':band_num}) for a in actors])
            band_samples = np.concatenate(band_samples)

            coeffs[band_num] = calc_c(band_samples,cosine_i_samples,fit_type=topo_dict['c_fit_type'])
            progbar(np.sum(~bad_bands[:band_num+1]),np.sum(~bad_bands))

    print('\n')

    #Update TOPO coeffs
    _ = ray.get([a.do.remote(update_topo,{'key':'coeffs',
                                          'value': coeffs}) for a in actors])
    _ = ray.get([a.do.remote(update_topo,{'key':'subgroup',
                                          'value': group_tag}) for a in actors])
|
||||
|
||||
def apply_scsc_band(hy_obj,band,index):
    '''Apply the SCS+C correction to a single band.

    Args:
        hy_obj (HyTools): Image object with fitted coefficients in hy_obj.topo.
        band (numpy.ndarray): Band array, corrected in place.
        index (int): Band number used to look up the fitted C coefficient.

    Returns:
        numpy.ndarray: Corrected band.

    '''
    c1 = np.cos(hy_obj.get_anc('slope')) * np.cos(hy_obj.get_anc('solar_zn'))
    cosine_i = hy_obj.cosine_i()

    coeff = hy_obj.topo['coeffs'][index]
    factor = (c1 + coeff)/(cosine_i + coeff)

    # Correct only pixels in the topo mask, then restamp no-data pixels
    topo_mask = hy_obj.mask['calc_topo']
    band[topo_mask] = band[topo_mask] * factor[topo_mask]
    band[~hy_obj.mask['no_data']] = hy_obj.no_data

    return band
|
||||
|
||||
def apply_scsc(hy_obj,data,dimension,index):
    ''' Apply the SCS+C topographic correction to a slice of the data.

    The per-pixel, per-band factor is (c1 + C)/(cos(i) + C), where
    c1 = cos(slope)*cos(solar_zenith), cos(i) is the cosine of the solar
    incidence angle and C is the band-specific fitted coefficient.

    Args:
        hy_obj (HyTools): Image object with fitted coefficients in hy_obj.topo.
        data (numpy.ndarray): Data slice to be corrected.
        dimension (str): One of 'line', 'column', 'band', 'chunk' or 'pixels'.
        index (int, tuple or list): Index of the slice along the given dimension.

    Returns:
        numpy.ndarray: Corrected data slice as float32.

    '''

    # Cache the geometric terms so repeated slice requests don't recompute them
    if 'c1' not in hy_obj.ancillary.keys():
        c1 = np.cos(hy_obj.get_anc('slope')) * np.cos(hy_obj.get_anc('solar_zn'))
        hy_obj.ancillary['c1'] = c1
    if 'cosine_i' not in hy_obj.ancillary.keys():
        cosine_i = hy_obj.cosine_i()
        hy_obj.ancillary['cosine_i'] = cosine_i

    # Band numbers that have a fitted coefficient (keys may be strings when
    # the coefficients were loaded from JSON) and the coefficient values
    C_bands = list([int(x) for x in hy_obj.topo['coeffs'].keys()])
    C = np.array(list(hy_obj.topo['coeffs'].values()))

    #Convert to float
    data = data.astype(np.float32)
    # Normalize coefficient keys to int for the band lookup in the 'band' branch
    hy_obj.topo['coeffs'] = {int(k): hy_obj.topo['coeffs'][k] for k in hy_obj.topo['coeffs']}

    if (dimension != 'band') & (dimension != 'chunk'):
        # 2D slices: axis 0 indexes pixels, axis 1 indexes bands
        if dimension == 'line':
            #index= 3000
            #data = hy_obj.get_line(3000)
            mask = hy_obj.mask['apply_topo'][index,:]
            cosine_i = hy_obj.ancillary['cosine_i'][[index],:].T
            c1 = hy_obj.ancillary['c1'][[index],:].T

        elif dimension == 'column':
            #index= 300
            #data = hy_obj.get_column(index)
            mask = hy_obj.mask['apply_topo'][:,index]
            cosine_i = hy_obj.ancillary['cosine_i'][:,[index]]
            c1 = hy_obj.ancillary['c1'][:,[index]]

        elif dimension == 'pixels':
            #index = [[2000,2001],[200,501]]
            y,x = index
            #data = hy_obj.get_pixels(y,x)
            mask = hy_obj.mask['apply_topo'][y,x]
            cosine_i = hy_obj.ancillary['cosine_i'][[y],[x]].T
            c1 = hy_obj.ancillary['c1'][[y],[x]].T

        # Bands without a fitted coefficient keep a factor of 1 (unchanged)
        correction_factor = np.ones(data.shape)
        correction_factor[:,C_bands] = (c1 + C)/(cosine_i + C)
        data[mask,:] = data[mask,:]*correction_factor[mask,:]

    elif dimension == 'chunk':
        #index = 200,501,3000,3501
        x1,x2,y1,y2 = index
        #data = hy_obj.get_chunk(x1,x2,y1,y2)
        mask = hy_obj.mask['apply_topo'][y1:y2,x1:x2]
        cosine_i = hy_obj.ancillary['cosine_i'][y1:y2,x1:x2][:,:,np.newaxis]
        c1 = hy_obj.ancillary['c1'][y1:y2,x1:x2][:,:,np.newaxis]

        # 3D chunk: last axis indexes bands
        correction_factor = np.ones(data.shape)
        correction_factor[:,:,C_bands] = (c1 + C)/(cosine_i + C)
        data[mask,:] = data[mask,:]*correction_factor[mask,:]

    elif (dimension == 'band') and (index in hy_obj.topo['coeffs']):
        #index= 8
        #data = hy_obj.get_band(index)
        C = hy_obj.topo['coeffs'][index]
        correction_factor = (hy_obj.ancillary['c1'] + C)/(hy_obj.ancillary['cosine_i'] + C)
        data[hy_obj.mask['apply_topo']] = data[hy_obj.mask['apply_topo']] * correction_factor[hy_obj.mask['apply_topo']]
    return data
|
||||
183
Flexbrdf/hytools/topo/topo.py
Normal file
183
Flexbrdf/hytools/topo/topo.py
Normal file
@ -0,0 +1,183 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Topographic correction
|
||||
|
||||
"""
|
||||
import json
|
||||
import numpy as np
|
||||
import ray
|
||||
from .modminn import apply_modminn,calc_modminn_coeffs
|
||||
from .scsc import apply_scsc,calc_scsc_coeffs, calc_scsc_coeffs_group
|
||||
from .cosine import apply_cosine,calc_cosine_coeffs
|
||||
from .c import apply_c,calc_c_coeffs, calc_c_coeffs_group
|
||||
from .scs import apply_scs,calc_scs_coeffs
|
||||
from ..masks import mask_create
|
||||
from ..misc import set_topo
|
||||
|
||||
def calc_cosine_i(solar_zn, solar_az, aspect ,slope):
    """Generate a cosine i image.

    The cosine of the incidence angle (i) is defined as the angle between
    the normal to the pixel surface and the solar zenith direction.
    All input geometry units must be in radians.

    Args:
        solar_zn (numpy.ndarray): Solar zenith angle.
        solar_az (numpy.ndarray): Solar azimuth angle.
        aspect (numpy.ndarray): Ground aspect.
        slope (numpy.ndarray): Ground slope.

    Returns:
        numpy.ndarray: Cosine i image.

    """
    az_diff = aspect - solar_az
    return np.cos(solar_zn)*np.cos(slope) + np.sin(solar_zn)*np.sin(slope)* np.cos(az_diff)
|
||||
|
||||
def apply_topo_correct(hy_obj,data,dimension,index):
    '''Route a data slice to the configured topographic correction.

    Args:
        hy_obj (HyTools): Image object; hy_obj.topo['type'] selects the method.
        data (numpy.ndarray): Data slice to be corrected.
        dimension (str): Slice dimension, passed through to the corrector.
        index (int, tuple or list): Slice index, passed through to the corrector.

    Returns:
        numpy.ndarray: Corrected data slice (unchanged if the type is unknown).

    '''
    # Build the apply mask on first use if a mask recipe was supplied
    if ('apply_topo' not in hy_obj.mask) & ('apply_mask' in hy_obj.topo):
        hy_obj.gen_mask(mask_create,'apply_topo',hy_obj.topo['apply_mask'])

    correctors = {'mod_minneart': apply_modminn,
                  'scs+c': apply_scsc,
                  'cosine': apply_cosine,
                  'c': apply_c,
                  'scs': apply_scs}
    corrector = correctors.get(hy_obj.topo['type'])
    if corrector is not None:
        data = corrector(hy_obj,data,dimension,index)
    return data
|
||||
|
||||
def load_topo_precomputed(hy_obj,topo_dict):
    '''Load precomputed topographic coefficients for this image from JSON.

    Args:
        hy_obj (HyTools): Image object; its file_name keys the coefficient file.
        topo_dict (dict): Contains 'coeff_files' mapping file names to paths.

    Returns:
        None.
    '''
    coeff_file = topo_dict['coeff_files'][hy_obj.file_name]
    with open(coeff_file, 'r') as infile:
        hy_obj.topo = json.load(infile)
|
||||
|
||||
def get_topo_sample_mask(hy_obj,topo_dict):
    '''Randomly subsample the topo calculation mask for coefficient fitting.

    Keeps roughly sample_perc of the pixels flagged in the 'calc_topo' mask
    by randomly zeroing out the remainder; masks with five or fewer flagged
    pixels are kept whole. Result is stored as int8 in
    hy_obj.ancillary['sample_mask'].

    Args:
        hy_obj (HyTools): Image object with a 'calc_topo' mask.
        topo_dict (dict): Contains 'sample_perc', fraction of pixels to keep.

    Returns:
        None.
    '''
    sample_ratio = float(topo_dict["sample_perc"])

    sample_mask = np.copy(hy_obj.mask['calc_topo'])
    candidates = np.array(np.where(sample_mask!=0)).T

    if candidates.shape[0] > 5:
        # Randomly drop (1 - sample_ratio) of the candidate pixels
        n_drop = int(len(candidates)*(1-sample_ratio))
        dropped = candidates[np.random.choice(range(len(candidates)), n_drop, replace = False)].T
        sample_mask[dropped[0],dropped[1]] = 0

    hy_obj.ancillary['sample_mask'] = sample_mask.astype(np.int8)
|
||||
|
||||
def calc_topo_coeffs(actors,topo_dict,actor_group_list=None,group_tag_list=None):
    #def calc_topo_coeffs(actors,actor_group_list,topo_dict,group_tag_list):
    """Calculate (or load) topographic correction coefficients on ray actors.

    Either loads precomputed coefficients, fits coefficients per image, or
    — when actor_group_list is given and the method supports grouping
    ('scs+c' or 'c') — fits one coefficient set per actor subgroup.
    Appends 'topo' to each actor's corrections list when done.

    Args:
        actors (list): All ray actor handles.
        topo_dict (dict): Topographic correction parameters ('type',
            'calc_mask', method-specific keys).
        actor_group_list (list, optional): Actors partitioned into subgroups.
        group_tag_list (list, optional): Tag per subgroup, parallel to
            actor_group_list.

    Returns:
        None.
    """
    if topo_dict['type'] == 'precomputed':
        print("Using precomputed topographic coefficients.")
        _ = ray.get([a.do.remote(load_topo_precomputed,topo_dict) for a in actors]) # actors

        #_ = ray.get([a.do.remote(lambda x: x.corrections.append('topo')) for a in actors])

    else:
        print("Calculating topographic coefficients.")

        # Push the topo settings and build the calculation mask on every actor
        _ = ray.get([a.do.remote(set_topo,topo_dict) for a in actors])

        _ = ray.get([a.gen_mask.remote(mask_create,'calc_topo',
                                       topo_dict['calc_mask']) for a in actors])

        # Methods without fitted coefficients always run per-image;
        # grouping only applies to 'scs+c' and 'c'
        if (actor_group_list is None) or (topo_dict['type'] in ['scs','mod_minneart','cosine']):
            # no grouping
            if topo_dict['type'] == 'scs+c':
                _ = ray.get([a.do.remote(calc_scsc_coeffs,topo_dict) for a in actors])

            elif topo_dict['type'] == 'scs':
                _ = ray.get([a.do.remote(calc_scs_coeffs,topo_dict) for a in actors])

            elif topo_dict['type'] == 'mod_minneart':
                _ = ray.get([a.do.remote(calc_modminn_coeffs,topo_dict) for a in actors])

            elif topo_dict['type'] == 'cosine':
                _ = ray.get([a.do.remote(calc_cosine_coeffs,topo_dict) for a in actors])

            elif topo_dict['type'] == 'c':
                _ = ray.get([a.do.remote(calc_c_coeffs,topo_dict) for a in actors])

            #_ = ray.get([a.do.remote(lambda x: x.corrections.append('topo')) for a in actors])

        else:

            # Subsample each actor's mask, then fit coefficients per subgroup
            _ = ray.get([a.do.remote(get_topo_sample_mask,topo_dict) for a in actors])

            for group_order, sub_actors in enumerate(actor_group_list):

                #return 0

                if topo_dict['type'] == 'scs+c':
                    calc_scsc_coeffs_group(sub_actors,topo_dict,group_tag_list[group_order])

                elif topo_dict['type'] == 'c':
                    calc_c_coeffs_group(sub_actors,topo_dict,group_tag_list[group_order])

    # Record on every actor that the topo correction is configured
    _ = ray.get([a.do.remote(lambda x: x.corrections.append('topo')) for a in actors])
|
||||
|
||||
|
||||
def calc_topo_coeffs_single(hy_obj,topo_dict):
    '''Calculate (or load) topographic correction coefficients for one image.

    Args:
        hy_obj (HyTools): Image object.
        topo_dict (dict): Topographic correction parameters ('type',
            'calc_mask', method-specific keys).

    Returns:
        None.
    '''
    if topo_dict['type'] == 'precomputed':
        print("Using precomputed topographic coefficients.")
        load_topo_precomputed(hy_obj,topo_dict)

    else:
        print("Calculating topographic coefficients.")

        hy_obj.gen_mask(mask_create,'calc_topo',topo_dict['calc_mask'])

        calculators = {'scs+c': calc_scsc_coeffs,
                       'scs': calc_scs_coeffs,
                       'mod_minneart': calc_modminn_coeffs,
                       'cosine': calc_cosine_coeffs,
                       'c': calc_c_coeffs}
        calculator = calculators.get(topo_dict['type'])
        if calculator is not None:
            calculator(hy_obj,topo_dict)

    hy_obj.corrections.append('topo')
|
||||
22
Flexbrdf/hytools/transform/__init__.py
Normal file
22
Flexbrdf/hytools/transform/__init__.py
Normal file
@ -0,0 +1,22 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Transform functions
|
||||
"""
|
||||
from .resampling import *
|
||||
132
Flexbrdf/hytools/transform/resampling.py
Normal file
132
Flexbrdf/hytools/transform/resampling.py
Normal file
@ -0,0 +1,132 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
HyTools: Hyperspectral image processing library
|
||||
Copyright (C) 2021 University of Wisconsin
|
||||
|
||||
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, version 3 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Spectral resampling functions.
|
||||
|
||||
"""
|
||||
import numpy as np
|
||||
from scipy.interpolate import interp1d
|
||||
|
||||
|
||||
def gaussian(x,mu,fwhm):
    """Evaluate a unit-height gaussian along x.

    Args:
        x (numpy.ndarray): Values along which to generate the gaussian.
        mu (float): Mean of the gaussian function.
        fwhm (float): Full width half maximum.

    Returns:
        numpy.ndarray: Gaussian along the input range.

    """
    # Convert FWHM to standard deviation
    sigma = fwhm/(2* np.sqrt(2*np.log(2)))
    return np.exp(-1*((x-mu)**2/(2*sigma**2)))
|
||||
|
||||
def calc_resample_coeffs(in_wave,in_fwhm,out_wave,out_fwhm, spacing = 1):
    """Given a set of source and destination wavelengths and FWHMs this
    function calculates the relative contribution of each input wavelength
    to each output wavelength. It assumes that both input and output
    response functions follow a gaussian distribution.

    All inputs should be provided in nanometers.

    Args:
        in_wave (numpy.ndarray): Input wavelength centers.
        in_fwhm (numpy.ndarray): Input full width half maxes.
        out_wave (numpy.ndarray): Output wavelength centers.
        out_fwhm (numpy.ndarray): Output full width half maxes.
        spacing (int, optional): Resolution at which to model the
            spectral response functions. Defaults to 1.

    Returns:
        numpy.ndarray: Transform coefficients, shape (input bands, output bands).

    """
    # Common wavelength grid, padded 100 nm beyond both sensors' extremes
    min_spectrum = min(out_wave.min(),in_wave.min())//100 *100 - 100
    max_spectrum = 100 + max(out_wave.max(),in_wave.max())//100 *100
    one_nm = np.arange(min_spectrum,max_spectrum,spacing)

    # Destination response functions, normalized to unit area
    out_matrix = []
    for wave,fwhm in zip(out_wave,out_fwhm):
        a = gaussian(one_nm,wave,fwhm)
        out_matrix.append(np.divide(a,np.sum(a)))
    out_matrix = np.array(out_matrix)

    # For each source wavelength generate the gaussian response
    in_matrix = []
    for wave,fwhm in zip(in_wave,in_fwhm):
        in_matrix.append(gaussian(one_nm ,wave,fwhm))
    in_matrix = np.array(in_matrix)

    # Calculate the relative contribution of each source response function
    ratio = in_matrix/in_matrix.sum(axis=0)
    ratio[np.isnan(ratio)] = 0
    ratio2 = np.einsum('ab,cb->acb',ratio,out_matrix)

    # Integrate over wavelength: relative contribution of each input
    # wavelength to each destination wavelength.
    # np.trapz was renamed np.trapezoid in NumPy 2.0; support both.
    trapezoid = getattr(np, 'trapezoid', None) or np.trapz
    coeffs = trapezoid(ratio2)

    return coeffs
|
||||
|
||||
|
||||
def apply_resampler(hy_obj,data):
    ''' Spectrally resample a data slice to the configured output wavelengths.

    Two resampler types are supported, selected by hy_obj.resampler['type']:
    'gaussian' applies precomputed gaussian response-function coefficients,
    while any scipy interp1d kind interpolates along the band axis.

    Args:
        hy_obj (HyTools): Image object with resampler settings
            ('type', 'out_waves', 'out_fwhm') and wavelength metadata.
        data (numpy.ndarray): Data slice; for interpolation types the band
            axis must be axis 2.

    Returns:
        numpy.ndarray: Resampled data.

    '''
    # Kinds accepted by scipy.interpolate.interp1d
    interp_types = ['linear', 'nearest', 'nearest-up',
                    'zero', 'slinear', 'quadratic',
                    'cubic']

    #Convert to float
    data = data.astype(np.float32)

    if hy_obj.resampler['type'] == 'gaussian':

        # Load resampling coeffs to memory if needed
        if 'resample_coeffs' not in hy_obj.ancillary.keys():
            in_wave = hy_obj.wavelengths[~hy_obj.bad_bands]
            in_fwhm =hy_obj.fwhm[~hy_obj.bad_bands]
            resample_coeffs = calc_resample_coeffs(in_wave,in_fwhm,
                                                   hy_obj.resampler['out_waves'],
                                                   hy_obj.resampler['out_fwhm'])
            hy_obj.ancillary['resample_coeffs'] = resample_coeffs

        # Linear combination of input bands per output band
        data = np.dot(data, hy_obj.ancillary['resample_coeffs'] )


    elif hy_obj.resampler['type'] in interp_types:
        # Interpolate along the band axis, extrapolating past the band range
        interp_func = interp1d(hy_obj.wavelengths[~hy_obj.bad_bands], data,
                               kind=hy_obj.resampler['type'],
                               axis=2, fill_value="extrapolate")
        data = interp_func(hy_obj.resampler['out_waves'])

    return data
|
||||
Reference in New Issue
Block a user