Initial commit

2026-04-10 16:46:45 +08:00
commit 4fd1b0a203
165 changed files with 25698 additions and 0 deletions

.idea/.gitignore generated vendored Normal file

@ -0,0 +1,8 @@
# Default ignored files
/shelf/
/workspace.xml
# Editor-based HTTP Client requests
/httpRequests/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml

.idea/BRDF_GUI.iml generated Normal file

@ -0,0 +1,12 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$" />
    <orderEntry type="jdk" jdkName="WQ2" jdkType="Python SDK" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
  <component name="PyDocumentationSettings">
    <option name="format" value="PLAIN" />
    <option name="myDocStringFormat" value="Plain" />
  </component>
</module>

@ -0,0 +1,44 @@
<component name="InspectionProjectProfileManager">
  <profile version="1.0">
    <option name="myName" value="Project Default" />
    <inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
      <option name="ignoredPackages">
        <value>
          <list size="31">
            <item index="0" class="java.lang.String" itemvalue="catboost" />
            <item index="1" class="java.lang.String" itemvalue="opencv-python" />
            <item index="2" class="java.lang.String" itemvalue="xgboost" />
            <item index="3" class="java.lang.String" itemvalue="opencv-python-headless" />
            <item index="4" class="java.lang.String" itemvalue="zstandard" />
            <item index="5" class="java.lang.String" itemvalue="scikit-image" />
            <item index="6" class="java.lang.String" itemvalue="scipy" />
            <item index="7" class="java.lang.String" itemvalue="tensorflow_gpu" />
            <item index="8" class="java.lang.String" itemvalue="h5py" />
            <item index="9" class="java.lang.String" itemvalue="matplotlib" />
            <item index="10" class="java.lang.String" itemvalue="numpy" />
            <item index="11" class="java.lang.String" itemvalue="opencv_python" />
            <item index="12" class="java.lang.String" itemvalue="Pillow" />
            <item index="13" class="java.lang.String" itemvalue="tensorflow" />
            <item index="14" class="java.lang.String" itemvalue="jupyter" />
            <item index="15" class="java.lang.String" itemvalue="ipykernel" />
            <item index="16" class="java.lang.String" itemvalue="pandas" />
            <item index="17" class="java.lang.String" itemvalue="Werkzeug" />
            <item index="18" class="java.lang.String" itemvalue="cellpose" />
            <item index="19" class="java.lang.String" itemvalue="torchvision" />
            <item index="20" class="java.lang.String" itemvalue="Flask" />
            <item index="21" class="java.lang.String" itemvalue="fiona" />
            <item index="22" class="java.lang.String" itemvalue="joblib" />
            <item index="23" class="java.lang.String" itemvalue="pytest" />
            <item index="24" class="java.lang.String" itemvalue="geopandas" />
            <item index="25" class="java.lang.String" itemvalue="PyYAML" />
            <item index="26" class="java.lang.String" itemvalue="pybaselines" />
            <item index="27" class="java.lang.String" itemvalue="molmass" />
            <item index="28" class="java.lang.String" itemvalue="noise" />
            <item index="29" class="java.lang.String" itemvalue="scikit-gstat" />
            <item index="30" class="java.lang.String" itemvalue="plotly" />
          </list>
        </value>
      </option>
    </inspection_tool>
  </profile>
</component>

@ -0,0 +1,6 @@
<component name="InspectionProjectProfileManager">
  <settings>
    <option name="USE_PROJECT_PROFILE" value="false" />
    <version value="1.0" />
  </settings>
</component>

.idea/misc.xml generated Normal file

@ -0,0 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="Black">
    <option name="sdkName" value="WQ2" />
  </component>
  <component name="ProjectRootManager" version="2" project-jdk-name="WQ2" project-jdk-type="Python SDK" />
</project>

.idea/modules.xml generated Normal file

@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/.idea/BRDF_GUI.iml" filepath="$PROJECT_DIR$/.idea/BRDF_GUI.iml" />
    </modules>
  </component>
</project>

Flexbrdf/.gitignore vendored Normal file

@ -0,0 +1,40 @@
### macOS ###
# General
.DS_Store
.AppleDouble
.LSOverride
# Thumbnails
._*
### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
.ipynb_checkpoints
make.bat
Makefile
*~

Flexbrdf/.idea/.gitignore generated vendored Normal file

@ -0,0 +1,8 @@
# Default ignored files
/shelf/
/workspace.xml
# Editor-based HTTP Client requests
/httpRequests/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml

Flexbrdf/.idea/hytools-master.iml generated Normal file

@ -0,0 +1,12 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$" />
    <orderEntry type="jdk" jdkName="WQ2" jdkType="Python SDK" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
  <component name="PyDocumentationSettings">
    <option name="format" value="PLAIN" />
    <option name="myDocStringFormat" value="Plain" />
  </component>
</module>

@ -0,0 +1,44 @@
<component name="InspectionProjectProfileManager">
  <profile version="1.0">
    <option name="myName" value="Project Default" />
    <inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
      <option name="ignoredPackages">
        <value>
          <list size="31">
            <item index="0" class="java.lang.String" itemvalue="catboost" />
            <item index="1" class="java.lang.String" itemvalue="opencv-python" />
            <item index="2" class="java.lang.String" itemvalue="xgboost" />
            <item index="3" class="java.lang.String" itemvalue="opencv-python-headless" />
            <item index="4" class="java.lang.String" itemvalue="zstandard" />
            <item index="5" class="java.lang.String" itemvalue="scikit-image" />
            <item index="6" class="java.lang.String" itemvalue="scipy" />
            <item index="7" class="java.lang.String" itemvalue="tensorflow_gpu" />
            <item index="8" class="java.lang.String" itemvalue="h5py" />
            <item index="9" class="java.lang.String" itemvalue="matplotlib" />
            <item index="10" class="java.lang.String" itemvalue="numpy" />
            <item index="11" class="java.lang.String" itemvalue="opencv_python" />
            <item index="12" class="java.lang.String" itemvalue="Pillow" />
            <item index="13" class="java.lang.String" itemvalue="tensorflow" />
            <item index="14" class="java.lang.String" itemvalue="jupyter" />
            <item index="15" class="java.lang.String" itemvalue="ipykernel" />
            <item index="16" class="java.lang.String" itemvalue="pandas" />
            <item index="17" class="java.lang.String" itemvalue="Werkzeug" />
            <item index="18" class="java.lang.String" itemvalue="cellpose" />
            <item index="19" class="java.lang.String" itemvalue="torchvision" />
            <item index="20" class="java.lang.String" itemvalue="Flask" />
            <item index="21" class="java.lang.String" itemvalue="fiona" />
            <item index="22" class="java.lang.String" itemvalue="joblib" />
            <item index="23" class="java.lang.String" itemvalue="pytest" />
            <item index="24" class="java.lang.String" itemvalue="geopandas" />
            <item index="25" class="java.lang.String" itemvalue="PyYAML" />
            <item index="26" class="java.lang.String" itemvalue="pybaselines" />
            <item index="27" class="java.lang.String" itemvalue="molmass" />
            <item index="28" class="java.lang.String" itemvalue="noise" />
            <item index="29" class="java.lang.String" itemvalue="scikit-gstat" />
            <item index="30" class="java.lang.String" itemvalue="plotly" />
          </list>
        </value>
      </option>
    </inspection_tool>
  </profile>
</component>

@ -0,0 +1,6 @@
<component name="InspectionProjectProfileManager">
  <settings>
    <option name="USE_PROJECT_PROFILE" value="false" />
    <version value="1.0" />
  </settings>
</component>

Flexbrdf/.idea/misc.xml generated Normal file

@ -0,0 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="Black">
    <option name="sdkName" value="WQ" />
  </component>
  <component name="ProjectRootManager" version="2" project-jdk-name="WQ2" project-jdk-type="Python SDK" />
</project>

Flexbrdf/.idea/modules.xml generated Normal file

@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/.idea/hytools-master.iml" filepath="$PROJECT_DIR$/.idea/hytools-master.iml" />
    </modules>
  </component>
</project>

Flexbrdf/.readthedocs.yml Normal file

@ -0,0 +1,17 @@
# .readthedocs.yml
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details

# Required
version: 2

# Build documentation in the docs/ directory with Sphinx
sphinx:
  configuration: docs/source/conf.py

python:
  version: 3.7
  install:
    - requirements: requirements.txt
    - method: setuptools
      path: ./

Flexbrdf/CITATION.cff Normal file

@ -0,0 +1,67 @@
# This CITATION.cff file was generated with cffinit.
# Visit https://bit.ly/cffinit to generate yours today!

cff-version: 1.2.0
title: HyTools
message: >-
  HyTools: a python library for processing airborne and
  spaceborne imaging spectroscopy data
type: software
authors:
  - given-names: Adam
    family-names: Chlus
    email: adam.chlus@jpl.nasa.gov
    affiliation: Jet Propulsion Laboratory, California Institute of Technology; University of Wisconsin - Madison
  - given-names: Zhiwei
    family-names: Ye
    email: ye6@wisc.edu
    affiliation: University of Wisconsin - Madison
    orcid: 'https://orcid.org/0000-0001-5673-7034'
  - given-names: Ting
    family-names: Zheng
    email: tzheng39@wisc.edu
    affiliation: University of Wisconsin - Madison
    orcid: 'https://orcid.org/0000-0003-4728-6627'
  - given-names: Natalie
    family-names: Queally
    email: nqueally@ucla.edu
    affiliation: University of California, Los Angeles; University of Wisconsin - Madison
  - given-names: Evan
    family-names: Greenberg
    email: egreenberg@ucsb.edu
    affiliation: University of California, Santa Barbara
  - given-names: Philip
    family-names: Townsend
    email: ptownsend@wisc.edu
    affiliation: University of Wisconsin - Madison
identifiers:
  - type: doi
    value: 10.5281/zenodo.12573700
    description: Release 1.5.1
  - type: doi
    value: 10.5281/zenodo.7591966
    description: Release 1.5.0
repository-code: 'https://github.com/EnSpec/hytools'
abstract: >-
  HyTools is a python library for processing airborne and
  spaceborne imaging spectroscopy data. At its core it
  consists of functions for reading and writing ENVI or
  NetCDF formatted images and reading NEON AOP HDF files
  along with a series of image processing functions
  including spectral resampling, topographic, BRDF and glint
  correction, spectral transforms, masking and more. We have
  also created a series of command line tools which combine
  these functions and provide a streamlined workflow for
  processing images.
keywords:
  - imaging spectroscopy
  - remote sensing
  - BRDF correction
  - glint correction
  - topographic correction
  - hyperspectral
license: GPL-3.0
commit: acd11db3d655b017ad2dd6ed382fe1e7f385fde9
version: 1.5.1
date-released: '2024-06-27'

Flexbrdf/Flex_brdf.py Normal file

@ -0,0 +1,442 @@
import json
import os
import warnings
import sys
import time
import argparse
import ray
import numpy as np
import Flexbrdf.hytools as ht
from Flexbrdf.hytools.io.envi import *
from Flexbrdf.hytools.topo import calc_topo_coeffs
from Flexbrdf.hytools.brdf import calc_brdf_coeffs
from Flexbrdf.hytools.glint import set_glint_parameters
from Flexbrdf.hytools.masks import mask_create

warnings.filterwarnings("ignore")
np.seterr(divide='ignore', invalid='ignore')

# Default configuration values
DEFAULT_CONFIG = {
    'file_type': 'envi',
    'num_cpus': 16,
    'bad_bands': [],
    'corrections': [],
    'resample': False,
    'resampler': {'type': 'cubic', 'out_waves': []},
    'export': {
        'coeffs': True,
        'image': True,
        'masks': False,
        'subset_waves': [],
        'output_dir': './output/',
        'suffix': 'BRDF'
    },
    'brdf': {
        'type': 'flex',
        'grouped': True,
        'geometric': 'li_dense_r',
        'volume': 'ross_thick',
        'b/r': 2.5,
        'h/b': 2.0,
        'sample_perc': 0.1,
        'interp_kind': 'linear',
        'calc_mask': [
            ['water', {'band_1': 850, 'band_2': 660, 'threshold': 290}],
            ['kernel_finite', {}],
            ['ancillary', {'name': 'sensor_zn', 'min': 0.03490658503988659, 'max': 'inf'}]
        ],
        'apply_mask': [
            ['water', {'band_1': 850, 'band_2': 660, 'threshold': 290}]
        ],
        'bin_type': 'dynamic',
        'num_bins': 18,
        'ndvi_bin_min': 0.05,
        'ndvi_bin_max': 1.0,
        'ndvi_perc_min': 10,
        'ndvi_perc_max': 95,
        'solar_zn_type': 'scene'
    },
    # 'topo': {
    #     'type': 'scs+c',
    #     'calc_mask': [
    #         ['ndi', {'band_1': 850, 'band_2': 660, 'min': 0.05, 'max': 1.0}],
    #         ['kernel_finite', {}],
    #         ['ancillary', {'name': 'sensor_zn', 'min': 0.03490658503988659, 'max': 'inf'}]
    #     ],
    #     'apply_mask': [
    #         ['ndi', {'band_1': 850, 'band_2': 660, 'min': 0.05, 'max': 1.0}]
    #     ],
    #     'sample_perc': 0.1,
    #     'subgrouped': False,
    #     'subgroup': {}
    # },
    # 'glint': {
    #     'type': 'hochberg',
    #     'correction_band': 560,
    #     'deep_water_sample': {},
    #     'calc_mask': [
    #         ['ndi', {'band_1': 550, 'band_2': 2150, 'min': -1, 'max': 0}],
    #         ['kernel_finite', {}]
    #     ],
    #     'apply_mask': [
    #         ['ndi', {'band_1': 550, 'band_2': 2150, 'min': -1, 'max': 0}]
    #     ]
    # }
}
def build_config_from_args(args):
    """Build a configuration dictionary from command-line arguments."""
    # Auto-discover input files
    input_files = []
    if os.path.isdir(args.input_dir):
        # Supported file extensions
        extensions = ['.tif', '.tiff', '.envi', '.img', '.dat']
        for file in os.listdir(args.input_dir):
            if any(file.lower().endswith(ext) for ext in extensions):
                input_files.append(os.path.join(args.input_dir, file))
    else:
        input_files = [args.input_dir] if os.path.isfile(args.input_dir) else []

    if not input_files:
        raise ValueError(f"No input files found in {args.input_dir}")

    # Auto-generate anc_files
    anc_files = {}
    for input_file in input_files:
        base_name = os.path.splitext(os.path.basename(input_file))[0]
        # Derive the matching angles file path from the file name pattern.
        # e.g. image:  2025_9_2_3_53_45_202592_35252_0_rad_geo_corrected_reflectance.dat
        # angles file: 2025_9_2_3_53_45_202592_35252_0_rad_rgbxyz_geo_registered_angles.bip
        parts = base_name.split('_')
        if len(parts) >= 9:  # make sure there are enough parts
            anc_base = f"{parts[0]}_{parts[1]}_{parts[2]}_{parts[3]}_{parts[4]}_{parts[5]}_{parts[6]}_{parts[7]}_{parts[8]}_rad_rgbxyz_geo_angles_registered"
            # Normalize path separators to backslashes (Windows)
            anc_dir_clean = args.anc_dir.replace('/', '\\')
            anc_path = os.path.join(anc_dir_clean, anc_base + ".bip")
            # Make sure the final path is well formed
            anc_path = os.path.normpath(anc_path)
            if os.path.exists(anc_path):
                # Build the full anc_files dictionary entry
                anc_files[input_file] = {
                    "path_length": [anc_path, 0],
                    "sensor_az": [anc_path, 9],
                    "sensor_zn": [anc_path, 8],
                    "solar_az": [anc_path, 7],
                    "solar_zn": [anc_path, 6],
                    "phase": [anc_path, 0],
                    "slope": [anc_path, 0],
                    "aspect": [anc_path, 0],
                    "cosine_i": [anc_path, 0],
                    "utc_time": [anc_path, 0]
                }
            else:
                # Fall back to the default obs file if no matching angles file is found
                obs_path = os.path.join(args.anc_dir, base_name + "_obs")
                if os.path.exists(obs_path):
                    anc_files[input_file] = obs_path

    # Build the configuration dictionary
    config_dict = DEFAULT_CONFIG.copy()
    config_dict.update({
        'input_files': input_files,
        'anc_files': anc_files,
        'file_type': 'envi',  # assume all inputs are ENVI format
        'num_cpus': args.num_cpus,
        'bad_bands': args.bad_bands if args.bad_bands else [],
        'corrections': args.corrections if args.corrections else [],
        'export': {
            'coeffs': args.export_coeffs,
            'image': args.export_image,
            'masks': args.export_masks,
            'subset_waves': args.subset_waves if args.subset_waves else [],
            'output_dir': args.output_dir,
            'suffix': args.suffix
        }
    })

    # Update BRDF configuration from command-line arguments
    if 'brdf' in args.corrections:
        brdf_config = config_dict['brdf'].copy()
        if hasattr(args, 'brdf_type') and args.brdf_type:
            brdf_config['type'] = args.brdf_type
        if hasattr(args, 'grouped') and args.grouped is not None:
            brdf_config['grouped'] = args.grouped
        if hasattr(args, 'geometric') and args.geometric:
            brdf_config['geometric'] = args.geometric
        if hasattr(args, 'volume') and args.volume:
            brdf_config['volume'] = args.volume
        if hasattr(args, 'num_bins') and args.num_bins:
            brdf_config['num_bins'] = args.num_bins
        config_dict['brdf'] = brdf_config

    # Update TOPO configuration from command-line arguments
    # (the topo/glint defaults are commented out above, so fall back to an empty dict)
    if 'topo' in args.corrections:
        topo_config = config_dict.get('topo', {}).copy()
        if hasattr(args, 'topo_type') and args.topo_type:
            topo_config['type'] = args.topo_type
        config_dict['topo'] = topo_config

    # Update glint configuration from command-line arguments
    if 'glint' in args.corrections:
        glint_config = config_dict.get('glint', {}).copy()
        if hasattr(args, 'glint_type') and args.glint_type:
            glint_config['type'] = args.glint_type
        config_dict['glint'] = glint_config

    return config_dict
def main():
    parser = argparse.ArgumentParser(description="High-resolution image correction with automatic configuration")

    # Required arguments
    parser.add_argument('input_dir', help='Input directory containing image files or single image file path')
    parser.add_argument('anc_dir', help='Ancillary directory containing angle/obs files')
    parser.add_argument('--output-dir', required=True, help='Output directory for corrected images')

    # Optional arguments - basic settings
    parser.add_argument('--num-cpus', type=int, default=16, help='Number of CPUs to use (default: 16)')
    parser.add_argument('--bad-bands', nargs='*', type=int, default=[], help='Bad band indices to exclude')

    # Correction types
    parser.add_argument('--corrections', nargs='*', choices=['topo', 'brdf', 'glint'],
                        default=['brdf'], help='Correction types to apply (default: brdf)')

    # BRDF parameters
    parser.add_argument('--brdf-type', choices=['universal', 'flex'], default='flex',
                        help='BRDF correction type (default: flex)')
    parser.add_argument('--grouped', action='store_true', default=True,
                        help='Group images for BRDF correction (default: True)')
    parser.add_argument('--no-grouped', action='store_false', dest='grouped',
                        help='Do not group images for BRDF correction')
    parser.add_argument('--geometric', default='li_dense_r',
                        choices=['li_sparse', 'li_dense', 'li_dense_r', 'roujean'],
                        help='Geometric kernel type (default: li_dense_r)')
    parser.add_argument('--volume', default='ross_thick',
                        choices=['ross_thin', 'ross_thick', 'hotspot', 'roujean'],
                        help='Volume kernel type (default: ross_thick)')
    parser.add_argument('--num-bins', type=int, default=18,
                        help='Number of NDVI bins for FlexBRDF (default: 18)')

    # TOPO parameters
    parser.add_argument('--topo-type', default='scs+c',
                        choices=['scs', 'scs+c', 'c', 'cosine', 'mod_minneart'],
                        help='TOPO correction type (default: scs+c)')

    # Glint parameters
    parser.add_argument('--glint-type', default='hochberg',
                        choices=['hochberg', 'gao', 'hedley'],
                        help='Glint correction type (default: hochberg)')

    # Output options
    parser.add_argument('--export-coeffs', action='store_true', default=True,
                        help='Export correction coefficients (default: True)')
    parser.add_argument('--export-image', action='store_true', default=True,
                        help='Export corrected images (default: True)')
    parser.add_argument('--export-masks', action='store_true', default=True,
                        help='Export correction masks (default: True)')
    parser.add_argument('--subset-waves', nargs='*', type=float, default=[],
                        help='Subset of wavelengths to export (empty for all)')
    parser.add_argument('--suffix', default='corrected',
                        help='Suffix for output files (default: corrected)')
    parser.add_argument('--output-config', type=str, default=None,
                        help='Output current configuration to JSON file')

    # Backward compatibility: if the only argument is a JSON file, use legacy config-file mode
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        config_file = sys.argv[1]
        with open(config_file, 'r') as infile:
            config_dict = json.load(infile)
    else:
        args = parser.parse_args()
        config_dict = build_config_from_args(args)

        # If requested, write the configuration to a JSON file and exit
        if hasattr(args, 'output_config') and args.output_config:
            print(f"Outputting configuration to {args.output_config}...")
            # Reorder the configuration dictionary to match the expected format
            ordered_config = {
                "bad_bands": config_dict.get("bad_bands", []),
                "file_type": config_dict.get("file_type", "envi"),
                "input_files": config_dict.get("input_files", []),
                "anc_files": config_dict.get("anc_files", {}),
                "num_cpus": config_dict.get("num_cpus", 4),
                "export": config_dict.get("export", {}),
                "corrections": config_dict.get("corrections", []),
                "brdf": config_dict.get("brdf", {}),
                "topo": config_dict.get("topo", {}),
                "glint": config_dict.get("glint", {}),
                "resample": config_dict.get("resample", False),
                "resampler": config_dict.get("resampler", {"type": "cubic", "out_waves": []})
            }
            with open(args.output_config, 'w', encoding='utf-8') as f:
                json.dump(ordered_config, f, indent=2, ensure_ascii=False)
            print("Configuration output complete.")
            return  # exit after writing the configuration
print("Starting image correction process...")
total_start = time.time()
images = config_dict["input_files"]
if ray.is_initialized():
ray.shutdown()
print("Using %s CPUs." % config_dict['num_cpus'])
ray.init(num_cpus = config_dict['num_cpus'])
HyTools = ray.remote(ht.HyTools)
actors = [HyTools.remote() for image in images]
print(f"Loading {len(images)} image(s)...")
load_start = time.time()
if config_dict['file_type'] == 'envi':
anc_files = config_dict["anc_files"]
_ = ray.get([a.read_file.remote(image,config_dict['file_type'],
anc_files[image]) for a,image in zip(actors,images)])
elif config_dict['file_type'] == 'neon':
_ = ray.get([a.read_file.remote(image,config_dict['file_type']) for a,image in zip(actors,images)])
_ = ray.get([a.create_bad_bands.remote(config_dict['bad_bands']) for a in actors])
load_time = time.time() - load_start
print(".2f")
# Apply corrections
if config_dict["corrections"]:
print(f"Applying {len(config_dict['corrections'])} correction(s): {', '.join(config_dict['corrections'])}")
correction_start = time.time()
for correction in config_dict["corrections"]:
if correction =='topo':
calc_topo_coeffs(actors,config_dict['topo'])
elif correction == 'brdf':
calc_brdf_coeffs(actors,config_dict)
elif correction == 'glint':
set_glint_parameters(actors,config_dict)
correction_time = time.time() - correction_start
print(".2f")
if config_dict['export']['coeffs'] and len(config_dict["corrections"]) > 0:
print("Exporting correction coefficients...")
coeff_start = time.time()
_ = ray.get([a.do.remote(export_coeffs,config_dict['export']) for a in actors])
coeff_time = time.time() - coeff_start
print(".2f")
if config_dict['export']['image']:
print("Exporting corrected images...")
export_start = time.time()
_ = ray.get([a.do.remote(apply_corrections,config_dict) for a in actors])
export_time = time.time() - export_start
print(".2f")
total_time = time.time() - total_start
print(".2f")
ray.shutdown()
def export_coeffs(hy_obj, export_dict):
    '''Export correction coefficients to file.
    '''
    for correction in hy_obj.corrections:
        if correction == 'topo':
            corr_dict = hy_obj.topo
        elif correction == 'glint':
            # Glint has no coefficients to export
            continue
        else:
            corr_dict = hy_obj.brdf

        coeff_file = export_dict['output_dir']
        coeff_file += os.path.splitext(os.path.basename(hy_obj.file_name))[0]
        coeff_file += "_%s_coeffs_%s.json" % (correction, export_dict["suffix"])
        with open(coeff_file, 'w') as outfile:
            json.dump(corr_dict, outfile)
def apply_corrections(hy_obj, config_dict):
    '''Apply correction to image and export
    to file.
    '''
    header_dict = hy_obj.get_header()
    header_dict['data ignore value'] = hy_obj.no_data
    header_dict['data type'] = 12

    output_name = config_dict['export']['output_dir']
    output_name += os.path.splitext(os.path.basename(hy_obj.file_name))[0]
    output_name += "_%s" % config_dict['export']["suffix"]

    # Export all wavelengths
    if len(config_dict['export']['subset_waves']) == 0:
        if config_dict["resample"]:
            hy_obj.resampler = config_dict['resampler']
            waves = hy_obj.resampler['out_waves']
        else:
            waves = hy_obj.wavelengths

        header_dict['bands'] = len(waves)
        header_dict['wavelength'] = waves

        writer = WriteENVI(output_name, header_dict)
        iterator = hy_obj.iterate(by='line', corrections=hy_obj.corrections,
                                  resample=config_dict['resample'])
        while not iterator.complete:
            line = iterator.read_next()
            writer.write_line(line, iterator.current_line)
        writer.close()

    # Export subset of wavelengths
    else:
        waves = config_dict['export']['subset_waves']
        bands = [hy_obj.wave_to_band(x) for x in waves]
        waves = [round(hy_obj.wavelengths[x], 2) for x in bands]

        header_dict['bands'] = len(bands)
        header_dict['wavelength'] = waves

        writer = WriteENVI(output_name, header_dict)
        for b, band_num in enumerate(bands):
            band = hy_obj.get_band(band_num,
                                   corrections=hy_obj.corrections)
            writer.write_band(band, b)
        writer.close()

    # Export masks
    if (config_dict['export']['masks']) and (len(config_dict["corrections"]) > 0):
        masks = []
        mask_names = []
        for correction in config_dict["corrections"]:
            for mask_type in config_dict[correction]['apply_mask']:
                mask_names.append(correction + '_' + mask_type[0])
                masks.append(mask_create(hy_obj, [mask_type]))

        header_dict['data type'] = 1
        header_dict['bands'] = len(masks)
        header_dict['band names'] = mask_names
        header_dict['samples'] = hy_obj.columns
        header_dict['lines'] = hy_obj.lines
        header_dict['wavelength'] = []
        header_dict['fwhm'] = []
        header_dict['wavelength units'] = ''
        header_dict['data ignore value'] = 255

        output_name = config_dict['export']['output_dir']
        output_name += os.path.splitext(os.path.basename(hy_obj.file_name))[0]
        output_name += "_%s_mask" % config_dict['export']["suffix"]

        writer = WriteENVI(output_name, header_dict)
        for band_num, mask in enumerate(masks):
            mask = mask.astype(int)
            mask[~hy_obj.mask['no_data']] = 255
            writer.write_band(mask, band_num)
        del masks


if __name__ == "__main__":
    main()

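The argparse block in Flex_brdf.py doubles as the script's CLI reference. Below is a minimal sketch of the invocation modes it supports; the directory and file names are illustrative assumptions, not files from this commit:

```bash
# Flag mode: discover images in input_dir, auto-pair them with angle files in
# anc_dir, and apply FlexBRDF correction (all paths are placeholders).
python Flex_brdf.py ./reflectance ./angles \
    --output-dir ./output \
    --corrections brdf \
    --brdf-type flex \
    --num-bins 18 \
    --num-cpus 16

# Write the auto-generated configuration to JSON instead of running:
python Flex_brdf.py ./reflectance ./angles --output-dir ./output \
    --output-config config_brdf.json

# Legacy mode: a single .json argument is loaded directly as the config dictionary.
python Flex_brdf.py config_brdf.json
```
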
Flexbrdf/LICENSE.txt Normal file

@ -0,0 +1,621 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS

Flexbrdf/README.md Normal file
@ -0,0 +1,86 @@
# HyTools
[![DOI](https://zenodo.org/badge/315419247.svg)](https://zenodo.org/badge/latestdoi/315419247)
HyTools is a Python library for processing airborne and spaceborne
imaging spectroscopy data. At its
core it consists of functions for reading and writing
[ENVI](https://www.l3harrisgeospatial.com/docs/ENVIImageFiles.html)
formatted images and reading [NEON
AOP](https://www.neonscience.org/data-collection/airborne-remote-sensing)
HDF files along with a series of image processing functions including
spectral resampling, topographic, BRDF and glint correction, spectral
transforms, masking and more. We have also created a series of command
line tools which combine these functions and provide a streamlined
workflow for processing images.
For examples see the HyTools basics IPython notebook [here](./examples/hytools_basics_notebook.ipynb). For FlexBRDF correction see the tutorial [here](./examples/FlexBRDF_tutorial.md). An alternative FlexBRDF correction workflow that does not use Ray is described [here](./examples/separated_flexbrdf.md). NetCDF (EMIT and AVIRIS-NG) and Geographic Lookup Table (GLT) support were added in the latest version; see [here](./examples/netcdf_glt.md).
# Installation
To install with pip run:
```bash
pip install hy-tools
```
or
```bash
python -m pip install git+https://github.com/EnSpec/hytools.git
```
or clone
```bash
git clone https://github.com/EnSpec/hytools.git
```
and install with setuptools
```bash
python setup.py install
```
## Basic usage
```python
import hytools as ht
#Create a HyTools container object
hy_obj = ht.HyTools()
#Read and load ENVI file metadata
hy_obj.read_file('./envi_file')
#Calculate NDVI; get_wave retrieves the band closest to the input wavelength
ir = hy_obj.get_wave(900)
red = hy_obj.get_wave(660)
ndvi = (ir-red)/(ir+red)
#or
# Calculate normalized difference index, NDVI by default
ndvi = hy_obj.ndi()
#Other options for retrieving data
band = hy_obj.get_band(10)
column = hy_obj.get_column(1)
line = hy_obj.get_line(234)
chunk = hy_obj.get_chunk(0,100,0,100)
pixels = hy_obj.get_pixels([102,434],[324,345])
# Create a writer object to write to new file
writer = ht.io.WriteENVI('output_envi',hy_obj.get_header())
#Create an iterator object to cycle though image
iterator = hy_obj.iterate(by = 'line')
# Cycle line by line, read from original data
gain, offset = 1.0, 0.0  # placeholder calibration values for this example
while not iterator.complete:
    #Read next line
    line = iterator.read_next()
    #Do some calculations.......
    radiance = line * gain + offset
    #Write line to file
    writer.write_line(radiance,iterator.current_line)
writer.close()
```

@ -0,0 +1 @@
adam@adams-mbp.russell.wisc.edu.34021

@ -0,0 +1,10 @@
---
# Only the main Sass file needs front matter (the dashes are enough)
---
@import "minimal-mistakes/skins/{{ site.minimal_mistakes_skin | default: 'default' }}"; // skin
@import "minimal-mistakes"; // main partials
html {
font-size: 16px; // change to whatever
}

@ -0,0 +1 @@
.beer-slider{display:inline-block;overflow:hidden;position:relative}.beer-slider *,.beer-slider:after,.beer-slider :after,.beer-slider:before,.beer-slider :before{box-sizing:border-box}.beer-slider img,.beer-slider svg{vertical-align:bottom}.beer-slider>*{height:100%}.beer-slider>img{height:auto;max-width:100%}.beer-reveal{left:0;opacity:0;overflow:hidden;position:absolute;right:50%;top:0;transition:opacity .35s;z-index:1}.beer-reveal>:first-child{height:100%;max-width:none;width:200%}.beer-reveal>img:first-child{height:auto}.beer-range{-moz-appearance:none;-ms-touch-action:auto;-webkit-appearance:slider-horizontal!important;bottom:0;cursor:pointer;height:100%;left:-1px;margin:0;opacity:0;position:absolute;top:0;touch-action:auto;width:calc(100% + 2px);z-index:2}.beer-range::-webkit-slider-thumb{-webkit-appearance:none;height:300vh}.beer-range::-moz-range-thumb{-webkit-appearance:none;height:300vh}.beer-range::-ms-tooltip{display:none}.beer-handle{background:hsla(0,0%,100%,.5);border-radius:50%;box-shadow:0 0 6px transparent;color:#000;height:48px;left:50%;opacity:0;pointer-events:none;position:absolute;top:50%;transform:translate3d(-50%,-50%,0);transition:background .3s,box-shadow .3s,opacity .5s .25s;width:48px;z-index:2}.beer-handle:after,.beer-handle:before{border-left:2px solid;border-top:2px solid;content:"";height:10px;position:absolute;top:50%;transform-origin:0 0;width:10px}.beer-handle:before{left:10px;transform:rotate(-45deg)}.beer-handle:after{right:0;transform:rotate(135deg)}.beer-range:focus~.beer-handle{background:hsla(0,0%,100%,.85);box-shadow:0 0 3px rgba(0,0,0,.4)}.beer-reveal[data-beer-label]:after,.beer-slider[data-beer-label]:after{background:hsla(0,0%,100%,.75);border-radius:.125rem;content:attr(data-beer-label);line-height:1;padding:.5rem;position:absolute;top:1.5rem}.beer-slider[data-beer-label]:after{right:1.5rem}.beer-reveal[data-beer-label]:after{left:1.5rem}.beer-reveal[data-beer-label=""]:after,.beer-slider[data-beer-label=""]:after{content:none}.beer-ready .beer-handle,.beer-ready .beer-reveal{opacity:1}

Binary file not shown. (image added, 313 KiB)

Binary file not shown. (image added, 511 KiB)

File diff suppressed because one or more lines are too long

@ -0,0 +1,22 @@
.. _algorithms:
==================
Advanced use
==================
Spectrum resampling
===================
.. todo:: Spectrum resampling example script
Topographic correction
======================
.. todo:: Topographic correction example script
BRDF correction
===============
.. todo:: BRDF correction example script

@ -0,0 +1,141 @@
.. _basics:
===========
Basic use
===========
Loading images
==============
HyTools includes options for loading ENVI formatted binary files, NASA NetCDF files,
and NEON AOP HDF files.
.. code-block:: python
import hytools as ht
#Create a HyTools container object
envi = ht.HyTools()
#Read and load file metadata
envi.read_data('./envi_file.bin', file_type='envi')
For reading NEON data the process is the same:
.. code-block:: python
#Load a NEON HDF image
neon = ht.HyTools()
neon.read_data("./neon_file.h5",'neon')
Reading data
============
There are several ways to read data using a :class:`~hytools.base.HyTools` object. One option
is to use one of the 'get' methods:
.. code-block:: python
wave = neon.get_wave(900)
band = neon.get_band(10)
column = neon.get_column(1)
line = neon.get_line(234)
chunk = neon.get_chunk(x1,x2,y1,y2)
pixels = neon.get_pixels([0,1,2],[3,4,5])
We can also retrieve masked data, where a binary mask is used to
return a subset of the data. Currently masking only works using the
:meth:`~hytools.base.HyTools.get_band` or
:meth:`~hytools.base.HyTools.get_wave` methods. First we need to
generate a mask, which can be done using the
:meth:`~hytools.base.HyTools.gen_mask` method.
.. code-block:: python
# NDVI masking function
def masker(hy_obj):
    ir = hy_obj.get_wave(900)
    red = hy_obj.get_wave(660)
    ndvi = (ir-red)/(ir+red)
    return ndvi > .5
# Generate mask
neon.gen_mask(masker)
# Retrieve pixels where mask is True
pixels = neon.get_band(100, mask_values = True)
Alternatively an :class:`~hytools.base.Iterator` can be used to cycle along a
specified axis of the dataset, either by line, column, band or
chunk. This is useful for cycling through an image, applying
a function/algorithm and then writing the result to a file; a combined sketch is shown below.
.. code-block:: python
iterator = hy_obj.iterate(by = 'line')
Next cycle through the image line by line until complete:
.. code-block:: python
while not iterator.complete:
    line = iterator.read_next()
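Combining the iterator with a writer gives a simple read-process-write
loop. Below is a minimal sketch; ``gain`` and ``offset`` are placeholder
values for illustration.
.. code-block:: python

    from hytools.io.envi import WriteENVI

    writer = WriteENVI('./output.bin', neon.get_header())
    gain, offset = 1.0, 0.0  # placeholder calibration values
    iterator = neon.iterate(by='line')
    while not iterator.complete:
        line = iterator.read_next()
        writer.write_line(line * gain + offset, iterator.current_line)
    writer.close()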
Writing data
============
Currently writing is only supported for ENVI and NetCDF files; however, data from
NEON HDF files can be easily written to ENVI format using built-in
functions.
First an ENVI header dictionary needs to be generated to specify the
file size, datatype, interleave and other relevant metadata. This is
done using the :func:`~hytools.io.envi.envi_header_from_hdf` function.
.. code-block:: python
header_dict = envi_header_from_hdf(neon)
In this case we are going to export an RGBI image so we need to update
the number of bands:
.. code-block:: python
header_dict['bands'] = 4
Next we create an :class:`~hytools.io.envi.WriteENVI` object which
generates the header and image file using the specifications in the
header dictionary:
.. code-block:: python
output_name = './neon.bin'
writer = WriteENVI(output_name,header_dict)
Finally we can write the bands to file. First we retrieve the closest
wavelength to each input wavelength using the
:meth:`~hytools.base.HyTools.get_wave` method, next we write the band
to the new file with the :meth:`~hytools.io.envi.WriteENVI.write_band`
method.
.. code-block:: python
for band_num,wavelength in enumerate([660,550,440,880]):
    wave = neon.get_wave(wavelength)
    writer.write_band(wave,band_num)
writer.close()

Binary file not shown. (image added, 345 KiB)

@ -0,0 +1,35 @@
.. _command:
====================
Command line tools
====================
image_correct.py
================
The 'image_correct.py' script is a general-purpose tool that uses
topographic correction, BRDF correction and wavelength resampling functions
to modify/correct an image.
Configuration file
------------------
Image correction options are specified using a JSON file. Example
configuration files can be found in the GitHub repository. We
have also created a script that automatically generates a JSON
configuration file, as shown below.
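A typical run first generates a configuration file and then applies it.
Below is a minimal sketch; the configuration path is a placeholder.
.. code-block:: shell

    $ python ./scripts/configs/image_correct_json_generate.py
    $ python ./scripts/image_correct.py /path/to/config.json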
trait_estimate.py
=================
The 'trait_estimate.py' script is a tool to generate maps of canopy
foliar traits given a set of model parameters. Currently only PLSR
models are supported; support for other statistical modeling
frameworks, such as Gaussian process regression, is under
development. An example invocation is sketched below.
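A minimal sketch of the invocation; the configuration path is a placeholder.
.. code-block:: shell

    $ python ./scripts/trait_estimate.py /path/to/trait_config.json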

@ -0,0 +1,74 @@
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sphinx-apidoc -f -o source/ ../hytools
#
import os
import sys
import sphinx_rtd_theme
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'HyTools'
copyright = '2020, Adam Chlus, Zhiwei Ye, Philip Townsend'
author = 'Adam Chlus'
# The full version, including alpha/beta/rc tags
release = '0.0.1'
html_css_files = [
'css/slider.css',
]
html_js_files = [
'js/slider.js',
]
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.napoleon',
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.coverage',
'sphinx_rtd_theme',
'sphinx.ext.todo'
]
todo_include_todos=True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
master_doc = 'index'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

@ -0,0 +1,50 @@
About HyTools
=====================
HyTools is a Python library for processing airborne and spaceborne
imaging spectroscopy data, with a focus on terrestrial scenes. At its
core it consists of functions for reading and writing `ENVI
<https://www.l3harrisgeospatial.com/docs/ENVIImageFiles.html>`_
formatted images and reading `NEON AOP
<https://www.neonscience.org/data-collection/airborne-remote-sensing>`_
HDF files along with a series of image processing functions including
spectral resampling, topographic and BRDF correction, spectral
transforms, masking and more. We have also created a series of command
line tools which combine these functions and provide a streamlined
workflow for processing images.
Examples
--------
BRDF correction
~~~~~~~~~~~~~~~
.. raw:: html
<embed>
<link rel="stylesheet" href="/_static/css/slider.css">
<script src="/_static/js/slider.js" type="text/javascript" ></script>
<div id="slider" class="beer-slider" data-beer-label="">
<img src="/_static/images/research/3d_rgb.jpg" alt="">
<div class="beer-reveal" data-beer-label="">
<img src="/_static/images/research/rgb_rgb.jpg" alt="">
</div>
</div>
<script type="text/javascript">
new BeerSlider(document.getElementById('slider'));
</script>
</embed>
.. image:: brdf_before_after.png
Topographic correction
~~~~~~~~~~~~~~~~~~~~~~
.. image:: topo_correct.gif

@ -0,0 +1,29 @@
hytools.correction package
==========================
Submodules
----------
hytools.correction.brdf module
------------------------------
.. automodule:: hytools.correction.brdf
:members:
:undoc-members:
:show-inheritance:
hytools.correction.topo module
------------------------------
.. automodule:: hytools.correction.topo
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: hytools.correction
:members:
:undoc-members:
:show-inheritance:

@ -0,0 +1,37 @@
hytools.io package
==================
Submodules
----------
hytools.io.envi module
----------------------
.. automodule:: hytools.io.envi
:members:
:undoc-members:
:show-inheritance:
hytools.io.neon module
----------------------
.. automodule:: hytools.io.neon
:members:
:undoc-members:
:show-inheritance:
hytools.io.netcdf module
------------------------
.. automodule:: hytools.io.netcdf
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: hytools.io
:members:
:undoc-members:
:show-inheritance:

@ -0,0 +1,21 @@
hytools.misc package
====================
Submodules
----------
hytools.misc.misc module
------------------------
.. automodule:: hytools.misc.misc
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: hytools.misc
:members:
:undoc-members:
:show-inheritance:

@ -0,0 +1,32 @@
hytools package
===============
Subpackages
-----------
.. toctree::
:maxdepth: 4
hytools.correction
hytools.io
hytools.misc
hytools.transform
Submodules
----------
hytools.base module
-------------------
.. automodule:: hytools.base
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: hytools
:members:
:undoc-members:
:show-inheritance:

@ -0,0 +1,29 @@
hytools.transform package
=========================
Submodules
----------
hytools.transform.mnf module
----------------------------
.. automodule:: hytools.transform.mnf
:members:
:undoc-members:
:show-inheritance:
hytools.transform.resampling module
-----------------------------------
.. automodule:: hytools.transform.resampling
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: hytools.transform
:members:
:undoc-members:
:show-inheritance:

@ -0,0 +1,12 @@
HyTools Documentation
=====================
.. toctree::
:maxdepth: 2
contents
installation
basics
algorithms
command_line
modules

@ -0,0 +1,29 @@
.. _install:
=============
Installation
=============
Dependencies
============
* numpy
* h5py
* ray
Installing HyTools
==================
To download HyTools simply clone the GitHub repository
.. code-block:: shell
$ git clone https://github.com/EnSpec/hytools.git
and run the following command inside the hytools folder to install:
.. code-block:: shell
$ python setup.py install

@ -0,0 +1,7 @@
hytools
=======
.. toctree::
:maxdepth: 4
hytools

@ -0,0 +1 @@
sphinx_rtd_theme

Binary file not shown. (image added, 333 KiB)

Flexbrdf/eph.py Normal file
@ -0,0 +1,21 @@
import numpy as np
# Example filename: f100506t01p00r07rdn_v_lonlat_eph
filename = r"E:\AVRIS\f100831t01p00r09.tar\f100831t01p00r09\f100831t01p00r09rdn_b\f100831t01p00r09rdn_b_eph"
# Read all double-precision floats
data = np.fromfile(filename, dtype=np.float64)
# Each scan line has 6 values; reshape into a 2-D array (rows = scan lines, columns = 6)
data = data.reshape(-1, 6)
# Each row of data now corresponds to one scan line
lon = data[:, 1]   # longitude
lat = data[:, 2]   # latitude
elev = data[:, 3]  # elevation
# Example: print the values for the first 5 scan lines
print("First 5 longitudes:", lon[:5])
print("First 5 latitudes:", lat[:5])
print("First 5 elevations:", elev[:5])

@ -0,0 +1,189 @@
# FlexBRDF Tutorial
This tutorial describes how to use FlexBRDF [[1](#references)] to normalize the bidirectional reflectance distribution function (BRDF) in hyperspectral images (such as AVIRIS-NG) to a standard illumination condition. In particular, it focuses on using the terminal command line scripts to correct images.
## 1. Preparation
To generate a BRDF-corrected reflectance image, the reflectance image and its ancillary datasets are needed. The ancillary datasets include the solar zenith and azimuth angles and the sensor view zenith and azimuth angles. For the optional topographic correction, terrain slope and aspect information are also required.
For AVIRIS-type datasets, the ancillary datasets are stored in the *\*_obs_ort / \*_obs* files of the L1B package, while the reflectance datasets are in the L2 package ([AVIRIS-Classic](https://aviris.jpl.nasa.gov/dataportal/20170911_AV_Download.readme)/[AVIRIS-NG](https://avirisng.jpl.nasa.gov/dataportal/ANG_L1B_L2_Data_Product_Readme_v02.txt)). For NEON AOP datasets, the ancillary datasets are stored together with the reflectance datasets in [HDF5 files](https://www.neonscience.org/resources/learning-hub/tutorials/neon-refl-h5-py). Some newer NASA datasets are provided in NetCDF format, for which [preliminary support has been added](./netcdf_glt.md).
HyTools and its dependencies should be properly [installed](https://github.com/EnSpec/hytools/tree/master#installation). This can be verified with the test code in this [section](https://github.com/EnSpec/hytools/blob/master/README.md#basic-usage).
## 2. Configuration
All settings and file paths are specified in a single JSON file, which is the first step of the whole process. These settings should be completed before the actual run. Some configuration templates can be found [here](https://github.com/EnSpec/hytools/tree/master/examples/configs).
To change the settings for a specific purpose, users can either edit an example configuration JSON file directly, or run the following script to generate a new configuration JSON file with the modified settings.
```bash
python ./scripts/configs/image_correct_json_generate.py
```
A new JSON file will be generated according to the settings.
The default and recommended settings are described in [this example configuration file](https://github.com/EnSpec/hytools/blob/master/examples/configs/topo_brdf_glint_correct_config.json). The key settings are introduced in the following sections.
#### Choosing what to export from the correction output
The *export* section of the configuration determines whether the BRDF model estimation part, the BRDF model application part, or both are executed. The two parts can be run separately in sequence, or together.
```json
"export": {
"coeffs": false,
"image": true,
"masks": true,
"subset_waves": [],
"output_dir": "/data2/avng/l2/hytools_avng_example/",
"suffix": "topo_brdf_glint"
},
```
* The main outputs of the whole correction process are the image and the correction coefficients. Export of at least one of them should be enabled (set to *true*). *coeffs* can be set to *true* so that the coefficients are saved for future use as "precomputed" coefficients.
* The mask layers generated during the process can also be saved when both *image* and *masks* are set to *true*.
* *subset_waves* is a list specifying which bands to export. Bands are identified by the closest wavelength in nanometers, e.g. ```[440,550,660]``` exports 3 bands in the visible range. An empty list ```[]``` exports the full image cube.
* *output_dir* and *suffix* set the final location and the suffix of the outputs. A sketch of editing these settings programmatically is shown below.
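Since the configuration is plain JSON, it can also be edited programmatically. Below is a minimal sketch of loading a template, changing the *export* section, and saving a new configuration; the file paths are hypothetical placeholders.
```python
import json

# Hypothetical paths; substitute your own template and output locations.
template_path = "examples/configs/topo_brdf_glint_correct_config.json"
config_path = "my_correct_config.json"

with open(template_path) as f:
    config = json.load(f)

# Export both the corrected image and the coefficients, full image cube.
config["export"]["coeffs"] = True
config["export"]["image"] = True
config["export"]["subset_waves"] = []
config["export"]["output_dir"] = "/data/output/"
config["export"]["suffix"] = "topo_brdf_glint"

with open(config_path, "w") as f:
    json.dump(config, f, indent=4)
```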
#### Choosing how to correct the image
Currently, users can choose among three correction methods in HyTools ([TOPO](#topo), [BRDF](#brdf), [Glint](#glint)). All of them can be enabled.
```json
"corrections": [
"topo",
"brdf",
"glint"
],
```
The order of the corrections matters. Some common settings are listed below.
|Correction setting|Meaning|
|---|---|
|[ ]|Empty, no correction|
|['topo']|Topographic correction only|
|['brdf']|BRDF correction only, suitable for flat areas|
|['topo','brdf']|Topographic correction first, then BRDF correction|
|['brdf','glint']|BRDF correction first, then glint correction|
|['topo','brdf','glint']|All three corrections in order|
#### TOPO
The options for the topographic correction method are ['scs','scs+c','c','cosine','mod_minneart'], corresponding to the sun-canopy-sensor method [[2](#references)], the sun-canopy-sensor + C method [[2](#references)], the C method [[3](#references)], the cosine method [[3](#references)], and the modified Minnaert method [[3](#references)], respectively. The recommended topographic correction method is "scs+c".
```json
"topo": {
"type": "scs+c",
... ...
... ...
},
```
#### BRDF
The options for the BRDF correction method are ['flex','universal'], corresponding to the FlexBRDF method [[1](#references)] and the universal method, respectively.
The geometric kernel can be chosen from ['li_sparse','li_dense','li_sparse_r','li_dense_r','roujean'].
The volume kernel can be chosen from ['ross_thin','ross_thick','hotspot','roujean'].
```json
"brdf": {
"type": "flex",
"grouped": true,
"geometric": "li_dense_r",
"volume": "ross_thick",
... ....
... ....
"bin_type": "dynamic",
"num_bins": 18,
"ndvi_bin_min": 0.05,
"ndvi_bin_max": 1.0,
"ndvi_perc_min": 10,
"ndvi_perc_max": 95,
"solar_zn_type": "scene"
},
```
Although each flightline can be BRDF-corrected independently, FlexBRDF recommends putting all lines from the same day that are geographically close in the same group, and estimating a shared set of BRDF correction coefficients.
FlexBRDF uses NDVI to distinguish land cover types. It dynamically uses N bins over the NDVI range. By default, the pixels in a flight group are divided into 18 subgroups based on NDVI percentiles, and the BRDF coefficients are estimated within each subgroup. Pixels outside the range [*ndvi_bin_min*, *ndvi_bin_max*] are excluded from the statistics. A sketch of the binning logic is shown below.
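The dynamic binning can be illustrated with a minimal sketch; this is an assumed simplification for illustration, not the library's exact implementation, and the NDVI samples here are synthetic.
```python
import numpy as np

# Hypothetical NDVI samples pooled from all flightlines in the group.
rng = np.random.default_rng(0)
ndvi = rng.uniform(0.05, 1.0, size=100_000)

num_bins = 18
ndvi_perc_min, ndvi_perc_max = 10, 95

# Place bin edges at evenly spaced percentiles of the sampled NDVI
# between the 10th and 95th percentiles ("dynamic" binning).
percentiles = np.linspace(ndvi_perc_min, ndvi_perc_max, num_bins + 1)
edges = np.percentile(ndvi, percentiles)

# Assign each pixel an NDVI bin index; BRDF coefficients are then
# estimated separately within each bin.
bin_index = np.digitize(ndvi, edges[1:-1])
```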
Under the BRDF correction settings, all pixels in the same BRDF flightline group are eventually normalized to the illumination condition of the mean solar zenith angle of the whole "scene".
```json
"num_cpus":2,
```
If there is more than one flightline in a BRDF correction group, each reflectance image should be paired with its ancillary files in the configuration JSON file. This also means that the order of the file lists ("*input_files*" and "*anc_files*") in the configuration should match each other. In principle, the number of CPUs assigned to Ray should also match the total number of flightlines. In the example there are two flightlines.
#### Glint
The options for glint correction are ['hochberg','gao','hedley'], corresponding to the methods of Hochberg et al. 2003 [[4](#references)], Gao et al. 2021 [[5](#references)], and Hedley et al. 2005 [[6](#references)], respectively.
#### A simplified GUI for generating configuration files
This [Python-based GUI](https://github.com/EnSpec/hytools/blob/master/scripts/configs/image_correct_json_generate_gui.py) provides only minimal options for generating an image correction configuration file. It has most of the functionality of [image_correct_json_generate.py](https://github.com/EnSpec/hytools/blob/master/scripts/configs/image_correct_json_generate.py), but it assumes that the files of the same group are located exclusively in the same directory. It does not offer every option and serves more as an example of how configurations can be generated.
![GUI for the configuration file](./img/config_json_gen_gui.jpg "GUI")
## 3. Execution
The actual image correction script is [image_correct.py](https://github.com/EnSpec/hytools/blob/master/scripts/image_correct.py). It can estimate the BRDF coefficients, and it can correct images either with precomputed coefficients or by generating the resulting images on the fly.
```bash
python ./scripts/image_correct.py path/to/the/configuration/json/file
```
Depending on the configuration settings, various outputs are produced in this step.
If image export is enabled, either a full image cube (```"subset_waves": []```) or an image cube with a subset of bands (```"subset_waves": [wavelength1,wavelength2,wavelength3,...]```) is generated.
If both *image* and *masks* are set to *true* in the *export* section of the configuration, a separate mask file is generated.
All resulting images are in ENVI format.
If export of the TOPO/BRDF model coefficients is enabled, they are stored in JSON format and can be used as precomputed coefficients, in a more customized way, in other hytools scripts ([image_correct_json_generate.py](https://github.com/EnSpec/hytools/blob/master/scripts/configs/image_correct_json_generate.py) or [trait_estimate_json_generate.py](https://github.com/EnSpec/hytools/blob/master/scripts/configs/trait_estimate_json_generate.py)) or [programs](https://github.com/EnSpec/hytools/blob/master/examples/hytools_basics_notebook.ipynb). Each image should have its own TOPO/BRDF coefficient JSON file, although the BRDF JSONs in the same FlexBRDF group share the same set of coefficients.
## 4. (Optional) Trait prediction
Trait estimation with linear hyperspectral models can be carried out in HyTools. Similar to BRDF correction, trait maps in image grid format can be generated by combining the mapping script ([trait_estimate.py](https://github.com/EnSpec/hytools/blob/master/scripts/trait_estimate.py)), a model coefficient JSON file ([example](https://github.com/EnSpec/hytools/blob/master/scripts/configs/plsr_model_format_v0_1.py)), and a configuration JSON file ([trait_estimate_json_generate.py](https://github.com/EnSpec/hytools/blob/master/scripts/configs/trait_estimate_json_generate.py)).
Multiple predictions can be performed at the same time, and the TOPO/BRDF/glint corrections can be applied to the image reflectance on the fly, without precomputing the storage-heavy corrected image cubes. The precomputed TOPO/BRDF coefficients from the previous section therefore play a key role. The invocation is sketched below.
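A sketch of the invocation, mirroring the image correction script (substitute your own configuration path):
```bash
python ./scripts/trait_estimate.py path/to/the/trait/configuration/json/file
```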
## 5. (Experimental) Subgroups in topographic correction
In some special cases, the data are provided as tiles rather than as lines. This means that some tiles from the same line need to be grouped for topographic correction before the BRDF correction of the whole group.
Several new items have been added to the configuration file ([topogroup_brdf_correct_config.json](../examples/configs/topogroup_brdf_correct_config.json)) to accomplish this. A new script ([image_correct_topogroup.py](../scripts/image_correct_topogroup.py)) has also been adapted for it.
All the new items are added in the *'topo'* section. ```subgrouped``` must be enabled. ```sample_perc``` is the sampling percentage used to extract random samples for estimating the topographic correction coefficients. In ```subgroup```, the group label attached to each line in the configuration JSON can be any string, as long as the lines in the same subgroup share the same group label.
```json
"topo": {
... ...
"subgrouped":true,
"sample_perc": 0.01,
"subgroup": {
"/data/line1_path_name":"group00",
"/data/line2_path_name":"group00",
"/data/line3_path_name":"group01"
}
},
```
## References
[1] Queally, N., Ye, Z., Zheng, T., Chlus, A., Schneider, F., Pavlick, R. P., & Townsend, P. A. (2022).
FlexBRDF: A flexible BRDF correction for grouped processing of airborne imaging spectroscopy flightlines. *Journal of Geophysical Research: Biogeosciences*, *127*(1), e2021JG006622.
https://doi.org/10.1029/2021JG006622
[2] Soenen, S. A., Peddle, D. R., & Coburn, C. A. (2005).
SCS+C: A Modified Sun-Canopy-Sensor Topographic Correction in Forested Terrain. *IEEE Transactions on Geoscience and Remote Sensing*, *43*(9), 2148-2159.
https://doi.org/10.1109/TGRS.2005.852480
[3] Richter, R., Kellenberger, T., & Kaufmann, H. (2009).
Comparison of topographic correction methods. *Remote Sensing*, *1*(3), 184-196.
https://doi.org/10.3390/rs1030184
[4] Hochberg, E. J., Andréfouët, S., & Tyler, M. R. (2003). Sea surface correction of high spatial resolution Ikonos images to improve bottom mapping in near-shore environments. *IEEE Transactions on Geoscience and Remote Sensing*, *41*(7), 1724-1729.
https://doi.org/10.1109/TGRS.2003.815408
[5] Gao, B. C., & Li, R. R. (2021). Correction of sunglint effects in high spatial resolution hyperspectral imagery using SWIR or NIR bands and taking account of spectral variation of refractive index of water. *Advances in Environmental and Engineering Research*, *2*(3), 1-15. https://doi.org/10.21926/aeer.2103017
[6] Hedley, J. D., Harborne, A. R., & Mumby, P. J. (2005). Simple and robust removal of sun glint for mapping shallow-water benthos. *International Journal of Remote Sensing*, *26*(10), 2107-2112. https://doi.org/10.1080/01431160500034086

@ -0,0 +1,156 @@
{
"bad_bands": [],
"file_type": "envi",
"input_files": [
"E:/AVRIS/f060925t01p00r16.tar/f060925t01p00r16/f060925t01p00r16rdn_c/R/temp/caijain/16.dat",
"E:/AVRIS/f060925t01p00r17.tar/f060925t01p00r17/f060925t01p00r17rdn_c/R/temp/caijian/17.dat"
],
"anc_files": {
"E:/AVRIS/f060925t01p00r16.tar/f060925t01p00r16/f060925t01p00r16rdn_c/R/temp/caijain/16.dat": {
"path_length": [
"E:/AVRIS/f060925t01p00r16.tar/f060925t01p00r16/f060925t01p00r16rdn_c/R/temp/caijain/16ort.dat",
0
],
"sensor_az": [
"E:/AVRIS/f060925t01p00r16.tar/f060925t01p00r16/f060925t01p00r16rdn_c/R/temp/caijain/16ort.dat",
1
],
"sensor_zn": [
"E:/AVRIS/f060925t01p00r16.tar/f060925t01p00r16/f060925t01p00r16rdn_c/R/temp/caijain/16ort.dat",
2
],
"solar_az": [
"E:/AVRIS/f060925t01p00r16.tar/f060925t01p00r16/f060925t01p00r16rdn_c/R/temp/caijain/16ort.dat",
3
],
"solar_zn": [
"E:/AVRIS/f060925t01p00r16.tar/f060925t01p00r16/f060925t01p00r16rdn_c/R/temp/caijain/16ort.dat",
4
],
"phase": [
"E:/AVRIS/f060925t01p00r16.tar/f060925t01p00r16/f060925t01p00r16rdn_c/R/temp/caijain/16ort.dat",
5
],
"slope": [
"E:/AVRIS/f060925t01p00r16.tar/f060925t01p00r16/f060925t01p00r16rdn_c/R/temp/caijain/16ort.dat",
6
],
"aspect": [
"E:/AVRIS/f060925t01p00r16.tar/f060925t01p00r16/f060925t01p00r16rdn_c/R/temp/caijain/16ort.dat",
7
],
"cosine_i": [
"E:/AVRIS/f060925t01p00r16.tar/f060925t01p00r16/f060925t01p00r16rdn_c/R/temp/caijain/16ort.dat",
8
],
"utc_time": [
"E:/AVRIS/f060925t01p00r16.tar/f060925t01p00r16/f060925t01p00r16rdn_c/R/temp/caijain/16ort.dat",
9
]
},
"E:/AVRIS/f060925t01p00r17.tar/f060925t01p00r17/f060925t01p00r17rdn_c/R/temp/caijian/17.dat": {
"path_length": [
"E:/AVRIS/f060925t01p00r17.tar/f060925t01p00r17/f060925t01p00r17rdn_c/R/temp/caijian/17ort.dat",
0
],
"sensor_az": [
"E:/AVRIS/f060925t01p00r17.tar/f060925t01p00r17/f060925t01p00r17rdn_c/R/temp/caijian/17ort.dat",
1
],
"sensor_zn": [
"E:/AVRIS/f060925t01p00r17.tar/f060925t01p00r17/f060925t01p00r17rdn_c/R/temp/caijian/17ort.dat",
2
],
"solar_az": [
"E:/AVRIS/f060925t01p00r17.tar/f060925t01p00r17/f060925t01p00r17rdn_c/R/temp/caijian/17ort.dat",
3
],
"solar_zn": [
"E:/AVRIS/f060925t01p00r17.tar/f060925t01p00r17/f060925t01p00r17rdn_c/R/temp/caijian/17ort.dat",
4
],
"phase": [
"E:/AVRIS/f060925t01p00r17.tar/f060925t01p00r17/f060925t01p00r17rdn_c/R/temp/caijian/17ort.dat",
5
],
"slope": [
"E:/AVRIS/f060925t01p00r17.tar/f060925t01p00r17/f060925t01p00r17rdn_c/R/temp/caijian/17ort.dat",
6
],
"aspect": [
"E:/AVRIS/f060925t01p00r17.tar/f060925t01p00r17/f060925t01p00r17rdn_c/R/temp/caijian/17ort.dat",
7
],
"cosine_i": [
"E:/AVRIS/f060925t01p00r17.tar/f060925t01p00r17/f060925t01p00r17rdn_c/R/temp/caijian/17ort.dat",
8
],
"utc_time": [
"E:/AVRIS/f060925t01p00r17.tar/f060925t01p00r17/f060925t01p00r17rdn_c/R/temp/caijian/17ort.dat",
9
]
}
},
"num_cpus": 10,
"export": {
"coeffs": true,
"image": true,
"masks": true,
"subset_waves": [],
"output_dir": "E:/code/hytools-master/hytools-master/data/output",
"suffix": "brdf"
},
"corrections": [
"brdf"
],
"brdf": {
"type": "flex",
"grouped": true,
"geometric": "li_dense_r",
"volume": "ross_thick",
"b/r": 2.5,
"h/b": 2,
"sample_perc": 0.1,
"interp_kind": "linear",
"calc_mask": [
[
"water",
{
"band_1": 850,
"band_2": 660,
"threshold": 290
}
],
[
"kernel_finite",
{}
],
[
"ancillary",
{
"name": "sensor_zn",
"min": 0.03490658503988659,
"max": "inf"
}
]
],
"apply_mask": [
[
"water",
{
"band_1": 850,
"band_2": 660,
"threshold": 290
}
]
],
"bin_type": "dynamic",
"num_bins": 18,
"ndvi_bin_min": 0.05,
"ndvi_bin_max": 1.0,
"ndvi_perc_min": 10,
"ndvi_perc_max": 95,
"solar_zn_type": "scene"
},
"resample": false
}

@ -0,0 +1,178 @@
{
"bad_bands": [],
"file_type": "envi",
"input_files": [
"/data2/avng/l2/hytools_avng_example/ang20190629t203832_rfl",
"/data2/avng/l2/hytools_avng_example/ang20190629t210339_rfl"
],
"anc_files": {
"/data2/avng/l2/hytools_avng_example/ang20190629t203832_rfl": {
"path_length": [
"/data2/avng/l2/hytools_avng_example/ang20190629t203832_obs",
0
],
"sensor_az": [
"/data2/avng/l2/hytools_avng_example/ang20190629t203832_obs",
1
],
"sensor_zn": [
"/data2/avng/l2/hytools_avng_example/ang20190629t203832_obs",
2
],
"solar_az": [
"/data2/avng/l2/hytools_avng_example/ang20190629t203832_obs",
3
],
"solar_zn": [
"/data2/avng/l2/hytools_avng_example/ang20190629t203832_obs",
4
],
"phase": [
"/data2/avng/l2/hytools_avng_example/ang20190629t203832_obs",
5
],
"slope": [
"/data2/avng/l2/hytools_avng_example/ang20190629t203832_obs",
6
],
"aspect": [
"/data2/avng/l2/hytools_avng_example/ang20190629t203832_obs",
7
],
"cosine_i": [
"/data2/avng/l2/hytools_avng_example/ang20190629t203832_obs",
8
],
"utc_time": [
"/data2/avng/l2/hytools_avng_example/ang20190629t203832_obs",
9
]
},
"/data2/avng/l2/hytools_avng_example/ang20190629t210339_rfl": {
"path_length": [
"/data2/avng/l2/hytools_avng_example/ang20190629t210339_obs",
0
],
"sensor_az": [
"/data2/avng/l2/hytools_avng_example/ang20190629t210339_obs",
1
],
"sensor_zn": [
"/data2/avng/l2/hytools_avng_example/ang20190629t210339_obs",
2
],
"solar_az": [
"/data2/avng/l2/hytools_avng_example/ang20190629t210339_obs",
3
],
"solar_zn": [
"/data2/avng/l2/hytools_avng_example/ang20190629t210339_obs",
4
],
"phase": [
"/data2/avng/l2/hytools_avng_example/ang20190629t210339_obs",
5
],
"slope": [
"/data2/avng/l2/hytools_avng_example/ang20190629t210339_obs",
6
],
"aspect": [
"/data2/avng/l2/hytools_avng_example/ang20190629t210339_obs",
7
],
"cosine_i": [
"/data2/avng/l2/hytools_avng_example/ang20190629t210339_obs",
8
],
"utc_time": [
"/data2/avng/l2/hytools_avng_example/ang20190629t210339_obs",
9
]
}
},
"num_cpus": 2,
"export": {
"coeffs": false,
"image": true,
"masks": true,
"subset_waves": [],
"output_dir": "/data2/avng/l2/hytools_avng_example/",
"suffix": "brdf"
},
"corrections": [
"brdf"
],
"brdf": {
"type": "flex",
"grouped": true,
"geometric": "li_dense_r",
"volume": "ross_thick",
"b/r": 2.5,
"h/b": 2,
"sample_perc": 0.1,
"interp_kind": "linear",
"calc_mask": [
[
"ndi",
{
"band_1": 550,
"band_2": 2150,
"min": -1,
"max": 0
}
],
[
"ndi",
{
"band_1": 850,
"band_2": 660,
"min": 0.1,
"max": 1
}
],
[
"kernel_finite",
{}
],
[
"ancillary",
{
"name": "sensor_zn",
"min": 0.03490658503988659,
"max": "inf"
}
]
],
"apply_mask": [
[
"ndi",
{
"band_1": 550,
"band_2": 2150,
"min": -1,
"max": 0
}
],
[
"ndi",
{
"band_1": 850,
"band_2": 660,
"min": 0.1,
"max": 1
}
]
],
"bin_type": "dynamic",
"num_bins": 18,
"ndvi_bin_min": 0.05,
"ndvi_bin_max": 1.0,
"ndvi_perc_min": 10,
"ndvi_perc_max": 95,
"solar_zn_type": "scene"
},
"resample": false
}

@ -0,0 +1,49 @@
{
"bad_bands": [],
"file_type": "envi",
"input_files": [
"/data2/avng/l2/hytools_avng_example/ang20190629t203832_rfl",
"/data2/avng/l2/hytools_avng_example/ang20190629t210339_rfl"
],
"anc_files": {
"/data2/avng/l2/hytools_avng_example/ang20190629t203832_rfl": {},
"/data2/avng/l2/hytools_avng_example/ang20190629t210339_rfl": {}
},
"num_cpus": 2,
"export": {
"coeffs": false,
"image": true,
"masks": true,
"subset_waves": [],
"output_dir": "/data2/avng/l2/hytools_avng_example/",
"suffix": "glint"
},
"corrections": [
"glint"
],
"glint": {
"type": "hochberg",
"correction_wave": 2150,
"apply_mask": [
[
"ndi",
{
"band_1": 550,
"band_2": 2150,
"min": 0,
"max": 1
}
],
[
"ndi",
{
"band_1": 850,
"band_2": 660,
"min": -1,
"max": 0.1
}
]
]
},
"resample": false
}

@ -0,0 +1,157 @@
{
"bad_bands": [
[
300,
400
],
[
1320,
1430
],
[
1800,
1960
],
[
2450,
2600
]
],
"file_type": "emit",
"input_files": [
"/data/EMIT/EMIT_L2A_RFL_001_20231101T024133_2330502_014.nc"
],
"anc_files": {
"/data/EMIT/EMIT_L2A_RFL_001_20231101T024133_2330502_014.nc": {
"path_length": [
"/data/EMIT/EMIT_L1B_OBS_001_20231101T024133_2330502_014.nc",
0
],
"sensor_az": [
"/data/EMIT/EMIT_L1B_OBS_001_20231101T024133_2330502_014.nc",
1
],
"sensor_zn": [
"/data/EMIT/EMIT_L1B_OBS_001_20231101T024133_2330502_014.nc",
2
],
"solar_az": [
"/data/EMIT/EMIT_L1B_OBS_001_20231101T024133_2330502_014.nc",
3
],
"solar_zn": [
"/data/EMIT/EMIT_L1B_OBS_001_20231101T024133_2330502_014.nc",
4
],
"phase": [
"/data/EMIT/EMIT_L1B_OBS_001_20231101T024133_2330502_014.nc",
5
],
"slope": [
"/data/EMIT/EMIT_L1B_OBS_001_20231101T024133_2330502_014_nc_warp_v1.bsq",
0
],
"aspect": [
"/data/EMIT/EMIT_L1B_OBS_001_20231101T024133_2330502_014_nc_warp_v1.bsq",
1
],
"cosine_i": [
"/data/EMIT/EMIT_L1B_OBS_001_20231101T024133_2330502_014_nc_warp_v1.bsq",
2
],
"utc_time": [
"/data/EMIT/EMIT_L1B_OBS_001_20231101T024133_2330502_014.nc",
9
]
}
},
"glt_files": {
"/data/EMIT/EMIT_L2A_RFL_001_20231101T024133_2330502_014.nc": {
"glt_x": ["/data/EMIT/glt_xy_geocorr_rot_p2.bsq",1],
"glt_y": ["/data/EMIT/glt_xy_geocorr_rot_p2.bsq",0]
}
},
"export": {
"coeffs": true,
"image": true,
"use_glt":true,
"masks": false,
"subset_waves": [
440,
550,
660,
850,
976,
1650,
2217
],
"output_dir": "/data/EMIT/out/",
"image_format":"netcdf",
"suffix": "topo"
},
"corrections": [
"topo"
],
"topo": {
"type": "scs+c",
"calc_mask": [
[
"ndi",
{
"band_1": 850,
"band_2": 660,
"min": 0.05,
"max": 1.0
}
],
[
"ancillary",
{
"name": "slope",
"min": 0.08726646259971647,
"max": "+inf"
}
],
[
"ancillary",
{
"name": "cosine_i",
"min": 0.12,
"max": "+inf"
}
]
],
"apply_mask": [
[
"ndi",
{
"band_1": 850,
"band_2": 660,
"min": 0.05,
"max": 1.0
}
],
[
"ancillary",
{
"name": "slope",
"min": 0.08726646259971647,
"max": "+inf"
}
],
[
"ancillary",
{
"name": "cosine_i",
"min": 0.12,
"max": "+inf"
}
]
],
"c_fit_type": "nnls"
},
"brdf": { },
"resample": false,
"num_cpus": 1,
"outside_metadata":{"reflectance/ref-attr01":"dummy text","root_meta_tag":"dummy0"}
}

@ -0,0 +1,350 @@
{
"bad_bands": [
[
300,
400
],
[
1320,
1430
],
[
1800,
1960
],
[
2450,
2600
]
],
"file_type": "ncav",
"input_files": [
"/data/AVNG/ang20231109t123229_000_L2A_OE_0b4f48b4_RFL_ORT.nc",
"/data/AVNG/ang20231109t123229_001_L2A_OE_0b4f48b4_RFL_ORT.nc",
"/data/AVNG/ang20231109t123229_002_L2A_OE_0b4f48b4_RFL_ORT.nc",
"/data/AVNG/ang20231109t124405_002_L2A_OE_0b4f48b4_RFL_ORT.nc"
],
"anc_files": {
"/data/AVNG/ang20231109t123229_000_L2A_OE_0b4f48b4_RFL_ORT.nc": {
"path_length": [
"/data/AVNG/ang20231109t123229_000_L1B_ORT_fb78102f_OBS.nc",
"path_length"
],
"sensor_az": [
"/data/AVNG/ang20231109t123229_000_L1B_ORT_fb78102f_OBS.nc",
"to_sensor_azimuth"
],
"sensor_zn": [
"/data/AVNG/ang20231109t123229_000_L1B_ORT_fb78102f_OBS.nc",
"to_sensor_zenith"
],
"solar_az": [
"/data/AVNG/ang20231109t123229_000_L1B_ORT_fb78102f_OBS.nc",
"to_sun_azimuth"
],
"solar_zn": [
"/data/AVNG/ang20231109t123229_000_L1B_ORT_fb78102f_OBS.nc",
"to_sun_zenith"
],
"phase": [
"/data/AVNG/ang20231109t123229_000_L1B_ORT_fb78102f_OBS.nc",
"solar_phase"
],
"slope": [
"/data/AVNG/ang20231109t123229_000_L1B_ORT_fb78102f_OBS.nc",
"slope"
],
"aspect": [
"/data/AVNG/ang20231109t123229_000_L1B_ORT_fb78102f_OBS.nc",
"aspect"
],
"cosine_i": [
"/data/AVNG/ang20231109t123229_000_L1B_ORT_fb78102f_OBS.nc",
"cosine_i"
],
"utc_time": [
"/data/AVNG/ang20231109t123229_000_L1B_ORT_fb78102f_OBS.nc",
"utc_time"
]
} ,
"/data/AVNG/ang20231109t123229_001_L2A_OE_0b4f48b4_RFL_ORT.nc": {
"path_length": [
"/data/AVNG/ang20231109t123229_001_L1B_ORT_fb78102f_OBS.nc",
"path_length"
],
"sensor_az": [
"/data/AVNG/ang20231109t123229_001_L1B_ORT_fb78102f_OBS.nc",
"to_sensor_azimuth"
],
"sensor_zn": [
"/data/AVNG/ang20231109t123229_001_L1B_ORT_fb78102f_OBS.nc",
"to_sensor_zenith"
],
"solar_az": [
"/data/AVNG/ang20231109t123229_001_L1B_ORT_fb78102f_OBS.nc",
"to_sun_azimuth"
],
"solar_zn": [
"/data/AVNG/ang20231109t123229_001_L1B_ORT_fb78102f_OBS.nc",
"to_sun_zenith"
],
"phase": [
"/data/AVNG/ang20231109t123229_001_L1B_ORT_fb78102f_OBS.nc",
"solar_phase"
],
"slope": [
"/data/AVNG/ang20231109t123229_001_L1B_ORT_fb78102f_OBS.nc",
"slope"
],
"aspect": [
"/data/AVNG/ang20231109t123229_001_L1B_ORT_fb78102f_OBS.nc",
"aspect"
],
"cosine_i": [
"/data/AVNG/ang20231109t123229_001_L1B_ORT_fb78102f_OBS.nc",
"cosine_i"
],
"utc_time": [
"/data/AVNG/ang20231109t123229_001_L1B_ORT_fb78102f_OBS.nc",
"utc_time"
]
} ,
"/data/AVNG/ang20231109t123229_002_L2A_OE_0b4f48b4_RFL_ORT.nc": {
"path_length": [
"/data/AVNG/ang20231109t123229_002_L1B_ORT_fb78102f_OBS.nc",
"path_length"
],
"sensor_az": [
"/data/AVNG/ang20231109t123229_002_L1B_ORT_fb78102f_OBS.nc",
"to_sensor_azimuth"
],
"sensor_zn": [
"/data/AVNG/ang20231109t123229_002_L1B_ORT_fb78102f_OBS.nc",
"to_sensor_zenith"
],
"solar_az": [
"/data/AVNG/ang20231109t123229_002_L1B_ORT_fb78102f_OBS.nc",
"to_sun_azimuth"
],
"solar_zn": [
"/data/AVNG/ang20231109t123229_002_L1B_ORT_fb78102f_OBS.nc",
"to_sun_zenith"
],
"phase": [
"/data/AVNG/ang20231109t123229_002_L1B_ORT_fb78102f_OBS.nc",
"solar_phase"
],
"slope": [
"/data/AVNG/ang20231109t123229_002_L1B_ORT_fb78102f_OBS.nc",
"slope"
],
"aspect": [
"/data/AVNG/ang20231109t123229_002_L1B_ORT_fb78102f_OBS.nc",
"aspect"
],
"cosine_i": [
"/data/AVNG/ang20231109t123229_002_L1B_ORT_fb78102f_OBS.nc",
"cosine_i"
],
"utc_time": [
"/data/AVNG/ang20231109t123229_002_L1B_ORT_fb78102f_OBS.nc",
"utc_time"
]
} ,
"/data/AVNG/ang20231109t124405_002_L2A_OE_0b4f48b4_RFL_ORT.nc": {
"path_length": [
"/data/AVNG/ang20231109t124405_002_L1B_ORT_fb78102f_OBS.nc",
"path_length"
],
"sensor_az": [
"/data/AVNG/ang20231109t124405_002_L1B_ORT_fb78102f_OBS.nc",
"to_sensor_azimuth"
],
"sensor_zn": [
"/data/AVNG/ang20231109t124405_002_L1B_ORT_fb78102f_OBS.nc",
"to_sensor_zenith"
],
"solar_az": [
"/data/AVNG/ang20231109t124405_002_L1B_ORT_fb78102f_OBS.nc",
"to_sun_azimuth"
],
"solar_zn": [
"/data/AVNG/ang20231109t124405_002_L1B_ORT_fb78102f_OBS.nc",
"to_sun_zenith"
],
"phase": [
"/data/AVNG/ang20231109t124405_002_L1B_ORT_fb78102f_OBS.nc",
"solar_phase"
],
"slope": [
"/data/AVNG/ang20231109t124405_002_L1B_ORT_fb78102f_OBS.nc",
"slope"
],
"aspect": [
"/data/AVNG/ang20231109t124405_002_L1B_ORT_fb78102f_OBS.nc",
"aspect"
],
"cosine_i": [
"/data/AVNG/ang20231109t124405_002_L1B_ORT_fb78102f_OBS.nc",
"cosine_i"
],
"utc_time": [
"/data/AVNG/ang20231109t124405_002_L1B_ORT_fb78102f_OBS.nc",
"utc_time"
]
}
},
"glt_files": {
},
"export": {
"coeffs": true,
"image": true,
"masks": false,
"use_glt":false,
"subset_waves": [
440,
550,
660,
850,
976,
1650,
2217
],
"output_dir": "/data/AVNG/output/",
"suffix": "topo_brdf_gp"
},
"corrections": ["topo","brdf"],
"topo": {
"type": "scs+c",
"calc_mask": [
[
"ndi",
{
"band_1": 850,
"band_2": 660,
"min": 0.05,
"max": 1.0
}
],
[
"ancillary",
{
"name": "slope",
"min": 0.08726646259971647,
"max": "+inf"
}
],
[
"ancillary",
{
"name": "cosine_i",
"min": 0.12,
"max": "+inf"
}
]
],
"apply_mask": [
[
"ndi",
{
"band_1": 850,
"band_2": 660,
"min": 0.05,
"max": 1.0
}
],
[
"ancillary",
{
"name": "slope",
"min": 0.08726646259971647,
"max": "+inf"
}
],
[
"ancillary",
{
"name": "cosine_i",
"min": 0.12,
"max": "+inf"
}
]
],
"c_fit_type": "nnls",
"subgrouped":true,
"sample_perc": 0.2,
"subgroup": {
"/data/AVNG/ang20231109t123229_000_L2A_OE_0b4f48b4_RFL_ORT.nc":"group01",
"/data/AVNG/ang20231109t123229_001_L2A_OE_0b4f48b4_RFL_ORT.nc":"group00",
"/data/AVNG/ang20231109t123229_002_L2A_OE_0b4f48b4_RFL_ORT.nc":"group00",
"/data/AVNG/ang20231109t124405_002_L2A_OE_0b4f48b4_RFL_ORT.nc":"group02"
}
},
"brdf": {
"solar_zn_type": "scene",
"type": "flex",
"grouped": true,
"geometric": "li_sparse_r",
"volume": "ross_thick",
"b/r": 2.5,
"h/b": 2,
"sample_perc": 0.1,
"interp_kind": "linear",
"calc_mask": [
[
"ndi",
{
"band_1": 850,
"band_2": 660,
"min": 0.05,
"max": 1.0
}
],
[
"kernel_finite",
{}
],
[
"ancillary",
{
"name": "sensor_zn",
"min": 0.03490658503988659,
"max": "inf"
}
],
[
"cloud",
{
"method": "zhai_2018",
"cloud": true,
"shadow": true,
"T1": 1,
"t2": 0.1,
"t3": 0.3333333333333333,
"t4": 0.5,
"T7": 16,
"T8": 16
}
]
],
"apply_mask": [
[
"ndi",
{
"band_1": 850,
"band_2": 660,
"min": 0.05,
"max": 1.0
}
]
],
"bin_type": "dynamic",
"num_bins": 18,
"ndvi_bin_min": 0.05,
"ndvi_bin_max": 1.0,
"ndvi_perc_min": 10,
"ndvi_perc_max": 95
},
"resample": false,
"num_cpus": 4
}

@ -0,0 +1,156 @@
{
"bad_bands": [],
"file_type": "envi",
"input_files": [
"D:/BaiduNetdiskDownload/20250902/_3_52_52/rad2geo/R/2025_9_2_3_53_45_202592_35252_0_rad_geo_corrected_reflectance.dat",
"D:/BaiduNetdiskDownload/20250902/_3_52_52/rad2geo/R/2025_9_2_3_53_45_202592_35252_1_rad_geo_corrected_reflectance.dat"
],
"anc_files": {
"D:/BaiduNetdiskDownload/20250902/_3_52_52/rad2geo/R/2025_9_2_3_53_45_202592_35252_0_rad_geo_corrected_reflectance.dat": {
"path_length": [
"D:/BaiduNetdiskDownload/20250902/_3_52_52/BRDF/angle/test/2025_9_2_3_53_45_202592_35252_0_rad_rgbxyz_geo.bip_with_angles.bip",
0
],
"sensor_az": [
"D:/BaiduNetdiskDownload/20250902/_3_52_52/BRDF/angle/test/2025_9_2_3_53_45_202592_35252_0_rad_rgbxyz_geo.bip_with_angles.bip",
9
],
"sensor_zn": [
"D:/BaiduNetdiskDownload/20250902/_3_52_52/BRDF/angle/test/2025_9_2_3_53_45_202592_35252_0_rad_rgbxyz_geo.bip_with_angles.bip",
8
],
"solar_az": [
"D:/BaiduNetdiskDownload/20250902/_3_52_52/BRDF/angle/test/2025_9_2_3_53_45_202592_35252_0_rad_rgbxyz_geo.bip_with_angles.bip",
7
],
"solar_zn": [
"D:/BaiduNetdiskDownload/20250902/_3_52_52/BRDF/angle/test/2025_9_2_3_53_45_202592_35252_0_rad_rgbxyz_geo.bip_with_angles.bip",
6
],
"phase": [
"D:/BaiduNetdiskDownload/20250902/_3_52_52/BRDF/angle/test/2025_9_2_3_53_45_202592_35252_0_rad_rgbxyz_geo.bip_with_angles.bip",
0
],
"slope": [
"D:/BaiduNetdiskDownload/20250902/_3_52_52/BRDF/angle/test/2025_9_2_3_53_45_202592_35252_0_rad_rgbxyz_geo.bip_with_angles.bip",
0
],
"aspect": [
"D:/BaiduNetdiskDownload/20250902/_3_52_52/BRDF/angle/test/2025_9_2_3_53_45_202592_35252_0_rad_rgbxyz_geo.bip_with_angles.bip",
0
],
"cosine_i": [
"D:/BaiduNetdiskDownload/20250902/_3_52_52/BRDF/angle/test/2025_9_2_3_53_45_202592_35252_0_rad_rgbxyz_geo.bip_with_angles.bip",
0
],
"utc_time": [
"D:/BaiduNetdiskDownload/20250902/_3_52_52/BRDF/angle/test/2025_9_2_3_53_45_202592_35252_0_rad_rgbxyz_geo.bip_with_angles.bip",
0
]
},
"D:/BaiduNetdiskDownload/20250902/_3_52_52/rad2geo/R/2025_9_2_3_53_45_202592_35252_1_rad_geo_corrected_reflectance.dat": {
"path_length": [
"D:/BaiduNetdiskDownload/20250902/_3_52_52/BRDF/angle/test/2025_9_2_3_53_45_202592_35252_1_rad_rgbxyz_geo.bip_angles.bip",
0
],
"sensor_az": [
"D:/BaiduNetdiskDownload/20250902/_3_52_52/BRDF/angle/test/2025_9_2_3_53_45_202592_35252_1_rad_rgbxyz_geo.bip_angles.bip",
9
],
"sensor_zn": [
"D:/BaiduNetdiskDownload/20250902/_3_52_52/BRDF/angle/test/2025_9_2_3_53_45_202592_35252_1_rad_rgbxyz_geo.bip_angles.bip",
8
],
"solar_az": [
"D:/BaiduNetdiskDownload/20250902/_3_52_52/BRDF/angle/test/2025_9_2_3_53_45_202592_35252_1_rad_rgbxyz_geo.bip_angles.bip",
7
],
"solar_zn": [
"D:/BaiduNetdiskDownload/20250902/_3_52_52/BRDF/angle/test/2025_9_2_3_53_45_202592_35252_1_rad_rgbxyz_geo.bip_angles.bip",
6
],
"phase": [
"D:/BaiduNetdiskDownload/20250902/_3_52_52/BRDF/angle/test/2025_9_2_3_53_45_202592_35252_1_rad_rgbxyz_geo.bip_angles.bip",
0
],
"slope": [
"D:/BaiduNetdiskDownload/20250902/_3_52_52/BRDF/angle/test/2025_9_2_3_53_45_202592_35252_1_rad_rgbxyz_geo.bip_angles.bip",
0
],
"aspect": [
"D:/BaiduNetdiskDownload/20250902/_3_52_52/BRDF/angle/test/2025_9_2_3_53_45_202592_35252_1_rad_rgbxyz_geo.bip_angles.bip",
0
],
"cosine_i": [
"D:/BaiduNetdiskDownload/20250902/_3_52_52/BRDF/angle/test/2025_9_2_3_53_45_202592_35252_1_rad_rgbxyz_geo.bip_angles.bip",
0
],
"utc_time": [
"D:/BaiduNetdiskDownload/20250902/_3_52_52/BRDF/angle/test/2025_9_2_3_53_45_202592_35252_1_rad_rgbxyz_geo.bip_angles.bip",
0
]
}
},
"num_cpus": 10,
"export": {
"coeffs": true,
"image": true,
"masks": true,
"subset_waves": [],
"output_dir": "E:/code/hytools-master/hytools-master/data/output",
"suffix": "brdf_glint"
},
"corrections": [
"brdf"
],
"brdf": {
"type": "flex",
"grouped": true,
"geometric": "li_dense_r",
"volume": "ross_thick",
"b/r": 2.5,
"h/b": 2,
"sample_perc": 0.1,
"interp_kind": "linear",
"calc_mask": [
[
"water",
{
"band_1": 850,
"band_2": 660,
"threshold": 290
}
],
[
"kernel_finite",
{}
],
[
"ancillary",
{
"name": "sensor_zn",
"min": 0.03490658503988659,
"max": "inf"
}
]
],
"apply_mask": [
[
"water",
{
"band_1": 850,
"band_2": 660,
"threshold": 290
}
]
],
"bin_type": "dynamic",
"num_bins": 18,
"ndvi_bin_min": 0.05,
"ndvi_bin_max": 1.0,
"ndvi_perc_min": 10,
"ndvi_perc_max": 95,
"solar_zn_type": "scene"
},
"resample": false
}

@ -0,0 +1,278 @@
{
"bad_bands": [],
"file_type": "envi",
"input_files": [
"/data2/avng/l2/hytools_avng_example/ang20190629t203832_rfl",
"/data2/avng/l2/hytools_avng_example/ang20190629t210339_rfl"
],
"anc_files": {
"/data2/avng/l2/hytools_avng_example/ang20190629t203832_rfl": {
"path_length": [
"/data2/avng/l2/hytools_avng_example/ang20190629t203832_obs",
0
],
"sensor_az": [
"/data2/avng/l2/hytools_avng_example/ang20190629t203832_obs",
1
],
"sensor_zn": [
"/data2/avng/l2/hytools_avng_example/ang20190629t203832_obs",
2
],
"solar_az": [
"/data2/avng/l2/hytools_avng_example/ang20190629t203832_obs",
3
],
"solar_zn": [
"/data2/avng/l2/hytools_avng_example/ang20190629t203832_obs",
4
],
"phase": [
"/data2/avng/l2/hytools_avng_example/ang20190629t203832_obs",
5
],
"slope": [
"/data2/avng/l2/hytools_avng_example/ang20190629t203832_obs",
6
],
"aspect": [
"/data2/avng/l2/hytools_avng_example/ang20190629t203832_obs",
7
],
"cosine_i": [
"/data2/avng/l2/hytools_avng_example/ang20190629t203832_obs",
8
],
"utc_time": [
"/data2/avng/l2/hytools_avng_example/ang20190629t203832_obs",
9
]
},
"/data2/avng/l2/hytools_avng_example/ang20190629t210339_rfl": {
"path_length": [
"/data2/avng/l2/hytools_avng_example/ang20190629t210339_obs",
0
],
"sensor_az": [
"/data2/avng/l2/hytools_avng_example/ang20190629t210339_obs",
1
],
"sensor_zn": [
"/data2/avng/l2/hytools_avng_example/ang20190629t210339_obs",
2
],
"solar_az": [
"/data2/avng/l2/hytools_avng_example/ang20190629t210339_obs",
3
],
"solar_zn": [
"/data2/avng/l2/hytools_avng_example/ang20190629t210339_obs",
4
],
"phase": [
"/data2/avng/l2/hytools_avng_example/ang20190629t210339_obs",
5
],
"slope": [
"/data2/avng/l2/hytools_avng_example/ang20190629t210339_obs",
6
],
"aspect": [
"/data2/avng/l2/hytools_avng_example/ang20190629t210339_obs",
7
],
"cosine_i": [
"/data2/avng/l2/hytools_avng_example/ang20190629t210339_obs",
8
],
"utc_time": [
"/data2/avng/l2/hytools_avng_example/ang20190629t210339_obs",
9
]
}
},
"num_cpus": 2,
"export": {
"coeffs": false,
"image": true,
"masks": true,
"subset_waves": [],
"output_dir": "/data2/avng/l2/hytools_avng_example/",
"suffix": "topo_brdf_glint"
},
"topo": {
"type": "scs+c",
"calc_mask": [
[
"ndi",
{
"band_1": 550,
"band_2": 2150,
"min": -1,
"max": 0
}
],
[
"ndi",
{
"band_1": 850,
"band_2": 660,
"min": 0.1,
"max": 1
}
],
[
"ancillary",
{
"name": "slope",
"min": 0.08726646259971647,
"max": "+inf"
}
],
[
"ancillary",
{
"name": "cosine_i",
"min": 0.12,
"max": "+inf"
}
]
],
"apply_mask": [
[
"ndi",
{
"band_1": 550,
"band_2": 2150,
"min": -1,
"max": 0
}
],
[
"ndi",
{
"band_1": 850,
"band_2": 660,
"min": 0.1,
"max": 1
}
],
[
"ancillary",
{
"name": "slope",
"min": 0.08726646259971647,
"max": "+inf"
}
],
[
"ancillary",
{
"name": "cosine_i",
"min": 0.12,
"max": "+inf"
}
]
],
"c_fit_type": "nnls"
},
"corrections": [
"topo",
"brdf",
"glint"
],
"brdf": {
"type": "flex",
"grouped": true,
"geometric": "li_dense_r",
"volume": "ross_thick",
"b/r": 2.5,
"h/b": 2,
"sample_perc": 0.1,
"interp_kind": "linear",
"calc_mask": [
[
"ndi",
{
"band_1": 550,
"band_2": 2150,
"min": -1,
"max": 0
}
],
[
"ndi",
{
"band_1": 850,
"band_2": 660,
"min": 0.1,
"max": 1
}
],
[
"kernel_finite",
{}
],
[
"ancillary",
{
"name": "sensor_zn",
"min": 0.03490658503988659,
"max": "inf"
}
]
],
"apply_mask": [
[
"ndi",
{
"band_1": 550,
"band_2": 2150,
"min": -1,
"max": 0
}
],
[
"ndi",
{
"band_1": 850,
"band_2": 660,
"min": 0.1,
"max": 1
}
]
],
"bin_type": "dynamic",
"num_bins": 18,
"ndvi_bin_min": 0.05,
"ndvi_bin_max": 1.0,
"ndvi_perc_min": 10,
"ndvi_perc_max": 95,
"solar_zn_type": "scene"
},
"glint": {
"type": "hochberg",
"correction_wave": 2150,
"apply_mask": [
[
"ndi",
{
"band_1": 550,
"band_2": 2150,
"min": 0,
"max": 1
}
],
[
"ndi",
{
"band_1": 850,
"band_2": 660,
"min": -1,
"max": 0.1
}
]
]
},
"resample": false
}

@ -0,0 +1,281 @@
{
"bad_bands": [
[
300,
500
],
[
900,
2600
]
],
"file_type": "envi",
"input_files": [
"/data/f130612t01p00r05_rfl_v1a_img",
"/data/f130612t01p00r06_rfl_v1a_img",
"/data/f130612t01p00r07_rfl_v1a_img"
],
"anc_files": {
"/data/f130612t01p00r05_rfl_v1a_img": {
"path_length": [
"/data/f130612t01p00r05rdn_e_obs_ort",
0
],
"sensor_az": [
"/data/f130612t01p00r05rdn_e_obs_ort",
1
],
"sensor_zn": [
"/data/f130612t01p00r05rdn_e_obs_ort",
2
],
"solar_az": [
"/data/f130612t01p00r05rdn_e_obs_ort",
3
],
"solar_zn": [
"/data/f130612t01p00r05rdn_e_obs_ort",
4
],
"phase": [
"/data/f130612t01p00r05rdn_e_obs_ort",
5
],
"slope": [
"/data/f130612t01p00r05rdn_e_obs_ort",
6
],
"aspect": [
"/data/f130612t01p00r05rdn_e_obs_ort",
7
],
"cosine_i": [
"/data/f130612t01p00r05rdn_e_obs_ort",
8
],
"utc_time": [
"/data/f130612t01p00r05rdn_e_obs_ort",
9
]
},
"/data/f130612t01p00r06_rfl_v1a_img": {
"path_length": [
"/data/f130612t01p00r06rdn_e_obs_ort",
0
],
"sensor_az": [
"/data/f130612t01p00r06rdn_e_obs_ort",
1
],
"sensor_zn": [
"/data/f130612t01p00r06rdn_e_obs_ort",
2
],
"solar_az": [
"/data/f130612t01p00r06rdn_e_obs_ort",
3
],
"solar_zn": [
"/data/f130612t01p00r06rdn_e_obs_ort",
4
],
"phase": [
"/data/f130612t01p00r06rdn_e_obs_ort",
5
],
"slope": [
"/data/f130612t01p00r06rdn_e_obs_ort",
6
],
"aspect": [
"/data/f130612t01p00r06rdn_e_obs_ort",
7
],
"cosine_i": [
"/data/f130612t01p00r06rdn_e_obs_ort",
8
],
"utc_time": [
"/data/f130612t01p00r06rdn_e_obs_ort",
9
]
},
"/data/f130612t01p00r07_rfl_v1a_img": {
"path_length": [
"/data/f130612t01p00r07rdn_e_obs_ort",
0
],
"sensor_az": [
"/data/f130612t01p00r07rdn_e_obs_ort",
1
],
"sensor_zn": [
"/data/f130612t01p00r07rdn_e_obs_ort",
2
],
"solar_az": [
"/data/f130612t01p00r07rdn_e_obs_ort",
3
],
"solar_zn": [
"/data/f130612t01p00r07rdn_e_obs_ort",
4
],
"phase": [
"/data/f130612t01p00r07rdn_e_obs_ort",
5
],
"slope": [
"/data/f130612t01p00r07rdn_e_obs_ort",
6
],
"aspect": [
"/data/f130612t01p00r07rdn_e_obs_ort",
7
],
"cosine_i": [
"/data/f130612t01p00r07rdn_e_obs_ort",
8
],
"utc_time": [
"/data/f130612t01p00r07rdn_e_obs_ort",
9
]
}
},
"export": {
"coeffs": true,
"image": false,
"masks": false,
"subset_waves": [
440,
560,
660,
850
],
"output_dir": "/data/out/topogroup/",
"suffix": "topo_brdf"
},
"corrections": [
"topo",
"brdf"
],
"topo": {
"type": "scs+c",
"calc_mask": [
[
"ndi",
{
"band_1": 850,
"band_2": 660,
"min": 0.05,
"max": 1.0
}
],
[
"ancillary",
{
"name": "slope",
"min": 0.08726646259971647,
"max": "+inf"
}
],
[
"ancillary",
{
"name": "cosine_i",
"min": 0.12,
"max": "+inf"
}
]
],
"apply_mask": [
[
"ndi",
{
"band_1": 850,
"band_2": 660,
"min": 0.05,
"max": 1.0
}
],
[
"ancillary",
{
"name": "slope",
"min": 0.08726646259971647,
"max": "+inf"
}
],
[
"ancillary",
{
"name": "cosine_i",
"min": 0.12,
"max": "+inf"
}
]
],
"c_fit_type": "nnls",
"subgrouped":true,
"sample_perc": 0.01,
"subgroup": {
"/data/f130612t01p00r05_rfl_v1a_img":"group00",
"/data/f130612t01p00r06_rfl_v1a_img":"group00",
"/data/f130612t01p00r07_rfl_v1a_img":"group01"
}
},
"brdf": {
"solar_zn_type": "scene",
"type": "flex",
"grouped": true,
"geometric": "li_dense_r",
"volume": "ross_thick",
"b/r": 2.5,
"h/b": 2,
"sample_perc": 0.04,
"interp_kind": "linear",
"calc_mask": [
[
"ndi",
{
"band_1": 850,
"band_2": 660,
"min": 0.05,
"max": 1.0
}
],
[
"kernel_finite",
{}
],
[
"ancillary",
{
"name": "sensor_zn",
"min": 0.03490658503988659,
"max": "inf"
}
]
],
"apply_mask": [
[
"ndi",
{
"band_1": 850,
"band_2": 660,
"min": 0.05,
"max": 1.0
}
]
],
"bin_type": "dynamic",
"num_bins": 18,
"ndvi_bin_min": 0.05,
"ndvi_bin_max": 1.0,
"ndvi_perc_min": 10,
"ndvi_perc_max": 95
},
"resample": false,
"num_cpus": 3
}

File diff suppressed because one or more lines are too long

Binary file not shown. (image added, 106 KiB)

Binary file not shown. (image added, 159 KiB)

Binary file not shown. (image added, 146 KiB)

Binary file not shown. (image added, 640 KiB)

Binary file not shown. (image added, 633 KiB)

@ -0,0 +1,162 @@
# Processing NetCDF file
In addition to ENVI and HDF5, NetCDF is supported for reading and writing. A Geographic Lookup Table (GLT) can also be used to reproject the input raster image.
The configuration JSON file is slightly different from the basic configuration (an example for topographic correction: [nc_topo_correct_config.json](../examples/configs/nc_topo_correct_config.json)).
## Topographic Correction
NetCDF files and their OBS files can be accessed to implement basic normalizations such as topographic correction. The configuration file can be set up as below; ```anc_files``` can be in either NetCDF or ENVI format.
```json
"anc_files": {
"/data/EMIT/EMIT_L2A_RFL_001_20231101T024133_2330502_014.nc": {
"path_length": [
"/data/EMIT/EMIT_L1B_OBS_001_20231101T024133_2330502_014.nc",
0
],
"sensor_az": [
"/data/EMIT/EMIT_L1B_OBS_001_20231101T024133_2330502_014.nc",
1
],
"sensor_zn": [
"/data/EMIT/EMIT_L1B_OBS_001_20231101T024133_2330502_014.nc",
2
],
"solar_az": [
"/data/EMIT/EMIT_L1B_OBS_001_20231101T024133_2330502_014.nc",
3
],
"solar_zn": [
"/data/EMIT/EMIT_L1B_OBS_001_20231101T024133_2330502_014.nc",
4
],
"phase": [
"/data/EMIT/EMIT_L1B_OBS_001_20231101T024133_2330502_014.nc",
5
],
"slope": [
"/data/EMIT/EMIT_L1B_OBS_001_20231101T024133_2330502_014_nc_warp_v1.bsq",
0
],
"aspect": [
"/data/EMIT/EMIT_L1B_OBS_001_20231101T024133_2330502_014_nc_warp_v1.bsq",
1
],
"cosine_i": [
"/data/EMIT/EMIT_L1B_OBS_001_20231101T024133_2330502_014_nc_warp_v1.bsq",
2
],
"utc_time": [
"/data/EMIT/EMIT_L1B_OBS_001_20231101T024133_2330502_014.nc",
9
]
}
},
```
There is a sample image correction script for NetCDF files and GLT ([image_correct_export_nc.py](../scripts/image_correct_export_nc.py)).
```bash
python ./scripts/image_correct_with_glt.py path/to/the/configuration/json/file
```
Before and after topographic correction:
![Before](./img/emit_aus_subset1_uncorr.png "Before")
![After](./img/emit_aus_subset1_topo.png "After")
## External GLT
Many NetCDF files have an internal GLT, but sometimes a geo-rectification is needed, and a new corrected GLT can be used to warp the pixels to new geographic positions.
If the internal GLT is used, set a blank GLT file path:
```json
... ...
},
"glt_files": { },
"export": {
... ...
```
An external GLT can be specified in the configuration file; it should have at least two bands: GLT_X and GLT_Y.
```json
... ...
},
"glt_files": {
"/data/EMIT/EMIT_L2A_RFL_001_20231101T024133_2330502_014.nc": {
"glt_x": ["/data/EMIT/EMIT_L2A_RFL_001_20231101T024133_2330502_014_glt.bsq",1],
"glt_y": ["/data/EMIT/EMIT_L2A_RFL_001_20231101T024133_2330502_014_glt.bsq",0]
}
},
"export": {
... ...
```
Whether the GLT is used to warp the image is controlled by ```use_glt``` in ```export```. A sketch of how a GLT warp works is shown after the snippet below.
```json
"export": {
...
"image": true,
"use_glt":true,
...
},
```
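Conceptually, a GLT stores, for each output (georeferenced) pixel, the column (GLT_X) and row (GLT_Y) of the source pixel, with 0 marking nodata. A minimal warp can then be done with array indexing. This is a sketch of the general idea, not the library's implementation; index and nodata conventions may differ by product.
```python
import numpy as np

def glt_warp(image, glt_x, glt_y, nodata=-9999):
    """Warp a (lines, samples) band to the GLT grid.

    glt_x/glt_y hold 1-based source column/row indices; 0 means nodata.
    """
    out = np.full(glt_x.shape, nodata, dtype=image.dtype)
    valid = (glt_x > 0) & (glt_y > 0)
    # Convert the 1-based GLT indices to 0-based array indices.
    out[valid] = image[glt_y[valid] - 1, glt_x[valid] - 1]
    return out
```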
Export without using GLT
![RGB without GLT](./img/emit_001_20231101T024133_2330502_014_rgb.jpg "Raw")
Export warped image with GLT
![RGB with GLT](./img/emit_001_20231101T024133_2330502_014_rgb_warp.jpg "warp")
Whether a NetCDF image is exported is controlled by ```image_format``` in ```export```; ```"envi"``` is the default output format if ```image_format``` is not set. If ```"netcdf"``` is selected, information from ```"outside_metadata"``` will be written to the output. The information can be given either as a dictionary in the config file or as a JSON file.
```json
"export": {
...
"image_format": "netcdf",
...
},
...
"outside_metadata":{ "XXX":"XXXXX", ... },
...
```
Alternatively, it can be
```json
"export": {
...
"image_format": "netcdf",
...
},
...
"outside_metadata":"XXX/XXXX.json",
...
```
## Trait Prediction
The output format of the trait prediction image file can also be ```"netcdf"``` (the default is ```"envi"```).
There is a sample trait export script for NetCDF files and GLT ([trait_estimate_nc.py](../scripts/trait_estimate_nc.py)).
Accordingly, several items in the trait configuration file have to be set for this purpose, as shown below.
```json
{
"file_type": "ncav",
"export_type":"netcdf",
"use_glt":false,
"output_dir": "...",
"outside_metadata": "...",
...
}
```
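A minimal sketch of the invocation, following the pattern of the other command line tools (the configuration path is a placeholder):
```bash
python ./scripts/trait_estimate_nc.py path/to/the/trait/configuration/json/file
```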

@ -0,0 +1,100 @@
## FlexBRDF without Ray
In some distributed systems, such as a [High Throughput Computing system](https://chtc.cs.wisc.edu/) (HTC), the workers do not share memory, or compute and storage resources are limited. In such situations, the FlexBRDF process cannot be assigned as many cores as there are flightlines in the group, and Ray cannot run easily on a limited number of CPU cores.
To work around this in the FlexBRDF context, the samples needed for BRDF modeling are extracted from each flightline and saved individually, then transferred to a common storage location for the final BRDF coefficient estimation step. The extraction step can be executed either sequentially or in parallel, depending on the system.
In the current solution, the extracted samples are stored in HDF5 files, one .h5 file per flightline in the group. Once all extractions are done, the .h5 files are gathered and used for the final step.
Taking HTC as an example, DAG management can be used to monitor and control the workflow.
Two scripts are used consecutively for this purpose
([image_correct_get_sample_chtc.py](../scripts/no_ray/image_correct_get_sample_chtc.py) and [image_correct_combine_sample_chtc.py](../scripts/no_ray/image_correct_combine_sample_chtc.py)).
### Extraction
The target flightline is selected with the last command-line parameter; its index follows the order of ```input_files``` in the configuration JSON file. Each run in this step is independent. The output .h5 file is stored in the output path defined in the configuration file, which does not have to be the same for every run. If ```topo``` is enabled, the resulting coefficient JSON file is also stored there, and the reflectance samples extracted to the .h5 file are topographically corrected.
Make sure the bands used for computing NDVI are within the good-band range of the configuration file.
```bash
# first line
python ./scripts/no_ray/image_correct_get_sample_chtc.py path/to/the/configuration/json/file 0
# second line
python ./scripts/no_ray/image_correct_get_sample_chtc.py path/to/the/configuration/json/file 1
# third line
python ./scripts/no_ray/image_correct_get_sample_chtc.py path/to/the/configuration/json/file 2
# so forth
... ...
```
### Combination
After all the independent jobs have finished, the final step combines all samples and continues the BRDF modeling.
```bash
python ./scripts/no_ray/image_correct_combine_sample_chtc.py path/to/the/configuration/json/file folder/of/the/h5files/in/the/same/group
```
All BRDF coefficient JSON files will be stored in the path specified in the configuration JSON file.
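As an illustration (paths hypothetical, dataset name as written by the extraction script), the combination step pools the per-flightline samples from each .h5 file before fitting the BRDF coefficients:
```python
import glob
import h5py
import numpy as np

samples = []
for h5name in sorted(glob.glob("folder/of/the/h5files/*.h5")):
    with h5py.File(h5name, "r") as f:
        samples.append(f["reflectance_samples"][()])
pooled = np.concatenate(samples, axis=0)  # all group samples (pixels x good bands)
```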
### Script to run the two steps together
If the scripts are not run in an HTC-like system, they can still be run on a single machine with multiple CPU cores. This script is a simplified version of the workflow that combines the two FlexBRDF steps ([run_single_process_merge.py](../scripts/no_ray/run_single_process_merge.py)). It requires "*path/to/the/configuration/json/file*" and the total number of flightlines in the group as inputs, and it launches multiple instances of the script [image_correct_get_sample_chtc.py](../scripts/no_ray/image_correct_get_sample_chtc.py)
```python
import sys, os
import multiprocessing
import subprocess, json
exec_str="python ./scripts/no_ray/image_correct_get_sample_chtc.py "
merge_str="python ./scripts/no_ray/image_correct_combine_sample_chtc.py {} {}"
def run_command(command):
print(command)
subprocess.run(command,shell=True)
def main():
config_file = sys.argv[1]
total_count = int(sys.argv[2])
worker_count = min(os.cpu_count()-1,total_count)
with open(config_file, 'r') as outfile:
config_dict = json.load(outfile)
h5_folder=config_dict["export"]["output_dir"]
pool = multiprocessing.Pool(processes=worker_count)
commands = [f"{exec_str} {config_file} {order}" for order in range(total_count)]
pool.map(run_command, commands)
pool.close()
pool.join() # Wait for all subprocesses to finish
print('All extractions are done.')
# Final step to use all pixels in the group to estimate BRDF coefficients
subprocess.run(merge_str.format(config_file,h5_folder),shell=True)
if __name__== "__main__":
main()
```
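For example, ```python ./scripts/no_ray/run_single_process_merge.py path/to/the/configuration/json/file 5``` would extract samples from a five-flightline group in parallel and then run the combination step.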
### Script to run the three steps together for subgrouped TOPO correction
There is also a script that combines the three FlexBRDF steps with subgrouping for TOPO ([run_single_process_topogroup_merge_v3.py](../scripts/no_ray/run_single_process_topogroup_merge_v3.py)). It controls and monitors the workflow of three related scripts: [image_correct_get_raw_sample_chtc.py](../scripts/no_ray/image_correct_get_raw_sample_chtc.py) for extracting raw samples, [image_correct_combine_topo_sample_chtc.py](../scripts/no_ray/image_correct_combine_topo_sample_chtc.py) for gathering samples in the same TOPO subgroup, and [image_correct_combine_sample_chtc.py](../scripts/no_ray/image_correct_combine_sample_chtc.py) for the final BRDF merge.
### Export corrected images
After the previous two steps of TOPO/BRDF model estimation, users can export corrected images with the precomputed coefficients.
This script is a simplified version of the workflow for exporting images and masks ([run_single_process_export.py](../scripts/no_ray/run_single_process_export.py)). It requires "*path/to/the/configuration/json/file*" and the total number of flightlines in the group as inputs, and it launches multiple instances of the script [image_correct_export_image.py](../scripts/no_ray/image_correct_export_image.py).
### Export trait estimation with corrected image
The correction coefficients can also be used for trait mapping. If any of the three corrections is enabled, trait prediction is applied to the corrected pixels.
This script is a simplified version of the workflow for exporting trait maps ([run_single_process_trait.py](../scripts/no_ray/run_single_process_trait.py)). It requires "*path/to/the/trait/export/configuration/json/file*", the total number of flightlines in the group, and the total number of traits as inputs, and it launches multiple instances of the script [trait_estimate_inde.py](../scripts/no_ray/trait_estimate_inde.py).
@ -0,0 +1,22 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Initialize hytools
"""
from .base import HyTools
791
Flexbrdf/hytools/base.py Normal file
@ -0,0 +1,791 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Base
TODO: Add corrections to ndi()
"""
import os
import json
import numpy as np
import h5py
import warnings
import sys
from .io.envi import envi_read_band,envi_read_pixels
from .io.envi import envi_read_line,envi_read_column,envi_read_chunk
from .io.envi import open_envi,parse_envi_header,envi_header_from_neon,envi_header_from_nc
from .io.neon import open_neon
from .io.netcdf import open_netcdf
from .brdf import apply_brdf_correct
from .glint import apply_glint_correct
from .brdf.kernels import calc_volume_kernel,calc_geom_kernel
from .topo import calc_cosine_i,apply_topo_correct
from .transform.resampling import *
warnings.filterwarnings("ignore")
class HyTools:
"""HyTools file object"""
def __init__(self):
"""Constructor method
"""
self.anc_path = {}
self.ancillary = {}
self.bad_bands = []
self.bands = None
self.base_key = None
self.base_name = None
self.brdf = {'type': None}
self.glint= {'type': None}
self.byte_order = None
self.columns = None
self.columns_glt = None
self.corrections = []
self.crs = None
self.data = None
self.dtype = None
self.endianness = None
self.file_name = None
self.file_type = None
self.fill_mask = None
self.fwhm = []
self.glt_path = {}
self.glt_x = None
self.glt_y = None
self.glt_projection = None
self.glt_map_info = None
self.glt_transform = None
self.hdf_obj = None
self.interleave = None
self.lines = None
self.lines_glt = None
self.map_info = None
self.mask = {}
self.nc4_obj = None
self.no_data = None
self.offset = 0
self.projection = None
self.transform = None
self.resampler = {'type': None}
self.shape = None
self.topo = {'type': None}
self.ulx = None
self.uly = None
self.wavelength_units = None
self.wavelengths = []
def read_file(self,file_name,file_type = 'envi',anc_path = None, ext = False, glt_path = None):
self.file_name = file_name
self.file_type = file_type
if file_type == 'envi':
open_envi(self,anc_path,ext,glt_path)
elif file_type == "neon":
open_neon(self)
elif file_type == "emit":
open_netcdf(self,'EMIT',anc_path,glt_path)
elif file_type == "ncav":
open_netcdf(self,'AV',anc_path,glt_path)
else:
print("Unrecognized file type.")
# Create a no data mask
if self.bands>11:
self.mask['no_data'] = self.get_wave(660) > 0.5*self.no_data
else:
self.mask['no_data'] = self.get_band(0) > 0.5*self.no_data
#Match mask with ancillary mask
if anc_path:
if file_type == 'envi':
ancillary = HyTools()
ancillary.read_file(self.anc_path['solar_zn'][0],'envi')
if not np.array_equal(self.mask['no_data'],ancillary.mask['no_data']):
print('Reflectance and ancillary no data extents do not match, combining no data masks.')
self.mask['no_data'] &= ancillary.mask['no_data']
ancillary.close_data()
del ancillary
elif file_type == 'emit' and not self.anc_path['slope'][0].endswith('.nc'):
ancillary = HyTools()
ancillary.read_file(self.anc_path['slope'][0],'envi')
if not np.array_equal(self.mask['no_data'],ancillary.mask['no_data']):
print('Reflectance and ancillary no data extents do not match, combining no data masks.')
self.mask['no_data'] &= ancillary.mask['no_data']
ancillary.close_data()
del ancillary
self.base_name = os.path.basename(os.path.splitext(self.file_name)[0])
def create_bad_bands(self,bad_regions):
"""Create bad bands mask, Good: True, bad : False.
Args:
bad_regions (list of lists): start and end values of wavelength
regions considered bad. Wavelengths should be in the same units as
data units. ex: [[350,400].....[2450,2500]].
Returns:
None.
"""
bad_bands = []
for wavelength in self.wavelengths:
bad=False
for start,end in bad_regions:
bad = ((wavelength >= start) & (wavelength <=end)) or bad
bad_bands.append(bad)
self.bad_bands = np.array(bad_bands)
def load_data(self, mode = 'r'):
"""Load data object to memory.
Args:
mode (str, optional): File read mode. Defaults to 'r'.
Returns:
None.
"""
if self.file_type == "envi":
self.data = np.memmap(self.file_name,dtype = self.dtype, mode=mode,
shape = self.shape,offset=self.offset)
if bool(self.glt_path):
self.glt_x = self.load_glt('glt_x')
self.glt_y = self.load_glt('glt_y')
if not self.glt_x is None:
self.fill_mask = self.glt_x>0
self.glt_x = self.glt_x.astype(np.int16)
self.glt_y = self.glt_y.astype(np.int16)
elif self.file_type == "neon":
self.hdf_obj = h5py.File(self.file_name,'r')
self.data = self.hdf_obj[self.base_key]["Reflectance"]["Reflectance_Data"]
elif self.file_type == "emit":
self.nc4_obj = h5py.File(self.file_name,'r')
self.data = self.nc4_obj[self.base_key]
self.glt_x = self.load_glt('glt_x').astype(np.int16)
self.glt_y = self.load_glt('glt_y').astype(np.int16)
self.fill_mask = self.glt_x>0
elif self.file_type == "ncav":
self.nc4_obj = h5py.File(self.file_name,'r')
self.data = self.nc4_obj[self.base_key][self.base_key]
self.glt_x = self.load_glt('glt_x')
self.glt_y = self.load_glt('glt_y')
if not self.glt_x is None:
self.fill_mask = self.glt_x>0
self.glt_x = self.glt_x.astype(np.int16)
self.glt_y = self.glt_y.astype(np.int16)
def close_data(self):
"""Close data object.
"""
if self.file_type == "envi":
del self.data
elif self.file_type == "neon":
self.hdf_obj.close()
self.hdf_obj = None
elif self.file_type == "emit" or self.file_type == "ncav":
self.nc4_obj.close()
self.nc4_obj = None
self.data = None
def iterate(self,by,chunk_size= (100,100),corrections = [],resample=False):
"""Create data Iterator.
Args:
by (str): Dimension along which to iterate: "line","column","band","chunk".
chunk_size (tuple, optional): Two dimensional chunk size (Y,X).
Applies only when "chunk" selected.
Defaults to (100,100).
Returns:
Iterator class object: Data Iterator.
"""
return Iterator(self,by,chunk_size,corrections =corrections,resample=resample)
def wave_to_band(self,wave):
"""Return band index corresponding to input wavelength. Return closest band if
not an exact match.
Args:
wave (float): Wavelength of band to be retrieved in image wavelength units.
Returns:
int: Band index.
"""
if (wave > self.wavelengths.max()) | (wave < self.wavelengths.min()):
print("Input wavelength outside image range!")
band_num = None
else:
band_num = np.argmin(np.abs(self.wavelengths - wave))
return band_num
def get_band(self,index,corrections= [], mask =None):
"""
Args:
index (int): Zero-indexed band index.
mask (str): Return masked values using named mask.
corrections(list): Corrections to apply, will be applied in
order listed.
Returns:
numpy.ndarray: A 2D (lines x columns) array or 1D if masked.
"""
self.load_data()
if self.file_type == "neon":
band = self.data[:,:,index]
elif self.file_type == "emit":
band = self.data[:,:,index]
elif self.file_type == "ncav":
band = self.data[index,:,:]
elif self.file_type == "envi":
band = envi_read_band(self.data,index,self.interleave)
if self.endianness != sys.byteorder:
band = band.byteswap()
self.close_data()
band = self.correct(band,'band',index,corrections)
if mask:
band = band[self.mask[mask]]
return band
def get_wave(self,wave,corrections= [],mask =None):
"""Return the band image corresponding to the input wavelength.
If not an exact match the closest wavelength will be returned.
Args:
wave (float): Wavelength in image units.
mask (str): Return masked values using named mask.
Returns:
numpy.ndarray: Band image array (line,columns).
"""
if (wave > self.wavelengths.max()) | (wave < self.wavelengths.min()):
print("Input wavelength outside wavelength range!")
band = None
else:
band_num = np.argmin(np.abs(self.wavelengths - wave))
band = self.get_band(band_num,corrections= corrections, mask=mask)
return band
def get_pixels(self,lines,columns,corrections= [],resample = False):
"""
Args:
lines (list): List of zero-indexed line indices.
columns (list): List of zero-indexed column indices.
Returns:
numpy.ndarray: Pixel array (pixels,bands).
"""
self.load_data()
if self.file_type in ["neon","emit"]:
pixels = []
for line,column in zip(lines,columns):
pixels.append(self.data[line,column,:])
pixels = np.array(pixels)
elif self.file_type == "ncav":
pixels = []
for line,column in zip(lines,columns):
pixels.append(self.data[:,line,column])
pixels = np.array(pixels)
elif self.file_type == "envi":
pixels = envi_read_pixels(self.data,lines,columns,self.interleave)
if self.endianness != sys.byteorder:
pixels = pixels.byteswap()
self.close_data()
pixels = self.correct(pixels,'pixels',
[lines,columns],corrections)
if resample:
pixels = pixels[np.newaxis,:,~self.bad_bands]
pixels = apply_resampler(self,pixels)[0,:,:]
return pixels
def get_line(self,index, corrections= [],resample = False):
"""
Args:
index (int): Zero-indexed line index.
Returns:
numpy.ndarray: Line array (columns, bands).
"""
self.load_data()
if self.file_type == "neon" or self.file_type == "emit":
line = self.data[index,:,:]
elif self.file_type == "ncav":
line = np.moveaxis(self.data[:,index,:],0,1)
elif self.file_type == "envi":
line = envi_read_line(self.data,index,self.interleave)
if self.endianness != sys.byteorder:
line = line.byteswap()
self.close_data()
line = self.correct(line,'line',index,corrections)
if resample:
line = line[np.newaxis,:,~self.bad_bands]
line = apply_resampler(self,line)[0,:,:]
return line
def get_column(self,index,corrections = [],resample = False):
"""
Args:
index (int): Zero-indexed column index.
Returns:
numpy.ndarray: Column array (lines, bands).
"""
self.load_data()
if self.file_type == "neon" or self.file_type == "emit":
column = self.data[:,index,:]
elif self.file_type == "ncav":
column = np.moveaxis(self.data[:,:,index],0,1)
elif self.file_type == "envi":
column = envi_read_column(self.data,index,self.interleave)
if self.endianness != sys.byteorder:
column = column.byteswap()
self.close_data()
column = self.correct(column,'column',index,corrections)
if resample:
column = column[:,np.newaxis,~self.bad_bands]
column = apply_resampler(self,column)[:,0,:]
return column
def get_chunk(self,col_start,col_end,line_start,line_end, corrections= [],resample = False):
"""
Args:
col_start (int): Chunk starting column.
col_end (int): Noninclusive chunk ending column index.
line_start (int): Chunk starting line.
line_end (int): Noninclusive chunk ending line index.
corrections(list): Corrections to apply, will be applied in
order listed.
resample (bool): Resample wavelengths. Defaults to False.
Returns:
numpy.ndarray: Chunk array (line_end-line_start,col_end-col_start,bands).
"""
self.load_data()
if self.file_type == "neon" or self.file_type == "emit":
chunk = self.data[line_start:line_end,col_start:col_end,:]
elif self.file_type == "ncav":
chunk = np.moveaxis(self.data[:,line_start:line_end,col_start:col_end],0,-1)
elif self.file_type == "envi":
chunk = envi_read_chunk(self.data,col_start,col_end,
line_start,line_end,self.interleave)
if self.endianness != sys.byteorder:
chunk = chunk.byteswap()
self.close_data()
chunk = self.correct(chunk,'chunk',
[col_start,col_end,line_start,line_end],
corrections)
if resample:
chunk = apply_resampler(self,chunk[:,:,~self.bad_bands])
return chunk
def correct(self,data,dimension,index,corrections):
for correction in corrections:
if correction == 'topo':
data = apply_topo_correct(self,data,dimension,index)
elif correction == 'brdf':
data = apply_brdf_correct(self,data,dimension,index)
elif correction == 'glint':
data = apply_glint_correct(self,data,dimension,index)
return data
def get_anc(self,anc,radians = True,mask = None):
"""Read ancillary datasets to memory.
Args:
anc (str): Ancillary dataset name.
radians (bool, optional): Convert angular measures to radians. Defaults to True.
Returns:
anc_data (numpy.ndarray)
"""
angular_anc = ['slope','sensor_az','sensor_zn','aspect','solar_zn','solar_az']
if self.file_type == "envi":
ancillary = HyTools()
ancillary.read_file(self.anc_path[anc][0],'envi')
ancillary.load_data()
anc_data = np.copy(ancillary.get_band(self.anc_path[anc][1]))
if ancillary.endianness != sys.byteorder:
anc_data = anc_data.byteswap()
ancillary.close_data()
elif self.file_type == "neon":
keys = self.anc_path[anc]
if len(keys)==2 and isinstance(keys[-1],int):
ancillary = HyTools()
ancillary.read_file(self.anc_path[anc][0],'envi')
ancillary.load_data()
anc_data = np.copy(ancillary.get_band(self.anc_path[anc][1]))
if ancillary.endianness != sys.byteorder:
anc_data = anc_data.byteswap()
ancillary.close_data()
else:
hdf_obj = h5py.File(self.file_name,'r')
metadata = hdf_obj[self.base_key]["Reflectance"]["Metadata"]
for key in keys:
metadata = metadata[key]
anc_data = metadata[()]
hdf_obj.close()
#Make solar geometry into 2D array
if anc in ['solar_zn','solar_az']:
anc_data = np.ones((self.lines, self.columns)) * anc_data
elif self.file_type in ["emit","ncav"]:
if bool(self.anc_path)==False:
return None
else:
if (self.anc_path[anc][0]).endswith('nc'):
nc4_anc_obj = h5py.File(self.anc_path[anc][0],'r')
if self.file_type == "emit":
anc_data = nc4_anc_obj['obs'][()][:,:,self.anc_path[anc][1]]
elif self.file_type == "ncav":
anc_data_raw = nc4_anc_obj['observation_parameters'][self.anc_path[anc][1]][()]
obs_glt_x = np.abs(nc4_anc_obj['geolocation_lookup_table']['sample'][()]) # some values in the GLT are negative for unknown reason
obs_glt_y = np.abs(nc4_anc_obj['geolocation_lookup_table']['line'][()])
anc_data = np.zeros(obs_glt_x.shape)
anc_data[obs_glt_x<=0] = nc4_anc_obj['observation_parameters'][self.anc_path[anc][1]].attrs['_FillValue'][0] # -9999
data_mask_to_fill = obs_glt_x>0
anc_data[data_mask_to_fill] = anc_data_raw[obs_glt_y[data_mask_to_fill].astype(int)-1,obs_glt_x[data_mask_to_fill].astype(int)-1]
nc4_anc_obj.close()
else:
ancillary = HyTools()
ancillary.read_file(self.anc_path[anc][0],'envi')
ancillary.load_data()
anc_data = np.copy(ancillary.get_band(self.anc_path[anc][1]))
if ancillary.endianness != sys.byteorder:
anc_data = anc_data.byteswap()
ancillary.close_data()
if radians and (anc in angular_anc):
anc_data= np.radians(anc_data)
if mask:
anc_data = anc_data[self.mask[mask]]
return anc_data
def load_anc(self,anc,radians = True):
        self.ancillary[anc] = self.get_anc(anc,radians)
def load_glt(self,glt):
# check if GLT inside nc is used
if self.glt_path[glt][0] in ['location','geolocation_lookup_table']:
glt_data = self.nc4_obj[self.glt_path[glt][0]][self.glt_path[glt][1]][()]
elif self.glt_path[glt][0] is None:
return None
else:
glt_img = HyTools()
glt_img.read_file(self.glt_path[glt][0],'envi')
glt_img.load_data()
glt_data = np.copy(glt_img.get_band(self.glt_path[glt][1]))
if glt_img.endianness != sys.byteorder:
glt_data = glt_data.byteswap()
glt_img.close_data()
return glt_data
def volume_kernel(self,kernel):
"""Calculate volume scattering kernel.
"""
return calc_volume_kernel(self.get_anc('solar_az'), self.get_anc('solar_zn'),
self.get_anc('sensor_az'), self.get_anc('sensor_zn'),
kernel)
def geom_kernel(self,kernel,b_r=1.,h_b =2.):
"""Calculate volume scattering kernel.
"""
return calc_geom_kernel(self.get_anc('solar_az'),self.get_anc('solar_zn'),
self.get_anc('sensor_az'),self.get_anc('sensor_zn'),
kernel,b_r=b_r,h_b =h_b)
def cosine_i(self):
""" Calculate the cosine of the solar incidence angle. Assumes
path to required ancillary datasets have been specified.
Returns:
cos_i numpy.ndarray: Cosine of solar incidence angle.
"""
cos_i = calc_cosine_i(self.get_anc('solar_zn'), self.get_anc('solar_az'),
self.get_anc('aspect') ,self.get_anc('slope'))
return cos_i
def ndi(self,wave1= 850,wave2 = 660,mask = None):
""" Calculate normalized difference index.
Defaults to NDVI. Assumes input wavelengths are in
nanometers
Args:
wave1 (int,float): Wavelength of first band. Defaults to 850.
wave2 (int,float): Wavelength of second band. Defaults to 660.
mask (bool): Mask data
Returns:
ndi numpy.ndarray:
"""
wave1 = self.get_wave(wave1)
wave2 = self.get_wave(wave2)
ndi = (wave1-wave2)/(wave1+wave2)
if mask:
ndi = ndi[self.mask[mask]]
return ndi
def set_mask(self,mask,name):
"""Generate mask using masking function which takes a HyTools object as
an argument.
"""
self.mask[name] = mask
def gen_mask(self,masker,name,args = None):
"""Generate mask using masking function which takes a HyTools object as
an argument.
"""
if args:
self.mask[name] = masker(self,args)
else:
self.mask[name] = masker(self)
def do(self,function,args = None):
"""Run a function and return the results.
"""
if args:
return function(self, args)
else:
return function(self)
def get_header(self,warp_glt = False):
""" Return header dictionary
"""
if self.file_type == "neon":
header_dict = envi_header_from_neon(self)
elif self.file_type == "emit" or self.file_type == "ncav":
header_dict = envi_header_from_nc(self,warp_glt = warp_glt)
elif self.file_type == "envi":
header_dict = parse_envi_header(self.header_file)
header_dict["projection"] = self.projection
if "coordinate system string" in header_dict.keys():
header_dict["projection"] = header_dict["coordinate system string"]
header_dict['transform'] = self.transform
if warp_glt:
header_dict["samples"] = self.columns_glt
header_dict["lines"] = self.lines_glt
header_dict["map info"] = self.glt_map_info
header_dict["projection"] = self.glt_projection
header_dict['transform'] = self.glt_transform
return header_dict
def load_coeffs(self, coeff_file,kind):
with open(coeff_file, 'r') as outfile:
if kind == 'brdf':
self.brdf = json.load(outfile, cls =Decoder)
elif kind == 'topo':
self.topo = json.load(outfile, cls =Decoder)
class Iterator:
"""Iterator class
"""
def __init__(self,hy_obj,by,chunk_size = None,corrections = [],resample = False):
"""
Args:
hy_obj (Hytools object): Populated Hytools file object.
by (str): Iterator slice dimension: "line", "column", "band"",chunk".
chunk_size (tuple, optional): Chunk size. Defaults to None.
Iterator cannot be pickled when reading HDF files.
Returns:
None.
"""
self.chunk_size= chunk_size
self.by = by
self.current_column = -1
self.current_line = -1
self.current_band = -1
self.complete = False
self.hy_obj = hy_obj
self.resample = resample
self.corrections = corrections
def read_next(self):
""" Return next line/column/band/chunk.
"""
if self.by == "line":
self.current_line +=1
if self.current_line == self.hy_obj.lines-1:
self.complete = True
subset = self.hy_obj.get_line(self.current_line,
corrections =self.corrections,
resample = self.resample)
elif self.by == "column":
self.current_column +=1
if self.current_column == self.hy_obj.columns-1:
self.complete = True
subset = self.hy_obj.get_column(self.current_column,
corrections =self.corrections,
resample = self.resample)
elif self.by == "band":
self.current_band +=1
if self.current_band == self.hy_obj.bands-1:
self.complete = True
subset = self.hy_obj.get_band(self.current_band,
corrections =self.corrections)
elif self.by == "chunk":
if self.current_column == -1:
self.current_column +=1
self.current_line +=1
else:
self.current_column += self.chunk_size[1]
if self.current_column >= self.hy_obj.columns:
self.current_column = 0
self.current_line += self.chunk_size[0]
y_start = self.current_line
y_end = self.current_line + self.chunk_size[0]
if y_end >= self.hy_obj.lines:
y_end = self.hy_obj.lines
x_start = self.current_column
x_end = self.current_column + self.chunk_size[1]
if x_end >= self.hy_obj.columns:
x_end = self.hy_obj.columns
if (y_end == self.hy_obj.lines) and (x_end == self.hy_obj.columns):
self.complete = True
subset = self.hy_obj.get_chunk(x_start,x_end, y_start,y_end,
corrections =self.corrections,
resample = self.resample)
elif self.by == "glt_line":
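            # Warp one output (GLT) line: each valid cell pulls its source pixel
            # via the 1-based glt_y/glt_x lookup; unfilled cells stay -9999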
self.current_line +=1
if self.current_line == self.hy_obj.lines-1:
self.complete = True
valid_mask=self.hy_obj.fill_mask[self.current_line,:]
valid_subset = self.hy_obj.get_pixels(
self.hy_obj.glt_y[self.current_line,valid_mask]-1,self.hy_obj.glt_x[self.current_line,valid_mask]-1,
corrections = self.corrections,
resample = self.resample)
subset = np.full((self.hy_obj.columns_glt,valid_subset.shape[1]),-9999).astype(np.float32)
subset[valid_mask,:] = valid_subset
return subset
def reset(self):
"""Reset counters.
"""
self.current_column = -1
self.current_line = -1
self.current_band = -1
self.complete = False
class Decoder(json.JSONDecoder):
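    """JSON decoder that recursively converts integer-like string values back to
    ints (floats and other strings pass through); dict keys are left unchanged."""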
def decode(self, s):
result = super().decode(s) # result = super(Decoder, self).decode(s) for Python 2.x
return self._decode(result)
def _decode(self, o):
if isinstance(o, str):
try:
return int(o)
except ValueError:
return o
elif isinstance(o, dict):
return {k: self._decode(v) for k, v in o.items()}
elif isinstance(o, list):
return [self._decode(v) for v in o]
else:
            return o
@ -0,0 +1,19 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
The :mod:`hytools.correction` module contains functions for image correction.
"""
from .brdf import *
from .kernels import *
from .universal import *
from .flex import *
@ -0,0 +1,246 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
BRDF Correction
"""
import json
import ray
import numpy as np
import h5py
from .universal import universal_brdf,apply_universal
from .flex import flex_brdf,apply_flex,ndvi_stratify, get_kernel_samples, ndvi_bins, get_band_samples
from ..masks import mask_create
from ..misc import set_brdf, update_brdf, progbar
def apply_brdf_correct(hy_obj,data,dimension,index):
    ''' Apply BRDF correction in memory.
'''
if hy_obj.brdf['type'] == 'universal':
data = apply_universal(hy_obj,data,dimension,index)
elif hy_obj.brdf['type'] == 'flex':
data = apply_flex(hy_obj,data,dimension,index)
elif hy_obj.brdf['type'] == 'local':
print('Local/class BRDF correction....under development')
return data
def load_brdf_precomputed(hy_obj,brdf_dict):
with open(brdf_dict['coeff_files'][hy_obj.file_name], 'r') as outfile:
hy_obj.brdf = json.load(outfile)
def set_solar_zn(hy_obj):
"""设置太阳天顶角归一化值"""
solar_zn = hy_obj.get_anc('solar_zn')
solar_zn = np.mean(solar_zn[hy_obj.mask['no_data']])
hy_obj.brdf['solar_zn_norm_radians'] = float(solar_zn)
return solar_zn
def ndvi_stratify_samples(combine_dict):
    '''Create NDVI bin stratification mask
'''
ndvi = combine_dict["ndi_samples"]
class_mask = np.zeros(ndvi.shape)
for bin_num in combine_dict['brdf_dict']['bins']:
start,end = combine_dict['brdf_dict']['bins'][bin_num]
class_mask[(ndvi > start) & (ndvi <= end)] = bin_num
class_mask = class_mask.astype(np.int8)
combine_dict['ndvi_classes'] = class_mask
def get_topo_var_samples_pre(hy_obj):
    '''Get grouped topographic correction variables; run after ndvi_stratify()
'''
slope = hy_obj.get_anc('slope')
cosine_i = hy_obj.cosine_i()
sample_ind = (hy_obj.ancillary['ndvi_classes'] !=0)
return slope[sample_ind], cosine_i[sample_ind]
def calc_flex_single_post(combine_data_dict,brdf_dict,load_reflectance_mode):
combine_data_dict["brdf_dict"] = brdf_dict
bad_bands = combine_data_dict['bad_bands']
    # Determine bin dimensions and create class mask
if brdf_dict['bin_type'] == 'dynamic':
bins = ndvi_bins(combine_data_dict["ndi_samples"],brdf_dict)
        # Update the number of bins
#print(bins)
combine_data_dict["brdf_dict"]['num_bins']=len(bins) #hy_obj.brdf['num_bins'] = len(bins)
else:
bins = brdf_dict['bins']
combine_data_dict['brdf_dict']['bins'] = {k:v for (k,v) in enumerate(bins,start=1)}
ndvi_stratify_samples(combine_data_dict)
coeffs = {}
good_band_count=0
for band_num,band in enumerate(bad_bands):
if ~band:
coeffs[band_num] = {}
if load_reflectance_mode==0:
band_samples = combine_data_dict["reflectance_samples"][:,good_band_count] #ray.get([a.do.remote(get_band_samples,
#{'band_num':band_num}) for a in actors])
else:
combine_refl = []
for h5name in combine_data_dict["reflectance_samples"]:
h5_obj = h5py.File(h5name, "r")
sub_refl_samples = h5_obj["reflectance_samples"][()][:,good_band_count]
combine_refl += [sub_refl_samples]
h5_obj.close()
band_samples = np.concatenate(combine_refl,axis=0)
band_coeffs= []
for bin_num in combine_data_dict['brdf_dict']['bins']:
bin_mask = (combine_data_dict["ndvi_classes"]== bin_num)
X = np.concatenate([combine_data_dict["kernels_samples"],np.ones((bin_mask.shape[0],1))],axis=1)[bin_mask] #kernel_samples[:,:3][bin_mask]
y = band_samples[bin_mask]
band_coeffs.append(np.linalg.lstsq(X, y,rcond=-1)[0].flatten().tolist())
coeffs[band_num] = band_coeffs
progbar(np.sum(~bad_bands[:band_num+1]),np.sum(~bad_bands))
good_band_count+=1
print('\n')
combine_data_dict["brdf_dict"]['coeffs'] = coeffs
def calc_flex_single_pre(hy_obj,brdf_dict):
    ''' Collect samples from a single image for later BRDF coefficient estimation
'''
hy_obj.brdf['coeffs'] ={}
    # Determine bin dimensions and create class mask
if hy_obj.brdf['bin_type'] == 'dynamic':
bins = ndvi_bins(hy_obj.ndi()[hy_obj.mask['no_data']],brdf_dict)
        # Update the number of bins
hy_obj.brdf['num_bins'] = len(bins)
else:
bins = brdf_dict['bins']
hy_obj.brdf['bins'] = {k:v for (k,v) in enumerate(bins,start=1)}
ndvi_stratify(hy_obj)
kernel_samples= get_kernel_samples(hy_obj)
    # Loop over each band
refl_samples_list = []
used_band = []
for band_num,band in enumerate(hy_obj.bad_bands):
if ~band:
band_samples = hy_obj.do(get_band_samples, {'band_num':band_num})
refl_samples_list+=[band_samples[:,None]]
used_band+=[hy_obj.wavelengths[band_num]]
refl_samples = np.concatenate(refl_samples_list,axis=1)
    slope_samples, cos_i_samples = get_topo_var_samples_pre(hy_obj) # slope and cosine of incidence angle
return kernel_samples[:,:2], refl_samples, used_band, slope_samples, cos_i_samples
def calc_brdf_coeffs(actors,config_dict):
brdf_dict = config_dict['brdf']
if brdf_dict['type'] == 'precomputed':
print("使用预计算的 BRDF 系数")
_ = ray.get([a.do.remote(load_brdf_precomputed,
config_dict['brdf']) for a in actors])
else:
        # Set BRDF dictionary
_ = ray.get([a.do.remote(set_brdf,brdf_dict) for a in actors])
        # Create mask for coefficient calculation
_ = ray.get([a.gen_mask.remote(mask_create,'calc_brdf',
brdf_dict['calc_mask']) for a in actors])
        # Calculate mean solar zenith angle
if isinstance(brdf_dict['solar_zn_type'],str):
            # Assign the per-line mean solar zenith angle
solar_zn_samples = ray.get([a.do.remote(set_solar_zn) for a in actors])
            # Calculate and assign the scene-wide mean solar zenith angle
if brdf_dict['solar_zn_type'] == 'scene':
scene_mean = float(np.mean(solar_zn_samples))
_ = ray.get([a.do.remote(update_brdf,{'key':'solar_zn_norm_radians',
'value': scene_mean }) for a in actors])
print("场景平均太阳天顶角 : %s" % round(np.degrees(scene_mean),3))
elif isinstance(brdf_dict['solar_zn_type'],float):
_ = ray.get([a.do.remote(update_brdf,{'key':'solar_zn_norm_radians',
'value': brdf_dict['solar_zn_type']}) for a in actors])
else:
            print('Unrecognized solar zenith normalization')
print("计算 BRDF 系数")
if brdf_dict['type']== 'universal':
universal_brdf(actors,config_dict)
elif brdf_dict['type'] == 'flex':
flex_brdf(actors,config_dict)
elif brdf_dict['type'] == 'local':
            print('Local/class BRDF correction....under development')
_ = ray.get([a.do.remote(lambda x: x.corrections.append('brdf')) for a in actors])
def calc_brdf_coeffs_pre(hy_obj,config_dict):
brdf_dict = config_dict['brdf']
if brdf_dict['type'] == 'precomputed':
print("使用预计算的 BRDF 系数")
load_brdf_precomputed(hy_obj,config_dict['brdf'])
else:
        # Set BRDF dictionary
set_brdf(hy_obj,brdf_dict)
set_solar_zn_0 = set_solar_zn(hy_obj)
        # Create mask for coefficient calculation
hy_obj.gen_mask(mask_create,'calc_brdf',brdf_dict['calc_mask'])
kernel_samples, reflectance_samples, used_band, slope_samples, cos_i_samples = calc_flex_single_pre(hy_obj,brdf_dict)
hy_obj.corrections.append('brdf')
return {
"set_solar_zn":set_solar_zn_0,
#"ndvi":hy_obj.ndi(),
"kernel_samples":kernel_samples,
"reflectance_samples":reflectance_samples,
"used_band":used_band,
"slope_samples":slope_samples,
"cos_i_samples":cos_i_samples,
    }
@ -0,0 +1,363 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
This module contains functions for applying the empirical BRDF correction described in the papers below.
Equations and constants can be found in the following papers:
"""
import numpy as np
import ray
from scipy.interpolate import interp1d
from .kernels import calc_volume_kernel,calc_geom_kernel
from ..masks import mask_create
from ..misc import progbar, pairwise
from ..misc import update_brdf
from ..plotting import flex_diagno_plot
def flex_brdf(actors,config_dict):
brdf_dict= config_dict['brdf']
if brdf_dict['grouped']:
calc_flex_group(actors,brdf_dict)
else:
_ = ray.get([a.do.remote(calc_flex_single,brdf_dict) for a in actors])
if "diagnostic_plots" in brdf_dict:
if brdf_dict['diagnostic_plots']:
print('Exporting diagnostic plots.')
_ = ray.get([a.do.remote(flex_diagno_plot,config_dict) for a in actors])
def ndvi_stratify(hy_obj):
    '''Create NDVI bin stratification mask
'''
ndvi = hy_obj.ndi()
class_mask = np.zeros((hy_obj.lines, hy_obj.columns))
for bin_num in hy_obj.brdf['bins']:
start,end = hy_obj.brdf['bins'][bin_num]
class_mask[(ndvi > start) & (ndvi <= end)] = bin_num
class_mask[~hy_obj.mask['calc_brdf']] = 0
#Subsample data
idx = np.array(np.where(class_mask!=0)).T
idxRand= idx[np.random.choice(range(len(idx)),int(len(idx)*(1-hy_obj.brdf['sample_perc'])), replace = False)].T
class_mask[idxRand[0],idxRand[1]] = 0
class_mask = class_mask.astype(np.int8)
hy_obj.ancillary['ndvi_classes'] = class_mask
def ndvi_2nd_split(ndvi_bins_dynamic, all_ndvi_array, ndvi_bin_range_thres=0.15):
    ''' Perform a second split of the NDVI bins
'''
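    # Note: the threshold is recomputed from the current bin count below,
    # overriding the ndvi_bin_range_thres keyword default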
ndvi_bin_range_thres = -0.015625 * (len(ndvi_bins_dynamic)-1) + 0.43125
ndvi_bin_range = np.array(ndvi_bins_dynamic[1:]) - np.array(ndvi_bins_dynamic[:-1])
bin_for_split = np.argwhere(ndvi_bin_range>=ndvi_bin_range_thres).ravel()
new_break = []
if bin_for_split.shape[0]>0:
for bin_id in bin_for_split:
# Use median of the bin as the new break point
new_break += [np.median(all_ndvi_array[(all_ndvi_array > ndvi_bins_dynamic[bin_id]) & (all_ndvi_array < ndvi_bins_dynamic[bin_id+1])]).astype(np.float64)]
# New list of bin break points
ndvi_bins_dynamic = sorted(ndvi_bins_dynamic + new_break)
return ndvi_bins_dynamic
def ndvi_bins(ndvi,brdf_dict):
    '''Calculate NDVI bin ranges
'''
perc_range = brdf_dict['ndvi_perc_max'] - brdf_dict['ndvi_perc_min'] + 1
ndvi_break_dyn_bin = np.percentile(ndvi[ndvi > 0],
np.arange(brdf_dict['ndvi_perc_min'],
brdf_dict['ndvi_perc_max'] + 1,
perc_range / (brdf_dict['num_bins'] - 1)))
ndvi_thres = [brdf_dict['ndvi_bin_min']]
ndvi_thres += ndvi_break_dyn_bin.tolist()
ndvi_thres += [brdf_dict['ndvi_bin_max']]
ndvi_thres = sorted(list(set(ndvi_thres)))
    # Perform a second split of the NDVI bins
ndvi_thres = ndvi_2nd_split(ndvi_thres, ndvi)
bins = [[x,y] for x,y in pairwise(ndvi_thres)]
return bins
def get_kernel_samples(hy_obj):
    '''Calculate and sample BRDF kernels
'''
geom_kernel = hy_obj.geom_kernel(hy_obj.brdf['geometric'],
b_r=hy_obj.brdf["b/r"] ,
h_b =hy_obj.brdf["h/b"])
geom_kernel = geom_kernel[hy_obj.ancillary['ndvi_classes'] !=0]
vol_kernel = hy_obj.volume_kernel(hy_obj.brdf['volume'])
vol_kernel = vol_kernel[hy_obj.ancillary['ndvi_classes'] !=0]
classes = hy_obj.ancillary['ndvi_classes'][hy_obj.ancillary['ndvi_classes'] !=0]
X = np.vstack([vol_kernel,geom_kernel,
np.ones(vol_kernel.shape),classes]).T
return X
def get_band_samples(hy_obj,args):
band = hy_obj.get_band(args['band_num'],
corrections = hy_obj.corrections)
return band[hy_obj.ancillary['ndvi_classes'] !=0]
def calc_flex_single(hy_obj,brdf_dict):
    ''' Calculate BRDF coefficients for a single image
'''
hy_obj.brdf['coeffs'] ={}
    # Determine bin dimensions and create class mask
if hy_obj.brdf['bin_type'] == 'dynamic':
bins = ndvi_bins(hy_obj.ndi()[hy_obj.mask['no_data']],brdf_dict)
        # Update the number of bins
hy_obj.brdf['num_bins'] = len(bins)
else:
bins = brdf_dict['bins']
hy_obj.brdf['bins'] = {k:v for (k,v) in enumerate(bins,start=1)}
ndvi_stratify(hy_obj)
kernel_samples= get_kernel_samples(hy_obj)
    # Calculate coefficients for each band and class
for band_num,band in enumerate(hy_obj.bad_bands):
if ~band:
hy_obj.brdf['coeffs'][band_num] = {}
band_samples = hy_obj.do(get_band_samples, {'band_num':band_num})
coeffs= []
for bin_num in hy_obj.brdf['bins']:
bin_mask = (kernel_samples[:,3] == bin_num)
X = kernel_samples[:,:3][bin_mask]
y = band_samples[bin_mask]
coeffs.append(np.linalg.lstsq(X, y,rcond=-1)[0].flatten().tolist())
hy_obj.brdf['coeffs'][band_num] = coeffs
def calc_flex_group(actors,brdf_dict):
    ''' Calculate BRDF coefficients for a group of images
'''
    # Aggregate NDVI values across images
ndvi = ray.get([a.ndi.remote(mask = 'no_data') for a in actors])
ndvi = np.concatenate([n.flatten() for n in ndvi])
    # Determine bin dimensions
if brdf_dict['bin_type'] == 'dynamic':
bins = ndvi_bins(ndvi,brdf_dict)
        # Update the number of bins
_ = ray.get([a.do.remote(update_brdf,{'key':'num_bins',
'value': len(bins)}) for a in actors])
else:
bins = brdf_dict['bins']
bins = {k:v for (k,v) in enumerate(bins,start=1)}
    # Update BRDF bins
_ = ray.get([a.do.remote(update_brdf,{'key':'bins',
'value': bins}) for a in actors])
    # Create NDVI class masks and sample kernels
_ = ray.get([a.do.remote(ndvi_stratify) for a in actors])
kernel_samples = ray.get([a.do.remote(get_kernel_samples) for a in actors])
kernel_samples = np.concatenate(kernel_samples)
bad_bands = ray.get(actors[0].do.remote(lambda x: x.bad_bands))
coeffs = {}
for band_num,band in enumerate(bad_bands):
if ~band:
coeffs[band_num] = {}
band_samples = ray.get([a.do.remote(get_band_samples,
{'band_num':band_num}) for a in actors])
band_samples = np.concatenate(band_samples)
band_coeffs= []
for bin_num in bins:
bin_mask = (kernel_samples[:,3] == bin_num)
X = kernel_samples[:,:3][bin_mask]
y = band_samples[bin_mask]
band_coeffs.append(np.linalg.lstsq(X, y,rcond=-1)[0].flatten().tolist())
coeffs[band_num] = band_coeffs
progbar(np.sum(~bad_bands[:band_num+1]),np.sum(~bad_bands))
print('\n')
    # Update BRDF coefficients
_ = ray.get([a.do.remote(update_brdf,{'key':'coeffs',
'value': coeffs}) for a in actors])
def apply_flex(hy_obj,data,dimension,index):
    ''' Apply flex BRDF correction to a slice of the data
        Args:
            hy_obj : HyTools class object.
            data (np.ndarray): Data slice.
            index (int,list): Data index.
        Returns:
            data (np.ndarray): BRDF-corrected data slice.
'''
if 'k_vol' not in hy_obj.ancillary:
hy_obj.ancillary['k_vol'] = hy_obj.volume_kernel(hy_obj.brdf['volume'])
if 'k_geom' not in hy_obj.ancillary:
hy_obj.ancillary['k_geom'] = hy_obj.geom_kernel(hy_obj.brdf['geometric'],
b_r=hy_obj.brdf["b/r"],
h_b =hy_obj.brdf["h/b"])
if ('k_vol_nadir' not in hy_obj.ancillary) or ('k_geom_nadir' not in hy_obj.ancillary):
solar_zn = hy_obj.brdf['solar_zn_norm_radians'] * np.ones((hy_obj.lines,hy_obj.columns))
hy_obj.ancillary['k_vol_nadir'] = calc_volume_kernel(0,solar_zn,
0,0,hy_obj.brdf['volume'])
hy_obj.ancillary['k_geom_nadir'] = calc_geom_kernel(0,solar_zn,
0,0,hy_obj.brdf['geometric'],
b_r=hy_obj.brdf["b/r"],
h_b =hy_obj.brdf["h/b"])
if 'apply_brdf' not in hy_obj.mask:
hy_obj.gen_mask(mask_create,'apply_brdf',hy_obj.brdf['apply_mask'])
if 'ndvi' not in hy_obj.ancillary:
hy_obj.ancillary['ndvi'] = hy_obj.ndi()
if 'interpolators' not in hy_obj.ancillary:
bin_centers = np.mean(list(hy_obj.brdf['bins'].values()),axis=1)
hy_obj.ancillary['interpolators'] ={}
        # Generate interpolators
for i in hy_obj.brdf['coeffs']:
coeffs= np.array(hy_obj.brdf['coeffs'][i])
interpolator = interp1d(bin_centers, coeffs, kind = hy_obj.brdf['interp_kind'],
axis=0,fill_value="extrapolate")
hy_obj.ancillary['interpolators'][int(i)] = interpolator
    # Convert to float
data = data.astype(np.float32)
brdf_bands = [int(x) for x in hy_obj.ancillary['interpolators']]
if dimension == 'line':
# index= 3000
# data = hy_obj.get_line(3000)
interpolated_f = [hy_obj.ancillary['interpolators'][band](hy_obj.ancillary['ndvi'][index,:]) for band in brdf_bands]
interpolated_f = np.array(interpolated_f)
fvol, fgeo, fiso = interpolated_f[:,:,0], interpolated_f[:,:,1], interpolated_f[:,:,2]
brdf = fvol*hy_obj.ancillary['k_vol'][index,:]
brdf+= fgeo*hy_obj.ancillary['k_geom'][index,:]
brdf+= fiso
brdf_nadir = fvol*hy_obj.ancillary['k_vol_nadir'][index,:]
brdf_nadir+= fgeo*hy_obj.ancillary['k_geom_nadir'][index,:]
brdf_nadir+= fiso
correction_factor = brdf_nadir/brdf
correction_factor[:,~hy_obj.mask['apply_brdf'][index]] = 1
data[:,brdf_bands] = data[:,brdf_bands]*correction_factor.T
elif dimension == 'column':
#index= 300
#data = hy_obj.get_column(index)
interpolated_f = [hy_obj.ancillary['interpolators'][band](hy_obj.ancillary['ndvi'][:,index]) for band in brdf_bands]
interpolated_f = np.array(interpolated_f)
fvol, fgeo, fiso = interpolated_f[:,:,0], interpolated_f[:,:,1], interpolated_f[:,:,2]
brdf = fvol*hy_obj.ancillary['k_vol'][:,index]
brdf+= fgeo*hy_obj.ancillary['k_geom'][:,index]
brdf+= fiso
brdf_nadir = fvol*hy_obj.ancillary['k_vol_nadir'][:,index]
brdf_nadir+= fgeo*hy_obj.ancillary['k_geom_nadir'][:,index]
brdf_nadir+= fiso
correction_factor = brdf_nadir/brdf
correction_factor = np.moveaxis(correction_factor,0,1)
correction_factor[:,~hy_obj.mask['apply_brdf'][index]] = 1
data[:,brdf_bands] = data[:,brdf_bands]*correction_factor.T
elif (dimension == 'band') & (index in brdf_bands):
# index= 8
# data = hy_obj.get_band(index)
interpolated_f = hy_obj.ancillary['interpolators'][index](hy_obj.ancillary['ndvi'])
fvol, fgeo, fiso = interpolated_f[:,:,0], interpolated_f[:,:,1], interpolated_f[:,:,2]
brdf = fvol*hy_obj.ancillary['k_vol']
brdf += fgeo*hy_obj.ancillary['k_geom']
brdf += fiso
brdf_nadir = fvol*hy_obj.ancillary['k_vol_nadir']
brdf_nadir += fgeo*hy_obj.ancillary['k_geom_nadir']
brdf_nadir += fiso
correction_factor = brdf_nadir/brdf
correction_factor[~hy_obj.mask['apply_brdf']] = 1
data= data* correction_factor
elif dimension == 'chunk':
# index = 200,501,3000,3501
x1,x2,y1,y2 = index
# data = hy_obj.get_chunk(x1,x2,y1,y2)
interpolated_f = [hy_obj.ancillary['interpolators'][band](hy_obj.ancillary['ndvi'][y1:y2,x1:x2]) for band in brdf_bands]
interpolated_f = np.array(interpolated_f)
interpolated_f = np.swapaxes(interpolated_f,0,-1)
fvol, fgeo, fiso = interpolated_f[0,:,:,:], interpolated_f[1,:,:,:], interpolated_f[2,:,:,:]
brdf = fvol*hy_obj.ancillary['k_vol'][y1:y2,x1:x2,np.newaxis]
brdf+= fgeo*hy_obj.ancillary['k_geom'][y1:y2,x1:x2,np.newaxis]
brdf+= fiso
brdf_nadir = fvol*hy_obj.ancillary['k_vol_nadir'][y1:y2,x1:x2,np.newaxis]
brdf_nadir+= fgeo*hy_obj.ancillary['k_geom_nadir'][y1:y2,x1:x2,np.newaxis]
brdf_nadir+= fiso
correction_factor = brdf_nadir/brdf
correction_factor[~hy_obj.mask['apply_brdf'][y1:y2,x1:x2]] = 1
data[:,:,brdf_bands] = data[:,:,brdf_bands]*correction_factor
elif dimension == 'pixels':
# index = [[2000,2001],[200,501]]
y,x = index
# data = hy_obj.get_pixels(y,x)
interpolated_f = [hy_obj.ancillary['interpolators'][band](hy_obj.ancillary['ndvi'][y,x]) for band in brdf_bands]
interpolated_f = np.array(interpolated_f)
interpolated_f = np.swapaxes(interpolated_f,0,1)
fvol, fgeo, fiso = interpolated_f[:,:,0], interpolated_f[:,:,1], interpolated_f[:,:,2]
brdf = fvol*hy_obj.ancillary['k_vol'][y,x,np.newaxis]
brdf+= fgeo*hy_obj.ancillary['k_geom'][y,x,np.newaxis]
brdf+= fiso
brdf_nadir = fvol*hy_obj.ancillary['k_vol_nadir'][y,x,np.newaxis]
brdf_nadir+= fgeo*hy_obj.ancillary['k_geom_nadir'][y,x,np.newaxis]
brdf_nadir+= fiso
correction_factor = brdf_nadir/brdf
correction_factor[~hy_obj.mask['apply_brdf'][y,x]] = 1
data[:,brdf_bands] = data[:,brdf_bands]*correction_factor
    return data
@ -0,0 +1,167 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
This module contains functions to calculate BRDF scattering kernels.
Equations and constants can be found in the following papers:
Colgan, M. S., Baldeck, C. A., Feret, J. B., & Asner, G. P. (2012).
Mapping savanna tree species at ecosystem scales using support vector machine classification
and BRDF correction on airborne hyperspectral and LiDAR data.
Remote Sensing, 4(11), 3462-3480.
https://doi.org/10.3390/rs4113462
Lucht, W., Schaaf, C. B., & Strahler, A. H. (2000).
An algorithm for the retrieval of albedo from space using semiempirical BRDF models.
IEEE Transactions on Geoscience and Remote sensing, 38(2), 977-998.
https://doi.org/10.1109/36.841980
Maignan, F., Bréon, F. M., & Lacaze, R. (2004).
Bidirectional reflectance of Earth targets: Evaluation of analytical
models using a large set of spaceborne measurements with emphasis on the Hot Spot.
Remote Sensing of Environment, 90(2), 210-220.
https://doi.org/10.1016/j.rse.2003.12.006
Roujean, J. L., Leroy, M., & Deschamps, P. Y. (1992).
A bidirectional reflectance model of the Earth's surface for the correction
of remote sensing data.
Journal of Geophysical Research: Atmospheres, 97(D18), 20455-20468.
https://doi.org/10.1029/92JD01411
Schlapfer, D., Richter, R., & Feingersh, T. (2015).
Operational BRDF effects correction for wide-field-of-view optical scanners (BREFCOR).
IEEE Transactions on Geoscience and Remote Sensing, 53(4), 1855-1864.
https://doi.org/10.1109/TGRS.2014.2349946
Wanner, W., Li, X., & Strahler, A. H. (1995).
On the derivation of kernels for kernel-driven models of bidirectional reflectance.
Journal of Geophysical Research: Atmospheres, 100(D10), 21077-21089.
https://doi.org/10.1029/95JD02371
Zhang, X., Jiao, Z., Dong, Y., Zhang, H., Li, Y., He, D., ... & Chang, Y. (2018).
Potential investigation of linking PROSAIL with the ross-li BRDF model for
vegetation characterization.
Remote Sensing, 10(3), 437.
https://doi.org/10.3390/rs10030437
"""
import numpy as np
def calc_geom_kernel(solar_az,solar_zn,sensor_az,sensor_zn,kernel,b_r=1.,h_b =2.):
"""计算几何散射核函数。
常数 b_r (b/r) 和 h_b (h/b) 来自 Colgan 等人 RS 2012
替代方案包括 MODIS 规范:
b/r : 稀疏: 1, 密集: 2.5
h/b : 稀疏, 密集 : 2
所有输入几何单位必须以弧度为单位。
参数:
solar_az (numpy.ndarray): 太阳方位角。
solar_zn (numpy.ndarray): 太阳天顶角。
sensor_az (numpy.ndarray): 传感器视角方位角。
sensor_zn (numpy.ndarray): 传感器视角天顶角。
kernel (str): Li 几何散射核类型 [li_dense,li_sparse, roujean]。
b_r (float, 可选): 物体高度。默认为 10。
h_b (float, 可选): 物体形状。默认为 2。
返回:
numpy.ndarray: 几何散射核。
"""
relative_az = sensor_az - solar_az
# Eq. 37,52. Wanner et al. JGRA 1995
solar_zn_p = np.arctan(b_r * np.tan(solar_zn))
sensor_zn_p = np.arctan(b_r * np.tan(sensor_zn))
# Eq 50. Wanner et al. JGRA 1995
D = np.sqrt((np.tan(solar_zn_p)**2) + (np.tan(sensor_zn_p)**2) - 2*np.tan(solar_zn_p)*np.tan(sensor_zn_p)*np.cos(relative_az))
# Eq 49. Wanner et al. JGRA 1995
t_num = h_b * np.sqrt(D**2 + (np.tan(solar_zn_p)*np.tan(sensor_zn_p)*np.sin(relative_az))**2)
t_denom = (1/np.cos(solar_zn_p)) + (1/np.cos(sensor_zn_p))
t = np.arccos(np.clip(t_num/t_denom,-1,1))
# Eq 33,48. Wanner et al. JGRA 1995
O = (1/np.pi) * (t - np.sin(t)*np.cos(t)) * t_denom
# Eq 51. Wanner et al. JGRA 1995
cos_phase_p = np.cos(solar_zn_p)*np.cos(sensor_zn_p) + np.sin(solar_zn_p)*np.sin(sensor_zn_p)*np.cos(relative_az)
if kernel == 'li_sparse':
# Eq 32. Wanner et al. JGRA 1995
k_geom = O - (1/np.cos(solar_zn_p)) - (1/np.cos(sensor_zn_p)) + .5*(1+ cos_phase_p) * (1/np.cos(sensor_zn_p))
elif kernel == 'li_dense':
# Eq 47. Wanner et al. JGRA 1995
k_geom = (((1+cos_phase_p) * (1/np.cos(sensor_zn_p)))/ (t_denom - O)) - 2
elif kernel == 'li_sparse_r':
# Eq 39. Lucht et al. TGRS 2000
k_geom = O - (1/np.cos(solar_zn_p)) - (1/np.cos(sensor_zn_p)) + .5*(1+ cos_phase_p) * (1/np.cos(sensor_zn_p)) * (1/np.cos(solar_zn_p))
elif kernel == 'li_dense_r':
# Eq 5. Zhang et al. RS 2018 <-- Find a more original reference
k_geom = (((1+cos_phase_p) * (1/np.cos(sensor_zn_p)) * (1/np.cos(solar_zn_p)))/ (t_denom - O)) - 2
elif kernel == 'roujean':
# Eq 2 Roujean et al. JGR 1992
k_geom1 = (1/(2*np.pi)) * ((np.pi - relative_az)*np.cos(relative_az)+np.sin(relative_az)) *np.tan(solar_zn)*np.tan(sensor_zn)
k_geom2 = (1/np.pi) * (np.tan(solar_zn) + np.tan(sensor_zn) + np.sqrt(np.tan(solar_zn)**2 + np.tan(sensor_zn)**2 - 2*np.tan(solar_zn)*np.tan(sensor_zn)*np.cos(relative_az)))
k_geom = k_geom1 - k_geom2
else:
print("Unrecognized kernel type: %s" % kernel)
k_geom = None
return k_geom
def calc_volume_kernel(solar_az,solar_zn,sensor_az,sensor_zn,kernel):
"""计算体积散射核函数。
所有输入几何单位必须以弧度为单位。
参数:
solar_az (numpy.ndarray): 太阳方位角。
solar_zn (numpy.ndarray): 太阳天顶角。
sensor_az (numpy.ndarray): 传感器视角方位角。
sensor_zn (numpy.ndarray): 传感器视角天顶角。
kernel (str): 体积散射核类型 [ross_thick,ross_thin]。
返回:
numpy.ndarray: 体积散射核。
"""
relative_az = sensor_az - solar_az
# Eq 2. Schlapfer et al. IEEE-TGARS 2015
phase = np.arccos(np.cos(solar_zn)*np.cos(sensor_zn) + np.sin(solar_zn)*np.sin(sensor_zn)* np.cos(relative_az))
if kernel == 'ross_thin':
# Eq 13. Wanner et al. JGRA 1995
k_vol = ((np.pi/2 - phase)*np.cos(phase) + np.sin(phase))/ (np.cos(sensor_zn)*np.cos(solar_zn)) - (np.pi/2)
elif kernel == 'ross_thick':
# Eq 7. Wanner et al. JGRA 1995
k_vol = ((np.pi/2 - phase)*np.cos(phase) + np.sin(phase))/ (np.cos(sensor_zn)+np.cos(solar_zn)) - (np.pi/4)
elif kernel in ('hotspot','roujean'):
# Eq 8 Roujean et al. JGR 1992
k_vol1 = (4/(3*np.pi)) * (1/(np.cos(solar_zn) + np.cos(sensor_zn)))
k_vol2 = (((np.pi/2) - phase) * np.cos(phase) + np.sin(phase))
k_vol = k_vol1*(k_vol2- (1/3))
if kernel == 'hotspot':
# Eq. 12 Maignan et al. RSE 2004
k_vol = k_vol1* k_vol2 * (1 + (1 + (phase/np.radians(1.5)))**-1) - (1/3)
else:
print("Unrecognized kernel type: %s" % kernel)
k_vol = None
    return k_vol
@ -0,0 +1,21 @@
# -*- coding: utf-8 -*-
'''
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Placeholder for local/class BRDF correction....under development.
'''
@ -0,0 +1,220 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
This module contains functions for calculating and applying a single ("universal") set of multiplicative BRDF correction coefficients. Coefficients can be calculated per flightline or across multiple flightlines.
"""
from itertools import product
from copy import deepcopy
import numpy as np
import ray
from scipy.optimize import minimize
from .kernels import calc_volume_kernel,calc_geom_kernel
from ..misc import progbar
from ..misc import update_brdf
from ..masks import mask_create
from ..plotting import universal_diagno_plot
def universal_brdf(actors,config_dict):
brdf_dict = config_dict['brdf']
if brdf_dict['grouped']:
actors = calc_universal_group(actors)
else:
_ = ray.get([a.do.remote(calc_universal_single) for a in actors])
if brdf_dict['diagnostic_plots']:
print('Exporting diagnostic plots.')
_ = ray.get([a.do.remote(universal_diagno_plot,config_dict) for a in actors])
def sample_kernels(hy_obj):
    '''Calculate and sample BRDF kernels
'''
#Sample kernel images
geom_kernel = hy_obj.geom_kernel(hy_obj.brdf['geometric'],
b_r=hy_obj.brdf["b/r"],
h_b =hy_obj.brdf["h/b"])[hy_obj.mask['calc_brdf']]
vol_kernel = hy_obj.volume_kernel(hy_obj.brdf['volume'])[hy_obj.mask['calc_brdf']]
X = np.vstack([vol_kernel,geom_kernel,
np.ones(vol_kernel.shape)]).T
return X
def subsample_mask(hy_obj):
    '''Subsample and update the calculation mask
'''
if hy_obj.brdf['sample_perc'] < 1:
idx = np.array(np.where(hy_obj.mask['calc_brdf'])).T
idx_rand= idx[np.random.choice(range(len(idx)),
int(len(idx)*(1- hy_obj.brdf['sample_perc'])),
replace = False)].T
hy_obj.mask['calc_brdf'][idx_rand[0],idx_rand[1]] = False
def calc_universal_single(hy_obj):
    '''Calculate BRDF coefficients flightline by flightline.
'''
subsample_mask(hy_obj)
X = sample_kernels(hy_obj)
hy_obj.brdf['coeffs'] = {}
for band_num,band_bad in enumerate(hy_obj.bad_bands):
    if ~band_bad:
        y = hy_obj.get_band(band_num,
                            corrections = hy_obj.corrections, mask='calc_brdf')
        brdf_coeff = np.linalg.lstsq(X, y, rcond=None)[0].flatten().tolist()
hy_obj.brdf['coeffs'][band_num] = brdf_coeff
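# --- Illustrative sketch, not part of the original file: the per-band fit
# above solves y = f_vol*k_vol + f_geom*k_geom + f_iso by least squares, with
# X = [k_vol, k_geom, 1] stacked column-wise (see sample_kernels). Synthetic
# data recovers the known coefficients.
def _example_lstsq_fit():
    rng = np.random.default_rng(0)
    k_vol = rng.normal(size=1000)
    k_geom = rng.normal(size=1000)
    X = np.vstack([k_vol, k_geom, np.ones_like(k_vol)]).T
    y = 0.2*k_vol - 0.1*k_geom + 0.5      # synthetic reflectance
    return np.linalg.lstsq(X, y, rcond=None)[0]  # ~ [0.2, -0.1, 0.5]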
def calc_universal_group(actors):
'''Calculate BRDF coefficients using pooled data from all flight lines.
'''
_ = ray.get([a.do.remote(subsample_mask) for a in actors])
X = ray.get([a.do.remote(sample_kernels) for a in actors])
X = np.concatenate(X)
bad_bands = ray.get(actors[0].do.remote(lambda x: x.bad_bands))
corrections = ray.get(actors[0].do.remote(lambda x: x.corrections))
coeffs = {}
for band_num,band in enumerate(bad_bands):
if ~band:
y = ray.get([a.get_band.remote(band_num,mask='calc_brdf',
corrections = corrections) for a in actors])
y = np.concatenate(y)
coeffs[band_num] = np.linalg.lstsq(X, y, rcond=None)[0].flatten().tolist()
progbar(np.sum(~bad_bands[:band_num+1]),np.sum(~bad_bands))
print('\n')
# Update BRDF coefficients
_ = ray.get([a.do.remote(update_brdf,{'key':'coeffs',
'value': coeffs}) for a in actors])
return actors
def apply_universal(hy_obj,data,dimension,index):
''' Apply the universal BRDF correction to a slice of the data.
Args:
    hy_obj : HyTools class object.
    data (np.ndarray): Data slice.
    dimension (str): Slice dimension ('line', 'column', 'band', 'chunk' or 'pixels').
    index (int,list): Data index.
Returns:
    data (np.ndarray): BRDF-corrected data slice.
'''
if 'k_vol' not in hy_obj.ancillary:
hy_obj.ancillary['k_vol'] = hy_obj.volume_kernel(hy_obj.brdf['volume'])
if 'k_geom' not in hy_obj.ancillary:
hy_obj.ancillary['k_geom'] = hy_obj.geom_kernel(hy_obj.brdf['geometric'],
b_r=hy_obj.brdf["b/r"],
h_b =hy_obj.brdf["h/b"])
if ('k_vol_nadir' not in hy_obj.ancillary) or ('k_geom_nadir' not in hy_obj.ancillary):
solar_zn = hy_obj.brdf['solar_zn_norm_radians'] * np.ones((hy_obj.lines,hy_obj.columns))
hy_obj.ancillary['k_vol_nadir'] = calc_volume_kernel(0,solar_zn,
0,0,hy_obj.brdf['volume'])
hy_obj.ancillary['k_geom_nadir'] = calc_geom_kernel(0,solar_zn,
0,0,hy_obj.brdf['geometric'],
b_r=hy_obj.brdf["b/r"],
h_b =hy_obj.brdf["h/b"])
if 'apply_brdf' not in hy_obj.mask:
hy_obj.gen_mask(mask_create,'apply_brdf',hy_obj.brdf['apply_mask'])
brdf_bands = [int(x) for x in hy_obj.brdf['coeffs'].keys()]
fvol, fgeo, fiso = np.array([hy_obj.brdf['coeffs'][band] for band in hy_obj.brdf['coeffs'].keys()]).T
# Convert to float
data = data.astype(np.float32)
if dimension == 'line':
brdf = fvol[:,np.newaxis]*hy_obj.ancillary['k_vol'][[index],:]
brdf+= fgeo[:,np.newaxis]*hy_obj.ancillary['k_geom'][[index],:]
brdf+= fiso[:,np.newaxis]
brdf_nadir = fvol[:,np.newaxis]*hy_obj.ancillary['k_vol_nadir'][[index],:]
brdf_nadir+= fgeo[:,np.newaxis]*hy_obj.ancillary['k_geom_nadir'][[index],:]
brdf_nadir+= fiso[:,np.newaxis]
correction_factor = brdf_nadir/brdf
correction_factor[:,~hy_obj.mask['apply_brdf'][index,:]] = 1
data[:,brdf_bands] = data[:,brdf_bands]*correction_factor.T
elif dimension == 'column':
brdf = fvol[np.newaxis,:]*hy_obj.ancillary['k_vol'][:,[index]]
brdf+= fgeo[np.newaxis,:]*hy_obj.ancillary['k_geom'][:,[index]]
brdf+= fiso[np.newaxis,:]
brdf_nadir = fvol[np.newaxis,:]*hy_obj.ancillary['k_vol_nadir'][:,[index]]
brdf_nadir+= fgeo[np.newaxis,:]*hy_obj.ancillary['k_geom_nadir'][:,[index]]
brdf_nadir+= fiso[np.newaxis,:]
correction_factor = brdf_nadir/brdf
correction_factor[~hy_obj.mask['apply_brdf'][:,index],:] = 1
data[:,brdf_bands] = data[:,brdf_bands]*correction_factor.T
elif dimension == 'band':
fvol, fgeo, fiso = hy_obj.brdf['coeffs'][index]
brdf = fvol*hy_obj.ancillary['k_vol']
brdf += fgeo*hy_obj.ancillary['k_geom']
brdf+=fiso
brdf_nadir = fvol*hy_obj.ancillary['k_vol_nadir']
brdf_nadir+= fgeo*hy_obj.ancillary['k_geom_nadir']
brdf_nadir+= fiso
correction_factor = brdf_nadir/brdf
correction_factor[~hy_obj.mask['apply_brdf']] = 1
data= data* correction_factor
elif dimension == 'chunk':
x1,x2,y1,y2 = index
brdf = fvol[np.newaxis,np.newaxis,:]*hy_obj.ancillary['k_vol'][y1:y2,x1:x2,np.newaxis]
brdf+= fgeo[np.newaxis,np.newaxis,:]*hy_obj.ancillary['k_geom'][y1:y2,x1:x2,np.newaxis]
brdf+= fiso[np.newaxis,np.newaxis,:]
brdf_nadir = fvol[np.newaxis,np.newaxis,:]*hy_obj.ancillary['k_vol_nadir'][y1:y2,x1:x2,np.newaxis]
brdf_nadir+= fgeo[np.newaxis,np.newaxis,:]*hy_obj.ancillary['k_geom_nadir'][y1:y2,x1:x2,np.newaxis]
brdf_nadir+= fiso[np.newaxis,np.newaxis,:]
correction_factor = brdf_nadir/brdf
correction_factor[~hy_obj.mask['apply_brdf'][y1:y2,x1:x2]] = 1
data[:,:,brdf_bands] = data[:,:,brdf_bands]*correction_factor
elif dimension == 'pixels':
y,x = index
brdf = fvol[np.newaxis,:]*hy_obj.ancillary['k_vol'][y,x,np.newaxis]
brdf+= fgeo[np.newaxis,:]*hy_obj.ancillary['k_geom'][y,x,np.newaxis]
brdf+= fiso[np.newaxis,:]
brdf_nadir = fvol[np.newaxis,:]*hy_obj.ancillary['k_vol_nadir'][y,x,np.newaxis]
brdf_nadir+= fgeo[np.newaxis,:]*hy_obj.ancillary['k_geom_nadir'][y,x,np.newaxis]
brdf_nadir+= fiso[np.newaxis,:]
correction_factor = brdf_nadir/brdf
correction_factor[~hy_obj.mask['apply_brdf'][y,x]] = 1
data[:,brdf_bands] = data[:,brdf_bands]*correction_factor
return data
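# --- Illustrative sketch, not part of the original file: every branch above
# applies the same multiplicative normalization,
#   c = (f_iso + f_vol*k_vol_nadir + f_geom*k_geom_nadir)
#     / (f_iso + f_vol*k_vol + f_geom*k_geom),
#   refl_corrected = refl_observed * c.
# The numbers below are made up purely for demonstration.
def _example_correction_factor():
    f_vol, f_geom, f_iso = 0.02, 0.01, 0.40
    k_vol, k_geom = 0.15, -0.60        # observed geometry
    k_vol_n, k_geom_n = 0.05, -0.90    # nadir-normalized geometry
    c = (f_iso + f_vol*k_vol_n + f_geom*k_geom_n) / \
        (f_iso + f_vol*k_vol + f_geom*k_geom)
    return 0.35 * c                    # corrected reflectance for refl = 0.35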


@ -0,0 +1,25 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Evan Greenberg.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
The :mod:`hytools.correction` module includes functions for image correction.
"""
from .glint import *
from .gao_2021 import *
from .hedley_2005 import *
from .hochberg_2003 import *


@ -0,0 +1,216 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Evan Greenberg.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import numpy as np
from ..masks import mask_create
REFRACTIVE_INDICES = np.array([
[200, 1.396],
[225, 1.373],
[250, 1.362],
[275, 1.354],
[300, 1.349],
[325, 1.346],
[350, 1.343],
[375, 1.341],
[400, 1.339],
[425, 1.338],
[450, 1.337],
[475, 1.336],
[500, 1.335],
[525, 1.334],
[550, 1.333],
[575, 1.333],
[600, 1.332],
[625, 1.332],
[650, 1.331],
[675, 1.331],
[700, 1.331],
[725, 1.33],
[750, 1.33],
[775, 1.33],
[800, 1.329],
[825, 1.329],
[850, 1.329],
[875, 1.328],
[900, 1.328],
[925, 1.328],
[950, 1.327],
[975, 1.327],
[1000, 1.327],
[1200, 1.324],
[1400, 1.321],
[1600, 1.317],
[1800, 1.312],
[2000, 1.306],
[2200, 1.296],
[2400, 1.279],
[2600, 1.242],
[2650, 1.219],
[2700, 1.188],
[2750, 1.157],
[2800, 1.142],
[2850, 1.149],
[2900, 1.201],
[2950, 1.292],
[3000, 1.371]
])
def apply_gao_2021_correction(hy_obj, data, dimension, index):
"""
Glint correction algorithm following:
Gao BC, Li RR.
Correction of Sunglint Effects in High Spatial Resolution
Hyperspectral Imagery Using SWIR or NIR Bands and Taking Account of
Spectral Variation of Refractive Index of Water.
Adv Environ Eng Res 2021;2(3):16; doi:10.21926/aeer.2103017.
"""
if 'apply_glint' not in hy_obj.mask:
hy_obj.gen_mask(mask_create,'apply_glint',hy_obj.glint['apply_mask'])
if hy_obj.mask['apply_glint'].sum() == 0:
return data
hy_obj.glint['correction_band'] = hy_obj.wave_to_band(hy_obj.glint['correction_wave'])
if 'gao_b_simu' not in hy_obj.ancillary:
hy_obj.ancillary['gao_b_simu'] = get_b_simu(hy_obj)
if 'gao_rto' not in hy_obj.ancillary:
hy_obj.ancillary['gao_rto'] = get_rto(hy_obj)
if dimension == 'line':
rto_line = hy_obj.ancillary['gao_rto'][index, :]
rto_line = np.reshape(rto_line, (len(rto_line), 1))
correction = rto_line * hy_obj.ancillary['gao_b_simu']
elif dimension == 'column':
rto_col = hy_obj.ancillary['gao_rto'][:, index]
rto_col = np.reshape(rto_col, (len(rto_col), 1))
correction = rto_col * hy_obj.ancillary['gao_b_simu']
elif (dimension == 'band'):
correction = (
hy_obj.ancillary['gao_b_simu'][0, :][index]
* hy_obj.ancillary['gao_rto']
)
elif dimension == 'chunk':
x1, x2, y1, y2 = index
rto_chunk = hy_obj.ancillary['gao_rto'][y1:y2, x1:x2]
rto_chunk = np.reshape(
rto_chunk,
(
rto_chunk.shape[0],
rto_chunk.shape[1],
1
)
)
correction = rto_chunk * hy_obj.ancillary['gao_b_simu']
elif dimension == 'pixels':
y, x = index
rto_pixels = hy_obj.ancillary['gao_rto'][y, x]
rto_pixels = np.reshape(rto_pixels, (len(rto_pixels), 1))
correction = rto_pixels * hy_obj.ancillary['gao_b_simu']
return data - correction
def zenith_refracted(theta, n):
"""
Find zenith of the outgoing reflected light
n is the refractive index of water at a specific wavelength
"""
theta_p = np.degrees(
np.arcsin(np.sin(np.radians(theta)) / n)
)
return theta_p
def fresnel_reflectence(theta, theta_p):
"""
Uses the Fresnel equations to find the
fraction of incident light reflected
"""
theta_rad = np.radians(theta)
theta_p_rad = np.radians(theta_p)
return (
(
(np.sin(theta_rad - theta_p_rad)**2)
/ (np.sin(theta_rad + theta_p_rad)**2)
) + (
(np.tan(theta_rad - theta_p_rad)**2)
/ (np.tan(theta_rad + theta_p_rad)**2)
)
) / 2
def fresnel_spectra(theta, xs, ns):
"""
Solves for the spectrum of reflected light
according to Fresnel's equations
"""
spectra = []
for x in xs:
n = np.interp(x, ns[:, 0], ns[:, 1])
theta_p = zenith_refracted(theta, n)
spectra.append(fresnel_reflectence(theta, theta_p))
return np.array(spectra)
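# --- Illustrative sketch, not part of the original file: Fresnel reflectance
# of a flat water surface at 550 nm for a 40 degree incidence angle, using the
# refractive index table above. The result is roughly 0.02-0.03.
def _example_fresnel():
    n_550 = np.interp(550, REFRACTIVE_INDICES[:, 0], REFRACTIVE_INDICES[:, 1])
    theta_p = zenith_refracted(40.0, n_550)
    return fresnel_reflectence(40.0, theta_p)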
def get_b_simu(hy_obj):
b_simu = fresnel_spectra(
10**-5,
hy_obj.wavelengths,
REFRACTIVE_INDICES
)
return np.reshape(
b_simu, (1, len(b_simu))
)
def get_rto(hy_obj):
b_ref = hy_obj.get_wave(hy_obj.glint['correction_wave'])
b_ref_min = np.percentile(
b_ref[
(hy_obj.mask['apply_glint'])
& (b_ref > 0)
],
.0001
)
b_ref = b_ref - b_ref_min
rto = (
b_ref
/ hy_obj.ancillary['gao_b_simu'][0, :][hy_obj.glint['correction_band']]
)
rto[~hy_obj.mask['apply_glint']] = 0
return rto
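# --- Illustrative sketch, not part of the original file: the correction in
# apply_gao_2021_correction scales the simulated Fresnel spectrum per pixel,
# glint(pixel, band) = rto(pixel) * b_simu(band), then subtracts it. The
# numbers below are hypothetical.
def _example_gao_glint():
    b_simu_ref = 0.021        # simulated Fresnel value at the reference band
    rto = 0.004 / b_simu_ref  # pixel sits 0.004 above the dark-water floor
    b_simu_vis = 0.024        # simulated value at some visible band
    return rto * b_simu_vis   # glint to subtract at that band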


@ -0,0 +1,75 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Evan Greenberg.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import ray
from ..misc import set_glint
from .hochberg_2003 import apply_hochberg_2003_correction
from .gao_2021 import apply_gao_2021_correction
from .hedley_2005 import apply_hedley_2005_correction
def set_glint_parameters(actors, config_dict):
# Assign glint dict
glint_dict = config_dict['glint']
# Set Glint dict
_ = ray.get([
a.do.remote(set_glint, glint_dict) for a in actors
])
# Add glint correction
_ = ray.get([
a.do.remote(lambda x: x.corrections.append('glint')) for a in actors
])
def set_glint_parameters_single(hy_obj, config_dict):
# Assign glint dict
glint_dict = config_dict['glint']
# Set Glint dict
set_glint(hy_obj, glint_dict)
# Add glint correction
hy_obj.corrections.append('glint')
def apply_glint_correct(hy_obj, data, dimension, index):
''' Corrects glint based on the specified algorithm in the config.
Options include:
Hochberg et al., 2003: hochberg
Gao et al., 2021: gao
Hedley et al. 2005: hedley
...
'''
# Perform one of the corrections
if hy_obj.glint['type'] == 'hochberg':
data = apply_hochberg_2003_correction(hy_obj, data, dimension, index)
elif hy_obj.glint['type'] == 'gao':
data = apply_gao_2021_correction(hy_obj, data, dimension, index)
elif hy_obj.glint['type'] == 'hedley':
data = apply_hedley_2005_correction(hy_obj, data, dimension, index)
#Truncate reflectance values below 0
if hy_obj.glint['truncate']:
data[(data < 0) & (data != hy_obj.no_data)]= 0
return data
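# --- Illustrative sketch, not part of the original file: a minimal 'glint'
# section of the config dict. Only keys referenced in this package are shown;
# the exact schema (including the apply_mask format) is assumed.
EXAMPLE_GLINT_CONFIG = {'type': 'hochberg',       # or 'gao', 'hedley'
                        'correction_wave': 860,   # NIR/SWIR reference wavelength
                        'truncate': True,         # clip negative reflectance to 0
                        'apply_mask': [['ndi', {'band_1': 850, 'band_2': 660,
                                                'min': -1.0, 'max': 0.1}]]}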


@ -0,0 +1,140 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Evan Greenberg.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import numpy as np
from scipy import stats
from ..masks import mask_create
def apply_hedley_2005_correction(hy_obj, data, dimension, index):
"""
Glint correction algorithm following:
Hedley, J. D., Harborne, A. R., & Mumby, P. J. (2005).
Simple and robust removal of sun glint for mapping shallow-water benthos.
International Journal of Remote Sensing, 26(10), 2107-2112.
"""
# Raise an exception if there is no deep water sample provided
if hy_obj.glint.get('deep_water_sample') is None:
raise KeyError("No Deep Water Sample Provided")
if 'apply_glint' not in hy_obj.mask:
hy_obj.gen_mask(mask_create,'apply_glint',hy_obj.glint['apply_mask'])
if hy_obj.mask['apply_glint'].sum() == 0:
return data
hy_obj.glint['correction_band'] = hy_obj.wave_to_band(
hy_obj.glint['correction_wave']
)
if 'hedley_slopes' not in hy_obj.ancillary:
hy_obj.ancillary['hedley_slopes'] = optimize_slopes(hy_obj)
if 'hedley_nir_swir_diff' not in hy_obj.ancillary:
hy_obj.ancillary['hedley_nir_swir_diff'] = nir_swir_diff(hy_obj)
if dimension == 'line':
correction = (
hy_obj.ancillary['hedley_nir_swir_diff'][index, :].reshape(-1, 1)
* hy_obj.ancillary['hedley_slopes']
)
correction[~hy_obj.mask['apply_glint'][index, :], :] = 0
elif dimension == 'column':
correction = (
hy_obj.ancillary['hedley_nir_swir_diff'][:, index].reshape(-1, 1)
* hy_obj.ancillary['hedley_slopes']
)
correction[~hy_obj.mask['apply_glint'][:, index], :] = 0
elif (dimension == 'band'):
correction = (
hy_obj.ancillary['hedley_nir_swir_diff']
* hy_obj.ancillary['hedley_slopes'][0, index]
)
correction[~hy_obj.mask['apply_glint']] = 0
elif dimension == 'chunk':
x1, x2, y1, y2 = index
corr_diff = hy_obj.ancillary['hedley_nir_swir_diff'][y1:y2, x1:x2]
bandnums = data.shape[2]
corr_diff = np.repeat(
corr_diff[:, :, np.newaxis],
bandnums,
axis=2
)
correction = corr_diff * hy_obj.ancillary['hedley_slopes']
correction[~hy_obj.mask['apply_glint'][y1:y2, x1:x2], :] = 0
elif dimension == 'pixels':
y, x = index
correction = (
hy_obj.ancillary['hedley_nir_swir_diff'][y, x].reshape(-1, 1)
* hy_obj.ancillary['hedley_slopes']
)
correction[~hy_obj.mask['apply_glint'][y, x], :] = 0
return data - correction
def optimize_slopes(hy_obj):
deep_water = hy_obj.get_chunk(
*hy_obj.glint['deep_water_sample'][hy_obj.file_name]
)
deep_correction = (
deep_water[:, :, hy_obj.glint['correction_band']].flatten()
)
# Iterate through each band to find the band-slope
slopes = np.empty([1, len(hy_obj.wavelengths)])
for i, band in enumerate(hy_obj.wavelengths):
# Get flattened deep water sample of band
wave_num = np.argmin(
np.abs(hy_obj.wavelengths - band)
)
wave = deep_water[:, :, wave_num].flatten()
# Regress
(
slope,
intercept,
r_value,
p_value,
std_err
) = stats.linregress(
deep_correction,
wave
)
slopes[0, i] = slope
return slopes
def nir_swir_diff(hy_obj):
nir_swir_array = np.copy(
hy_obj.get_wave(hy_obj.glint['correction_wave'])
)
nir_swir_array[~hy_obj.mask['apply_glint']] = 0
nir_swir_min = np.percentile(nir_swir_array[nir_swir_array > 0], .0001)
return nir_swir_array - nir_swir_min
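# --- Illustrative sketch, not part of the original file: the slope used by the
# Hedley correction is a regression of each band against the NIR/SWIR reference
# band over a deep-water sample, applied as band - slope*(NIR - min(NIR)).
def _example_hedley_slope():
    rng = np.random.default_rng(1)
    glint = rng.uniform(0, 0.05, 500)            # synthetic glint signal
    nir = glint + 0.001                          # reference band: mostly glint
    blue = 0.02 + 0.9*glint + rng.normal(0, 1e-4, 500)
    slope = stats.linregress(nir, blue).slope    # ~0.9
    return blue - slope*(nir - nir.min())        # de-glinted blue band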


@ -0,0 +1,88 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Evan Greenberg.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import numpy as np
from ..masks import mask_create
def apply_hochberg_2003_correction(hy_obj, data, dimension, index):
"""
Glint correction algorithm following:
Hochberg, EJ, Andréfouët, S and Tyler, MR. 2003.
Sea surface correction of high spatial resolution Ikonos images to
improve bottom mapping in nearshore environments.
IEEE Transactions on Geoscience and Remote Sensing, 41: 1724-1729.
"""
if 'apply_glint' not in hy_obj.mask:
hy_obj.gen_mask(mask_create,'apply_glint',hy_obj.glint['apply_mask'])
if hy_obj.mask['apply_glint'].sum() == 0:
return data
if 'hochberg_correction' not in hy_obj.ancillary:
hy_obj.ancillary['hochberg_correction'] = (
get_hochberg_correction(hy_obj)
)
if dimension == 'line':
correction = hy_obj.ancillary['hochberg_correction'][index, :][:,np.newaxis]
elif dimension == 'column':
correction = hy_obj.ancillary['hochberg_correction'][:, index][np.newaxis,:]
elif dimension == 'band':
correction = hy_obj.ancillary['hochberg_correction']
elif dimension == 'chunk':
x1, x2, y1, y2 = index
correction = hy_obj.ancillary['hochberg_correction'][y1:y2, x1:x2]
elif dimension == 'pixels':
y, x = index
correction = hy_obj.ancillary['hochberg_correction'][y, x]
return data - correction
def get_hochberg_correction(hy_obj):
"""
Calculates the hochberg correction across entire image.
Uses the NIR or SWIR wavelengths to find the amount of signal
attributed to glint. Zeros out non-water pixels
"""
if isinstance(hy_obj.glint['correction_wave'],list):
nir_swir_array = np.zeros((hy_obj.lines,hy_obj.columns))
for wave in hy_obj.glint['correction_wave']:
nir_swir_array+= hy_obj.get_wave(wave)
nir_swir_array/=len(hy_obj.glint['correction_wave'])
else:
nir_swir_array = np.copy(hy_obj.get_wave(hy_obj.glint['correction_wave']))
nir_swir_array[~hy_obj.mask['apply_glint']] = 0
nir_swir_min = np.percentile(
nir_swir_array[nir_swir_array > 0], .001
)
hochberg_correction = nir_swir_array - nir_swir_min
hochberg_correction[~hy_obj.mask['apply_glint']] = 0
return hochberg_correction
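# --- Illustrative sketch, not part of the original file: Hochberg assumes the
# darkest water pixel in the NIR/SWIR is glint-free, so the per-pixel glint is
# simply the reference band minus its minimum, subtracted from every band.
def _example_hochberg():
    nir = np.array([0.012, 0.004, 0.030])   # hypothetical NIR over water
    return nir - nir.min()                  # per-pixel glint estimate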


@ -0,0 +1,23 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
The :mod:`hytools.io` module includes functions for reading
from multiple file formats and writing to ENVI formatted binary files.
"""
from .envi import *

697
Flexbrdf/hytools/io/envi.py Normal file

@ -0,0 +1,697 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Functions for reading and writing ENVI formatted binary files
Todo:
* Implement opening of ENVI files with different byte order
"""
import os
import sys
from collections import Counter
import numpy as np
# ENVI datatype conversion dictionary
dtype_dict = {1:np.uint8,
2:np.int16,
3:np.int32,
4:np.float32,
5:np.float64,
12:np.uint16,
13:np.uint32,
14:np.int64,
15:np.uint64}
# Dictionary of all ENVI header fields
field_dict = {"acquisition time": "str",
"band names":"list_str",
"bands": "int",
"bbl": "list_float",
"byte order": "int",
"class lookup": "str",
"class names": "str",
"classes": "int",
"cloud cover": "float",
"complex function": "str",
"coordinate system string": "str",
"correction factors": "list_float",
"data gain values": "list_float",
"data ignore value": "float",
"data offset values": "list_float",
"data reflectance gain values": "list_float",
"data reflectance offset values": "list_float",
"data type": "int",
"default bands": "list_float",
"default stretch": "str",
"dem band": "int",
"dem file": "str",
"description": "str",
"envi description":"str",
"file type": "str",
"fwhm": "list_float",
"geo points": "list_float",
"header offset": "int",
"interleave": "str",
"lines": "int",
"map info": "list_str",
"pixel size": "list_str",
"projection info": "str",
"read procedures": "str",
"reflectance scale factor": "float",
"rpc info": "str",
"samples":"int",
"security tag": "str",
"sensor type": "str",
"smoothing factors": "list_float",
"solar irradiance": "float",
"spectra names": "list_str",
"sun azimuth": "float",
"sun elevation": "float",
"wavelength": "list_float",
"wavelength units": "str",
"x start": "float",
"y start": "float",
"z plot average": "str",
"z plot range": "str",
"z plot titles": "str"}
def open_envi(hy_obj,anc_path = {}, ext = False, glt_path = None):
"""Open ENVI formatted image file and populate Hytools object.
Args:
hy_obj (HyTools object): HyTools file object; the header file is assumed
    to be located in the same directory as the image file.
anc_path (dict): Dictionary with pathnames and band numbers of ancillary datasets.
ext (bool): Input ENVI file has a file extension.
glt_path (dict): Dictionary with pathnames of an external GLT dataset.
Returns:
HyTools file object: Populated HyTools file object.
"""
header_file = os.path.splitext(hy_obj.file_name)[0] + ".hdr"
if not os.path.isfile(header_file):
print("ERROR: Header file not found.")
return None
header_dict = parse_envi_header(header_file)
hy_obj.lines = header_dict["lines"]
hy_obj.columns = header_dict["samples"]
hy_obj.bands = header_dict["bands"]
hy_obj.bad_bands = np.array([False for band in range(hy_obj.bands)])
hy_obj.interleave = header_dict["interleave"]
hy_obj.fwhm = header_dict["fwhm"]
hy_obj.wavelengths = header_dict["wavelength"]
hy_obj.wavelength_units = header_dict["wavelength units"]
hy_obj.dtype = dtype_dict[header_dict["data type"]]
hy_obj.no_data = header_dict['data ignore value']
hy_obj.map_info = header_dict['map info']
hy_obj.byte_order = header_dict['byte order']
hy_obj.anc_path = anc_path
hy_obj.header_file = header_file
hy_obj.transform = calc_geotransform(header_dict['map info'])
if bool(header_dict['coordinate system string']):
hy_obj.projection = header_dict['coordinate system string']
else:
hy_obj.projection = ''
if hy_obj.byte_order == 1:
hy_obj.endianness = 'big'
else:
hy_obj.endianness = 'little'
if isinstance(header_dict['bbl'],np.ndarray):
hy_obj.bad_bands = np.array([x==1 for x in header_dict['bbl']])
if header_dict["interleave"] == 'bip':
hy_obj.shape = (hy_obj.lines, hy_obj.columns, hy_obj.bands)
elif header_dict["interleave"] == 'bil':
hy_obj.shape = (hy_obj.lines, hy_obj.bands, hy_obj.columns)
elif header_dict["interleave"] == 'bsq':
hy_obj.shape = (hy_obj.bands, hy_obj.lines, hy_obj.columns)
else:
print("ERROR: Unrecognized interleave type.")
hy_obj = None
# If no_data value is not specified guess using image corners.
if hy_obj.no_data is None:
hy_obj.load_data()
band_ind = 5 if hy_obj.bands > 10 else 0
if header_dict["interleave"] == 'bip':
up_l = hy_obj.data[0,0,band_ind]
up_r = hy_obj.data[0,-1,band_ind]
low_l = hy_obj.data[-1,0,band_ind]
low_r = hy_obj.data[-1,-1,band_ind]
elif header_dict["interleave"] == 'bil':
up_l = hy_obj.data[0,band_ind,0]
up_r = hy_obj.data[0,band_ind,-1]
low_l = hy_obj.data[-1,band_ind,0]
low_r = hy_obj.data[-1,band_ind,-1]
elif header_dict["interleave"] == 'bsq':
up_l = hy_obj.data[band_ind,0,0]
up_r = hy_obj.data[band_ind,0,-1]
low_l = hy_obj.data[band_ind,-1,0]
low_r = hy_obj.data[band_ind,-1,-1]
if hy_obj.endianness != sys.byteorder:
up_l = up_l.byteswap()
up_r = up_r.byteswap()
low_l = low_l.byteswap()
low_r = low_r.byteswap()
counts = {v: k for k, v in Counter([up_l,up_r,low_l,low_r]).items()}
hy_obj.no_data = counts[max(counts.keys())]
hy_obj.close_data()
if bool(glt_path):
glt_meta_dict = parse_glt_envi(glt_path)
hy_obj.glt_path = glt_meta_dict["glt_path"]
hy_obj.glt_map_info = glt_meta_dict["map_info"]
hy_obj.lines_glt = glt_meta_dict["lines_glt"]
hy_obj.columns_glt = glt_meta_dict["columns_glt"]
hy_obj.glt_transform = glt_meta_dict["transform"]
hy_obj.glt_projection = glt_meta_dict["projection"]
del glt_meta_dict
del header_dict
return hy_obj
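# --- Illustrative sketch, not part of the original file: typical use through a
# HyTools object (class defined elsewhere in the package; usage assumed):
#   hy = HyTools()
#   hy.file_name = '/data/flightline'      # hypothetical pathname
#   hy = open_envi(hy, anc_path={'sensor_zn': ['/data/flightline_obs', 2]})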
class WriteENVI:
"""Iterator class for writing to an ENVI data file.
"""
def __init__(self,output_name,header_dict):
"""
Args:
output_name (str): Pathname of output ENVI data file.
header_dict (dict): Dictionary containing ENVI header information.
Returns:
None.
"""
self.interleave = header_dict['interleave']
self.header_dict = header_dict
self.output_name =output_name
dtype = dtype_dict[header_dict["data type"]]
lines = header_dict['lines']
columns = header_dict['samples']
bands = header_dict['bands']
if self.interleave == "bip":
self.data = np.memmap(output_name,dtype = dtype,
mode='w+', shape = (lines,columns,bands))
elif self.interleave == "bil":
self.data = np.memmap(output_name,dtype = dtype,
mode='w+', shape =(lines,bands,columns))
elif self.interleave == "bsq":
self.data = np.memmap(output_name,dtype = dtype,
mode='w+',shape =(bands,lines,columns))
write_envi_header(self.output_name,self.header_dict)
def write_line(self,line,index):
"""
Args:
line (numpy.ndarray): Line array (columns,bands).
index (int): Zero-based line index.
Returns:
None.
"""
if self.interleave == "bip":
self.data[index,:,:] = line
elif self.interleave == "bil":
self.data[index,:,:] = np.moveaxis(line,0,1)
elif self.interleave == "bsq":
self.data[:,index,:] = np.moveaxis(line,0,1)
def write_line_glt(self,arr,glt_indices_y,glt_indices_x):
"""
Args:
arr (numpy.ndarray): Pixel array (pixels, bands).
glt_indices_y (numpy.ndarray): Zero-based output line indices.
glt_indices_x (numpy.ndarray): Zero-based output column indices.
Returns:
None.
"""
if self.interleave == "bip":
self.data[glt_indices_y,glt_indices_x,:] = arr
elif self.interleave == "bil":
self.data[glt_indices_y,:,glt_indices_x] = arr #np.moveaxis(line,0,1)
elif self.interleave == "bsq":
self.data[:,glt_indices_y,glt_indices_x] = np.moveaxis(arr,0,1)
def write_column(self,column,index):
"""
Args:
column (numpy.ndarray): Column array (lines,bands).
index (int): Zero-based column index.
Returns:
None.
"""
if self.interleave == "bip":
self.data[:,index,:] = column
elif self.interleave == "bil":
self.data[:,:,index] = column
elif self.interleave == "bsq":
self.data[:,:,index] = np.moveaxis(column,0,1)
def write_band(self,band,index):
"""
Args:
band (numpy.ndarray): Band array (lines,columns).
index (int): Zero-based band index.
Returns:
None.
"""
if self.interleave == "bip":
self.data[:,:,index] = band
elif self.interleave == "bil":
self.data[:,index,:] = band
elif self.interleave == "bsq":
self.data[index,:,:]= band
def write_band_glt(self,band,index,glt_indices,fill_mask):
"""
Args:
band (numpy.ndarray): Band array (lines,columns).
index (int): Zero-based band index.
glt_indices (numpy.ndarray,numpy.ndarray): Zero-based tuple indices.
fill_mask (numpy.ndarray): Boolean mask of valid output pixels.
Returns:
None.
"""
if self.interleave == "bip":
self.data[:,:,index][fill_mask] = band[glt_indices]
self.data[:,:,index][~fill_mask] = self.header_dict['data ignore value']
elif self.interleave == "bil":
self.data[:,index,:][fill_mask] = band[glt_indices]
self.data[:,index,:][~fill_mask] = self.header_dict['data ignore value']
elif self.interleave == "bsq":
self.data[index,:,:][fill_mask] = band[glt_indices]
self.data[index,:,:][~fill_mask] = self.header_dict['data ignore value']
def write_chunk(self,chunk,line_index,column_index):
"""
Args:
chunk (TYPE): Chunks array (chunk lines,chunk columns,bands).
line_index (int): Zero-based upper line index.
column_index (int): Zero-based left column index.
Returns:
None.
"""
x_start = column_index
x_end = column_index + chunk.shape[1]
y_start = line_index
y_end = line_index + chunk.shape[0]
if self.interleave == "bip":
self.data[y_start:y_end,x_start:x_end,:] = chunk
elif self.interleave == "bil":
self.data[y_start:y_end,:,x_start:x_end] = np.moveaxis(chunk,-1,-2)
elif self.interleave == "bsq":
self.data[:,y_start:y_end,x_start:x_end] = np.moveaxis(chunk,-1,0)
def write_pixel(self,pixel,line_index,column_index):
"""
Args:
pixel (TYPE): pixel array (bands).
line_index (int): Zero-based upper line index.
column_index (int): Zero-based left column index.
Returns:
None.
"""
if self.interleave == "bip":
self.data[line_index,column_index,:] = pixel
elif self.interleave == "bil":
self.data[line_index,:,column_index] = pixel
elif self.interleave == "bsq":
self.data[:,line_index,column_index] = pixel
def close(self):
"""Delete numpy memmap.
"""
del self.data
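# --- Illustrative sketch, not part of the original file: writing a small BIL
# image line by line with the class above. The output pathname is hypothetical.
def _example_write_envi(output_name='/tmp/example_img'):
    header = envi_header_dict()
    header.update({'lines': 2, 'samples': 3, 'bands': 4, 'data type': 4,
                   'interleave': 'bil', 'byte order': 0, 'header offset': 0})
    writer = WriteENVI(output_name, header)
    for line_num in range(2):
        writer.write_line(np.zeros((3, 4), dtype=np.float32), line_num)
    writer.close()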
def envi_header_from_neon(hy_obj, interleave = 'bsq'):
"""Create an ENVI header dictionary from NEON metadata
Args:
hy_obj (Hytools object): Populated HyTools file object.
interleave (str, optional): Data interleave type. Defaults to 'bsq'.
Returns:
dict: Populated ENVI header dictionary.
"""
header_dict = {}
header_dict["ENVI description"] = "{}"
header_dict["samples"] = hy_obj.columns
header_dict["lines"] = hy_obj.lines
header_dict["bands"] = hy_obj.bands
header_dict["header offset"] = 0
header_dict["file type"] = "ENVI Standard"
header_dict["data type"] = 2
header_dict["interleave"] = interleave
header_dict["sensor type"] = ""
header_dict["byte order"] = 0
header_dict["map info"] = hy_obj.map_info
header_dict["coordinate system string"] = hy_obj.projection
header_dict["wavelength units"] = hy_obj.wavelength_units
header_dict["data ignore value"] =hy_obj.no_data
header_dict["wavelength"] =hy_obj.wavelengths
return header_dict
def envi_header_from_nc(hy_obj, interleave = 'bsq', warp_glt = False):
"""Create an ENVI header dictionary from NetCDF metadata
Args:
hy_obj (Hytools object): Populated HyTools file object.
interleave (str, optional): Data interleave type. Defaults to 'bsq'.
Returns:
dict: Populated ENVI header dictionary.
"""
header_dict = {}
header_dict["ENVI description"] = "{}"
if warp_glt == False:
header_dict["samples"] = hy_obj.columns
header_dict["lines"] = hy_obj.lines
header_dict["map info"] = hy_obj.map_info
header_dict["coordinate system string"] = "{%s}" % hy_obj.projection if hy_obj.projection else "{}"
header_dict["projection"] = hy_obj.projection
header_dict["transform"] = hy_obj.transform
else:
header_dict["samples"] = hy_obj.columns_glt
header_dict["lines"] = hy_obj.lines_glt
header_dict["map info"] = hy_obj.glt_map_info
header_dict["coordinate system string"] = "{%s}" % hy_obj.glt_projection if hy_obj.glt_projection else "{}"
header_dict["projection"] = hy_obj.glt_projection
header_dict["transform"] = hy_obj.glt_transform
header_dict["bands"] = 2 #hy_obj.bands
header_dict["header offset"] = 0
header_dict["file type"] = "ENVI Standard"
header_dict["data type"] = 4
header_dict["interleave"] = interleave
header_dict["sensor type"] = ""
header_dict["byte order"] = 0
header_dict["wavelength units"] = hy_obj.wavelength_units
header_dict["data ignore value"] = hy_obj.no_data
header_dict["wavelength"] = hy_obj.wavelengths
return header_dict
def write_envi_header(output_name,header_dict,mode = 'w'):
"""Write ENVI header file to disk.
Args:
output_name (str): Header file pathname.
header_dict (dict): Populated ENVI header dictionary.
mode (str): File open mode. default: w
Returns:
None.
"""
base_name = os.path.splitext(output_name)[0]
header_file = open(base_name + ".hdr",mode)
header_file.write("ENVI\n")
for key in header_dict.keys():
value = header_dict[key]
# Convert list to comma separated strings
if isinstance(value,(list,np.ndarray)):
value = "{%s}" % ",".join(map(str, value))
elif key == "coordinate system string" and value and isinstance(value, str):
# Ensure the coordinate system string value is wrapped in curly braces
if not value.startswith("{"):
value = "{%s}" % value
else:
value = str(value)
# Skip entries with None as value
if value != 'None':
header_file.write("%s = %s\n" % (key,value))
header_file.close()
def envi_header_dict():
"""
Returns:
dict: Empty ENVI header dictionary.
"""
return {key:None for (key,value) in field_dict.items()}
def envi_read_line(data,index,interleave):
"""
Args:
data (numpy.memmap): Numpy memory-map.
index (int): Zero-based line index.
interleave (str): Data interleave type.
Returns:
numpy.ndarray: Line array (columns, bands).
"""
if interleave == "bip":
line = data[index,:,:]
elif interleave == "bil":
line = np.moveaxis(data[index,:,:],0,1)
elif interleave == "bsq":
line = np.moveaxis(data[:,index,:],0,1)
return line
def envi_read_column(data,index,interleave):
"""
Args:
data (numpy.memmap): Numpy memory-map.
index (int): Zero-based column index.
interleave (str): Data interleave type.
Returns:
numpy.ndarray: Column array (lines,bands).
"""
if interleave == "bip":
column = data[:,index,:]
elif interleave == "bil":
column = data[:,:,index]
elif interleave == "bsq":
column = np.moveaxis(data[:,:,index],0,1)
return column
def envi_read_band(data,index,interleave):
"""
Args:
data (numpy.memmap): Numpy memory-map.
index (int): Zero-based line index.
interleave (str): Data interleave type.
Returns:
numpy.ndarray: Band array (lines,columns).
"""
if interleave == "bip":
band = data[:,:,index]
elif interleave == "bil":
band = data[:,index,:]
elif interleave == "bsq":
band = data[index,:,:]
return band
def envi_read_pixels(data,lines,columns,interleave):
"""
Args:
data (numpy.memmap): Numpy memory-map.
lines (list): List of zero-indexed line indices.
columns (list): List of zero-indexed column indices.
interleave (str): Data interleave type.
Returns:
numpy.ndarray: Pixel array (pixels,bands).
"""
if interleave == "bip":
pixels = data[lines,columns,:]
elif interleave == "bil":
pixels = data[lines,:,columns]
elif interleave == "bsq":
pixels = data[:,lines,columns]
return pixels
def envi_read_chunk(data,col_start,col_end,line_start,line_end,interleave):
"""
Args:
data (numpy.memmap): Numpy memory-map.
col_start (int): Zero-based left column index.
col_end (int): Non-inclusive zero-based right column index.
line_start (int): Zero-based top line index.
line_end (int): Non-inclusive zero-based bottom line index.
interleave (str): Data interleave type.
Returns:
numpy.ndarray: Chunk array (line_end-line_start,col_end-col_start,bands).
"""
if interleave == "bip":
chunk = data[line_start:line_end,col_start:col_end,:]
elif interleave == "bil":
chunk = np.moveaxis(data[line_start:line_end,:,col_start:col_end],-1,-2)
elif interleave == "bsq":
chunk = np.moveaxis(data[:,line_start:line_end,col_start:col_end],0,-1)
return chunk
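# --- Illustrative sketch, not part of the original file: pairing np.memmap
# with the readers above. Shapes follow the interleave conventions in open_envi;
# the pathname is hypothetical (e.g. the file written by _example_write_envi).
def _example_read(path='/tmp/example_img'):
    lines, bands, columns = 2, 4, 3
    data = np.memmap(path, dtype=np.float32, mode='r',
                     shape=(lines, bands, columns))       # 'bil' layout
    line0 = envi_read_line(data, 0, 'bil')                # (columns, bands)
    chunk = envi_read_chunk(data, 0, 2, 0, 1, 'bil')      # (1, 2, bands)
    return line0.shape, chunk.shape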
def calc_geotransform(mapinfo):
if mapinfo[-1].startswith('rotation'):
rot_ang_rad = np.radians(float(mapinfo[-1].split('=')[1]))
pixel_size = float(mapinfo[5])
new_rot_mat = pixel_size * np.array([[np.cos(rot_ang_rad),-np.sin(rot_ang_rad)],[np.sin(rot_ang_rad),np.cos(rot_ang_rad)]])@np.array([[1,0],[0,-1]])
geotransform = (float(mapinfo[3]),new_rot_mat[0,0],new_rot_mat[0,1],
float(mapinfo[4]),new_rot_mat[1,0],new_rot_mat[1,1])
else:
# same as 0 rotation
geotransform = (float(mapinfo[3]),float(mapinfo[5]),0,
float(mapinfo[4]),0,-float(mapinfo[6]))
return geotransform
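# --- Illustrative sketch, not part of the original file: the 'map info' fields
# used above are [proj, x ref, y ref, easting, northing, x size, y size, ...].
def _example_geotransform():
    map_info = ['UTM', '1', '1', '500000', '4423720', '1', '1',
                '16', 'North', 'WGS-84']
    return calc_geotransform(map_info)
    # -> (500000.0, 1.0, 0, 4423720.0, 0, -1.0)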
def parse_glt_envi(glt_path):
glt_meta_dict = {}
glt_meta_dict["glt_path"] = glt_path
glt_header_file = os.path.splitext(glt_path[list(glt_path.keys())[0]][0])[0] + ".hdr"
glt_header=parse_envi_header(glt_header_file)
glt_meta_dict["map_info"] = glt_header["map info"]
glt_meta_dict["lines_glt"] = glt_header["lines"]
glt_meta_dict["columns_glt"] = glt_header["samples"]
glt_meta_dict["transform"] = calc_geotransform(glt_header["map info"])
if "coordinate system string" in glt_header:
glt_meta_dict["projection"] = glt_header["coordinate system string"]
else:
glt_meta_dict["projection"] = ''
return glt_meta_dict
def parse_envi_header(header_file):
"""
Args:
header_file (str): Header file pathname.
Returns:
dict: Populated header dictionary.
"""
header_dict = envi_header_dict()
header_file = open(header_file,'r')
line = header_file.readline()
while line :
if "=" in line:
key,value = line.rstrip().split("=",1)
# Add fields not in ENVI default list
if key.strip() not in field_dict.keys():
field_dict[key.strip()] = "str"
val_type = field_dict[key.strip()]
if "{" in value and not "}" in value:
while "}" not in line:
line = header_file.readline()
value+=line
if '{}' in value:
value = None
elif val_type == "list_float":
value= np.array([float(x) for x in value.translate(str.maketrans("\n{}"," ")).split(",")])
elif val_type == "list_int":
value= np.array([int(x) for x in value.translate(str.maketrans("\n{}"," ")).split(",")])
elif val_type == "list_str":
value= [x.strip() for x in value.translate(str.maketrans("\n{}"," ")).split(",")]
elif val_type == "int":
value = int(value.translate(str.maketrans("\n{}"," ")))
elif val_type == "float":
value = float(value.translate(str.maketrans("\n{}"," ")))
elif val_type == "str":
value = value.translate(str.maketrans("\n{}"," ")).strip().lower()
header_dict[key.strip()] = value
line = header_file.readline()
# Fill unused fields with None
for key in field_dict:
if key not in header_dict.keys():
header_dict[key] = None
header_file.close()
return header_dict
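# --- Illustrative sketch, not part of the original file: round-tripping a tiny
# header through the writer and parser above. The pathname is hypothetical.
def _example_parse(base='/tmp/example_hdr'):
    write_envi_header(base, {'lines': 2, 'samples': 3, 'bands': 1,
                             'interleave': 'bip', 'data type': 4,
                             'wavelength': [450.0]})
    parsed = parse_envi_header(base + '.hdr')
    return parsed['lines'], parsed['interleave']   # -> (2, 'bip')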


@ -0,0 +1,71 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
NEON AOP HDF opener
"""
import h5py
import numpy as np
def open_neon(hy_obj, no_data = -9999):
"""Load and parse NEON formated HDF image into a HyTools file object.
Args:
hy_obj (HyTools object): HyTools file object with file_name set to the input HDF pathname.
no_data (float, optional): No data value. Defaults to -9999.
Returns:
HyTools file object: Populated HyTools file object.
"""
hdf_obj = h5py.File(hy_obj.file_name,'r')
hy_obj.base_key = list(hdf_obj.keys())[0]
metadata = hdf_obj[hy_obj.base_key]["Reflectance"]["Metadata"]
data = hdf_obj[hy_obj.base_key]["Reflectance"]["Reflectance_Data"]
hy_obj.projection = metadata['Coordinate_System']['Coordinate_System_String'][()].decode("utf-8")
hy_obj.map_info = metadata['Coordinate_System']['Map_Info'][()].decode("utf-8").split(',')
hy_obj.transform = (float(hy_obj.map_info [3]),float(hy_obj.map_info [1]),0,float(hy_obj.map_info [4]),0,-float(hy_obj.map_info [2]))
hy_obj.fwhm = metadata['Spectral_Data']['FWHM'][()]
hy_obj.wavelengths = metadata['Spectral_Data']['Wavelength'][()]
hy_obj.wavelength_units = metadata['Spectral_Data']['Wavelength'].attrs['Units']
hy_obj.lines = data.shape[0]
hy_obj.columns = data.shape[1]
hy_obj.bands = data.shape[2]
hy_obj.bad_bands = np.array([False for band in range(hy_obj.bands)])
hy_obj.no_data = no_data
hy_obj.anc_path = {'path_length': ['Ancillary_Imagery','Path_Length'],
'sensor_az': ['to-sensor_Azimuth_Angle'],
'sensor_zn': ['to-sensor_Zenith_Angle'],
'solar_az': ['Logs','Solar_Azimuth_Angle'],
'solar_zn': ['Logs','Solar_Zenith_Angle'],
'slope': ['Ancillary_Imagery','Slope'],
'aspect':['Ancillary_Imagery','Aspect'],
'aod': ['Ancillary_Imagery','Aerosol_Optical_Depth'],
'sky_view': ['Ancillary_Imagery','Sky_View_Factor'],
'illum_factor': ['Ancillary_Imagery','Illumination_Factor'],
'elevation': ['Ancillary_Imagery','Smooth_Surface_Elevation'],
'cast_shadow': ['Ancillary_Imagery','Cast_Shadow'],
'dense_veg': ['Ancillary_Imagery','Dark_Dense_Vegetation_Classification'],
'visibility_index': ['Ancillary_Imagery','Visibility_Index_Map'],
'haze_water_cloud': ['Ancillary_Imagery','Haze_Water_Cloud_Map'],
'water_vapor': ['Ancillary_Imagery','Water_Vapor_Column']}
return hy_obj


@ -0,0 +1,426 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
NASA NetCDF opener
"""
import os
import h5py
import h5netcdf
import numpy as np
from .envi import parse_envi_header, WriteENVI, parse_glt_envi
unit_dict = {'nm':'nanometers'}
utm_zone_dict = {'N':'North','S':'South'}
def open_netcdf(hy_obj, sensor,anc_path = {}, glt_path = {}):
"""Load and parse NASA formatted NetCDF AVIRIS/EMIT image into a HyTools file object.
Args:
hy_obj (HyTools object): HyTools file object to populate.
sensor (str): Sensor name for reading, either 'EMIT' (EMIT) or 'AV' (AVIRIS).
anc_path (dict): Dictionary with pathnames and band numbers of ancillary datasets.
glt_path (dict): Dictionary with pathnames and band numbers of external GLT datasets.
Returns:
HyTools file object: Populated HyTools file object.
"""
nc4_obj = h5py.File(hy_obj.file_name,'r')
if "radiance" in list(nc4_obj.keys()):
data_var_name = "radiance"
else:
#elif "reflectance" in list(nc4_obj.keys()):
data_var_name = "reflectance"
hy_obj.base_key = data_var_name
if "geolocation_lookup_table" in list(nc4_obj.keys()):
glt_var_name = "geolocation_lookup_table"
elif "location" in list(nc4_obj.keys()):
glt_var_name = "location"
else:
glt_var_name = None
metadata = nc4_obj.attrs
if sensor=='AV':
data = nc4_obj[data_var_name][data_var_name]
hy_obj.fwhm = nc4_obj[data_var_name]['fwhm'][()]
hy_obj.wavelengths = nc4_obj[data_var_name]['wavelength'][()]
if 'units' in nc4_obj[data_var_name]['wavelength'].attrs.keys():
hy_obj.wavelength_units = unit_dict[get_attr_string(nc4_obj[data_var_name]['wavelength'].attrs['units'])]
elif 'unit' in nc4_obj[data_var_name]['wavelength'].attrs.keys():
hy_obj.wavelength_units = get_attr_string(nc4_obj[data_var_name]['wavelength'].attrs['unit'])
hy_obj.lines = data.shape[1]
hy_obj.columns = data.shape[2]
hy_obj.bands = data.shape[0]
elif sensor == 'EMIT':
data = nc4_obj[data_var_name]
hy_obj.fwhm = nc4_obj['sensor_band_parameters']['fwhm'][()]
hy_obj.wavelengths = nc4_obj['sensor_band_parameters']['wavelengths'][()]
hy_obj.wavelength_units = unit_dict[get_attr_string(nc4_obj['sensor_band_parameters']['wavelengths'].attrs['units'])]
hy_obj.lines = data.shape[0]
hy_obj.columns = data.shape[1]
hy_obj.bands = data.shape[2]
hy_obj.bad_bands = np.array(1-nc4_obj['sensor_band_parameters']['good_wavelengths'][()]).astype(bool)
if isinstance(data.attrs['_FillValue'],np.ndarray):
hy_obj.no_data = data.attrs['_FillValue'][0]
else:
hy_obj.no_data = data.attrs['_FillValue']
hy_obj.anc_path = anc_path
if bool(glt_path):
glt_meta_dict = parse_glt_envi(glt_path)
hy_obj.glt_path = glt_meta_dict["glt_path"]
hy_obj.glt_map_info = glt_meta_dict["map_info"]
hy_obj.lines_glt = glt_meta_dict["lines_glt"]
hy_obj.columns_glt = glt_meta_dict["columns_glt"]
hy_obj.glt_transform = glt_meta_dict["transform"]
hy_obj.glt_projection = glt_meta_dict["projection"]
del glt_meta_dict
if sensor == "EMIT":
# EMIT has only one set of geotransform / GLT; this one overrides the built-in GLT
hy_obj.projection = hy_obj.glt_projection
hy_obj.map_info = hy_obj.glt_map_info
hy_obj.transform = hy_obj.glt_transform
else:
if sensor == 'EMIT':
hy_obj.glt_path = { "glt_x": ["location","glt_x"],
"glt_y": ["location","glt_y"]}
hy_obj.projection = get_attr_string(metadata['spatial_ref'])
geotransform = nc4_obj.attrs['geotransform'][()]
hy_obj.map_info = ['Geographic Lat/Lon','1','1',
str(geotransform[0]),str(geotransform[3]),
str(geotransform[1]),str(-geotransform[5]),
'WGS-84']
hy_obj.transform = tuple(metadata['geotransform'][()])
glt_x = nc4_obj['location']['glt_x']
hy_obj.lines_glt = glt_x.shape[0]
hy_obj.columns_glt = glt_x.shape[1]
hy_obj.glt_projection = hy_obj.projection
hy_obj.glt_transform = hy_obj.transform
hy_obj.glt_map_info = hy_obj.map_info
elif sensor == 'AV':
if "transverse_mercator" in nc4_obj.keys():
spatial_ref_name_tag = "transverse_mercator"
elif "projection" in nc4_obj.keys():
spatial_ref_name_tag = "projection"
else:
spatial_ref_name_tag = None
hy_obj.projection = get_attr_string(nc4_obj[spatial_ref_name_tag].attrs['spatial_ref'])
geotransform = [float(x) for x in get_attr_string(nc4_obj[spatial_ref_name_tag].attrs['GeoTransform']).split(' ')]
utm_zone_tag=((hy_obj.projection).split('UTM zone ')[1]).split('",GEOGCS')[0]
hy_obj.map_info = ['UTM','1','1',
str(geotransform[0]),str(geotransform[3]),
str(geotransform[1]),str(-geotransform[5]),
utm_zone_tag[:-1],utm_zone_dict[utm_zone_tag[-1]],'WGS-84']
hy_obj.transform = tuple(geotransform)
hy_obj.glt_path = { "glt_x": [glt_var_name,"sample"], #["geolocation_lookup_table","sample"],
"glt_y": [glt_var_name,"line"]} #["geolocation_lookup_table","line"]}
if glt_var_name is None:
hy_obj.lines_glt = hy_obj.lines
hy_obj.columns_glt = hy_obj.columns
else:
glt_x = nc4_obj[glt_var_name]['sample']
hy_obj.lines_glt = glt_x.shape[0]
hy_obj.columns_glt = glt_x.shape[1]
if hy_obj.base_key=="radiance":
hy_obj.glt_projection = hy_obj.projection
hy_obj.glt_transform = hy_obj.transform
hy_obj.glt_map_info = hy_obj.map_info
return hy_obj
def get_attr_string(attr):
if isinstance(attr, bytes):
return attr.decode("utf-8")
return attr
def set_wavelength_meta(nc4_obj,header_dict,glt_bool):
file_type = (header_dict['file_type']).lower()
if file_type in ["envi","ncav"] or (file_type=="emit" and glt_bool is True):
gp=nc4_obj.create_group("reflectance")
wavelength_var=nc4_obj.create_variable("/reflectance/wavelength",("wavelength",),
data=header_dict['wavelength'],
dtype=np.float32)
fwhm_var = nc4_obj.create_variable("/reflectance/fwhm",("wavelength",),
data=header_dict['fwhm'],
dtype=np.float32)
elif file_type=="emit":
if glt_bool: # handled above
pass
else: # do not warp with GLT
nc4_obj.dimensions["bands"]=header_dict['bands']
wavelength_var=nc4_obj.create_variable("/sensor_band_parameters/wavelengths",("bands",),
data=np.array(header_dict['wavelength']),
dtype=np.float32)
fwhm_var = nc4_obj.create_variable("/sensor_band_parameters/fwhm", ("bands",),
data=header_dict['fwhm'],
dtype=np.float32)
def write_netcdf_refl_meta(nc4_obj,header_dict,glt_bool):
set_wavelength_meta(nc4_obj,header_dict,glt_bool)
write_netcdf_meta(nc4_obj,header_dict,glt_bool)
class WriteNetCDF(WriteENVI):
"""Iterator class for writing to a NetCDF data file.
The class inherits all the write functions from WriteENVI: write pixel, line, band, chunk, etc.
"""
def __init__(self,output_name, header_dict, attr_dict, glt_bool, type_tag, band_name=None):
"""
Args:
output_name (str): Pathname of output NetCDF data file.
header_dict (dict): Dictionary containing ENVI header information.
Returns:
None.
"""
dim1_chunk_size = 2**(min(int(np.log2(header_dict['lines'])),8))
dim2_chunk_size = 2**(min(int(np.log2(header_dict['samples'])),8))
if type_tag=="reflectance": # for reflectance
self.header_dict = header_dict
self.output_name = output_name
self.file_type = header_dict['file_type'].lower()
self.nc4_obj = h5netcdf.File(output_name, "w")
write_netcdf_refl_meta(self.nc4_obj,header_dict,glt_bool)
if self.file_type in ["ncav","envi"]:
self.interleave = "bsq"
self.data = self.nc4_obj.create_variable("/reflectance/reflectance",
("wavelength","northing","easting"),
np.float32,
chunks=(2,dim1_chunk_size,dim2_chunk_size),
compression='gzip')
self.data.attrs["grid_mapping"] = "projection"
elif self.file_type == "emit":
if glt_bool:
self.interleave = "bsq"
self.data = self.nc4_obj.create_variable("/reflectance/reflectance",
("wavelength","northing","easting"),
np.float32,
chunks=(1,dim1_chunk_size,dim2_chunk_size),
compression='gzip')
self.data.attrs["grid_mapping"] = "projection"
else:
self.interleave = "bip"
self.data = self.nc4_obj.create_variable("reflectance",
("downtrack","crosstrack","bands"),
np.float32,
chunks=(dim1_chunk_size,dim2_chunk_size,2),
compression='gzip')
self.data.attrs["_FillValue"]=np.array([-9999.0],dtype=np.float32)
self.external_nc_attrs(attr_dict)
elif type_tag=="mask": # for masks
self.interleave = "bsq"
self.header_dict = header_dict
self.file_type = header_dict['file_type'].lower()
self.nc4_obj = h5netcdf.File(output_name, "r+")
if self.file_type in ["ncav","envi"]:
self.data = self.nc4_obj.create_variable(f"/masks/{band_name}",
("northing","easting"),
np.uint8,
chunks=(dim1_chunk_size,dim2_chunk_size),
compression='gzip')
self.data.attrs["grid_mapping"] = "projection"
elif self.file_type == "emit":
if glt_bool:
self.data = self.nc4_obj.create_variable(f"/masks/{band_name}",
("northing","easting"),
np.uint8,
chunks=(dim1_chunk_size,dim2_chunk_size),
compression='gzip')
self.data.attrs["grid_mapping"] = "projection"
else:
self.data = self.nc4_obj.create_variable(f"/masks/{band_name}",
("downtrack","crosstrack"),
np.uint8,
chunks=(dim1_chunk_size,dim2_chunk_size),
compression='gzip')
self.data.attrs["_FillValue"]=np.array([255],dtype=np.uint8)
self.external_nc_attrs(attr_dict)
elif type_tag=="trait":
self.interleave = "bsq"
self.file_type = header_dict['file_type'].lower()
self.nc4_obj = h5netcdf.File(output_name, "w")
self.nc4_obj.dimensions["bands"]=2
self.interleave = "bsq"
write_netcdf_meta(self.nc4_obj,header_dict,glt_bool)
if self.file_type in ["ncav","envi"]:
self.data = self.nc4_obj.create_variable(f"/{band_name}/stack",
("bands","northing","easting"),
np.float32,
chunks=(1,dim1_chunk_size,dim2_chunk_size),
compression='gzip')
self.data.attrs["grid_mapping"] = "projection"
elif self.file_type == "emit":
if glt_bool:
self.data = self.nc4_obj.create_variable(f"/{band_name}/stack",
("bands","northing","easting"),
np.float32,
chunks=(1,dim1_chunk_size,dim2_chunk_size),
compression='gzip')
self.data.attrs["grid_mapping"] = "projection"
else:
self.data = self.nc4_obj.create_variable(f"/{band_name}/stack",
("bands","downtrack","crosstrack"),
np.float32,
chunks=(1,dim1_chunk_size,dim2_chunk_size),
compression='gzip')
self.data.attrs["band_names"] = header_dict["band names"][:2]
self.data.attrs["_FillValue"] = np.array([-9999.0],dtype=np.float32)
def write_mask_band(self,band):
self.data[:,:] = band
def write_mask_band_glt(self,band,glt_indices,fill_mask):
tmp_band = np.ones(fill_mask.shape)*self.header_dict['data ignore value']
tmp_band[fill_mask] = band[glt_indices]
tmp_band[~fill_mask] = 255
self.data[:,:] = tmp_band
def write_glt_dataset(self,glt_x_arr,glt_y_arr,dim_x_name="ortho_x",dim_y_name="ortho_y"):
var_glt_x = self.nc4_obj.create_variable("/location/glt_x",(dim_y_name,dim_x_name),
data=glt_x_arr,
dtype=np.int32,
chunks=(256,256),
compression='gzip')
var_glt_y = self.nc4_obj.create_variable("/location/glt_y",(dim_y_name,dim_x_name),
data=glt_y_arr,
dtype=np.int32,
chunks=(256,256),
compression='gzip')
var_glt_x.attrs["grid_mapping"] = "projection"
var_glt_y.attrs["grid_mapping"] = "projection"
var_glt_x.attrs["_FillValue"]=np.array([0],dtype=np.int32)
var_glt_y.attrs["_FillValue"]=np.array([0],dtype=np.int32)
def write_netcdf_band_glt(self,band,index,glt_indices,fill_mask):
"""
Args:
band (numpy.ndarray): Band array (lines,columns).
index (int): Zero-based band index.
glt_indices (numpy.ndarray,numpy.ndarray): Zero-based tuple indices.
fill_mask (numpy.ndarray): Boolean mask of valid output pixels.
Returns:
None.
"""
tmp_band = np.ones(fill_mask.shape)*(-9999)
tmp_band[fill_mask] = band[glt_indices]
tmp_band[~fill_mask] = -9999
if self.interleave == "bip":
self.data[:,:,index]=tmp_band
elif self.interleave == "bil":
self.data[:,index,:]=tmp_band
elif self.interleave == "bsq":
self.data[index,:,:]=tmp_band
def external_nc_attrs(self,attr_dict):
if attr_dict is None:
return
for key in attr_dict:
split_key = key.split('/')
if len(split_key[0])==0:
split_key.pop(0)
if len(split_key)>1:
group_path = '/'+'/'.join(split_key[:-1])
self.nc4_obj[group_path].attrs[split_key[-1]]=str(attr_dict[key]).encode("utf-8")
else:
self.nc4_obj.attrs[key]=str(attr_dict[key]).encode("utf-8")
def close(self):
"""Delete
"""
self.nc4_obj.close()
def write_netcdf_meta(nc4_obj,header_dict,glt_bool):
file_type = (header_dict['file_type']).lower()
if file_type in ["envi","ncav"] or (file_type=="emit" and glt_bool is True):
transform=header_dict['transform']
nc4_obj.dimensions["northing"]=header_dict['lines'] #dim0
nc4_obj.dimensions["easting"]=header_dict['samples'] #dim1
tm_var = nc4_obj.create_variable("/projection",data=np.array([0]),dtype=np.uint8)
tm_var.attrs["GeoTransform"]=' '.join([str(x) for x in header_dict['transform']]).encode("utf-8")
tm_var.attrs["crs_wkt"]=header_dict['projection'].encode("utf-8")
tm_var.attrs["spatial_ref"]=header_dict['projection'].encode("utf-8")
elif file_type=="emit":
if glt_bool: # handled above
pass
else: # do not warp with GLT
loc_gp=nc4_obj.create_group("location")
nc4_obj.dimensions["downtrack"]=header_dict['lines'] #dim0
nc4_obj.dimensions["crosstrack"]=header_dict['samples'] #dim1
nc4_obj.dimensions["ortho_y"]=header_dict['lines_glt']
nc4_obj.dimensions["ortho_x"]=header_dict['samples_glt']
nc4_obj.attrs["geotransform"]=' '.join([str(x) for x in header_dict['transform']]).encode("utf-8")
nc4_obj.attrs["spatial_ref"]=header_dict['projection'].encode("utf-8")
nc4_obj.attrs["spatialResolution"]=np.sqrt(header_dict['transform'][1]**2+header_dict['transform'][2]**2)
tm_var = nc4_obj.create_variable("/projection",data=np.array([0]),dtype=np.uint8)
tm_var.attrs["GeoTransform"]=' '.join([str(x) for x in header_dict['transform']]).encode("utf-8")
tm_var.attrs["crs_wkt"]=header_dict['projection'].encode("utf-8")
tm_var.attrs["spatial_ref"]=header_dict['projection'].encode("utf-8")


@ -0,0 +1,27 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
The :mod:`hytools.masks` module includes functions for creating and applying image masks.
"""
from .masks import *
from .cloud import *
from .calc_apply import *


@ -0,0 +1,109 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
This module contains functions for generating the boolean masks used when applying image
corrections and models.
"""
from scipy.ndimage import binary_erosion
import numpy as np
from .cloud import zhai_cloud
def ndi(hy_obj,args):
mask = hy_obj.ndi(args['band_1'],args['band_2'])
mask = (mask >= float(args['min'])) & (mask <= float(args['max']))
return mask
def ancillary(hy_obj,args):
''' Mask ancillary datasets based off min and max threshold
'''
if args['name'] == 'cosine_i':
mask= hy_obj.cosine_i()
else:
mask = hy_obj.get_anc(args['name'])
mask = (mask >= float(args['min'])) & (mask <= float(args['max']))
return mask
def neon_edge(hy_obj,args):
'''
Mask artifacts in NEON images around edges.
'''
radius =args['radius']
y_grid, x_grid = np.ogrid[-radius: radius + 1, -radius: radius + 1]
window = (x_grid**2 + y_grid**2 <= radius**2).astype(np.float32)
buffer_edge = binary_erosion(hy_obj.mask['no_data'], window).astype(bool)
return buffer_edge
def kernel_finite(hy_obj,args):
'''
Mask pixels where the volume and geometric scattering kernels are finite
'''
k_vol = hy_obj.volume_kernel(hy_obj.brdf['volume'])
k_geom = hy_obj.geom_kernel(hy_obj.brdf['geometric'],
b_r=hy_obj.brdf["b/r"],
h_b =hy_obj.brdf["h/b"])
mask = np.isfinite(k_vol) & np.isfinite(k_geom)
return mask
def cloud(hy_obj,args):
if args['method'] == 'zhai_2018':
mask = ~zhai_cloud(hy_obj,args['cloud'],args['shadow'],
args['T1'], args['t2'], args['t3'],
args['t4'], args['T7'], args['T8'])
return mask
def water(hy_obj,args):
'''
Create water mask using NDWI threshold
'''
mask = hy_obj.ndi(args['band_1'],args['band_2'])
mask = mask <= float(args['threshold'])
mask = binary_erosion(mask)
return mask
def external(hy_obj,args):
'''Load a mask from an external dataset
'''
hy_obj.anc_path['external_mask'] = [args['files'][hy_obj.file_name], 0]
mask = hy_obj.get_anc('external_mask') == args['class']
return mask
def band(hy_obj,args):
'''
Create mask using band thresholds
'''
mask = hy_obj.get_wave(args['band'])
mask = (mask >= float(args['min'])) & (mask <= float(args['max']))
return mask

View File

@ -0,0 +1,119 @@
# -*- coding: utf-8 -*-
'''
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Cloud masks
'''
from scipy.ndimage import median_filter
import numpy as np
def zhai_cloud(hy_obj,cloud,shadow,T1=0.01,t2=.1,t3=.25,t4=.5,T7= 9,T8= 9):
'''This function replicates the method of Zhai et al. (2018) for detecting clouds and shadows in
multispectral and hyperspectral imagery but does not apply shadow spatial refinement.
Suggested values for coefficients and params:
T1 : 0.01, 0.1, 1, 10, 100
t2 : 1/10, 1/9, 1/8, 1/7, 1/6, 1/5, 1/4, 1/3, 1/2
t3 : 1/4, 1/3, 1/2, 2/3, 3/4
t4 : 1/2, 2/3, 3/4, 4/5, 5/6
T7 : 3, 5, 7, 9, 11
T8 : 3, 5, 7, 9, 11
Zhai, H., Zhang, H., Zhang, L., & Li, P. (2018).
Cloud/shadow detection based on spectral indices for multi/hyperspectral optical remote sensing imagery.
ISPRS journal of photogrammetry and remote sensing, 144, 235-253.
https://doi.org/10.1016/j.isprsjprs.2018.07.006
Args:
hy_obj : HyTools data container object:
cloud (bool): Detect clouds.
shadow (bool): Detect shadows.
T1 (float): Threshold T1.
t2 (float): Adjusting coefficient t2.
t3 (float): Adjusting coefficient t3.
t4 (float): Adjusting coefficient t4.
T7 (float): Parameter T7.
T8 (float): Parameter T8.
Returns:
mask (numpy.ndarray): Boolean array where detected clouds and/or shadows = True.
'''
blue= hy_obj.get_wave(440)
green= hy_obj.get_wave(550)
red= hy_obj.get_wave(660)
nir = hy_obj.get_wave(850)
#If SWIR not available
if hy_obj.wavelengths.max() < 1570:
# Zhai et al. 2018 Eq. 1a,b
CI_1 = (3*nir)/(blue+green+red)
CI_2 = (blue+green+red+nir)/4
# Zhai et al. 2018 Eq. 3
CSI = nir
else:
swir1 = hy_obj.get_wave(1570)
swir2= hy_obj.get_wave(2110)
# Zhai et al. 2018 Eq. 1a,b
CI_1 = (nir+ 2*swir1)/(blue+green+red)
CI_2 = (blue+green+red+nir+swir1+swir2)/6
# Zhai et al. 2018 Eq. 3
CSI = (nir + swir1)/2
# Zhai et al. 2018 Eq.5
T2 = np.mean(CI_2[hy_obj.mask['no_data']]) + t2*(np.max(CI_2[hy_obj.mask['no_data']])-np.mean(CI_2[hy_obj.mask['no_data']]))
# Zhai et al. 2018 Eq.6
T3 = np.min(CSI[hy_obj.mask['no_data']]) + t3*(np.mean(CSI[hy_obj.mask['no_data']])-np.min(CSI[hy_obj.mask['no_data']]))
# Zhai et al. 2018 Eq.7
T4 = np.min(blue[hy_obj.mask['no_data']]) + t4*(np.mean(blue[hy_obj.mask['no_data']])-np.min(blue[hy_obj.mask['no_data']]))
mask = np.zeros((hy_obj.lines,hy_obj.columns)).astype(bool)
if cloud:
clouds = (np.abs(CI_1) < T1) | (CI_2 > T2)
clouds = median_filter(clouds, T7)
mask[clouds] = True
if shadow:
shadows = (CSI<T3) & (blue<T4)
shadows = median_filter(shadows,T8)
mask[shadows] = True
return mask
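# Example usage (a sketch, not part of the original file): assuming `hy_obj`
# is an opened HyTools image with a valid 'no_data' mask, the call below flags
# cloud and shadow pixels as True, using mid-range values from the suggested
# parameter lists in the docstring above.
# cloud_shadow = zhai_cloud(hy_obj, cloud=True, shadow=True,
#                           T1=0.01, t2=1/8, t3=1/2, t4=2/3, T7=7, T8=7)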

View File

@ -0,0 +1,42 @@
# -*- coding: utf-8 -*-
'''
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
from .calc_apply import *
from .cloud import *
mask_dict = {'ndi' : ndi,
'neon_edge' : neon_edge,
'kernel_finite': kernel_finite,
'ancillary': ancillary,
'cloud': cloud,
'water': water,
'band': band,
'external' : external}
def mask_create(hy_obj,masks):
''' Combine a series of boolean masks using an
and operator
'''
mask = np.copy(hy_obj.mask['no_data'])
for mask_name,args in masks:
mask &= mask_dict[mask_name](hy_obj,args)
return mask
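# Example configuration (a sketch, not part of the original file): each entry
# pairs a key from mask_dict with the args dict its function expects, and the
# resulting masks are AND-ed together with the image's 'no_data' mask.
# Wavelengths and thresholds below are illustrative only; hy_obj is assumed
# to be an opened HyTools image.
# masks = [('ndi', {'band_1': 850, 'band_2': 660, 'min': 0.1, 'max': 1.0}),
#          ('band', {'band': 660, 'min': 0.0, 'max': 0.8}),
#          ('cloud', {'method': 'zhai_2018', 'cloud': True, 'shadow': True,
#                     'T1': 0.01, 't2': 1/8, 't3': 1/2, 't4': 2/3,
#                     'T7': 7, 'T8': 7})]
# combined = mask_create(hy_obj, masks)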

View File

@ -0,0 +1,23 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
from .misc import *
from .geog_utm import *
from .point import *

View File

@ -0,0 +1,258 @@
import numpy as np
from types import SimpleNamespace
NAD83_WGS84_dict = {
"a":6378137,
"b":6356752.3142,
"flat":1/298.257223563,
"a_dscp":"Equatorial Radius, meters",
"b_dscp":"Polar Radius, meters",
"flat_dscp":"Flattening (a-b)/a",
}
NAD83_WGS84_obj = SimpleNamespace(**NAD83_WGS84_dict)
class BasicMapObj:
def __init__(self,ellipsoid=NAD83_WGS84_obj,zone=None):
b=ellipsoid.b
a=ellipsoid.a
e=np.sqrt(1-b**2/a**2)
self.b=b
self.a=a
self.e=e
self.ep2=(e*a/b)**2
self.n=(a-b)/(a+b)
self.k0=0.9996
self.easting = 500000
self.zone = zone
#self.northing = None
if zone is None:
self.lon0=None
self.northing = None
else:
zone = str(zone)
if zone.startswith('326'): # WGS84 / UTM north EPSG codes
zone = zone[3:5] + 'N'
self.zone = zone
elif zone.startswith('327'): # WGS84 / UTM south EPSG codes
zone = zone[3:5] + 'S'
self.zone = zone
if str(zone)[-1:].isnumeric(): # default is N, not S
zone_number = int(zone)
self.northing = 0
else:
zone_number = int(zone[:-1])
if zone[-1] in ('N','n'):
self.northing = 0
elif zone[-1] in ('S','s'):
self.northing = 1e7
self.lon0 = (zone_number - 1)*6 -180 +3 # in Degrees
def calc_rho(self,lat_rad):
a=self.a
#b=self.b
e=self.e
return a*(1-e**2)/((1-e**2*(np.sin(lat_rad))**2)**(3/2))
def calc_nu(self,lat_rad):
a=self.a
e=self.e
return a / (1-(e*np.sin(lat_rad))**2)**0.5
def calc_p(self,lon_rad):
return lon_rad - np.radians(self.lon0)
def calc_S(self,lat_rad):
#S is the meridional arc
a=self.a
n=self.n
a_p = 1 * a * (1 - n + 5/4*(n**2-n**3) + 81/64*(n**4-n**5))
b_p = 3/2 * a * n * (1 - n + 7/8*(n**2-n**3) + 55/64*(n**4))
c_p = 15/16 * a * (n**2) * (1 - n + 3/4*(n**2-n**3))
d_p = 35/48 * a * (n**3) * (1 - n + 11/16*(n**2))
e_p = 315/512*a * (n**4) * (1 - n)
s = a_p*lat_rad \
- b_p*np.sin(2*lat_rad) \
+ c_p*np.sin(4*lat_rad) \
- d_p*np.sin(6*lat_rad) \
+ e_p*np.sin(8*lat_rad) \
return s
def calc_K3(self,nu,lat_rad):
k0 = self.k0
ep2 = self.ep2
k_3 = k0*nu*np.sin(lat_rad)* (np.cos(lat_rad))**3 / 24
k_3 *= 5 - (np.tan(lat_rad))**2 + 9 * ep2 * (np.cos(lat_rad))**2 + 4 * (ep2**2) * (np.cos(lat_rad))**4
return k_3
def calc_K5(self,nu,lat_rad):
k0 = self.k0
ep2 = self.ep2
k_5 = k0 * nu * (np.cos(lat_rad))**3 /6
k_5 *= 1 - (np.tan(lat_rad))**2 + ep2 * (np.cos(lat_rad))**2
return k_5
def estimate_lon0(self, lon_deg):
if self.lon0 is None:
major_lon = np.median(lon_deg)
central_meridians = np.arange(0,60,1)*6 - 180 +3
close_meridian = central_meridians[np.argmin(np.abs(major_lon-central_meridians))]
self.lon0 = close_meridian
self.zone = int((close_meridian-3 +180)/6)+1 #(zone_number - 1)*6 -180 +3
else:
#use lon0 during initialization
pass
def estimate_northing(self,lat_deg):
if self.northing is None:
major_lat = np.median(lat_deg)
if major_lat>0:
self.northing=0
else:
self.northing=1e7
def convert_xycoord(self,lat_deg,lon_deg):
lat_rad = np.radians(lat_deg)
lon_rad = np.radians(lon_deg)
self.estimate_lon0(lon_deg)
#print(self.lon0)
self.estimate_northing(lat_deg)
s = self.calc_S(lat_rad)
k0 = self.k0
nu = self.calc_nu(lat_rad)
p = self.calc_p(lon_rad)
k_1 = s*k0
k_2 = k0*nu*np.sin(2*lat_rad)/4
k_3 = self.calc_K3(nu,lat_rad)
y = k_1 + k_2 * (p**2) + k_3 * (p**4) + self.northing
k_5 = self.calc_K5(nu,lat_rad)
k_4 = k0 * nu * np.cos(lat_rad)
x = k_4*p + k_5*(p**3)+ self.easting
return x,y
########################
#https://gdal.org/en/stable/proj_list/transverse_mercator.html
# ref: Snyder J.P. (1987) Map projections a working manual, U.S. Geological Survey Professional Paper 1395, 1987. page.61
def convert_xycoord_gdal(self, lat_deg,lon_deg):
lat_rad = np.radians(lat_deg)
lon_rad = np.radians(lon_deg)
self.estimate_lon0(lon_deg)
self.estimate_northing(lat_deg)
k0 = self.k0
E = (self.e)**2
p = self.calc_p(lon_rad)
cos_lat = np.cos(lat_rad)
sin_lat = np.sin(lat_rad)
tan_lat = sin_lat / cos_lat
tan2_lat = tan_lat**2
e_p2 = self.ep2
nu = self.calc_nu(lat_rad)
#nu = self.a / np.sqrt(1 - E * sin_lat**2)
C = e_p2 * cos_lat**2
A = cos_lat * p
E2=E**2
E3=E**3
M1 = 1 - E / 4 - 3 * E2 / 64 - 5 * E3 / 256
M2 = 3 * E / 8 + 3 * E2 / 32 + 45 * E3 / 1024
M3 = 15 * E2 / 256 + 45 * E3 / 1024
M4 = 35 * E3 / 3072
M = self.a * (M1 * lat_rad -
M2 * np.sin(2 * lat_rad) +
M3 * np.sin(4 * lat_rad) -
M4 * np.sin(6 * lat_rad))
# Snyder (1987): M = a[(1 - e^2/4 - 3e^4/64 - 5e^6/256 - ...)*lat - (3e^2/8 + 3e^4/32 + 45e^6/1024 + ...)*sin(2*lat)
#                      + (15e^4/256 + 45e^6/1024 + ...)*sin(4*lat) - (35e^6/3072 + ...)*sin(6*lat) + ...]
x = k0 * nu * (A +
A**3 / 6 * (1 - tan2_lat + C) +
A**5 / 120 * (5 - 18 * tan2_lat + tan2_lat**2 + 72 * C - 58 * e_p2))+ self.easting
y = k0 * (M + nu * tan_lat * (A**2 / 2 +
A**4 / 24 * (5 - tan2_lat + 9 * C + 4 * C**2) +
A**6 / 720 * (61 - 58 * tan2_lat + tan2_lat**2 + 600 * C - 330 * e_p2)))+ self.northing
return x,y
########################
def calc_mu(self): #calc_e1_mu(self):
e=self.e
a=self.a
mu_recip = a * (1-0.25*(e**2) -3/64*(e**4) -5/256 * (e**6))
#e1 = (1 - eee) / (1 + eee) # same as self.n
return mu_recip
# ref : Snyder J.P. (1987) Map projections a working manual, U.S. Geological Survey Professional Paper 1395, 1987. page.63
# https://pubs.usgs.gov/pp/1395/report.pdf
def convert_latlon(self,x,y):
x_in = x - self.easting
y_in = y - self.northing
ep2 = self.ep2
a = self.a
e =self.e
k0 = self.k0
M = y_in / k0
mu_recip = self.calc_mu() #self.calc_e1_mu()
e1=self.n
mu = M / mu_recip
J1 = 3/2 * e1 - 27/32 * (e1**3)
J2 = 21/16*(e1**2) -55/32*(e1**4)
J3 = 151/96 * (e1**3)
J4 = 1097/512 * (e1**4)
fp = mu + J1*np.sin(2*mu) + J2*np.sin(4*mu) + J3*np.sin(6*mu) + J4*np.sin(8*mu)
C1 = ep2*(np.cos(fp))**2
T1 = (np.tan(fp))**2
R1 = a*(1-e**2) / (1-(e*np.sin(fp))**2)**1.5
N1 = a / (1-(e*np.sin(fp))**2)**0.5
D = x_in / N1 / k0
Q1 = N1*np.tan(fp)/R1
Q2 = D**2 / 2
Q3 = (5 + 3*T1 + 10*C1 - 4*C1**2 -9*ep2) * D**4 / 24
Q4 = (61 + 90*T1 + 298*C1 +45*T1**2 - 3*C1**2 -252*ep2) * D**6 /720
lat_out = fp - Q1*(Q2-Q3+Q4)
Q5 = D
Q6 = (1 + 2*T1 + C1) * D**3 / 6
Q7 = (5 - 2*C1 + 28*T1 -3*C1**2 + 8*ep2 +24*T1**2) * D**5 / 120
lon_out = np.radians(self.lon0) + (Q5-Q6+Q7) / np.cos(fp)
return np.degrees(lat_out), np.degrees(lon_out)
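if __name__ == '__main__':
    # Round-trip sanity check (not part of the original file): forward-project
    # a WGS84 coordinate to UTM zone 16N with both implementations, then invert
    # back with convert_latlon. The coordinate is an arbitrary example; the two
    # forward methods should agree closely and the round-trip residual should
    # be tiny.
    utm16 = BasicMapObj(zone='16N')
    lat, lon = np.array([43.07]), np.array([-89.40])
    x1, y1 = utm16.convert_xycoord(lat, lon)
    x2, y2 = utm16.convert_xycoord_gdal(lat, lon)
    lat_back, lon_back = utm16.convert_latlon(x1, y1)
    print(x1, y1)                               # easting, northing (m)
    print(np.abs(x1 - x2), np.abs(y1 - y2))     # difference between the two methods (m)
    print(lat_back - lat, lon_back - lon)       # round-trip residual (deg)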

View File

@ -0,0 +1,86 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
from itertools import tee
def progbar(curr, total, full_progbar = 100):
'''Display progress bar.
Gist from:
https://gist.github.com/marzukr/3ca9e0a1b5881597ce0bcb7fb0adc549
Args:
curr (int, float): Current task level.
total (int, float): Task level at completion.
full_progbar (int): Total width of the progress bar in characters. Defaults to 100.
Returns:
None.
'''
frac = curr/total
filled_progbar = round(frac*full_progbar)
print('\r', '#'*filled_progbar + '-'*(full_progbar-filled_progbar), '[{:>7.2%}]'.format(frac), end='')
def pairwise(iterable):
a, b = tee(iterable)
next(b, None)
return zip(a, b)
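# Quick demo (not part of the original file): pairwise yields overlapping
# consecutive pairs from an iterable.
# >>> list(pairwise([0, 25, 50, 75]))
# [(0, 25), (25, 50), (50, 75)]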
def set_brdf(hy_obj,brdf_dict):
hy_obj.brdf = brdf_dict
def set_topo(hy_obj,topo_dict):
hy_obj.topo = topo_dict
def update_brdf(hy_obj,args):
hy_obj.brdf[args['key']] = args['value']
def update_topo(hy_obj,args):
hy_obj.topo[args['key']] = args['value']
def set_glint(hy_obj,glint_dict):
# If the type is hedley, need to specify deep water area
if glint_dict['type'] == 'Hedley':
glint_dict['deep_water_sample'] = glint_dict['deep_water_sample'][hy_obj.file_name]
hy_obj.glint = glint_dict
def update_topo_group(subgroup_dict_in):
subgroup_dict = {}
group_tag_list=[]
for file_name in subgroup_dict_in.keys():
group_tag = subgroup_dict_in[file_name]
if group_tag in subgroup_dict:
subgroup_dict[group_tag]+=[file_name]
else:
subgroup_dict[group_tag]=[file_name]
group_tag_list+=[group_tag]
update_name_list=[]
for group_tag in subgroup_dict.keys():
update_name_list+=[subgroup_dict[group_tag]]
return update_name_list,group_tag_list
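# Example (not part of the original file): invert a {file_name: group_tag}
# mapping into per-group file lists plus the group tags, preserving
# first-seen order.
# >>> update_topo_group({'img1': 'A', 'img2': 'B', 'img3': 'A'})
# ([['img1', 'img3'], ['img2']], ['A', 'B'])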

View File

@ -0,0 +1,258 @@
import pandas as pd
from .geog_utm import *
def local_transform_all_point(mapobj, point_df, uid, xcoord, ycoord,point_epsg_code):
''' Create a dataframe with image georeferenced coordinates of all points of interest
'''
if point_epsg_code is None:
print("Default latlon")
re_df = pd.DataFrame(point_df[[uid,xcoord,ycoord]])
re_df.columns = [uid,'img_x','img_y']
return re_df
else:
ycoord_arr = point_df[ycoord]
xcoord_arr = point_df[xcoord]
lat_arr,lon_arr=mapobj.convert_latlon(xcoord_arr,ycoord_arr)
re_df = point_df[[uid, xcoord, ycoord]].join(pd.DataFrame(np.array((lat_arr,lon_arr)).T))
re_df.columns=[uid,'img_x','img_y','lat','lon']
return re_df
def get_neighbor(hyObj, point_coord_df, n_neighbor, uid, point_epsg_code,mapobj,use_glt_bool):
''' Create a dataframe with columns and lines of all image space neighbors of points of interest
'''
if use_glt_bool:
ul_x, new_x_resolution, new_x_rot, ul_y, new_y_rot, new_y_resolution = hyObj.glt_transform
print(hyObj.glt_projection,hyObj.glt_map_info)
else:
ul_x, new_x_resolution, new_x_rot, ul_y, new_y_rot, new_y_resolution = hyObj.transform
print(hyObj.projection,hyObj.map_info)
transform_matrix = np.array([[new_x_resolution, new_x_rot],[new_y_rot, new_y_resolution]])
if hyObj.map_info[0].startswith("Geographic"):
if mapobj.zone is None: # Not defined, assume to be geographic from csv / point_df
xy_coord_array = point_coord_df[['img_x','img_y']].values-np.array([[ul_x,ul_y]])
else: # assume point_df has both UTM and geographic coordinates
xy_coord_array = point_coord_df[['lon','lat']].values-np.array([[ul_x,ul_y]])
elif hyObj.map_info[0].startswith("UTM"):
if point_epsg_code is None: # latlon in point, but utm in image
img_zone = hyObj.map_info[7]+hyObj.map_info[8][0]
img_mapobj = BasicMapObj(zone=img_zone) #NAD83_WGS84_obj,
x_coord, y_coord = img_mapobj.convert_xycoord_gdal(point_coord_df['img_y'].values, point_coord_df['img_x'].values)
xy_coord_array = np.stack((x_coord, y_coord)).T -np.array([[ul_x,ul_y]])
else:
xy_coord_array = point_coord_df[['img_x','img_y']].values-np.array([[ul_x,ul_y]])
img_loc_array = (xy_coord_array@(np.linalg.inv(transform_matrix).T)).astype(np.int32) # zero-based
n_neighbor = max(0,n_neighbor)
if n_neighbor>=0:
if n_neighbor==0:
offset_arr_col = np.array([[1,0]])
offset_arr_row = np.array([[1,0]])
uid_list = np.repeat(point_coord_df[uid].values,1)
new_uid_list = np.tile([f'_{x}' for x in range(1)],img_loc_array.shape[0])
if n_neighbor== 4:
offset_arr_col = np.array([[1,0],
[1,0],
[1,-1],
[1,1],
[1,0]])
offset_arr_row = np.array([[1,0],
[1,-1],
[1,0],
[1,0],
[1,1]])
uid_list = np.repeat(point_coord_df[uid].values,5)
new_uid_list = np.tile([f'_{x}' for x in range(5)],img_loc_array.shape[0])
if n_neighbor== 8:
offset_arr_col = np.array([[1,0],
[1,0],
[1,-1],
[1,1],
[1,0],
[1,-1],
[1,1],
[1,-1],
[1,1]])
offset_arr_row = np.array([[1,0],
[1,-1],
[1,0],
[1,0],
[1,1],
[1,-1],
[1,-1],
[1,1],
[1,1]])
uid_list = np.repeat(point_coord_df[uid].values,9)
new_uid_list = np.tile([f'_{x}' for x in range(9)],img_loc_array.shape[0])
img_loc_array_with_nb_col = offset_arr_col@np.vstack([img_loc_array[:,0],np.ones(img_loc_array.shape[0])])
img_loc_array_with_nb_row = offset_arr_row@np.vstack([img_loc_array[:,1],np.ones(img_loc_array.shape[0])])
new_uid_list = uid_list+new_uid_list
img_loc_array_with_nb_col = img_loc_array_with_nb_col.T.ravel().astype(np.int32)
img_loc_array_with_nb_row = img_loc_array_with_nb_row.T.ravel().astype(np.int32) # zero-based
return_df = pd.DataFrame({'new_uid':new_uid_list,uid:uid_list,'img_col_glt':img_loc_array_with_nb_col,'img_row_glt':img_loc_array_with_nb_row})
print('use_glt_bool',use_glt_bool)
if use_glt_bool:
valid_mask = (img_loc_array_with_nb_col>=0) & (img_loc_array_with_nb_col< hyObj.columns_glt) & (img_loc_array_with_nb_row>=0) & (img_loc_array_with_nb_row< hyObj.lines_glt)
if valid_mask.sum()==0:
print("No valid GLT locations.")
return pd.DataFrame()
return_df = return_df[valid_mask]
post_glt_col_ind = hyObj.glt_x[(img_loc_array_with_nb_row[valid_mask],img_loc_array_with_nb_col[valid_mask])]-1
post_glt_row_ind = hyObj.glt_y[(img_loc_array_with_nb_row[valid_mask],img_loc_array_with_nb_col[valid_mask])]-1 # one-based to zero-based
return_df["img_col_raw"] = post_glt_col_ind.astype(np.int32)
return_df["img_row_raw"] = post_glt_row_ind.astype(np.int32) # zero-based
else:
return_df["img_col_raw"] = return_df['img_col_glt']
return_df["img_row_raw"] = return_df['img_row_glt']
# check whether points are within the boundary of the image or not
return_df = return_df[(return_df['img_col_raw']>=0) & (return_df['img_col_raw']< hyObj.columns) & (return_df['img_row_raw']>=0) & (return_df['img_row_raw']< hyObj.lines)]
return return_df
def add_df_lat_lon(point_coord_neighbor_df, hyObj, mapobj, offset=0.5, use_glt_bool = False):
''' Add LAT LON of the points in the dataframe
'''
if use_glt_bool:
ul_x, new_x_resolution, new_x_rot, ul_y, new_y_rot, new_y_resolution = hyObj.glt_transform
else:
ul_x, new_x_resolution, new_x_rot, ul_y, new_y_rot, new_y_resolution = hyObj.transform
transform_matrix = np.array([[new_x_resolution, new_x_rot],[new_y_rot, new_y_resolution]])
loc_array = point_coord_neighbor_df[['img_col_glt','img_row_glt']].values.transpose() # zero-based
img_coord_array = np.dot(transform_matrix,loc_array+offset)+np.array([[ul_x],[ul_y]])
if hyObj.map_info[0].startswith("Geographic"):
point_coord_neighbor_df['lat'] = img_coord_array[1,:]
point_coord_neighbor_df['lon'] = img_coord_array[0,:]
elif hyObj.map_info[0].startswith("UTM"):
lat_list,lon_list = mapobj.convert_latlon(img_coord_array[0,:],img_coord_array[1,:])
point_coord_neighbor_df['lat'] = lat_list
point_coord_neighbor_df['lon'] = lon_list
def subset_band_list(hyObj,spec_df,use_band_list, band_list):
# do not subset bands, do nothing
if not use_band_list:
return spec_df
# subset bands
else:
# user does not provide band list, use bad band list as default
if len(band_list)==0:
# no bad band list in the file, do nothing
if not isinstance(hyObj.bad_bands,np.ndarray):
return spec_df
# use bad band list
else:
return spec_df.iloc[:,hyObj.bad_bands]
# user provides band list
else:
return spec_df.iloc[:, band_list]
def local_point2spec(hyObj, point_csv, uid, xcoord, ycoord, point_epsg_code, n_neighbor=4, use_band_list=True, band_list=[],use_glt_bool=False):
"""Extract spectra with points in a CSV from the hyperspectral image
Parameters
----------
hyObj : HyTools file object
point_csv: str
full filename of the point CSV
uid: str
the user specified unique point ID in the CSV
xcoord: str
the column name in CSV for X coordinate of the points
ycoord: str
the column name in CSV for Y coordinate of the points
point_epsg_code: int
EPSG code for the projection of the points, XY coordinates are based on this projection
n_neighbor: int
default is 4, other options are 0, 8
how many neighbors in the image should be sampled from the center
use_band_list: boolean
default True; whether to use a subset of bands
band_list: list or numpy array
default is a blank list
if it is a list, it should be one like [5,6,7,8,9, 12]
if it is a numpy array, it should be the same size as hyObj.bad_bands with only True or False in the array
use_glt_bool: boolean
default False; whether to use geo-lookup table for pixel indexing
Returns
-------
point_coord_neighbor_df: pandas dataframe
it includes location and spectral information for all points from the CSV
"""
point_df = pd.read_csv(point_csv, sep=',')
if point_epsg_code is None:
if hyObj.map_info[0].startswith("UTM"):
img_zone = hyObj.map_info[7]+hyObj.map_info[8][0]
parameter_obj = BasicMapObj(zone=img_zone) #NAD83_WGS84_obj,
else:
parameter_obj = BasicMapObj() #NAD83_WGS84_obj
else:
parameter_obj = BasicMapObj(zone=point_epsg_code) #NAD83_WGS84_obj,
# create a dataframe with image georeferenced coordinates of all points of interest
point_coord_df = local_transform_all_point(parameter_obj, point_df, uid, xcoord, ycoord,point_epsg_code)
# create a dataframe with columns and lines of all image space neighbors of points of interest
point_coord_neighbor_df = get_neighbor(hyObj, point_coord_df, n_neighbor, uid,point_epsg_code,parameter_obj,use_glt_bool)
if point_coord_neighbor_df.shape[0]==0:
print("0 point within boundary!\n\n")
return None
else:
# add LAT LON of the points in the dataframe
add_df_lat_lon(point_coord_neighbor_df, hyObj,parameter_obj,use_glt_bool=use_glt_bool)
spec_data = hyObj.get_pixels(point_coord_neighbor_df['img_row_raw'].values,point_coord_neighbor_df['img_col_raw'].values) # zero-based
# determine the column names of the spectra dataframe based on wavelengths
if hyObj.wavelength_units.lower()[:4]=='micr':
new_band_name = ['B{:0.3f}'.format(x) for x in hyObj.wavelengths]
elif hyObj.wavelength_units.lower()[:4]=='nano' :
new_band_name = ['B{:04d}'.format(int(x)) for x in hyObj.wavelengths]
else:
new_band_name = ['B{:d}'.format(x+1) for x in range(hyObj.bands)]
spec_df = pd.DataFrame(spec_data, columns=new_band_name)
# perform the subsetting of the columns in the dataframe according to the band_list or hyObj.bad_bands
spec_df = subset_band_list(hyObj,spec_df,use_band_list, band_list)
point_coord_neighbor_df=point_coord_neighbor_df.reset_index(drop=True)
point_coord_neighbor_df = pd.concat([point_coord_neighbor_df,spec_df], axis=1, join='inner')
return point_coord_neighbor_df
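# Example usage (a sketch, not part of the original file): the file name and
# column names below are hypothetical. With point_epsg_code set to a UTM EPSG
# code the XY columns are treated as projected coordinates; n_neighbor=4
# samples the center pixel plus its 4-connected neighbors.
# spec_df = local_point2spec(hy_obj, 'field_plots.csv', uid='plot_id',
#                            xcoord='x_utm', ycoord='y_utm',
#                            point_epsg_code=32616, n_neighbor=4,
#                            use_band_list=True, band_list=[],
#                            use_glt_bool=False)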

View File

@ -0,0 +1,22 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
The :mod:`hytools.plotting` module includes functions for plotting.
"""
from .brdf_diagnostics import *

View File

@ -0,0 +1,143 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Plotting functions for BRDF
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
def universal_diagno_plot(hy_obj,config_dict):
''' Generate a diagnostic plot of BRDF correction results.
'''
#Flip sign of zenith angle at minimum
sensor_zn = hy_obj.get_anc('sensor_zn',radians =False)
sensor_zn[~hy_obj.mask['no_data']] = np.nan
for i,line in enumerate(sensor_zn):
line[:np.nanargmin(line)] *= -1
sensor_zn[i] = line
sensor_zn = (sensor_zn[hy_obj.mask['calc_brdf']]//2)*2
diagno_df = pd.DataFrame()
diagno_df['sensor_zn'] =sensor_zn
bands = [hy_obj.wave_to_band(wave) for wave in config_dict['brdf']['diagnostic_waves']]
for band_num in bands:
band = hy_obj.get_band(band_num,mask='calc_brdf')
diagno_df['uncorr_%s' % band_num] = band
band = hy_obj.get_band(band_num,
corrections = hy_obj.corrections + ['brdf'],
mask='calc_brdf')
diagno_df['corr_%s' % band_num] = band
fvol, fgeo, fiso = hy_obj.brdf['coeffs'][band_num]
brdf = fvol*hy_obj.ancillary['k_vol']
brdf += fgeo*hy_obj.ancillary['k_geom']
brdf+=fiso
brdf = brdf[hy_obj.mask['calc_brdf']]
diagno_df['brdf_%s' % band_num] = brdf
# Average every 2 degrees of zenith angle
diagno_df= diagno_df.groupby(by= 'sensor_zn').mean()
fig = plt.figure(figsize= (8,6))
fig.suptitle(hy_obj.base_name)
for a,band_num in enumerate(bands,start=1):
ax = fig.add_subplot(2,2,a)
ax.plot(diagno_df.index,diagno_df['brdf_%s' % band_num],c='k',ls ='--')
ax.scatter(diagno_df.index,diagno_df['uncorr_%s' % band_num],marker ='o',fc='w',ec='k')
ax.scatter(diagno_df.index,diagno_df['corr_%s' % band_num],marker ='o',fc='k',ec='k')
ax.text(.85,.9, "%s nm" % int(hy_obj.wavelengths[band_num]), transform=ax.transAxes,
ha = 'center', fontsize = 12)
if a > 2:
ax.set_xlabel('View zenith angle')
if a in [1,3]:
ax.set_ylabel('Reflectance')
#Create legend
custom_points = []
custom_points.append(Line2D([0],[0], marker = 'o',label='Uncorrected',
markerfacecolor='w', markersize=10,lw=0,markeredgecolor='k'))
custom_points.append(Line2D([0],[0], marker = 'o',label='Corrected',
markerfacecolor='k', markersize=10,lw=0,markeredgecolor='k'))
custom_points.append(Line2D([0],[1],label='Modeled BRDF',c='k', ls ='--'))
ax.legend(handles=custom_points, loc='center',frameon=False,
bbox_to_anchor=(-.15, -.3), ncol =3,columnspacing = 1.5,labelspacing=.25)
plt.savefig("%s%s_brdf_plot.png" % (config_dict['export']['output_dir'],hy_obj.base_name),
bbox_inches = 'tight')
plt.close()
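# Example configuration (a sketch, not part of the original file): only the
# config_dict keys read above are shown; the wavelengths and output path are
# illustrative, and hy_obj is assumed to be a BRDF-corrected HyTools image.
# config_dict = {'brdf': {'diagnostic_waves': [550, 850, 1650, 2200]},
#                'export': {'output_dir': '/data/brdf_plots/'}}
# universal_diagno_plot(hy_obj, config_dict)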
def flex_diagno_plot(hy_obj,config_dict):
''' Generate a diagnostic plot of BRDF correction results.
'''
#Flip sign of zenith angle at minimum
sensor_zn = hy_obj.get_anc('sensor_zn',radians =False)
sensor_zn[~hy_obj.mask['no_data']] = np.nan
for i,line in enumerate(sensor_zn):
line[:np.nanargmin(line)] *= -1
sensor_zn[i] = line
sensor_zn = (sensor_zn[hy_obj.mask['calc_brdf']]//2)*2
diagno_df = pd.DataFrame()
diagno_df['sensor_zn'] =sensor_zn
bands = [hy_obj.wave_to_band(wave) for wave in config_dict['brdf']['diagnostic_waves']]
for band_num in bands:
band = hy_obj.get_band(band_num,mask='calc_brdf')
diagno_df['uncorr_%s' % band_num] = band
band = hy_obj.get_band(band_num,
corrections = hy_obj.corrections + ['brdf'],
mask='calc_brdf')
diagno_df['corr_%s' % band_num] = band
# Average every 2 degrees of zenith angle
diagno_df= diagno_df.groupby(by= 'sensor_zn').mean()
fig = plt.figure(figsize= (8,6))
fig.suptitle(hy_obj.base_name)
for a,band_num in enumerate(bands,start=1):
ax = fig.add_subplot(2,2,a)
ax.scatter(diagno_df.index,diagno_df['uncorr_%s' % band_num],marker ='o',fc='w',ec='k')
ax.scatter(diagno_df.index,diagno_df['corr_%s' % band_num],marker ='o',fc='k',ec='k')
ax.text(.85,.9, "%s nm" % int(hy_obj.wavelengths[band_num]), transform=ax.transAxes,
ha = 'center', fontsize = 12)
if a > 2:
ax.set_xlabel('View zenith angle')
if a in [1,3]:
ax.set_ylabel('Reflectance')
#Create legend
custom_points = []
custom_points.append(Line2D([0],[0], marker = 'o',label='Uncorrected',
markerfacecolor='w', markersize=10,lw=0,markeredgecolor='k'))
custom_points.append(Line2D([0],[0], marker = 'o',label='Corrected',
markerfacecolor='k', markersize=10,lw=0,markeredgecolor='k'))
ax.legend(handles=custom_points, loc='center',frameon=False,
bbox_to_anchor=(-.15, -.3), ncol =2,columnspacing = 1.5,labelspacing=.25)
plt.savefig("%s%s_flexbrdf_plot.png" % (config_dict['export']['output_dir'],hy_obj.base_name),
bbox_inches = 'tight')
plt.close()

View File

@ -0,0 +1,27 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
The :mod:`hytools.topo` module includes functions for topographic correction.
"""
from .topo import *
from .scsc import *
from .c import *
from .cosine import *
from .scs import *
from .modminn import *

222
Flexbrdf/hytools/topo/c.py Normal file
View File

@ -0,0 +1,222 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
This module contains functions to apply the C topographic correction,
with the c parameter estimated as described in the following paper:
Scott A. Soenen, Derek R. Peddle, & Craig A. Coburn (2005).
SCS+C: A Modified Sun-Canopy-Sensor Topographic Correction in Forested Terrain.
IEEE Transactions on Geoscience and Remote Sensing, 43(9), 2148-2159.
https://doi.org/10.1109/TGRS.2005.852480
Topographic correction consists of the following steps:
1. calculate incidence angle if it is not provided
2. estimate C-Correction value
3. apply C-Correction value to the image data
TODO: Rationale/ examples for using different fitting algorithms
"""
import numpy as np
from scipy.optimize import nnls
import ray
from ..io.envi import WriteENVI
from ..misc import update_topo, progbar
def calc_c(data,cosine_i,fit_type = 'ols'):
"""Calculate the topographic correction coefficient (c) for the input data.
Used for both the C and SCS+C topographic corrections.
Args:
data (numpy.ndarray): Image array.
cosine_i (numpy.ndarray): Cosine i array.
fit_type (str): Linear model fitting type.
Returns:
numpy.ndarray: Topographic correction coefficient.
"""
# Reshape for regression
cosine_i = np.expand_dims(cosine_i,axis=1)
if cosine_i.shape[0]==0:
return 100000.0
X = np.concatenate([cosine_i,np.ones(cosine_i.shape)],axis=1)
# Eq 7. Soenen et al. 2005
if fit_type == 'ols':
slope, intercept = np.linalg.lstsq(X, data,rcond=-1)[0].flatten()
elif fit_type == 'nnls':
slope, intercept = nnls(X, data)[0].flatten()
# Eq 8. Soenen et al. 2005
c= intercept/slope
# Set a large number if slope is zero
if not np.isfinite(c):
c = 100000.0
return c
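# Synthetic sanity check (not part of the original file): fabricate a band
# that depends linearly on cos(i) and confirm calc_c recovers the ratio
# intercept/slope from Eq 8. Expected value here is ~0.1/0.4 = 0.25.
# rng = np.random.default_rng(0)
# cos_i = rng.uniform(0.3, 1.0, 500)
# band = 0.4*cos_i + 0.1 + rng.normal(0, 0.005, 500)
# print(calc_c(band, cos_i, fit_type='ols'))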
def calc_c_coeffs(hy_obj,topo_dict):
'''Calculate C-correction coefficients for all good bands.
Args:
hy_obj: HyTools data container object.
topo_dict (dict): Topographic correction parameters.
Returns:
None.
'''
topo_dict['coeffs'] = {}
cosine_i = hy_obj.cosine_i()
for band_num,band in enumerate(hy_obj.bad_bands):
if ~band:
band = hy_obj.get_band(band_num,mask='calc_topo')
topo_dict['coeffs'][band_num] = calc_c(band,cosine_i[hy_obj.mask['calc_topo']],
fit_type=topo_dict['c_fit_type'])
hy_obj.topo = topo_dict
def get_band_samples(hy_obj,args):
band = hy_obj.get_band(args['band_num'],
corrections = hy_obj.corrections)
return band[hy_obj.ancillary['sample_mask'] !=0]
def get_cosine_i_samples(hy_obj):
'''Calculate and sample cosine_i
'''
cosine_i=hy_obj.cosine_i()
cosine_i = cosine_i[hy_obj.ancillary['sample_mask'] !=0]
return cosine_i
def calc_c_coeffs_group(actors,topo_dict,group_tag):
cosine_i_samples = ray.get([a.do.remote(get_cosine_i_samples) for a in actors])
cosine_i_samples = np.concatenate(cosine_i_samples)
print(f'Topo Subgroup {group_tag}')
bad_bands = ray.get(actors[0].do.remote(lambda x: x.bad_bands))
coeffs = {}
for band_num,band in enumerate(bad_bands):
if ~band:
coeffs[band_num] = {}
band_samples = ray.get([a.do.remote(get_band_samples,
{'band_num':band_num}) for a in actors])
band_samples = np.concatenate(band_samples)
coeffs[band_num] = calc_c(band_samples,cosine_i_samples,fit_type=topo_dict['c_fit_type'])
progbar(np.sum(~bad_bands[:band_num+1]),np.sum(~bad_bands))
print('\n')
#Update TOPO coeffs
_ = ray.get([a.do.remote(update_topo,{'key':'coeffs',
'value': coeffs}) for a in actors])
_ = ray.get([a.do.remote(update_topo,{'key':'subgroup',
'value': group_tag}) for a in actors])
def apply_c(hy_obj,data,dimension,index):
''' Apply C correction to a slice of the data
Args:
hy_obj: HyTools data container object.
data (numpy.ndarray): Data slice.
dimension (str): One of 'line', 'column', 'band', 'chunk' or 'pixels'.
index: Index of the slice (int or tuple, depending on dimension).
Returns:
numpy.ndarray: Corrected data slice.
'''
if 'cos_sz' not in hy_obj.ancillary.keys():
cos_sz = np.cos(hy_obj.get_anc('solar_zn'))
hy_obj.ancillary['cos_sz'] = cos_sz
if 'cosine_i' not in hy_obj.ancillary.keys():
cosine_i = hy_obj.cosine_i()
hy_obj.ancillary['cosine_i'] = cosine_i
C_bands = list(hy_obj.topo['coeffs'].keys())
C = np.array(list(hy_obj.topo['coeffs'].values()))
#Convert to float
data = data.astype(np.float32)
if dimension == 'line':
#index= 3000
#data = hy_obj.get_line(3000)
data = data[:,C_bands]
mask = hy_obj.mask['apply_topo'][index,:]
cosine_i = hy_obj.ancillary['cosine_i'][[index],:].T
cos_sz = hy_obj.ancillary['cos_sz'][[index],:].T
correction_factor = (cos_sz + C)/(cosine_i + C)
data[mask,:] = data[mask,:]*correction_factor[mask,:]
elif dimension == 'column':
# index= 300
# data = hy_obj.get_column(index)
data = data[:,C_bands]
mask = hy_obj.mask['apply_topo'][:,index]
cosine_i = hy_obj.ancillary['cosine_i'][:,[index]]
cos_sz = hy_obj.ancillary['cos_sz'][:,[index]]
correction_factor = (cos_sz + C)/(cosine_i + C)
data[mask,:] = data[mask,:]*correction_factor[mask,:]
elif dimension == 'band':
#index= 8
#data = hy_obj.get_band(index)
C = hy_obj.topo['coeffs'][index]
correction_factor = (hy_obj.ancillary['cos_sz'] + C)/(hy_obj.ancillary['cosine_i'] + C)
data[hy_obj.mask['apply_topo']] = data[hy_obj.mask['apply_topo']] * correction_factor[hy_obj.mask['apply_topo']]
elif dimension == 'chunk':
# index = 200,501,3000,3501
x1,x2,y1,y2 = index
# data = hy_obj.get_chunk(x1,x2,y1,y2)
data = data[:,:,C_bands]
mask = hy_obj.mask['apply_topo'][y1:y2,x1:x2]
cosine_i = hy_obj.ancillary['cosine_i'][y1:y2,x1:x2][:,:,np.newaxis]
cos_sz = hy_obj.ancillary['cos_sz'][y1:y2,x1:x2][:,:,np.newaxis]
correction_factor = (cos_sz + C)/(cosine_i + C)
data[mask,:] = data[mask,:]*correction_factor[mask,:]
elif dimension == 'pixels':
# index = [[2000,2001],[200,501]]
y,x = index
# data = hy_obj.get_pixels(y,x)
data = data[:,C_bands]
mask = hy_obj.mask['apply_topo'][y,x]
cosine_i = hy_obj.ancillary['cosine_i'][[y],[x]].T
cos_sz = hy_obj.ancillary['cos_sz'][[y],[x]].T
correction_factor = (cos_sz + C)/(cosine_i + C)
data[mask,:] = data[mask,:]*correction_factor[mask,:]
return data

View File

@ -0,0 +1,99 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
This module contains functions to apply the cosine topographic correction,
one of the methods evaluated in the following paper:
Richter, R., Kellenberger, T., & Kaufmann, H. (2009).
Comparison of topographic correction methods.
Remote Sensing, 1(3), 184-196.
https://doi.org/10.3390/rs1030184
Topographic correction consists of the following steps:
1. calculate the cosine of the incidence angle (i)
2. compute the correction factor cos(solar_zn)/cos(i)
3. apply the correction factor to the image data
"""
import numpy as np
def calc_cosine_coeffs(hy_obj,topo_dict):
'''Calculate cosine correction factors.
Args:
hy_obj: HyTools data container object.
topo_dict (dict): Topographic correction parameters.
Returns:
None.
'''
hy_obj.topo = topo_dict
hy_obj.anc_data = {}
cos_i = hy_obj.cosine_i()
cos_solar_zn = np.cos(hy_obj.get_anc('solar_zn'))
c_factor = cos_solar_zn/cos_i
c_factor[~hy_obj.mask['no_data']] = 1.
hy_obj.ancillary['cosine_factor'] =c_factor
def apply_cosine(hy_obj,data,dimension,index):
''' Apply cosine correction to a slice of the data
Args:
hy_obj: HyTools data container object.
data (numpy.ndarray): Data slice.
dimension (str): One of 'line', 'column', 'band', 'chunk' or 'pixels'.
index: Index of the slice (int or tuple, depending on dimension).
Returns:
numpy.ndarray: Corrected data slice.
'''
if 'cosine_factor' not in hy_obj.ancillary.keys():
calc_cosine_coeffs(hy_obj,hy_obj.topo)
#Convert to float
data = data.astype(np.float32)
if dimension == 'line':
#index= 3000
#data = hy_obj.get_line(3000)
data = data*hy_obj.ancillary['cosine_factor'][np.newaxis,index,:]
elif dimension == 'column':
#index= 300
#data = hy_obj.get_column(index)
data = data*hy_obj.ancillary['cosine_factor'][:,index,np.newaxis]
elif dimension == 'band':
#index= 8
#data = hy_obj.get_band(index)
data = data * hy_obj.ancillary['cosine_factor']
elif dimension == 'chunk':
#index = 200,501,3000,3501
x1,x2,y1,y2 = index
#data = hy_obj.get_chunk(x1,x2,y1,y2)
data = data*hy_obj.ancillary['cosine_factor'][y1:y2,x1:x2][:,:,np.newaxis]
elif dimension == 'pixels':
#index = [[2000,2001],[200,501]]
y,x = index
#data = hy_obj.get_pixels(y,x)
data = data*hy_obj.ancillary['cosine_factor'][y,x][:, np.newaxis]
return data

View File

@ -0,0 +1,140 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
This module contains functions to apply the modified Minnaert topographic
correction described in the following paper:
Richter, R., Kellenberger, T., & Kaufmann, H. (2009).
Comparison of topographic correction methods.
Remote Sensing, 1(3), 184-196.
https://doi.org/10.3390/rs1030184
Topographic correction consists of the following steps:
1. calculate the incidence angle (i) and a threshold solar zenith angle
2. split pixels into vegetation and non-vegetation using an NDVI mask
3. compute bounded, wavelength-dependent Minnaert correction factors
4. apply the correction factors to the image data
"""
import numpy as np
def calc_modminn_coeffs(hy_obj,topo_dict):
'''Calculate modified Minnaert correction factors.
Args:
hy_obj: HyTools data container object.
topo_dict (dict): Topographic correction parameters.
Returns:
None.
'''
hy_obj.topo =topo_dict
cos_i = hy_obj.cosine_i()
i = np.rad2deg(np.arccos(cos_i))
solar_zn = hy_obj.get_anc('solar_zn',radians=False)
solar_zn_t = np.zeros(solar_zn.shape)
solar_zn_t[solar_zn < 45] = solar_zn[solar_zn < 45] +20
solar_zn_t[(solar_zn >= 45) & (solar_zn <= 55)] = solar_zn[(solar_zn >= 45) & (solar_zn <= 55)] +15
solar_zn_t[solar_zn > 55] = solar_zn[solar_zn > 55] +10
#Create NDVI mask to separate vegetation
ir = hy_obj.get_wave(850)
red = hy_obj.get_wave(660)
ndvi = (ir-red)/(ir+red)
veg_mask = ndvi > 0.2
c_factors = np.ones((2,hy_obj.lines,hy_obj.columns))
c_factors[:] = cos_i/np.cos(np.radians(solar_zn_t))
# Non vegetation correction factor
c_factors[0][~veg_mask] = c_factors[0][~veg_mask]**(1/2)
c_factors[1][~veg_mask] = c_factors[1][~veg_mask]**(1/2)
# Vegetation correction factor
c_factors[0][veg_mask] = c_factors[0][veg_mask]**(3/4)
c_factors[1][veg_mask] = c_factors[1][veg_mask]**(1/3)
#Adjust correction factors to prevent too strong correction
c_factors[c_factors <.25] = .25
c_factors[c_factors > 1] = 1
#Correct pixels only where i > threshold
c_factors[0][i < solar_zn_t] = 1
c_factors[1][i < solar_zn_t] = 1
c_factors[0][ir == hy_obj.no_data] = 1
c_factors[1][ir == hy_obj.no_data] = 1
hy_obj.ancillary['mm_c_factor'] = c_factors
def apply_modminn(hy_obj,data,dimension,index):
''' Apply modified Minnaert correction to a slice of the data
Args:
hy_obj: HyTools data container object.
data (numpy.ndarray): Data slice.
dimension (str): One of 'line', 'column', 'band', 'chunk' or 'pixels'.
index: Index of the slice (int or tuple, depending on dimension).
Returns:
numpy.ndarray: Corrected data slice.
'''
if 'mm_c_factor' not in hy_obj.ancillary.keys():
calc_modminn_coeffs(hy_obj,hy_obj.topo)
#Convert to float
data = data.astype(np.float32)
wave_mask =hy_obj.wavelengths >=720
if dimension == 'line':
#index= 3000
#data = hy_obj.get_line(3000)
data[:,wave_mask] = data[:,wave_mask]*hy_obj.ancillary['mm_c_factor'][1,index,:][:,np.newaxis]
data[:,~wave_mask] = data[:,~wave_mask]*hy_obj.ancillary['mm_c_factor'][0,index,:][:,np.newaxis]
elif dimension == 'column':
#index= 300
#data = hy_obj.get_column(index)
data[:,wave_mask] = data[:,wave_mask]*hy_obj.ancillary['mm_c_factor'][1,:,index][:,np.newaxis]
data[:,~wave_mask] = data[:,~wave_mask]*hy_obj.ancillary['mm_c_factor'][0,:,index][:,np.newaxis]
elif dimension == 'band':
#index= 50
#data = hy_obj.get_band(index)
if hy_obj.wavelengths[index] >=720:
cf_index = 1
else:
cf_index = 0
data = data * hy_obj.ancillary['mm_c_factor'][cf_index]
elif dimension == 'chunk':
#index = 200,501,3000,3501
x1,x2,y1,y2 = index
#data = hy_obj.get_chunk(x1,x2,y1,y2)
data[:,:,wave_mask] = data[:,:,wave_mask]*hy_obj.ancillary['mm_c_factor'][1,y1:y2,x1:x2][:,:,np.newaxis]
data[:,:,~wave_mask] = data[:,:,~wave_mask]*hy_obj.ancillary['mm_c_factor'][0,y1:y2,x1:x2][:,:,np.newaxis]
elif dimension == 'pixels':
#index = [[2000,2001],[200,501]]
y,x = index
#data = hy_obj.get_pixels(y,x)
data[:,wave_mask] = data[:,wave_mask]*hy_obj.ancillary['mm_c_factor'][1,y,x][:, np.newaxis]
data[:,~wave_mask] = data[:,~wave_mask]*hy_obj.ancillary['mm_c_factor'][0,y,x][:, np.newaxis]
return data

View File

@ -0,0 +1,100 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
This module contains functions to apply the SCS (sun-canopy-sensor)
topographic correction, one of the methods evaluated in the following paper:
Richter, R., Kellenberger, T., & Kaufmann, H. (2009).
Comparison of topographic correction methods.
Remote Sensing, 1(3), 184-196.
https://doi.org/10.3390/rs1030184
Topographic correction consists of the following steps:
1. calculate the cosine of the incidence angle (i)
2. compute the correction factor cos(slope)*cos(solar_zn)/cos(i)
3. apply the correction factor to the image data
"""
import numpy as np
def calc_scs_coeffs(hy_obj,topo_dict):
'''Calculate SCS correction factors.
Args:
hy_obj: HyTools data container object.
topo_dict (dict): Topographic correction parameters.
Returns:
None.
'''
hy_obj.topo = topo_dict
hy_obj.anc_data = {}
cos_i = hy_obj.cosine_i()
cos_solar_zn = np.cos(hy_obj.get_anc('solar_zn'))
cos_slope = np.cos(hy_obj.get_anc('slope'))
c_factor = (cos_slope *cos_solar_zn)/cos_i
c_factor[~hy_obj.mask['no_data']] = 1.
hy_obj.ancillary['scs_factor'] =c_factor
def apply_scs(hy_obj,data,dimension,index):
''' Apply SCS correction to a slice of the data
Args:
hy_obj: HyTools data container object.
data (numpy.ndarray): Data slice.
dimension (str): One of 'line', 'column', 'band', 'chunk' or 'pixels'.
index: Index of the slice (int or tuple, depending on dimension).
Returns:
numpy.ndarray: Corrected data slice.
'''
if 'scs_factor' not in hy_obj.ancillary.keys():
calc_scs_coeffs(hy_obj,hy_obj.topo)
#Convert to float
data = data.astype(np.float32)
if dimension == 'line':
#index= 3000
#data = hy_obj.get_line(3000)
data = data*hy_obj.ancillary['scs_factor'][np.newaxis,index,:]
elif dimension == 'column':
#index= 300
#data = hy_obj.get_column(index)
data = data*hy_obj.ancillary['scs_factor'][:,index,np.newaxis]
elif dimension == 'band':
#index= 8
#data = hy_obj.get_band(index)
data = data * hy_obj.ancillary['scs_factor']
elif dimension == 'chunk':
#index = 200,501,3000,3501
x1,x2,y1,y2 = index
#data = hy_obj.get_chunk(x1,x2,y1,y2)
data = data*hy_obj.ancillary['scs_factor'][y1:y2,x1:x2][:,:,np.newaxis]
elif dimension == 'pixels':
#index = [[2000,2001],[200,501]]
y,x = index
#data = hy_obj.get_pixels(y,x)
data = data*hy_obj.ancillary['scs_factor'][y,x][:, np.newaxis]
return data

View File

@ -0,0 +1,204 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
This module contains functions to apply a topographic correction (SCS+C)
described in the following papers:
Scott A. Soenen, Derek R. Peddle, & Craig A. Coburn (2005).
SCS+C: A Modified Sun-Canopy-Sensor Topographic Correction in Forested Terrain.
IEEE Transactions on Geoscience and Remote Sensing, 43(9), 2148-2159.
https://doi.org/10.1109/TGRS.2005.852480
Topographic correction consists of the following steps:
1. calculate incidence angle if it is not provided
2. estimate C-Correction value
3. apply C-Correction value to the image data
TODO: Rationale/ examples for using different fitting algorithms
"""
import numpy as np
from .c import calc_c, get_band_samples, get_cosine_i_samples
import ray
from ..misc import update_topo
from ..misc import progbar
def calc_scsc_c1(solar_zn,slope):
""" Calculate c1
All input geometry units must be in radians.
Args:
solar_zn (numpy.ndarray): Solar zenith angle.
slope (numpy.ndarray): Ground slope.
Returns:
numpy.ndarray: C1.
"""
# Eq 11. Soenen et al. 2005
scsc_c1 = np.cos(solar_zn) * np.cos(slope)
return scsc_c1
def calc_scsc_coeffs(hy_obj,topo_dict):
'''Calculate SCS+C correction coefficients for all good bands.
Args:
hy_obj: HyTools data container object.
topo_dict (dict): Topographic correction parameters.
Returns:
None.
'''
topo_dict['coeffs'] = {}
cosine_i = hy_obj.cosine_i()
for band_num,band in enumerate(hy_obj.bad_bands):
if ~band:
band = hy_obj.get_band(band_num,mask='calc_topo')
topo_dict['coeffs'][band_num] = calc_c(band,cosine_i[hy_obj.mask['calc_topo']],
fit_type=topo_dict['c_fit_type'])
hy_obj.topo = topo_dict
def calc_scsc_coeffs_group(actors,topo_dict,group_tag):
cosine_i_samples = ray.get([a.do.remote(get_cosine_i_samples) for a in actors])
cosine_i_samples = np.concatenate(cosine_i_samples)
print(f'Topo Subgroup {group_tag}')
bad_bands = ray.get(actors[0].do.remote(lambda x: x.bad_bands))
coeffs = {}
for band_num,band in enumerate(bad_bands):
if ~band:
coeffs[band_num] = {}
band_samples = ray.get([a.do.remote(get_band_samples,
{'band_num':band_num}) for a in actors])
band_samples = np.concatenate(band_samples)
coeffs[band_num] = calc_c(band_samples,cosine_i_samples,fit_type=topo_dict['c_fit_type'])
progbar(np.sum(~bad_bands[:band_num+1]),np.sum(~bad_bands))
print('\n')
#Update TOPO coeffs
_ = ray.get([a.do.remote(update_topo,{'key':'coeffs',
'value': coeffs}) for a in actors])
_ = ray.get([a.do.remote(update_topo,{'key':'subgroup',
'value': group_tag}) for a in actors])
def apply_scsc_band(hy_obj,band,index):
'''Apply SCS+C correction to a single band.
Args:
hy_obj: HyTools data container object.
band (numpy.ndarray): Band array.
index (int): Band index.
Returns:
numpy.ndarray: Corrected band.
'''
c1 = np.cos(hy_obj.get_anc('slope')) * np.cos(hy_obj.get_anc('solar_zn'))
cosine_i = hy_obj.cosine_i()
C = hy_obj.topo['coeffs'][index]
correction_factor = (c1 + C)/(cosine_i + C)
band[hy_obj.mask['calc_topo']] = band[hy_obj.mask['calc_topo']] * correction_factor[hy_obj.mask['calc_topo']]
band[~hy_obj.mask['no_data']] = hy_obj.no_data
return band
def apply_scsc(hy_obj,data,dimension,index):
''' Apply SCS+C correction to a slice of the data
Args:
hy_obj: HyTools data container object.
data (numpy.ndarray): Data slice.
dimension (str): One of 'line', 'column', 'band', 'chunk' or 'pixels'.
index: Index of the slice (int or tuple, depending on dimension).
Returns:
numpy.ndarray: Corrected data slice.
'''
if 'c1' not in hy_obj.ancillary.keys():
c1 = np.cos(hy_obj.get_anc('slope')) * np.cos(hy_obj.get_anc('solar_zn'))
hy_obj.ancillary['c1'] = c1
if 'cosine_i' not in hy_obj.ancillary.keys():
cosine_i = hy_obj.cosine_i()
hy_obj.ancillary['cosine_i'] = cosine_i
C_bands = list([int(x) for x in hy_obj.topo['coeffs'].keys()])
C = np.array(list(hy_obj.topo['coeffs'].values()))
#Convert to float
data = data.astype(np.float32)
hy_obj.topo['coeffs'] = {int(k): hy_obj.topo['coeffs'][k] for k in hy_obj.topo['coeffs']}
if (dimension != 'band') & (dimension != 'chunk'):
if dimension == 'line':
#index= 3000
#data = hy_obj.get_line(3000)
mask = hy_obj.mask['apply_topo'][index,:]
cosine_i = hy_obj.ancillary['cosine_i'][[index],:].T
c1 = hy_obj.ancillary['c1'][[index],:].T
elif dimension == 'column':
#index= 300
#data = hy_obj.get_column(index)
mask = hy_obj.mask['apply_topo'][:,index]
cosine_i = hy_obj.ancillary['cosine_i'][:,[index]]
c1 = hy_obj.ancillary['c1'][:,[index]]
elif dimension == 'pixels':
#index = [[2000,2001],[200,501]]
y,x = index
#data = hy_obj.get_pixels(y,x)
mask = hy_obj.mask['apply_topo'][y,x]
cosine_i = hy_obj.ancillary['cosine_i'][[y],[x]].T
c1 = hy_obj.ancillary['c1'][[y],[x]].T
correction_factor = np.ones(data.shape)
correction_factor[:,C_bands] = (c1 + C)/(cosine_i + C)
data[mask,:] = data[mask,:]*correction_factor[mask,:]
elif dimension == 'chunk':
#index = 200,501,3000,3501
x1,x2,y1,y2 = index
#data = hy_obj.get_chunk(x1,x2,y1,y2)
mask = hy_obj.mask['apply_topo'][y1:y2,x1:x2]
cosine_i = hy_obj.ancillary['cosine_i'][y1:y2,x1:x2][:,:,np.newaxis]
c1 = hy_obj.ancillary['c1'][y1:y2,x1:x2][:,:,np.newaxis]
correction_factor = np.ones(data.shape)
correction_factor[:,:,C_bands] = (c1 + C)/(cosine_i + C)
data[mask,:] = data[mask,:]*correction_factor[mask,:]
elif (dimension == 'band') and (index in hy_obj.topo['coeffs']):
#index= 8
#data = hy_obj.get_band(index)
C = hy_obj.topo['coeffs'][index]
correction_factor = (hy_obj.ancillary['c1'] + C)/(hy_obj.ancillary['cosine_i'] + C)
data[hy_obj.mask['apply_topo']] = data[hy_obj.mask['apply_topo']] * correction_factor[hy_obj.mask['apply_topo']]
return data

View File

@ -0,0 +1,183 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Topographic correction
"""
import json
import numpy as np
import ray
from .modminn import apply_modminn,calc_modminn_coeffs
from .scsc import apply_scsc,calc_scsc_coeffs, calc_scsc_coeffs_group
from .cosine import apply_cosine,calc_cosine_coeffs
from .c import apply_c,calc_c_coeffs, calc_c_coeffs_group
from .scs import apply_scs,calc_scs_coeffs
from ..masks import mask_create
from ..misc import set_topo
def calc_cosine_i(solar_zn, solar_az, aspect, slope):
"""Generate cosine i image. The cosine of the incidence angle (i) is
defined as the angle between the normal to the pixel surface
and the solar zenith direction.
All input geometry units must be in radians.
Args:
solar_zn (numpy.ndarray): Solar zenith angle.
solar_az (numpy.ndarray): Solar azimuth angle.
aspect (numpy.ndarray): Ground aspect.
slope (numpy.ndarray): Ground slope.
Returns:
numpy.ndarray: Cosine i image.
"""
relative_az = aspect - solar_az
cosine_i = np.cos(solar_zn)*np.cos(slope) + np.sin(solar_zn)*np.sin(slope)* np.cos(relative_az)
return cosine_i
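# Quick check (not part of the original file): on flat terrain (slope = 0)
# the incidence angle equals the solar zenith angle, so cosine_i reduces to
# cos(solar_zn) regardless of aspect.
# sz = np.radians(30.0)
# assert np.isclose(calc_cosine_i(sz, np.radians(120.0), 0.0, 0.0), np.cos(sz))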
def apply_topo_correct(hy_obj,data,dimension,index):
'''Dispatch the configured topographic correction for a slice of the data.
Args:
hy_obj: HyTools data container object.
data (numpy.ndarray): Data slice.
dimension (str): One of 'line', 'column', 'band', 'chunk' or 'pixels'.
index: Index of the slice (int or tuple, depending on dimension).
Returns:
numpy.ndarray: Corrected data slice.
'''
if ('apply_topo' not in hy_obj.mask) & ('apply_mask' in hy_obj.topo):
hy_obj.gen_mask(mask_create,'apply_topo',hy_obj.topo['apply_mask'])
if hy_obj.topo['type'] == 'mod_minneart':
data = apply_modminn(hy_obj,data,dimension,index)
elif hy_obj.topo['type'] == 'scs+c':
data = apply_scsc(hy_obj,data,dimension,index)
elif hy_obj.topo['type'] == 'cosine':
data = apply_cosine(hy_obj,data,dimension,index)
elif hy_obj.topo['type'] == 'c':
data = apply_c(hy_obj,data,dimension,index)
elif hy_obj.topo['type'] == 'scs':
data = apply_scs(hy_obj,data,dimension,index)
return data
def load_topo_precomputed(hy_obj,topo_dict):
with open(topo_dict['coeff_files'][hy_obj.file_name], 'r') as outfile:
hy_obj.topo = json.load(outfile)
def get_topo_sample_mask(hy_obj,topo_dict):
sample_ratio = float(topo_dict["sample_perc"])
subsample_mask = np.copy(hy_obj.mask['calc_topo'])
idx = np.array(np.where(subsample_mask!=0)).T
if idx.shape[0]>5:
idxRand= idx[np.random.choice(range(len(idx)),int(len(idx)*(1-sample_ratio)), replace = False)].T
subsample_mask[idxRand[0],idxRand[1]] = 0
subsample_mask = subsample_mask.astype(np.int8)
hy_obj.ancillary['sample_mask']=subsample_mask
def calc_topo_coeffs(actors,topo_dict,actor_group_list=None,group_tag_list=None):
#def calc_topo_coeffs(actors,actor_group_list,topo_dict,group_tag_list):
if topo_dict['type'] == 'precomputed':
print("Using precomputed topographic coefficients.")
_ = ray.get([a.do.remote(load_topo_precomputed,topo_dict) for a in actors]) # actors
#_ = ray.get([a.do.remote(lambda x: x.corrections.append('topo')) for a in actors])
else:
print("Calculating topographic coefficients.")
_ = ray.get([a.do.remote(set_topo,topo_dict) for a in actors])
_ = ray.get([a.gen_mask.remote(mask_create,'calc_topo',
topo_dict['calc_mask']) for a in actors])
if (actor_group_list is None) or (topo_dict['type'] in ['scs','mod_minneart','cosine']):
# no grouping
if topo_dict['type'] == 'scs+c':
_ = ray.get([a.do.remote(calc_scsc_coeffs,topo_dict) for a in actors])
elif topo_dict['type'] == 'scs':
_ = ray.get([a.do.remote(calc_scs_coeffs,topo_dict) for a in actors])
elif topo_dict['type'] == 'mod_minneart':
_ = ray.get([a.do.remote(calc_modminn_coeffs,topo_dict) for a in actors])
elif topo_dict['type'] == 'cosine':
_ = ray.get([a.do.remote(calc_cosine_coeffs,topo_dict) for a in actors])
elif topo_dict['type'] == 'c':
_ = ray.get([a.do.remote(calc_c_coeffs,topo_dict) for a in actors])
#_ = ray.get([a.do.remote(lambda x: x.corrections.append('topo')) for a in actors])
else:
_ = ray.get([a.do.remote(get_topo_sample_mask,topo_dict) for a in actors])
for group_order, sub_actors in enumerate(actor_group_list):
#return 0
if topo_dict['type'] == 'scs+c':
calc_scsc_coeffs_group(sub_actors,topo_dict,group_tag_list[group_order])
elif topo_dict['type'] == 'c':
calc_c_coeffs_group(sub_actors,topo_dict,group_tag_list[group_order])
_ = ray.get([a.do.remote(lambda x: x.corrections.append('topo')) for a in actors])
def calc_topo_coeffs_single(hy_obj,topo_dict):
if topo_dict['type'] == 'precomputed':
print("Using precomputed topographic coefficients.")
load_topo_precomputed(hy_obj,topo_dict)
else:
print("Calculating topographic coefficients.")
hy_obj.gen_mask(mask_create,'calc_topo',topo_dict['calc_mask'])
if topo_dict['type'] == 'scs+c':
calc_scsc_coeffs(hy_obj,topo_dict)
elif topo_dict['type'] == 'scs':
calc_scs_coeffs(hy_obj,topo_dict)
elif topo_dict['type'] == 'mod_minneart':
calc_modminn_coeffs(hy_obj,topo_dict)
elif topo_dict['type'] == 'cosine':
calc_cosine_coeffs(hy_obj,topo_dict)
elif topo_dict['type'] == 'c':
calc_c_coeffs(hy_obj,topo_dict)
hy_obj.corrections.append('topo')

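A minimal single-image sketch of how the functions above fit together; the file paths, ancillary band order and SCS+C mask thresholds below are illustrative assumptions, not values taken from this commit:

import hytools as ht
from hytools.topo import calc_topo_coeffs_single

anc_names = ['path_length','sensor_az','sensor_zn','solar_az','solar_zn',
             'phase','slope','aspect','cosine_i','utc_time']
anc_map = {name: ['/data/example_obs_ort', band]
           for band, name in enumerate(anc_names)}

hy_obj = ht.HyTools()
hy_obj.read_file('/data/example_rfl_img', 'envi', anc_map)

topo_dict = {'type': 'scs+c',
             'calc_mask': [['ndi', {'band_1': 850, 'band_2': 660,
                                    'min': 0.1, 'max': 1.0}]],
             'apply_mask': [['ndi', {'band_1': 850, 'band_2': 660,
                                     'min': 0.1, 'max': 1.0}]],
             'c_fit_type': 'nnls'}
calc_topo_coeffs_single(hy_obj, topo_dict)

# corrected data is then served through the standard iterator
iterator = hy_obj.iterate(by='line', corrections=hy_obj.corrections)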

@ -0,0 +1,22 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Transform functions
"""
from .resampling import *


@ -0,0 +1,132 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Spectral resampling functions.
"""
import numpy as np
from scipy.interpolate import interp1d
def gaussian(x,mu,fwhm):
"""
Args:
x (numpy.ndarray): Values along which to generate the gaussian.
mu (float): Mean of the gaussian function.
fwhm (float): Full width at half maximum.
Returns:
numpy.ndarray: Gaussian along input range.
"""
c = fwhm/(2* np.sqrt(2*np.log(2)))
return np.exp(-1*((x-mu)**2/(2*c**2)))
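# Example: gaussian(np.array([445., 450., 460.]), 450., 10.) returns
# [0.5, 1.0, 0.0625] -- exactly 0.5 at half a FWHM from the mean and
# (1/2)**4 at a full FWHM from the mean.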
def calc_resample_coeffs(in_wave,in_fwhm,out_wave,out_fwhm, spacing = 1):
"""Given a set of source and destination wavelengths and FWHMs this
function caculates the relative contribution or each input wavelength
to the output wavelength. It assumes that both input and output
response functions follow a gaussian distribution.
All inputs shoud be provide in nanometers.
Args:
in_wave (list): Input wavelength centers.
in_fwhm (list): Input full width half maxes.
out_wave (list): Output wavelength centers.
out_fwhm (list): Output full width half maxes.
spacing (int, optional): Resolution at which to model the
spectral response functions. Defaults to 1.
Returns:
numpy.ndarray: Transform coeffiecients.
"""
out_matrix = []
min_spectrum = min(out_wave.min(),in_wave.min())//100 *100 - 100
max_spectrum = 100 + max(out_wave.max(),in_wave.max())//100 *100
one_nm = np.arange(min_spectrum,max_spectrum,spacing)
for wave,fwhm in zip(out_wave,out_fwhm):
a = gaussian(one_nm,wave,fwhm)
out_matrix.append(np.divide(a,np.sum(a)))
out_matrix = np.array(out_matrix)
# For each source wavelength generate the gaussian response
in_matrix = []
for wave,fwhm in zip(in_wave,in_fwhm):
in_matrix.append(gaussian(one_nm ,wave,fwhm))
in_matrix = np.array(in_matrix)
# Calculate the relative contribution of each source response function
ratio = in_matrix/in_matrix.sum(axis=0)
ratio[np.isnan(ratio)] = 0
ratio2 = np.einsum('ab,cb->acb',ratio,out_matrix)
# Calculate the relative contribution of each input wavelength
# to each destination wavelength
coeffs = np.trapz(ratio2)
return coeffs
def apply_resampler(hy_obj,data):
''' Apply spectral resampling to a chunk of data.
Args:
hy_obj (HyTools): Image object with resampler settings.
data (numpy.ndarray): Data chunk to resample along the band axis.
Returns:
numpy.ndarray: Resampled data chunk.
'''
interp_types = ['linear', 'nearest', 'nearest-up',
'zero', 'slinear', 'quadratic',
'cubic']
#Convert to float
data = data.astype(np.float32)
if hy_obj.resampler['type'] == 'gaussian':
# Load resampling coeffs to memory if needed
if 'resample_coeffs' not in hy_obj.ancillary.keys():
in_wave = hy_obj.wavelengths[~hy_obj.bad_bands]
in_fwhm = hy_obj.fwhm[~hy_obj.bad_bands]
resample_coeffs = calc_resample_coeffs(in_wave,in_fwhm,
hy_obj.resampler['out_waves'],
hy_obj.resampler['out_fwhm'])
hy_obj.ancillary['resample_coeffs'] = resample_coeffs
data = np.dot(data, hy_obj.ancillary['resample_coeffs'] )
elif hy_obj.resampler['type'] in interp_types:
interp_func = interp1d(hy_obj.wavelengths[~hy_obj.bad_bands], data,
kind=hy_obj.resampler['type'],
axis=2, fill_value="extrapolate")
data = interp_func(hy_obj.resampler['out_waves'])
return data

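For reference, a small end-to-end use of calc_resample_coeffs on a synthetic flat spectrum; the band centers and FWHMs here are made up for the example:

import numpy as np

in_wave = np.arange(400., 2500., 5.)    # assumed 5 nm source sampling
in_fwhm = np.full(in_wave.shape, 6.)
out_wave = np.arange(450., 2400., 10.)  # assumed 10 nm target sampling
out_fwhm = np.full(out_wave.shape, 11.)

coeffs = calc_resample_coeffs(in_wave, in_fwhm, out_wave, out_fwhm)
spectrum = np.ones(in_wave.shape, dtype=np.float32)
resampled = np.dot(spectrum, coeffs)    # same product used in apply_resampler
print(coeffs.shape)                     # (n_input_bands, n_output_bands)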
41
Flexbrdf/pyproject.toml Normal file

@ -0,0 +1,41 @@
[build-system]
requires = ["setuptools >= 77.0.3"]
build-backend = "setuptools.build_meta"
[project]
name = "hy-tools"
version = "1.6.1"
dependencies = [
"h5py",
"h5netcdf",
"matplotlib",
"numpy",
"pandas",
"ray",
"scikit-learn",
"scipy"
]
requires-python = "> 3.9"
authors = [
{name = "Adam Chlus", email = "adam.chlus@jpl.nasa.gov"},
{name = "Zhiwei Ye", email = "ye6@wisc.edu"},
{name = "Ting Zheng", email = "tzheng39@wisc.edu"},
{name = "Natalie Queally", email = "nqueally@ucla.edu"},
{name = "Evan Greenberg", email = "egreenberg@ucsb.edu"},
{name = "Philip Townsend", email = "ptownsend@wisc.edu"},
]
description = "HyTools: Hyperspectral image processing library"
readme = "README.md"
license = "GPL-3.0-only"
license-files = ["LICEN[CS]E.*"]
keywords = ["imaging spectroscopy", "hyperspectral", "remote sensing", "BRDF correction", "glint correction", "topographic correction", "brightness normalization"]
classifiers = [
"Programming Language :: Python",
]
[project.urls]
Homepage = "https://github.com/EnSpec/hytools"
Documentation = "https://hytools.readthedocs.io/en/latest/"
Repository = "https://github.com/EnSpec/hytools.git"

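Assuming this file sits at the repository root next to the package sources, the project installs with standard tooling (for example "pip install ."); the build backend needs nothing beyond the setuptools version pinned above.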

@ -0,0 +1,9 @@
# hytools dependencies
h5py
h5netcdf
matplotlib
numpy
pandas
ray
scikit-learn
scipy


@ -0,0 +1,137 @@
import argparse
import json
import os
import ray
import hytools as ht
from hytools.io.envi import WriteENVI
from hytools.brdf import calc_brdf_coeffs
def build_anc_mapping(anc_file, anc_names):
return dict(zip(anc_names, [[anc_file, i] for i in range(len(anc_names))]))
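# e.g. build_anc_mapping('/data/obs_ort', ['solar_zn', 'solar_az'])
#      -> {'solar_zn': ['/data/obs_ort', 0], 'solar_az': ['/data/obs_ort', 1]}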
def build_brdf_config(args):
mask = [["ndi", {"band_1": args.ndi_band_1, "band_2": args.ndi_band_2, "min": args.ndi_min, "max": args.ndi_max}]]
brdf = {
"type": args.brdf_type,
"grouped": False,
"geometric": args.geometric,
"volume": args.volume,
"b/r": args.b_r,
"h/b": args.h_b,
"sample_perc": args.sample_perc,
"calc_mask": mask,
"apply_mask": mask,
"solar_zn_type": args.solar_zn_type
}
if args.brdf_type == "universal":
brdf["diagnostic_plots"] = False
brdf["diagnostic_waves"] = []
if args.brdf_type == "flex":
brdf["interp_kind"] = "linear"
brdf["bin_type"] = "dynamic"
brdf["num_bins"] = args.num_bins
brdf["ndvi_bin_min"] = args.ndvi_bin_min
brdf["ndvi_bin_max"] = args.ndvi_bin_max
brdf["ndvi_perc_min"] = args.ndvi_perc_min
brdf["ndvi_perc_max"] = args.ndvi_perc_max
return brdf
def export_brdf_corrected(hy_obj, args):
header_dict = hy_obj.get_header()
header_dict["data ignore value"] = hy_obj.no_data
header_dict["data type"] = 4
output_name = os.path.join(
args["output_dir"],
f"{os.path.splitext(os.path.basename(hy_obj.file_name))[0]}_{args['suffix']}"
)
writer = WriteENVI(output_name, header_dict)
iterator = hy_obj.iterate(by="line", corrections=hy_obj.corrections)
while not iterator.complete:
line = iterator.read_next()
writer.write_line(line, iterator.current_line)
writer.close()
def export_brdf_coeffs(hy_obj, args):
output_name = os.path.join(
args["output_dir"],
f"{os.path.splitext(os.path.basename(hy_obj.file_name))[0]}_brdf_coeffs_{args['suffix']}.json"
)
with open(output_name, "w") as outfile:
json.dump(hy_obj.brdf, outfile)
def main():
parser = argparse.ArgumentParser(description="BRDF correction for ENVI BIP images")
parser.add_argument("image", type=str)
parser.add_argument("anc_file", type=str)
parser.add_argument("output_dir", type=str)
parser.add_argument("--anc-map", type=str, default="")
parser.add_argument("--anc-names", type=str, default="path_length,sensor_az,sensor_zn,solar_az,solar_zn,phase,slope,aspect,cosine_i,utc_time")
parser.add_argument("--brdf-type", type=str, default="universal", choices=["universal", "flex"])
parser.add_argument("--suffix", type=str, default="brdf")
parser.add_argument("--num-cpus", type=int, default=1)
parser.add_argument("--bad-bands-json", type=str, default="")
parser.add_argument("--solar-zn-type", type=str, default="scene")
parser.add_argument("--geometric", type=str, default="li_dense_r")
parser.add_argument("--volume", type=str, default="ross_thick")
parser.add_argument("--b-r", type=float, default=2.5)
parser.add_argument("--h-b", type=float, default=2.0)
parser.add_argument("--sample-perc", type=float, default=0.1)
parser.add_argument("--ndi-band-1", type=float, default=850.0)
parser.add_argument("--ndi-band-2", type=float, default=660.0)
parser.add_argument("--ndi-min", type=float, default=0.05)
parser.add_argument("--ndi-max", type=float, default=1.0)
parser.add_argument("--num-bins", type=int, default=18)
parser.add_argument("--ndvi-bin-min", type=float, default=0.05)
parser.add_argument("--ndvi-bin-max", type=float, default=1.0)
parser.add_argument("--ndvi-perc-min", type=float, default=10.0)
parser.add_argument("--ndvi-perc-max", type=float, default=95.0)
parser.add_argument("--export-coeffs", action="store_true")
args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
if args.anc_map:
with open(args.anc_map, "r") as infile:
anc_map = json.load(infile)
else:
anc_names = [x.strip() for x in args.anc_names.split(",") if x.strip()]
anc_map = build_anc_mapping(args.anc_file, anc_names)
config_dict = {
"file_type": "envi",
"input_files": [args.image],
"anc_files": {args.image: anc_map},
"corrections": ["brdf"],
"brdf": build_brdf_config(args),
"num_cpus": args.num_cpus
}
if args.bad_bands_json:
config_dict["bad_bands"] = json.loads(args.bad_bands_json)
if ray.is_initialized():
ray.shutdown()
ray.init(num_cpus=config_dict["num_cpus"])
HyTools = ray.remote(ht.HyTools)
actor = HyTools.remote()
ray.get(actor.read_file.remote(args.image, "envi", anc_map))
if "bad_bands" in config_dict:
ray.get(actor.create_bad_bands.remote(config_dict["bad_bands"]))
calc_brdf_coeffs([actor], config_dict)
ray.get(actor.do.remote(export_brdf_corrected, {"output_dir": args.output_dir, "suffix": args.suffix}))
if args.export_coeffs:
ray.get(actor.do.remote(export_brdf_coeffs, {"output_dir": args.output_dir, "suffix": args.suffix}))
ray.shutdown()
if __name__ == "__main__":
main()

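Saved as, say, brdf_correct.py (the script name is not fixed by this commit), a flex-BRDF run might look like: python brdf_correct.py /data/rfl_img /data/obs_ort /data/out --brdf-type flex --num-cpus 4 --export-coeffs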

@ -0,0 +1,294 @@
'''Template script for generating image_correct configuration JSON files.
These settings are meant only as an example; they are not appropriate for
all situations and may need to be adjusted.
'''
import json
import glob
import numpy as np
#Output path for configuration file
config_file = "/data1/temp/ht_test/ic_test.json"
config_dict = {}
#Only coefficients for good bands will be calculated
config_dict['bad_bands'] =[[300,400],[1337,1430],[1800,1960],[2450,2600]]
#config_dict['bad_bands'] =[[300,400],[900,2600]] # Subset for testing
# Input data settings for NEON
#################################################################
# config_dict['file_type'] = 'neon'
# images= glob.glob("/data1/temp/ht_test/*.h5")
# images.sort()
# config_dict["input_files"] = images
# Input data settings for ENVI
#################################################################
''' The only difference between the ENVI and NEON settings is the specification
of the ancillary datasets (e.g. viewing and solar geometry). All hytools
functions assume that the ancillary data and the image data are the same
size, spatially, and are ENVI formatted files.
The ancillary parameter is a dictionary with a key per image. Each value
per image is also a dictionary where the key is the dataset name and the
value is a list consisting of the file path and the band number.
'''
config_dict['file_type'] = 'envi'
aviris_anc_names = ['path_length','sensor_az','sensor_zn',
'solar_az', 'solar_zn','phase','slope',
'aspect', 'cosine_i','utc_time']
images= glob.glob("/data1/temp/ht_test/ang20190707t203417_rfl_v2v2_img")
images.sort()
config_dict["input_files"] = images
config_dict["anc_files"] = {}
anc_files = glob.glob("/data1/temp/ht_test/ang20190707t203417_rdn_v2v2_obs_ort_corr")
anc_files.sort()
for i,image in enumerate(images):
config_dict["anc_files"][image] = dict(zip(aviris_anc_names,
[[anc_files[i],a] for a in range(len(aviris_anc_names))]))
# Export settings
#################################################################
''' Options for subset waves:
1. List of subset wavelengths
2. Empty list, this will output all good bands; if a resampler is
set it will also resample.
- Currently the resampler cannot be used in conjunction with option 1
'''
config_dict['export'] = {}
config_dict['export']['coeffs'] = True
config_dict['export']['image'] = True
config_dict['export']['masks'] = True
config_dict['export']['subset_waves'] = []
config_dict['export']['output_dir'] = "/data1/temp/ht_test/"
config_dict['export']["suffix"] = 'anc_nodata_test'
#Corrections
#################################################################
''' Specify correction(s) to be applied; corrections will be applied
in the order they are specified.
Options include:
['topo']
['brdf']
['glint']
['topo','brdf']
['brdf','topo']
['brdf','topo','glint']
[] <---Export uncorrected images
'''
config_dict["corrections"] = ['brdf']
#Topographic Correction options
#################################################################
'''
Types supported:
- 'cosine'
- 'c'
- 'scs'
- 'scs+c'
- 'mod_minneart'
- 'precomputed'
Apply and calc masks are only needed for the C and SCS+C corrections. They will
be ignored in all other cases and the correction will be applied to all
non-no-data pixels.
'c_fit_type' is only applicable for the C or SCS+C correction type. Options
include 'ols' or 'nnls'. Choosing 'nnls' can limit overcorrection.
For precomputed topographic coefficients 'coeff_files' is a
dictionary where each key is the full image path and the value
is the full path to the coefficients file, one per image.
'''
config_dict["topo"] = {}
config_dict["topo"]['type'] = 'scs+c'
config_dict["topo"]['calc_mask'] = [["ndi", {'band_1': 850,'band_2': 660,
'min': 0.1,'max': 1.0}],
['ancillary',{'name':'slope',
'min': np.radians(5),'max':'+inf' }],
['ancillary',{'name':'cosine_i',
'min': 0.12,'max':'+inf' }],
['cloud',{'method':'zhai_2018',
'cloud':True,'shadow':True,
'T1': 0.01,'t2': 1/10,'t3': 1/4,
't4': 1/2,'T7': 9,'T8': 9}]]
config_dict["topo"]['apply_mask'] = [["ndi", {'band_1': 850,'band_2': 660,
'min': 0.1,'max': 1.0}],
['ancillary',{'name':'slope',
'min': np.radians(5),'max':'+inf' }],
['ancillary',{'name':'cosine_i',
'min': 0.12,'max':'+inf' }]]
config_dict["topo"]['c_fit_type'] = 'nnls'
# config_dict["topo"]['type'] = 'precomputed'
# config_dict["brdf"]['coeff_files'] = {}
#BRDF Correction options
#################################################################
'''
Types supported:
- 'universal': Simple kernel multiplicative correction.
- 'local': Correction by class (future)
- 'flex' : Correction by NDVI class
- 'precomputed' : Use precomputed coefficients
If 'bin_type' == 'user'
'bins' should be a list of lists, each list the NDVI bounds [low,high]
Object shapes ('h/b','b/r') only needed for Li kernels.
For precomputed BRDF coefficients 'coeff_files' is a
dictionary where each key is the full image path and the value
is the full path to the coefficients file, one per image.
'''
config_dict["brdf"] = {}
# Options are 'line','scene', or a float for a custom solar zn
# Custom solar zenith angle should be in radians
config_dict["brdf"]['solar_zn_type'] ='scene'
# Universal BRDF config
#----------------------
# config_dict["brdf"]['type'] = 'universal'
# config_dict["brdf"]['grouped'] = True
# config_dict["brdf"]['sample_perc'] = 0.1
# config_dict["brdf"]['geometric'] = 'li_dense_r'
# config_dict["brdf"]['volume'] = 'ross_thick'
# config_dict["brdf"]["b/r"] = 2.5
# config_dict["brdf"]["h/b"] = 2
# config_dict["brdf"]['calc_mask'] = [["ndi", {'band_1': 850,'band_2': 660,
# 'min': 0.1,'max': 1.0}]]
# config_dict["brdf"]['apply_mask'] = [["ndi", {'band_1': 850,'band_2': 660,
# 'min': 0.1,'max': 1.0}]]
# config_dict["brdf"]['diagnostic_plots'] = True
# config_dict["brdf"]['diagnostic_waves'] = [440,550,660,850]
#----------------------
# ## Flex BRDF configs
# ##------------------
config_dict["brdf"]['type'] = 'flex'
config_dict["brdf"]['grouped'] = True
config_dict["brdf"]['geometric'] = 'li_dense_r'
config_dict["brdf"]['volume'] = 'ross_thick'
config_dict["brdf"]["b/r"] = 2.5
config_dict["brdf"]["h/b"] = 2
config_dict["brdf"]['sample_perc'] = 0.1
config_dict["brdf"]['interp_kind'] = 'linear'
config_dict["brdf"]['calc_mask'] = [["ndi", {'band_1': 850,'band_2': 660,
'min': 0.1,'max': 1.0}],
['kernel_finite',{}],
['ancillary',{'name':'sensor_zn',
'min':np.radians(2),'max':'inf' }],
['neon_edge',{'radius': 30}],
['cloud',{'method':'zhai_2018',
'cloud':True,'shadow':True,
'T1': 0.01,'t2': 1/10,'t3': 1/4,
't4': 1/2,'T7': 9,'T8': 9}]]
config_dict["brdf"]['apply_mask'] = [["ndi", {'band_1': 850,'band_2': 660,
'min': 0.05,'max': 1.0}]]
# ## Flex dynamic NDVI params
config_dict["brdf"]['bin_type'] = 'dynamic'
config_dict["brdf"]['num_bins'] = 18
config_dict["brdf"]['ndvi_bin_min'] = 0.05
config_dict["brdf"]['ndvi_bin_max'] = 1.0
config_dict["brdf"]['ndvi_perc_min'] = 10
config_dict["brdf"]['ndvi_perc_max'] = 95
# ## Flex fixed bins specified by user
# config_dict["brdf"]['bin_type'] = 'user'
# config_dict["brdf"]['bins'] = [[0.1,.25],[.25,.75],[.75,1]]
# ##-----------------
## Precomputed BRDF coefficients
##------------------------------
# config_dict["brdf"]['type'] = 'precomputed'
# config_dict["brdf"]['coeff_files'] = {}
##------------------------------
#Glint Correction options
#################################################################
'''
Types supported:
- hochberg
- hedley
- gao
Common reference bands include:
- 860nm (NIR)
- 1650nm (SWIR)
- 2190nm (SWIR)
The Hedley-specific config would be in the form of:
[ImagePath]: [y1, y2, x1, x2]
e.g.:
config_dict["glint"]["deep_water_sample"] = {
"/path_to_image1": [
137, 574, 8034, 8470
],
"/path_to_image2": [
48, 393, 5780, 5925
],
}
'''
config_dict["glint"] = {}
config_dict['glint']['type'] = 'hedley'
config_dict['glint']['correction_wave'] = 1650
# External masks for glint correction
mask_files = glob.glob("/data2/prisma/rfl/PRS_20210629153937_20210629153942_0001_modtran/*_cls")
mask_files.sort()
file_dict = dict(zip(images,mask_files))
config_dict['glint']['apply_mask'] = [["external", {'class' : 1,
'files' : file_dict}]]
config_dict["glint"]["deep_water_sample"] = {
images[0]: [
225,250,240,260
]}
#Wavelength resampling options
##############################
'''
Types supported:
- 'gaussian': needs output waves and output FWHM
- 'linear', 'nearest', 'nearest-up',
'zero', 'slinear', 'quadratic','cubic': Piecewise
interpolation using Scipy interp1d
config_dict["resampler"] only needed when resampling == True
'''
config_dict["resample"] = False
# config_dict["resampler"] = {}
# config_dict["resampler"]['type'] = 'cubic'
# config_dict["resampler"]['out_waves'] = []
# config_dict["resampler"]['out_fwhm'] = []
# Remove bad bands from output waves
# for wavelength in range(450,660,100):
# bad=False
# for start,end in config_dict['bad_bands']:
# bad = ((wavelength >= start) & (wavelength <=end)) or bad
# if not bad:
# config_dict["resampler"]['out_waves'].append(wavelength)
config_dict['num_cpus'] = len(images)
with open(config_file, 'w') as outfile:
json.dump(config_dict,outfile,indent=3)

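A quick sanity check of the JSON this template writes; the image_correct workflow that consumes it ships with HyTools and is not shown here:

import json

with open("/data1/temp/ht_test/ic_test.json") as infile:
    cfg = json.load(infile)

# every requested correction should have a matching settings block
for correction in cfg["corrections"]:
    assert correction in cfg, f"missing settings for '{correction}'"
print(len(cfg["input_files"]), "image(s),", cfg["num_cpus"], "CPU(s)")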

@ -0,0 +1,338 @@
# python c:/mydata/image_correct_json_generate_gui.py
import os
import json
import glob
import numpy as np
import tkinter as tk
from tkinter.filedialog import asksaveasfilename, askdirectory
def fill_config(images, anc_files,out_coef_dir,img_file_type,corr_list, flag_pre_compute=False, topo_coeff = [], brdf_coeff=[]):
config_dict = {}
#Only coefficients for good bands will be calculated
config_dict['bad_bands'] =[[300,400],[1337,1430],[1800,1960],[2450,2600]]
config_dict['file_type'] = img_file_type #'envi'
aviris_anc_names = ['path_length','sensor_az','sensor_zn',
'solar_az', 'solar_zn','phase','slope',
'aspect', 'cosine_i','utc_time']
#aviris_anc_names = ['sensor_az','sensor_zn',
# 'solar_az', 'solar_zn']
images.sort()
config_dict["input_files"] = images
if img_file_type=='envi':
config_dict["anc_files"] = {}
anc_files.sort()
for i,image in enumerate(images):
config_dict["anc_files"][image] = dict(zip(aviris_anc_names,
[[anc_files[i],a] for a in range(len(aviris_anc_names))]))
config_dict['export'] = {}
config_dict["topo"] = {}
config_dict["brdf"] = {}
if flag_pre_compute:
config_dict['export']['coeffs'] = False
config_dict['export']['image'] = True
else:
config_dict['export']['coeffs'] = True
config_dict['export']['image'] = False
config_dict['export']['masks'] = False
config_dict['export']['subset_waves'] = [660,550,440] #[440,550,660,850] #
config_dict['export']['output_dir'] = out_coef_dir
print('_'.join(corr_list))
if len(corr_list)>0:
config_dict['export']["suffix"] = '_'.join(corr_list) # 'brdf'
else:
config_dict['export']["suffix"] = 'raw'
config_dict["corrections"] = corr_list # ['brdf']
if flag_pre_compute and len(topo_coeff)==len(images):
config_dict["topo"]['type'] = 'precomputed'
topo_files = sorted(topo_coeff)
#print(dict(zip(images, topo_files)))
config_dict["topo"]['coeff_files'] = dict(zip(images, topo_files))
else:
config_dict["topo"]['type'] = 'scs+c'
config_dict["topo"]['calc_mask'] = [["ndi", {'band_1': 850,'band_2': 660,
'min': 0.05,'max': 1.0}]
]
config_dict["topo"]['apply_mask'] = [["ndi", {'band_1': 850,'band_2': 660,
'min': 0.05,'max': 1.0}]]
config_dict["topo"]['c_fit_type'] = 'nnls' #'ols' #'nnls' #
if flag_pre_compute and len(brdf_coeff)==len(images):
config_dict["brdf"]['type'] = 'precomputed'
brdf_files = sorted(brdf_coeff)
config_dict["brdf"]['coeff_files'] = dict(zip(images, brdf_files))
else:
# Options are 'line','scene', or a float for a custom solar zn
# Custom solar zenith angle should be in radians
config_dict["brdf"]['solar_zn_type'] ='scene'
#----------------------
# ## Flex BRDF configs
# ##------------------
config_dict["brdf"]['type'] = 'flex'
config_dict["brdf"]['grouped'] = True
config_dict["brdf"]['geometric'] = 'li_sparse_r'
config_dict["brdf"]['volume'] = 'ross_thick'
config_dict["brdf"]["b/r"] = 2.5
config_dict["brdf"]["h/b"] = 2
config_dict["brdf"]['sample_perc'] = 0.1
config_dict["brdf"]['interp_kind'] = 'linear'
config_dict["brdf"]['calc_mask'] = [["ndi", {'band_1': 850,'band_2': 660,
'min': 0.05,'max': 1.0}],
['kernel_finite',{}],
['ancillary',{'name':'sensor_zn',
'min':np.radians(2),'max':'inf' }]
]
config_dict["brdf"]['apply_mask'] = [["ndi", {'band_1': 850,'band_2': 660,
'min': 0.05,'max': 1.0}]]
# ## Flex dynamic NDVI params
config_dict["brdf"]['bin_type'] = 'dynamic'
config_dict["brdf"]['num_bins'] = 18
config_dict["brdf"]['ndvi_bin_min'] = 0.05
config_dict["brdf"]['ndvi_bin_max'] = 1.0
config_dict["brdf"]['ndvi_perc_min'] = 10
config_dict["brdf"]['ndvi_perc_max'] = 95
config_dict["resample"] = False
config_dict['num_cpus'] = len(images)
return config_dict
'''
def update_corr_list(corr_list_):
#print(chk_topo.get(),chk_brdf.get())
corr_list_ = ['topo']*chk_topo.get()+['brdf']*chk_brdf.get()
#print(corr_list)
'''
def gen_config(entry_outdir, entry_outjson,img_list_out, obs_list_out, radio_f_type, corr_list,chk_precompute, topo_list_out,brdf_list_out):
outdir_name = entry_outdir.get()+'/'
out_json = entry_outjson.get()
images = img_list_out['text'].split('\n')
anc_files = obs_list_out['text'].split('\n')
img_file_type = str(radio_f_type.get()).lower()
corr_list_str = ['topo']*(corr_list[0].get()) + ['brdf']*(corr_list[1].get())
flag_pre_compute = bool(chk_precompute.get())
#print(flag_pre_compute,chk_precompute.get())
if flag_pre_compute:
topo_json_list = topo_list_out['text'].split('\n')
brdf_json_list = brdf_list_out['text'].split('\n')
return_json_dict = fill_config(images, anc_files,outdir_name,img_file_type,corr_list_str,flag_pre_compute=flag_pre_compute, topo_coeff = topo_json_list, brdf_coeff=brdf_json_list)
else:
return_json_dict = fill_config(images, anc_files,outdir_name,img_file_type,corr_list_str)
with open(out_json, 'w') as outfile:
json.dump(return_json_dict,outfile,indent=3)
window.title(f"File saved- {out_json}")
def save_file(txt_out_json):
"""Save the current file as a new file."""
filepath = asksaveasfilename(
defaultextension=".json",
filetypes=[("JSON Files", "*.json"), ("All Files", "*.*")],
)
if not filepath:
return
txt_out_json.delete(0,tk.END)
txt_out_json.insert(0,filepath)
window.title(f"Export Configuration JSON - {filepath}")
def open_folder(out_component):
"""Open a file for editing."""
in_img_dir = askdirectory()
if not in_img_dir:
return
#print(in_img_dir)
out_component.delete(0, tk.END)
out_component.insert(0,in_img_dir)
window.title(f"Folder- {in_img_dir}")
def open_file(out_component, list_component, radio_f_type=None, pattern=None):
"""Open a file for editing."""
in_img_dir = askdirectory()
if not in_img_dir:
return
#out_component.delete("1.0", tk.END)
#out_component.insert("1.0",in_img_dir)
out_component["text"] = in_img_dir
if pattern is None:
img_file_type = str(radio_f_type.get()).lower()
file_ext_list = {'envi':'*img','neon':'*.h5'}
pattern = file_ext_list[img_file_type]
#if pattern is None:
# return in_img_dir
file_list = sorted(glob.glob(in_img_dir+'/'+pattern))
if len(file_list)==0:
list_component['text']= 'No files selected'
return
list_component['text']= '\n'.join([ os.path.normpath(x) for x in file_list])
#print(list_component['text'])
window.title(f"Folder- {in_img_dir}")
window = tk.Tk()
window.title("Setup image correction configuration file")
#window.rowconfigure(0, minsize=400, weight=1)
#window.columnconfigure(1, minsize=600, weight=1)
#txt_edit = tk.Text(window)
frm_img_buttons = tk.Frame(window, relief=tk.RAISED, bd=2)
frm_img_buttons.columnconfigure(1, minsize=300, weight=1)
frm_img_buttons.rowconfigure(1, minsize=50, weight=1)
frm_obs_buttons = tk.Frame(window, relief=tk.RAISED, bd=2)
frm_obs_buttons.columnconfigure(1, minsize=300, weight=1)
frm_obs_buttons.rowconfigure(1, minsize=50, weight=1)
frm_out_buttons = tk.Frame(window, relief=tk.RAISED, bd=2)
frm_out_buttons.columnconfigure(1, weight=1) #, minsize=300
frm_out_json_buttons = tk.Frame(window, relief=tk.RAISED, bd=2)
frm_out_json_buttons.columnconfigure(1, weight=1) #minsize=300,
frm_file_type = tk.Frame(window, relief=tk.RAISED, bd=2)
frm_corr_type = tk.Frame(window, relief=tk.RAISED, bd=2)
frm_precomp = tk.Frame(window, relief=tk.RAISED, bd=2,highlightbackground="grey",highlightthickness=5,padx=5, pady=5)
frm_precomp.columnconfigure(1,weight=1)
frm_precomp.rowconfigure(1, minsize=50, weight=1)
frm_pre_topo_buttons = tk.Frame(frm_precomp, relief=tk.RAISED, bd=2)
frm_pre_topo_buttons.columnconfigure(1, minsize=280, weight=1)
#frm_pre_topo_buttons.rowconfigure(1, minsize=50, weight=1)
frm_pre_brdf_buttons = tk.Frame(frm_precomp, relief=tk.RAISED, bd=2)
frm_pre_brdf_buttons.columnconfigure(1, minsize=280, weight=1)
#frm_pre_brdf_buttons.rowconfigure(1, minsize=50, weight=1)
frm_final_gen = tk.Frame(window, relief=tk.RAISED, bd=2,highlightbackground="grey",highlightthickness=5)
frm_final_gen.rowconfigure(0, minsize=50, weight=0)
frm_final_gen.columnconfigure(0, minsize=100, weight=1) #
frm_final_gen.columnconfigure(1, minsize=50, weight=0)
frm_final_gen.columnconfigure(2, minsize=100, weight=1)
label_img_dir = tk.Label(frm_img_buttons,text="image", fg="white", bg="black")
label_obs_dir = tk.Label(frm_obs_buttons,text="obs", fg="white", bg="black")
txt_outdir = tk.Entry(frm_out_buttons)
img_list_out = tk.Label(frm_img_buttons,bg="grey",anchor="w")
obs_list_out = tk.Label(frm_obs_buttons,bg="grey",anchor="w")
btn_open1 = tk.Button(frm_img_buttons, text="Image Folder...", command=lambda: open_file(label_img_dir,img_list_out,radio_f_type=f_type) ) #'*img'
btn_open2 = tk.Button(frm_obs_buttons, text="Obs_ort Folder...", command=lambda: open_file(label_obs_dir,obs_list_out,pattern='*obs_ort'))
btn_open3 = tk.Button(frm_out_buttons, text="Output coeff Folder...", command=lambda: open_folder(txt_outdir)) #, command=open_file(txt_edit, None)
#btn_open1 = tk.Button(frm_img_buttons, text="Image Folder...", command=lambda: open_file(label_img_dir,img_list_out,radio_f_type=f_type) ) #'*img'
#btn_open2 = tk.Button(frm_obs_buttons, text="Obs_ort Folder...", command=lambda: open_file(label_obs_dir,obs_list_out,pattern='*obs_ort'))
btn_open_topo = tk.Button(frm_pre_topo_buttons, text="TOPO json Folder...", command=lambda: open_file(label_pre_topo_dir,topo_list_out,pattern='*topo_coeffs*.json'))
btn_open_brdf = tk.Button(frm_pre_brdf_buttons, text="BRDF json Folder...", command=lambda: open_file(label_pre_brdf_dir,brdf_list_out,pattern='*brdf_coeffs*.json'))
txt_out_json = tk.Entry(frm_out_json_buttons)
btn_save = tk.Button(frm_out_json_buttons, text="Save As...", command=lambda: save_file(txt_out_json))
btn_open1.grid(row=0, column=0, sticky="ew", padx=5, pady=5)
label_img_dir.grid(row=0, column=1, sticky="ew", padx=5, pady=5)
img_list_out.grid(row=1, columnspan=2, sticky="nsew", padx=5, pady=5)
btn_open2.grid(row=0, column=0, sticky="ew", padx=5, pady=5)
label_obs_dir.grid(row=0, column=1, sticky="ew", padx=5, pady=5)
obs_list_out.grid(row=1, columnspan=2, sticky="nsew", padx=5, pady=5)
btn_open3.grid(row=0, column=0, sticky="ew", padx=5, pady=5)
txt_outdir.grid(row=0, column=1, sticky="ew", padx=5, pady=5)
btn_save.grid(row=0, column=0, sticky="ew", padx=5)
txt_out_json.grid(row=0, column=1, sticky="ew", padx=5, pady=5)
f_type = tk.StringVar(value="envi")
ev_btn = tk.Radiobutton(frm_file_type, text='ENVI (*img)', variable=f_type, value='envi')
h5_btn = tk.Radiobutton(frm_file_type, text='NEON HDF5 (*.h5)', variable=f_type, value='neon')
ev_btn.grid(row=0, column=0, sticky="ew", padx=5)
h5_btn.grid(row=0, column=1, sticky="ew", padx=5)
chk_topo = tk.IntVar(value=0)
chk_brdf = tk.IntVar(value=1)
chk_topo_btn = tk.Checkbutton(frm_corr_type, text='TOPO', variable=chk_topo, onvalue=1, offvalue=0) #, command=lambda: update_corr_list(corr_list))
chk_brdf_btn = tk.Checkbutton(frm_corr_type, text='BRDF', variable=chk_brdf, onvalue=1, offvalue=0) #, command=lambda: update_corr_list(corr_list))
chk_topo_btn.grid(row=0, column=0, sticky="ew", padx=5)
chk_brdf_btn.grid(row=0, column=1, sticky="ew", padx=5)
corr_list = [chk_topo,chk_brdf]
chk_precompute = tk.IntVar(value=0)
coeff_list= []
label_precomput = tk.Checkbutton(frm_precomp, text='Load Precomputed Coefficients', variable=chk_precompute, onvalue=1, offvalue=0,anchor="center") #tk.Label(frm_precomp,text="Precomputed Coefficients")
label_precomput.grid(row=0, columnspan=2, sticky="ew", padx=2, pady=2)
btn_open_topo.grid(row=0, column=0, sticky="ew", padx=2, pady=2)
btn_open_brdf.grid(row=0, column=0, sticky="ew", padx=2, pady=2)
label_pre_topo_dir = tk.Label(frm_pre_topo_buttons,text="topo json", fg="white", bg="black")
label_pre_topo_dir.grid(row=0, column=1, sticky="ew", padx=2, pady=2)
label_pre_brdf_dir = tk.Label(frm_pre_brdf_buttons,text="brdf json", fg="white", bg="black")
label_pre_brdf_dir.grid(row=0, column=1, sticky="ew", padx=2, pady=2)
topo_list_out = tk.Label(frm_pre_topo_buttons,bg="grey",anchor="w")
topo_list_out.grid(row=1, columnspan=2, sticky="ew", padx=2,pady=2)
brdf_list_out = tk.Label(frm_pre_brdf_buttons,bg="grey",anchor="w")
brdf_list_out.grid(row=1, columnspan=2, sticky="ew", padx=2,pady=2)
frm_pre_topo_buttons.grid(row=1, column=0, sticky="ew")
frm_pre_brdf_buttons.grid(row=1, column=1, sticky="ew")
#img_list_out = tk.Label(frm_img_buttons,bg="grey",anchor="w")
#obs_list_out = tk.Label(frm_obs_buttons,bg="grey",anchor="w")
btn_gen = tk.Button(frm_final_gen, text="Generate", font=("Calibri",12,"bold"), command=lambda: gen_config(txt_outdir,txt_out_json,img_list_out, obs_list_out,f_type,corr_list,chk_precompute,topo_list_out, brdf_list_out))
#btn_gen.place(relx=.5, rely=.5,anchor= 'e')
btn_gen.grid(row=0,column=1,sticky="wens") #anchor='center',
#btn_gen.place(relx=0.5, rely=0.95, anchor=tk.CENTER)
frm_img_buttons.grid(row=0, column=0, sticky="ew")
frm_obs_buttons.grid(row=0, column=1, sticky="ew")
frm_out_buttons.grid(row=1, columnspan=2, sticky="we")
frm_file_type.grid(row=2,column=0, sticky="ew")
frm_corr_type.grid(row=2,column=1, sticky="ew")
frm_precomp.grid(row=3,columnspan=2, sticky="nsew")
frm_out_json_buttons.grid(row=4, columnspan=2, sticky="ew")
frm_final_gen.grid(row=6, columnspan=2, sticky="nsew") #, sticky="nsew"
window.mainloop()


@ -0,0 +1,117 @@
# -*- coding: utf-8 -*-
"""
HyTools: Hyperspectral image processing library
Copyright (C) 2021 University of Wisconsin
Authors: Adam Chlus, Zhiwei Ye, Philip Townsend.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
JSON format for PLSR trait models v0.1
This version assumes that all transforms are applied using all model wavelengths.
TODO: Develop standard codes for spectrometer, both airborne/spaceborne and field.
TODO: Allow for more options for specifying wavelength subsets
"""
import json
model_dict = {}
# Metadata
#####################################
'''
name : Trait name (str)
units : Trait units (str)
description: Model description (str)
wavelength_units: Wavelength units (str)
wavelengths : Model wavelengths (list)
Only wavelengths used in the model should be
included in the list of wavelengths.
fwhm : Model FWHM (list)
spectrometer : Spectrometer code (str)
type : Model type (str)
'''
model_dict["name"] = ''
model_dict["units"] = ''
model_dict["description"] = ''
model_dict["wavelength_units"] = ''
model_dict["wavelengths"] = []
model_dict["fwhm"] = []
model_dict["spectrometer"] = ''
model_dict["type"] = ''
# Diagnostics
#####################################
'''Currently the only required diagnostics are 'min'
and 'max', these are the min and max values of the
dataset used to build the model and are used to generate
the data range mask, which identifies pixels with predictions
outside of the model dataset range.
'''
model_dict["model_diagnostics"] = {}
model_dict["model_diagnostics"]["rmse"] = 0.0
model_dict["model_diagnostics"]["r_squared"] = 0.0
model_dict["model_diagnostics"]["min"] = 0.0
model_dict["model_diagnostics"]["max"] = 0.0
# Model
#####################################
'''
transform: List of transforms to be applied in order of application.
Options:
- 'vector': vector norm using np.linalg.norm
- 'mean' : Normalize to mean
- 'absorb' : log(1/R)
Examples:
['vector','absorb']
Empty list for no transforms ([])
coefficients: List of lists, sublists are the coefficients for
model iterations.
intercepts : Permuted model intercepts (list)
components : Number of model components (int)
'''
model_dict['model'] = {}
model_dict['model']["components"] = 0
model_dict['model']["transform"] = ['mean']
model_dict['model']["intercepts"] = []
model_dict['model']["coefficients"] =[[],[]]
model_path = '*.json'
with open(model_path, 'w') as outfile:
json.dump(model_dict,outfile)

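Once the template is filled in, applying a model of this form to a block of spectra takes only a few lines of numpy. The transform handling below follows the docstring above; the file name and the ensemble averaging across iterations are assumptions:

import json
import numpy as np

with open('trait_model.json') as infile:  # hypothetical filled-in model
    model = json.load(infile)

spectra = np.random.rand(10, len(model['wavelengths']))  # rows are pixels
for transform in model['model']['transform']:
    if transform == 'vector':
        spectra = spectra / np.linalg.norm(spectra, axis=1, keepdims=True)
    elif transform == 'mean':
        spectra = spectra / spectra.mean(axis=1, keepdims=True)
    elif transform == 'absorb':
        spectra = np.log(1.0 / spectra)

coeffs = np.array(model['model']['coefficients'])    # (iterations, n_waves)
intercepts = np.array(model['model']['intercepts'])  # (iterations,)
estimates = spectra @ coeffs.T + intercepts          # per-iteration estimates
trait_mean = estimates.mean(axis=1)                  # ensemble mean per pixel
# flag predictions outside the model training range (data range mask)
out_of_range = ((trait_mean < model['model_diagnostics']['min']) |
                (trait_mean > model['model_diagnostics']['max']))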
Some files were not shown because too many files have changed in this diff.