'''
C:\Program Files\SpectrononPro\plugins\cube\correct

NOTES AND ASSUMPTIONS:
- Response of the camera is linear after dark noise removal.
- If additive binning is utilized, it must be in the header file.
- If frame windowing information is stored on-camera (like the Basler Ace), the gain cube is
  collected windowed and no window header item is utilized. Otherwise, the spatial/band
  windowing header item must be included.
- There is only one gain cube in the vault. It is not binned or averaged.
- Offset cubes can be binned. They are located by the # of bands, not by binning numbers.
- Georeg cubes must be windowed and binned to match. This code will average down to match size,
  just in case, but GeoReg won't!
- If Auto Dark Noise removal is unchecked and no dark noise cube is given, no dark noise will be removed.
- If Auto Dark Noise removal is unchecked and a dark noise cube is supplied, its dimensions must
  match the given cube.
'''
from resonon.utils import cubetools as tools
from spectronon.workbench.plugin import CubePlugin

import numpy

from resonon.utils.spec import SpecFilename, SpecBool, SpecCube
from resonon.utils.cubevault import CubeVault
from resonon.core.data.cube import Cube, DummyCube
from resonon.core.data import _util


class RadianceConversion(CubePlugin):
    """Calculate radiance units based on a calibration pack (.zip) or cube"""
    label = "Radiance From Raw Data"
    position = 3
    userLevel = 1

    def setup(self):
        datacube = self.datacube
        self.calpack_path = SpecFilename(
            'Imager Calibration',
            wildcard='Imager Calibration Pack, Calibration Pack, Calibration Cube (*.icp, *.zip, *.bip)|*.icp;*.zip; *.bip',
            must_exist=True)
        self.removedark = SpecBool("Auto Remove Dark Noise?", defaultValue=True)
        self.darkcube = SpecCube("Dark Noise Cube", datacube, self.wb,
                                 requireMatchedBandCount=True,
                                 requireMatchedSampleCount=True)
        self.returnfloat = SpecBool("Return floating point?", defaultValue=False)

    def update(self):
        datacube = self.datacube
        if self.removedark.value:
            self.darkcube.hidden = True
        else:
            self.darkcube.hidden = False

    def action(self):
        datacube = self.datacube
        data_bands = datacube.getBandCount()
        data_samples = datacube.getSampleCount()

        # The windowing parameters describe the active window (region of interest) on the sensor (CCD).
        # Get the windowing parameters, falling back on full frames.
        try:
            spatial_window = eval(datacube.getMetaValue('spatial window'))  # sample (column) range of the active window
        except:
            spatial_window = None  # equivalent to (0, data_samples)
        try:
            band_window = eval(datacube.getMetaValue('band window'))  # band (row) range of the active window
        except:
            band_window = None  # equivalent to (0, data_bands)
        try:
            camera_binning = int(datacube.getMetaValue('camera spectral binning'))
        except:
            camera_binning = 1
        try:
            spectral_binning = int(datacube.getMetaValue('spectral binning'))
        except:
            spectral_binning = 1
        try:
            sample_binning = int(datacube.getMetaValue('sample binning'))
        except:
            sample_binning = 1
        try:
            data_gain_db = int(round(datacube.getMetaValue('gain')))
        except KeyError:
            data_gain_db = None
        try:
            data_shutter = float(round(datacube.getMetaValue('shutter')))
        except KeyError:
            data_shutter = None
        try:
            flip_frame = eval(datacube.getMetaValue('flip radiometric calibration'))
        except:
            try:
                direction = datacube.getMetaValue('direction')
                if direction == 'logo left':
                    flip_frame = False
                else:
                    flip_frame = True
            except:
                flip_frame = False
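        # Illustrative note (the values below are hypothetical and are shown only to document the
        # expected formats of the header items read above; they are not read from any real cube):
        #   spatial window = (100, 740)   -> eval() yields a (start, stop) sample range
        #   band window = (8, 248)        -> eval() yields a (start, stop) band range
        #   spectral binning = 2          -> parsed with int()
        #   gain = 6                      -> electronic gain in dB (see the 10 ** (dB / 20.) conversion below)
        #   shutter = 25.0                -> integration time, used to scale the calibration gain below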
        # self.calpack_path.value is the path to the calibration pack (.icp/.zip) file
        if self.calpack_path.value[-3:] == 'zip' or self.calpack_path.value[-3:] == 'icp':
            # Open the calibration pack; it contains one gain cube and a set of offset cubes
            cvault = CubeVault(self.calpack_path.value)
            # cube dict prefers any header item to none unless exact_match=True, is this the right behavior?
            gainCube = cvault.get_gain_cube(meta={'camera spectral binning': camera_binning})
            offsetCube = cvault.get_offset_cube(meta={'camera spectral binning': camera_binning,
                                                      # is it a duplicate to ask for spectral binning on top of bands?
                                                      'samples': data_samples,
                                                      'bands': data_bands,
                                                      'gain': data_gain_db,
                                                      'shutter': data_shutter})
            print(offsetCube.getPath())

            # window the data: crop the calibration frames down to the active sensor window
            gain_cube_array = gainCube.getFrame(0, asBIP=True)
            offset_cube_array = offsetCube.getFrame(0, asBIP=True)
            if spatial_window:  # sample (column) range of the active window
                gain_cube_array = gain_cube_array[spatial_window[0]:spatial_window[1], :]
                offset_cube_array = offset_cube_array[spatial_window[0]:spatial_window[1], :]
            if band_window:  # band (row) range of the active window
                gain_cube_array = gain_cube_array[:, band_window[0]:band_window[1]]
                offset_cube_array = offset_cube_array[:, band_window[0]:band_window[1]]

            # A binned cube is scaled too large by the binning factors, so the data needs to be divided
            # by the binning. Since the data gets multiplied by gain_cube_array, we can simply divide
            # the gain by the binning factor instead.
            try:
                gain_spectral_binning = int(gainCube.getMetaValue('spectral binning'))
            except:
                gain_spectral_binning = 1
            try:
                gain_sample_binning = int(gainCube.getMetaValue('sample binning'))
            except:
                gain_sample_binning = 1
            try:
                offset_spectral_binning = int(offsetCube.getMetaValue('spectral binning'))
            except:
                offset_spectral_binning = 1
            try:
                offset_sample_binning = int(offsetCube.getMetaValue('sample binning'))
            except:
                offset_sample_binning = 1
            # Division here is correct: the gain is reduced by the ratio of data binning to gain-cube binning.
            gc_binned_gain = gain_cube_array / (
                float(spectral_binning) * sample_binning / (gain_spectral_binning * gain_sample_binning))
            # The offset of binned data is also scaled by the binning, so the measured offset must be
            # multiplied by the binning factor. Multiplication here is correct.
            gc_binned_offset = offset_cube_array * (
                float(spectral_binning) * sample_binning / (offset_spectral_binning * offset_sample_binning))
        else:
            georegcube = Cube(self.calpack_path.value)
            # just copying the cube so that the legacy code still works
            gainCube = georegcube
            # window the data
            gain_cube_array = georegcube.getFrame(1, asBIP=True)
            offset_cube_array = georegcube.getFrame(0, asBIP=True)
            gc_binned_gain = gain_cube_array
            gc_binned_offset = offset_cube_array

        if self.darkcube.value is not None:
            correctionCube = self.wb.tree.getCube(self.darkcube.value)
            if correctionCube.getSampleCount() != datacube.getSampleCount() or \
                    correctionCube.getBandCount() != datacube.getBandCount():
                self.wb.postMessage('Dark Noise Cube frame size (%s) does not equal datacube frame size (%s).'
                                    % (str((correctionCube.getSampleCount(), correctionCube.getBandCount())),
                                       str((datacube.getSampleCount(), datacube.getBandCount()))))
                return
            gc_binned_offset = tools.meanFrameOfCube(correctionCube, asBIP=True)
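        # Worked example (hypothetical numbers, for illustration only): if the data cube was collected
        # with spectral binning = 2 and sample binning = 1, while the unbinned gain cube has both
        # factors equal to 1, each binned data value is the sum of 2 sensor rows and is ~2x too large.
        # Dividing the gain frame by 2 (above) compensates for this when the data is later multiplied
        # by the gain, and the offset frame is multiplied by 2 so it matches the scale of the binned data.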
        # determine how the frames should be averaged (to get the frame sizes to match)
        gc_samples = gc_binned_gain.shape[0]
        gc_bands = gc_binned_gain.shape[1]
        oc_samples = gc_binned_offset.shape[0]
        oc_bands = gc_binned_offset.shape[1]
        if data_samples > gc_samples or data_bands > gc_bands:
            self.wb.postMessage('Correction Cube frame size (%s) is smaller than datacube frame size (%s).'
                                % (str((gc_samples, gc_bands)),
                                   str((datacube.getSampleCount(), datacube.getBandCount()))))
            return
        gain_sample_aveby = gc_samples / data_samples
        gain_band_aveby = gc_bands / data_bands
        # average the correction cube down to fit the incoming datacube
        # (doesn't assume that the shapes of the gain and offset are the same)
        gc_binned_gain = tools.aveFrame(gc_binned_gain,
                                        spectralAve=gain_band_aveby,
                                        spatialAve=gain_sample_aveby,
                                        interleave="bip")
        offset_sample_aveby = oc_samples / data_samples
        offset_band_aveby = oc_bands / data_bands
        gc_binned_offset = tools.aveFrame(gc_binned_offset,
                                          spectralAve=offset_band_aveby,
                                          spatialAve=offset_sample_aveby,
                                          interleave="bip")
        if flip_frame:
            gc_binned_gain = numpy.flipud(gc_binned_gain)
            gc_binned_offset = numpy.flipud(gc_binned_offset)

        # Calculate the gain and shutter differences between the calibration cube and the datacube.
        # This removes the effect of integration time (shutter) and electronic gain, similar to the
        # normalization performed when computing reflectance from ASD measurements.
        try:
            gc_gain = 10 ** (gainCube.getMetaValue('gain') / 20.)
        except (AttributeError, KeyError):
            gc_gain = 1.0
        gc_shutter = gainCube.getMetaValue('shutter')
        try:
            data_gain = 10 ** (datacube.getMetaValue('gain') / 20.)
        except (AttributeError, KeyError):
            data_gain = 1.0
        data_shutter = datacube.getMetaValue('shutter')
        gain_factor = (gc_shutter * gc_gain) / (data_shutter * data_gain)

        # produce the correction frames
        adjusted_gain = gain_factor * gc_binned_gain
        adjusted_offset = gc_binned_offset  # we no longer have any adjustment to offset cubes

        # set up the outgoing cube
        newcube = datacube.getFramelessCopy(makeTypeFloat=self.returnfloat.value)
        newcube.setMetaValue("interleave", "bip")
        for hdr_item in ("reflectance scale factor", "ceiling", "bit depth"):
            try:
                del newcube._metadata[hdr_item]
            except:
                pass
        # bug fix - calculate maxAllowed based on the dtype BEFORE converting to float
        dtype = _util._enviType2NumpyDType(newcube.getMetaValue("data type"))
        maxAllowed = _util.dataTypeMax(dtype)

        lines = self.datacube.getLineCount()
        # For memory cubes, it is *much* more efficient to preallocate memory
        try:
            newcube.extendLineBuffer(lines)
        except AttributeError:  # Disk cubes don't have this ability
            pass

        # produce the new cube, scaling and clipping as necessary
        for f in range(datacube.getLineCount()):
            frame = datacube.getFrame(f, asBIP=True).astype('f')
            if self.removedark.value or (self.darkcube.value is not None):
                frame = numpy.clip((frame - adjusted_offset) * adjusted_gain, 0, maxAllowed)
            else:
                frame = numpy.clip(frame * adjusted_gain, 0, maxAllowed)
            newcube.appendFrame(frame.astype(dtype))
        return newcube

    def getDummyResult(self):
        meta = self.datacube.getMetaDictCopy()
        meta['interleave'] = 'bip'
        for hdr_item in ("reflectance scale factor", "ceiling", "bit depth"):
            if hdr_item in meta:
                del meta[hdr_item]
        return DummyCube(meta)
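

# ---------------------------------------------------------------------------
# Illustrative sketch (not called by the plugin): the per-frame correction applied in action()
# reduces to the expression below. The standalone function and its parameter names are
# hypothetical and exist only to document the math; the plugin itself operates on Spectronon
# cube objects and header metadata.
def _radiance_sketch(raw_frame, offset_frame, gain_frame,
                     cal_shutter, cal_gain_db, data_shutter, data_gain_db, max_allowed):
    """Return a corrected frame from a raw frame, assuming matching (samples, bands) shapes."""
    # Convert electronic gain from dB to a linear factor: linear = 10 ** (dB / 20)
    cal_gain = 10 ** (cal_gain_db / 20.)
    data_gain = 10 ** (data_gain_db / 20.)
    # Scale the calibration gain for differences in shutter (integration time) and electronic gain
    gain_factor = (cal_shutter * cal_gain) / (data_shutter * data_gain)
    # Subtract the dark offset, apply the gain, and clip to the output data type's range
    return numpy.clip((raw_frame - offset_frame) * (gain_factor * gain_frame), 0, max_allowed)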