First commit

1. Working hpi code;
2. Fixed the bug where radiance data were wrong after triggering exposure multiple times;
3. Calibration now uses the standard energy curve of the large Labsphere integrating sphere instead of the ASD-based energy curve (see the sketch below);
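A hedged illustration of the calibration described in item 3, assuming a simple per-band linear calibration against the integrating sphere's standard radiance curve; every name below is hypothetical and is not the repository's actual code:

def dn_to_radiance(dn, dn_dark, dn_sphere, L_sphere_standard):
    # Per-band coefficient derived from the Labsphere sphere's standard energy (radiance) curve,
    # using a dark-corrected measurement of the sphere itself.
    coeff = L_sphere_standard / (dn_sphere - dn_dark)
    # Apply the same dark correction to the scene DN and scale to radiance.
    return (dn - dn_dark) * coeff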
tangchao0503
2022-09-06 22:54:14 +08:00
commit 98cf134cca
106 changed files with 39400 additions and 0 deletions


@@ -0,0 +1,158 @@
'''
Improvements over version V1.2:
1. automatic exposure; 2. display the spectrometer temperature
'''
from ximea import xiapi
import cv2
import numpy as np
import matplotlib.pyplot as plt
from osgeo import gdal  # read/write image data
import os
import datetime  # ways to measure execution time in Python: https://blog.csdn.net/wangshuang1631/article/details/54286551
import sys, struct
#create instance for first connected camera
cam = xiapi.Camera()
# start communication; to open a specific device by serial number, use: cam.open_device_by_SN('41305651')
print('Opening first camera...')
cam.open_device()
# After opening the camera, print its information
print('SN: %s'% str(cam.get_device_sn(), encoding = "utf-8"))
print('Device name: %s'% str(cam.get_device_name(), encoding = "utf-8"))
print('Device type: %s'% str(cam.get_device_type(), encoding = "utf-8"))
print('Instance path: %s'% str(cam.get_device_inst_path(), encoding = "utf-8"))#Returns device instance path in operating system.
print('Location path: %s'% str(cam.get_device_loc_path(), encoding = "utf-8"))
# debug level, number of processing threads, and horizontal flip
print('Debug level: %s'% cam.get_debug_level())
print('Default number of threads per image processor: %d'% cam.get_proc_num_threads())
cam.set_proc_num_threads(8)
print('Current number of threads per image processor: %d'% cam.get_proc_num_threads())
print('Is horizontal flip enabled? %s' % str(cam.is_horizontal_flip()))
# Acquisition timing mode. This mode is supported by selected camera families: CB, MC, MT, MX
cam.set_acq_timing_mode('XI_ACQ_TIMING_MODE_FRAME_RATE_LIMIT')
mode_used = cam.get_acq_timing_mode()
if mode_used == 'XI_ACQ_TIMING_MODE_FRAME_RATE_LIMIT':
    print('Mode is XI_ACQ_TIMING_MODE_FRAME_RATE_LIMIT')
else:
    print('Mode is not XI_ACQ_TIMING_MODE_FRAME_RATE_LIMIT')
    sys.exit()
# # Set binning parameters
# # XI_BIN_MODE_SUM (default): The response from the combined pixels will be added, resulting in increased sensitivity.
# # XI_BIN_MODE_AVERAGE: The response from the combined pixels will be averaged, resulting in increased signal/noise ratio.
# cam.set_binning_selector('XI_BIN_SELECT_HOST_CPU')  # default is XI_BIN_SELECT_SENSOR; XI_BIN_SELECT_DEVICE_FPGA is not available
# cam.set_binning_horizontal_mode('XI_BIN_MODE_SUM')
# cam.set_binning_horizontal(2)
# cam.set_binning_vertical_mode('XI_BIN_MODE_SUM')
# cam.set_binning_vertical(2)
# settings, e.g. cam.set_param("exposure", 10000)
framerate = 10
cam.set_framerate(framerate)
cam.set_aeag_roi_offset_x(286)
cam.set_aeag_roi_offset_y(340)
cam.set_aeag_roi_height(300)
cam.set_aeag_roi_width(1365)
# cam.set_exp_priority(1)  # Exposure priority (0.8 = exposure 80%, gain 20%). XI_PRM_EXP_PRIORITY
# cam.set_ae_max_limit(24000)  # Maximum time (us) used for exposure in AEAG procedure. XI_PRM_AE_MAX_LIMIT
# cam.set_ag_max_limit(12)
# cam.set_aeag_level(50)  # Average intensity of output signal AEAG should achieve (in %). XI_PRM_AEAG_LEVEL
cam.enable_aeag()
# exposure = 1000
# cam.set_exposure(exposure)  # exposure time in microseconds; 1 s = 1,000,000 us
print('Framerate was set to %i FPS' %cam.get_framerate())
print('Exposure was set to %i us' %cam.get_exposure())
print('Gain was set to %i dB' %cam.get_gain())
# Before creating img = xiapi.Image(), a series of image parameters must be set, e.g. data format, bit depth, ...
cam.set_imgdataformat('XI_RAW16')
# cam.get_buffer_policy()
#create instance of Image to store image data and metadata
img = xiapi.Image()
#start data acquisition
print('Starting data acquisition...')
framenumber = 2000
image_container = np.empty((3, framenumber, 1365))
f = open('delete3', 'wb')
cam.start_acquisition()
starttime = datetime.datetime.now()
for i in range(framenumber):
    # get data and pass them from camera to img
    cam.get_image(img)
    print('Exposure was set to %i us' % cam.get_exposure())
    print('Gain was set to %i dB' % cam.get_gain())
    # Write each frame's valid data to file. Method 1: via struct.pack
    '''
    ------------------------- drawbacks of struct.pack ----------------------------------------
    Based on experience, I'd strongly advise against the use of struct.pack.
    Its API requires you to demangle the array (i.e. use the * operator) into variables,
    which is going to affect performance drastically for large array sizes.
    Every element of the array will need a new, on-the-fly 64-bit pointer (on a 64-bit machine)
    to be created on the stack to point into that element.
    Just stick to tostring/tobytes/tofile to avoid this overhead.
    For example, in our case, we were dealing with 100 MB arrays, which leads to a whopping 6.4 GB of RAM just so struct.pack can do its work.
    '''
    # f.write(struct.pack('f' * len(image_raw_numpy[339:639, 285:1650].flatten()),
    #                     *image_raw_numpy[339:639, 285:1650].flatten()))  # writing via struct this way is extremely slow
    # Write each frame's valid data to file. Method 2: via numpy.tobytes()
    '''
    Frame-drop notes:
    frame rate (FPS)    drop rate
    50                  0%
    100                 13%
    150                 17%
    '''
    # image_raw_numpy = img.get_image_data_numpy()
    # image_container[:, i, :] = image_raw_numpy[339:639, 285:1650][[36, 76, 121]]
    # f.write(image_raw_numpy[339:639, 285:1650].flatten().tobytes())
    # # f.write(image_raw_numpy.flatten().tobytes())
    # Write each frame's valid data to file. Method 3: directly write the byte stream returned by the camera
    '''Frame-drop notes:
    frame rate (FPS)    drop rate
    50                  0%
    100                 20%
    150                 60%
    '''
    # data_raw = img.get_image_data_raw()  # data_raw is a byte stream; one frame is 4708352 bytes, and one frame matrix holds 1216*1936 = 2354176 values, so each value is a 2-byte short integer (2354176 * 2 = 4708352)
    # f.write(data_raw[1313180:2473636])
    # f.write(data_raw)
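    # Hedged illustration (not in the original commit): assuming the raw stream is
    # row-major 16-bit data with shape (1216, 1936), one whole frame could be
    # reinterpreted without going through struct, e.g.:
    # frame = np.frombuffer(data_raw, dtype=np.uint16).reshape(1216, 1936)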
endtime = datetime.datetime.now()
f.close()
print('Image acquisition took %d s' % (endtime - starttime).seconds)
#stop data acquisition
print('Stopping acquisition...')
cam.stop_acquisition()
#stop communication
cam.close_device()
plt.imshow(np.dstack((image_container[2, :, :], image_container[1, :, :],
image_container[0, :, :])))
plt.show()
print('Done.')
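As a standalone, hedged illustration of the struct.pack vs. tobytes trade-off discussed in the docstring inside the acquisition loop (the array shape and repetition count below are arbitrary assumptions, not values from the commit):

import io
import struct
import timeit
import numpy as np

frame = (np.random.rand(300, 1365) * 4095).astype(np.uint16)  # synthetic 16-bit ROI frame

def write_with_struct(buf, arr):
    flat = arr.flatten()
    buf.write(struct.pack('%dH' % flat.size, *flat))  # unpacking every element is the costly part

def write_with_tobytes(buf, arr):
    buf.write(arr.tobytes())  # one contiguous copy of the underlying buffer

print('struct.pack:', timeit.timeit(lambda: write_with_struct(io.BytesIO(), frame), number=20))
print('tobytes    :', timeit.timeit(lambda: write_with_tobytes(io.BytesIO(), frame), number=20))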