Initial commit
1. Working hpi code. 2. Fixed the bug where the radiance data was wrong after triggering exposure multiple times. 3. Calibration now uses the standard energy curve of the large Labsphere integrating sphere instead of the ASD-based energy curve.
158
record_system_v13/1record_system_v1.3.py
Normal file
@@ -0,0 +1,158 @@
'''
Improvements over V1.2:
(1) automatic exposure; (2) display the spectrometer temperature
'''
from ximea import xiapi
import cv2
import numpy as np
import matplotlib.pyplot as plt
from osgeo import gdal  # read/write raster image data
import os
import datetime  # several ways to time Python code: https://blog.csdn.net/wangshuang1631/article/details/54286551
import sys, struct

# create instance for first connected camera
cam = xiapi.Camera()

# start communication; to open a specific device, use: cam.open_device_by_SN('41305651')
print('Opening first camera...')
cam.open_device()

# after opening the camera, print the device information
print('SN: %s' % str(cam.get_device_sn(), encoding="utf-8"))
print('Device name: %s' % str(cam.get_device_name(), encoding="utf-8"))
print('Device type: %s' % str(cam.get_device_type(), encoding="utf-8"))
print('Instance path: %s' % str(cam.get_device_inst_path(), encoding="utf-8"))  # Returns device instance path in operating system.
print('Location path: %s' % str(cam.get_device_loc_path(), encoding="utf-8"))

# debug level, number of image-processing threads, and horizontal flip
print('Debug level: %s' % cam.get_debug_level())
print('Default number of threads per image processor: %d' % cam.get_proc_num_threads())
cam.set_proc_num_threads(8)
print('Current number of threads per image processor: %d' % cam.get_proc_num_threads())
print('Is horizontal flip enabled?, %s' % str(cam.is_horizontal_flip()))


# Acquisition timing mode. This mode is supported by selected camera families: CB, MC, MT, MX
cam.set_acq_timing_mode('XI_ACQ_TIMING_MODE_FRAME_RATE_LIMIT')
mode_used = cam.get_acq_timing_mode()
if mode_used == 'XI_ACQ_TIMING_MODE_FRAME_RATE_LIMIT':
    print('Mode is XI_ACQ_TIMING_MODE_FRAME_RATE_LIMIT')
else:
    print('Mode is not XI_ACQ_TIMING_MODE_FRAME_RATE_LIMIT')
    sys.exit()


# # Binning settings
# # XI_BIN_MODE_SUM (default): The response from the combined pixels will be added, resulting in increased sensitivity.
# # XI_BIN_MODE_AVERAGE: The response from the combined pixels will be averaged, resulting in increased signal/noise ratio.
# cam.set_binning_selector('XI_BIN_SELECT_HOST_CPU')  # default is XI_BIN_SELECT_SENSOR; XI_BIN_SELECT_DEVICE_FPGA is not available
# cam.set_binning_horizontal_mode('XI_BIN_MODE_SUM')
# cam.set_binning_horizontal(2)
# cam.set_binning_vertical_mode('XI_BIN_MODE_SUM')
# cam.set_binning_vertical(2)

# settings, e.g. cam.set_param("exposure", 10000)
framerate = 10
cam.set_framerate(framerate)
cam.set_aeag_roi_offset_x(286)
cam.set_aeag_roi_offset_y(340)
cam.set_aeag_roi_height(300)
cam.set_aeag_roi_width(1365)
# cam.set_exp_priority(1)      # Exposure priority (0.8 - exposure 80%, gain 20%). XI_PRM_EXP_PRIORITY
# cam.set_ae_max_limit(24000)  # Maximum time (us) used for exposure in AEAG procedure. XI_PRM_AE_MAX_LIMIT
# cam.set_ag_max_limit(12)
# cam.set_aeag_level(50)       # Average intensity of output signal AEAG should achieve (in %). XI_PRM_AEAG_LEVEL
cam.enable_aeag()
# exposure = 1000
# cam.set_exposure(exposure)   # exposure time is in microseconds; 1 s = 1,000,000 us
print('Framerate was set to %i FPS' % cam.get_framerate())
print('Exposure was set to %i us' % cam.get_exposure())
print('Gain was set to %i dB' % cam.get_gain())


# Before creating img = xiapi.Image(), configure the image parameters, e.g. data format and bit depth
cam.set_imgdataformat('XI_RAW16')
# cam.get_buffer_policy()

# create instance of Image to store image data and metadata
img = xiapi.Image()

# start data acquisition
print('Starting data acquisition...')

framenumber = 2000
image_container = np.empty((3, framenumber, 1365))

f = open('delete3', 'wb')
cam.start_acquisition()
starttime = datetime.datetime.now()
for i in range(framenumber):
    # get data and pass them from camera to img
    cam.get_image(img)
    print('Exposure was set to %i us' % cam.get_exposure())
    print('Gain was set to %i dB' % cam.get_gain())

    # Write each frame's valid data to file, method 1: struct.pack
    '''
    ------------------------- struct.pack drawbacks ----------------------------------------
    Based on experience, I'd strongly advise against the use of struct.pack.
    Its API requires you to demangle the array (i.e. use the * operator) into variables,
    which is going to affect performance drastically for large array sizes.
    Every element of the array will need a new, on-the-fly 64-bit pointer (on a 64-bit machine)
    to be created on the stack to point into that element.
    Just stick to tostring/tobytes/tofile to avoid this overhead.
    For example, in our case, we were dealing with 100 MB arrays, which leads to a whopping 6.4 GB of RAM just so struct.pack can do its work.
    '''
    # f.write(struct.pack('f' * len(image_raw_numpy[339:639, 285:1650].flatten()),
    #                     *image_raw_numpy[339:639, 285:1650].flatten()))  # this struct-based approach is extremely slow
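    # A quick check of the claim above (a sketch; run once outside the acquisition loop,
    # sizes assumed to match the ROI used here):
    #   import timeit
    #   a = img.get_image_data_numpy()[339:639, 285:1650].astype(np.float32)
    #   print(timeit.timeit(lambda: struct.pack('f' * a.size, *a.flatten()), number=10))
    #   print(timeit.timeit(lambda: a.flatten().tobytes(), number=10))  # typically orders of magnitude faster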

    # Write each frame's valid data to file, method 2: numpy.tobytes()
    '''
    Frame-drop notes:
    frame rate (FPS)    drop rate
    50                  0%
    100                 13%
    150                 17%
    '''
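    # How the drop rates above can be measured (a sketch; assumes the camera's frame counter is
    # exposed as img.acq_nframe and increments by one per acquired frame):
    #   if i == 0:
    #       first_nframe = img.acq_nframe
    #   elif i == framenumber - 1:
    #       produced = img.acq_nframe - first_nframe + 1
    #       print('drop rate: %.1f%%' % (100.0 * (1 - framenumber / produced)))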
    image_raw_numpy = img.get_image_data_numpy()
    image_container[:, i, :] = image_raw_numpy[339:639, 285:1650][[36, 76, 121]]
    f.write(image_raw_numpy[339:639, 285:1650].flatten().tobytes())
    # f.write(image_raw_numpy.flatten().tobytes())

    # Write each frame's valid data to file, method 3: write the raw byte stream returned by the camera
    '''
    Frame-drop notes:
    frame rate (FPS)    drop rate
    50                  0%
    100                 20%
    150                 60%
    '''
    # data_raw = img.get_image_data_raw()  # data_raw is a byte stream; one frame is 4708352 bytes, while a frame holds 1216*1936 = 2354176 values, so each value is a 2-byte short integer
    # f.write(data_raw[1313180:2473636])
    # f.write(data_raw)
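    # How the slice above maps to pixel rows (a sketch; assumes the 1216x1936 XI_RAW16 layout
    # described in the comment and ignores any per-frame header or padding):
    #   bytes_per_row = 1936 * 2
    #   roi_bytes = data_raw[339 * bytes_per_row : 639 * bytes_per_row]  # rows 339..638, full width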

endtime = datetime.datetime.now()
f.close()

print('Image acquisition took %d s' % (endtime - starttime).seconds)

# stop data acquisition
print('Stopping acquisition...')
cam.stop_acquisition()
# stop communication
cam.close_device()

# quick-look RGB composite from the three stored bands; imshow expects float RGB in [0, 1], so scale by the maximum
rgb = np.dstack((image_container[2, :, :], image_container[1, :, :], image_container[0, :, :]))
plt.imshow(rgb / rgb.max())
plt.show()

print('Done.')