diff --git a/catkit2/base_services/camera.py b/catkit2/base_services/camera.py new file mode 100644 index 000000000..b2161fe1f --- /dev/null +++ b/catkit2/base_services/camera.py @@ -0,0 +1,407 @@ +from ..testbed.service import Service +from catkit2.testbed.tracing import trace_interval + +import threading +import numpy as np + + +class StoppedAcquisition: + '''Context manager for stopping and restarting the acquisition temporarily. + ''' + def __init__(self, cam): + self.cam = cam + + def __enter__(self): + self.was_running = self.cam.is_acquiring.get()[0] > 0 + + if self.was_running: + self.cam._end_acquisition() + + # Wait for the acquisition to actually end. + while self.cam.is_acquiring.get()[0]: + self.cam.sleep(0.001) + + def __exit__(self, exc_type, exc_value, exc_traceback): + if self.was_running: + self.cam._start_acquisition() + + +class CameraService(Service): + NUM_FRAMES_IN_BUFFER = 20 + PIXEL_DTYPE = 'float32' + + def __init__(self, service_type): + super().__init__(service_type) + + self.should_be_acquiring = threading.Event() + self.should_be_acquiring.set() + + def open(self): + # Use the full sensor size here to always allocate enough shared memory. + self.log.info('Creating data streams using base camera service') + + # frame orientation + self.rot90 = self.config.get('rot90', False) + self.flip_x = self.config.get('flip_x', False) + self.flip_y = self.config.get('flip_y', False) + + self.log.info('Camera orientation: rot90={}, flip_x={}, flip_y={}'.format(self.rot90, self.flip_x, self.flip_y)) + self.log.info('Configured sensor width: {}, height: {}'.format(self.sensor_width, self.sensor_height)) + + # All configured values are in user coordinate system, i.e. the values that the user sees instead of camera coordinates. 
+ + # width and height of the ROI, defaults to full sensor size + self.width = self.config.get('width', self.sensor_width) + self.height = self.config.get('height', self.sensor_height) + self.log.info('Configured ROI width: {}, height: {}'.format(self.width, self.height)) + + self._offset_x = self.config.get('offset_x', 0) + self._offset_y = self.config.get('offset_y', 0) + + self.offset_x = self._offset_x + self.offset_y = self._offset_y + + self.log.info('Configured offsets x: {}, y: {}'.format(self.offset_x, self.offset_y)) + + # Note that transform_offset() must be called before updating the values of self.width and self.height. + + self.gain = self.config.get('gain', 0) + self.exposure_time = self.config.get('exposure_time', 1000) + + # Create datastreams + # Use the full sensor size here to always allocate enough shared memory. + self.images = self.make_data_stream('images', 'float32', [self.sensor_height, self.sensor_width], self.NUM_FRAMES_IN_BUFFER) + self.temperature = self.make_data_stream('temperature', 'float64', [1], self.NUM_FRAMES_IN_BUFFER) + + self.is_acquiring = self.make_data_stream('is_acquiring', 'int8', [1], self.NUM_FRAMES_IN_BUFFER) + self.is_acquiring.submit_data(np.array([0], dtype='int8')) + + x, y = self.transform_offset(self._offset_x, self._offset_y) + x_back, y_back = self.transform_offset(x, y, inverse=True) + assert x_back == self._offset_x and y_back == self._offset_y, f"Inverse transform test returned {(x_back, y_back)}, was expecting {(self._offset_x, self._offset_y)}" + + def make_property_helper(property_name, read_only=False, requires_stopped_acquisition=False, dtype=None): + if dtype is None: + dtype = '' + + if read_only: + self.make_property(property_name, lambda: getattr(self, property_name)) + else: + if requires_stopped_acquisition: + def setter(val): + with StoppedAcquisition(self): + setattr(self, property_name, val) + else: + def setter(val): + setattr(self, property_name, val) + + self.make_property(property_name, 
lambda: getattr(self, property_name), setter) + + make_property_helper('width', dtype='int64', requires_stopped_acquisition=self.width_requires_stopped_acquisition) + make_property_helper('height', dtype='int64', requires_stopped_acquisition=self.height_requires_stopped_acquisition) + make_property_helper('offset_x', dtype='int64', requires_stopped_acquisition=self.offset_x_requires_stopped_acquisition) + make_property_helper('offset_y', dtype='int64', requires_stopped_acquisition=self.offset_y_requires_stopped_acquisition) + + make_property_helper('exposure_time', dtype='int64', requires_stopped_acquisition=self.exposure_time_requires_stopped_acquisition) + make_property_helper('gain', dtype='int64', requires_stopped_acquisition=self.gain_requires_stopped_acquisition) + + make_property_helper('sensor_width', read_only=True, dtype='int64') + make_property_helper('sensor_height', read_only=True, dtype='int64') + + self.make_command('start_acquisition', self._start_acquisition) + self.make_command('end_acquisition', self._end_acquisition) + + self.temperature_thread = threading.Thread(target=self.monitor_temperature) + self.temperature_thread.start() + + def main(self): + while not self.should_shut_down: + if self.should_be_acquiring.wait(0.05): + self.acquisition_loop() + + def monitor_temperature(self): + while not self.should_shut_down: + temperature = self.get_temperature() + self.temperature.submit_data(np.array([temperature])) + + self.sleep(1) + + def close(self): + self.temperature_thread.join() + + def acquisition_loop(self): + # Make sure the data stream has the right size, and images have right pixel datatype. + has_correct_parameters = np.allclose(self.images.shape, [self.height, self.width]) + has_correct_parameters &= self.images.dtype == self.PIXEL_DTYPE + + if not has_correct_parameters: + self.images.update_parameters(self.PIXEL_DTYPE, [self.height, self.width], self.NUM_FRAMES_IN_BUFFER) + + # Start acquisition on the camera. 
+ self.start_acquisition() + + # Communicate to outside that we started acquisition. + self.is_acquiring.submit_data(np.array([1], dtype='int8')) + + try: + # Wait until we are commanded to stop acquisition. + while self.should_be_acquiring.is_set() and not self.should_shut_down: + img = self.capture_image() + transformed_img = self.rot_flip_image(img) + with trace_interval('processing frame'): + self.images.submit_data(transformed_img.astype('float32')) + + finally: + self.end_acquisition() + self.is_acquiring.submit_data(np.array([0], dtype='int8')) + + def transform_offset(self, x, y, inverse=False): + """Convert relative camera offsets given by the user to absolute offsets in camera coordinates. + + The forward transformation is done by performing the following procedure: + 1.) Translate the origin to the center of the ROI. + 2.) If rot90 is True, rotate 90 degrees counter-clockwise about the center of the ROI. + 3.) If flip_x is True, reflect in x. + 4.) If flip_y is True, reflect in y. + 5.) Translate the origin back to the upper left of the ROI. + + Parameters + ---------- + x: float + The x-offset coordinate in the user coordinate system. + y: float + The y-offset coordinate in the user coordinate system. + inverse: bool, optional + If True, the transformation is performed in reverse, i.e. from camera coordinates to user coordinates. + Defaults to False. + + Returns + ------- + new_x, new_y: float, float + The transformed x and y coordinates for their location in camera array coordinates. If there is no rotation + or flip in x or y, then this returns the same x, y values that are input. + """ + # Define the translation matrix T_center to get to the center of the sensor. 
+ T_center = np.zeros((3, 3)) + np.fill_diagonal(T_center, 1) + if not inverse and self.rot90: + T_center[0][-1] = -self.sensor_height / 2 + T_center[1][-1] = -self.sensor_width / 2 + else: + T_center[0][-1] = -self.sensor_width / 2 + T_center[1][-1] = -self.sensor_height / 2 + + # Initialize rotation matrix R. + R = np.zeros((3, 3)) + + # Initialize x reflection matrix, X. Defaults to unity matrix. + X = np.eye(3, 3) + + # Initialize y reflection matrix, Y. Defaults to unity matrix. + Y = np.eye(3, 3) + + if self.rot90: + # Define rotation matrix. + R[0][1] = -1 + R[1][0] = 1 + R[2][2] = 1 + else: + # Default to unity matrix. + np.fill_diagonal(R, 1) + + if self.flip_x: + # Define x reflection matrix. + X[0][0] = -1 + + if self.flip_y: + # Define y reflection matrix. + Y[1][1] = -1 + + # Define translation matrix back so that the origin is in the upper left as expected. + T_back = np.eye(3, 3) + + # If forward with rotation, or if inverse without rotation. + if inverse and self.rot90: + # Want to come back to new origin for which the height/width dimensions will be flipped if rotated. + T_back[0][-1] = self.sensor_height / 2 + T_back[1][-1] = self.sensor_width / 2 + else: + T_back[0][-1] = self.sensor_width / 2 + T_back[1][-1] = self.sensor_height / 2 + + # Perform the dot product. + # Translate to ROI center, rotate, flip in x then in y, + # and finally translate back to origin. 
+ coords = [x, y, 1] + if inverse: + new_coords = np.linalg.multi_dot([T_back, R.T, Y, X, T_center, coords]) + else: + new_coords = np.linalg.multi_dot([T_back, Y, X, R, T_center, coords]) + + return int(np.round(new_coords[0])), int(np.round(new_coords[1])) + + def rot_flip_image(self, img): + # rotation needs to happen first + if self.rot90: + img = np.rot90(img) + if self.flip_x: + img = np.flipud(img) + if self.flip_y: + img = np.fliplr(img) + return np.ascontiguousarray(img) + + def _start_acquisition(self): + self.should_be_acquiring.set() + + def _end_acquisition(self): + self.should_be_acquiring.clear() + + @property + def exposure_time(self): + return self.get_exposure_time() + + @exposure_time.setter + def exposure_time(self, exposure_time): + self.set_exposure_time(exposure_time) + + @property + def width(self): + if self.rot90: + return int(self.get_roi_height()) + else: + return int(self.get_roi_width()) + + @width.setter + def width(self, width): + if self.rot90: + self.set_roi_height(width) + else: + self.set_roi_width(width) + + @property + def height(self): + if self.rot90: + return int(self.get_roi_width()) + else: + return int(self.get_roi_height()) + + @height.setter + def height(self, height): + if self.rot90: + self.set_roi_width(height) + else: + self.set_roi_height(height) + + @property + def offset_x(self): + camera_offset_x = self.get_roi_offset_x() + camera_offset_y = self.get_roi_offset_y() + offset_x, _ = self.transform_offset(camera_offset_x, camera_offset_y, inverse=True) + return offset_x + + @offset_x.setter + def offset_x(self, offset_x): + self._offset_x = offset_x + camera_offset_x, camera_offset_y = self.transform_offset(self._offset_x, self._offset_y) + self.set_roi_offset_x(camera_offset_x) + self.set_roi_offset_y(camera_offset_y) + + @property + def offset_y(self): + camera_offset_x = self.get_roi_offset_x() + camera_offset_y = self.get_roi_offset_y() + _, offset_y = self.transform_offset(camera_offset_x, camera_offset_y, 
inverse=True) + return offset_y + + @offset_y.setter + def offset_y(self, offset_y): + self._offset_y = offset_y + camera_offset_x, camera_offset_y = self.transform_offset(self._offset_x, self._offset_y) + self.set_roi_offset_y(camera_offset_y) + self.set_roi_offset_x(camera_offset_x) + + @property + def sensor_width(self): + if self.rot90: + return self.get_sensor_height() + else: + return self.get_sensor_width() + + @property + def sensor_height(self): + if self.rot90: + return self.get_sensor_width() + else: + return self.get_sensor_height() + + def start_acquisition(self): + """Start acquisition sequence on the camera.""" + raise NotImplementedError() + + def end_acquisition(self): + """End acquisition sequence on the camera.""" + raise NotImplementedError() + + def capture_image(self): + """Capture an image on the camera and return it.""" + raise NotImplementedError() + + def get_roi_width(self): + """Get the width of the ROI on the camera.""" + raise NotImplementedError() + + def set_roi_width(self, width): + """Set the width of the ROI on the camera.""" + raise NotImplementedError() + + def get_roi_height(self): + """Get the height of the ROI on the camera.""" + raise NotImplementedError() + + def set_roi_height(self, height): + """Set the height of the ROI on the camera.""" + raise NotImplementedError() + + def get_roi_offset_x(self): + """Get the x-offset of the ROI on the camera.""" + raise NotImplementedError() + + def set_roi_offset_x(self, offset_x): + """Set the x-offset of the ROI on the camera.""" + raise NotImplementedError() + + def get_roi_offset_y(self): + """Get the y-offset of the ROI on the camera.""" + raise NotImplementedError() + + def set_roi_offset_y(self, offset_y): + """Set the y-offset of the ROI on the camera.""" + raise NotImplementedError() + + def get_sensor_width(self): + """Get the width of the camera sensor.""" + raise NotImplementedError() + + def get_sensor_height(self): + """Get the height of the camera sensor.""" + raise 
NotImplementedError() + + def get_exposure_time(self): + """Get the exposure time on the camera.""" + raise NotImplementedError() + + def set_exposure_time(self, exposure_time): + """Set the exposure time on the camera.""" + raise NotImplementedError() + + def get_gain(self): + """Get the gain on the camera.""" + raise NotImplementedError() + + def set_gain(self, gain): + """Set the gain on the camera.""" + raise NotImplementedError() + + def get_temperature(self): + """Get the temperature on the camera.""" + raise NotImplementedError() diff --git a/catkit2/services/allied_vision_camera_v2/allied_vision_camera_v2.py b/catkit2/services/allied_vision_camera_v2/allied_vision_camera_v2.py new file mode 100644 index 000000000..6c4a79243 --- /dev/null +++ b/catkit2/services/allied_vision_camera_v2/allied_vision_camera_v2.py @@ -0,0 +1,127 @@ +''' +This module contains a service for Allied Vision cameras. + +This service is a wrapper around the Vimba X SDK. +It provides a simple interface to control the camera and acquire images. +''' + +from __future__ import annotations +import contextlib + +from catkit2.base_services.camera import CameraService, StoppedAcquisition + +from vmbpy import (AllocationMode, + Camera, Frame, Stream, + FrameStatus, + PixelFormat, + VmbSystem, + VmbCameraError) + + +class AlliedVisionCamera(CameraService): + # Stopped acquisition flags for base class + width_requires_stopped_acquisition = True + height_requires_stopped_acquisition = True + offset_x_requires_stopped_acquisition = True + offset_y_requires_stopped_acquisition = True + exposure_time_requires_stopped_acquisition = False + gain_requires_stopped_acquisition = False + + def __init__(self, exit_stack: contextlib.ExitStack): + ''' + Create a new AlliedVisionCamera service. + + Parameters + ---------- + exit_stack : contextlib.ExitStack + The exit stack to which this service should be added. + This is used to ensure that resources are properly cleaned up when the service is closed. 
+ ''' + super().__init__('allied_vision_camera_v2') + self.exit_stack = exit_stack + + # dictionary to store the pixel format and the corresponding numpy dtype and vimba pixel format + self.pixel_formats = { + "Mono8": PixelFormat.Mono8, + "Mono12": PixelFormat.Mono12, + "Mono12Packed": PixelFormat.Mono12Packed, + "Mono14": PixelFormat.Mono14, + "Mono16": PixelFormat.Mono16, + } + self.current_pixel_format = None + + def open(self): + pass + + def close(self): + pass + + def start_acquisition(self): + pass + + def end_acquisition(self): + pass + + def capture_image(self): + pass + + def get_roi_width(self): + pass + + def set_roi_width(self, width): + pass + + def get_roi_height(self): + pass + + def set_roi_height(self, height): + pass + + def get_roi_offset_x(self): + pass + + def set_roi_offset_x(self, offset_x): + pass + + def get_roi_offset_y(self): + pass + + def set_roi_offset_y(self, offset_y): + pass + + def get_sensor_width(self): + pass + + def get_sensor_height(self): + pass + + def get_exposure_time(self): + pass + + def set_exposure_time(self, exposure_time): + pass + + def get_gain(self): + pass + + def set_gain(self, gain): + pass + + def get_temperature(self): + ''' + Get the temperature of the camera. + + This function gets the temperature of the camera. + + Returns + ------- + float: + The temperature of the camera in degrees Celsius. 
+ ''' + return self.cam.DeviceTemperature.get() + + +if __name__ == '__main__': + with contextlib.ExitStack() as main_exit_stack: + service = AlliedVisionCamera(main_exit_stack) + service.run() diff --git a/catkit2/services/dummy_camera/dummy_camera.py b/catkit2/services/dummy_camera/dummy_camera.py index c085f97d6..a0c38233d 100644 --- a/catkit2/services/dummy_camera/dummy_camera.py +++ b/catkit2/services/dummy_camera/dummy_camera.py @@ -1,10 +1,11 @@ from catkit2.testbed.service import Service import time -from hcipy import * +import hcipy import numpy as np import threading + class DummyCamera(Service): def __init__(self): super().__init__('dummy_camera') @@ -16,16 +17,16 @@ def __init__(self): self._offset_x = self.config['offset_x'] self._offset_y = self.config['offset_y'] - self.flux = self.config['flux'] + self.sensor_width = self.config['sensor_width'] self.sensor_height = self.config['sensor_height'] self.should_be_acquiring = threading.Event() self.should_be_acquiring.set() - self.pupil_grid = make_pupil_grid(128) - self.aperture = evaluate_supersampled(make_hicat_aperture(True), self.pupil_grid, 4) - self.wf = Wavefront(self.aperture) + self.pupil_grid = hcipy.make_pupil_grid(128) + self.aperture = hcipy.evaluate_supersampled(hcipy.make_hicat_aperture(True), self.pupil_grid, 4) + self.wf = hcipy.Wavefront(self.aperture) self.wf.total_power = 1 self.images = self.make_data_stream('images', 'uint16', [self.sensor_height, self.sensor_width], 20) @@ -64,17 +65,17 @@ def make_property_helper(name, read_only=False): def get_image(self): try: - focal_grid = make_focal_grid(8, np.array([self.sensor_width, self.sensor_height]) / 16) + focal_grid = hcipy.make_focal_grid(8, np.array([self.sensor_width, self.sensor_height]) / 16) # focal_grid = focal_grid.shifted([-self._offset_x / 8, -self._offset_y / 8]) - prop = FraunhoferPropagator(self.pupil_grid, focal_grid) + prop = hcipy.FraunhoferPropagator(self.pupil_grid, focal_grid) img = prop(self.wf).power.shaped img = 
img[self._offset_y:self._offset_y + self._height, self._offset_x:self._offset_x + self._width] img = img * self.flux * self.exposure_time / 1e6 - img = large_poisson(img) + img = hcipy.large_poisson(img) img = np.clip(img, 0, 2**16 - 1).astype('uint16') except Exception as e: print(e) diff --git a/catkit2/services/dummy_camera_v2/dummy_camera_v2.py b/catkit2/services/dummy_camera_v2/dummy_camera_v2.py new file mode 100644 index 000000000..58f34f549 --- /dev/null +++ b/catkit2/services/dummy_camera_v2/dummy_camera_v2.py @@ -0,0 +1,103 @@ +import time +from hcipy import evaluate_supersampled, make_hicat_aperture, make_pupil_grid, Wavefront +import numpy as np + +from catkit2.base_services.camera import CameraService + + +class DummyCamera(CameraService): + def __init__(self): + super().__init__('dummy_camera_v2') + self.log.debug('Dummy camera service started') + self.flux = self.config['flux'] + + self.pupil_grid = make_pupil_grid(128) + self.aperture = evaluate_supersampled(make_hicat_aperture(True), self.pupil_grid, 4) + self.wf = Wavefront(self.aperture) + self.wf.total_power = 1 + + self.make_command('repeater', self.repeater) + + def open(self): + # Set up general camera properties. 
+ super().open() + + def close(self): + super().close() + self.camera.close() + + # TODO - write all functions in here + def start_acquisition(self): + # TODO - write a function to start the acquisition + self.should_be_acquiring.set() + pass + + def end_acquisition(self): + # TODO - write a function to start the acquisition + self.should_be_acquiring.clear() + pass + + def monitor_temperature(self): + while not self.should_shut_down: + self.log.debug('Monitoring temperature') + temperature = self.get_temperature() + self.temperature.submit_data(np.array([temperature])) + self.sleep(0.1) + + def get_temperature(self): + return np.sin(2 * np.pi * time.time() / 10) + + def get_roi_width(self): + return self.config['width'] + + def set_roi_width(self, width): + return width + + def get_roi_height(self): + return self.config['height'] + + def set_roi_height(self, height): + return height + + def get_roi_offset_x(self): + pass + + def set_roi_offset_x(self, offset_x): + pass + + def get_roi_offset_y(self): + pass + + def set_roi_offset_y(self, offset_y): + pass + + def get_sensor_width(self): + return self.config['sensor_width'] + + def get_sensor_height(self): + return self.config['sensor_height'] + + def get_exposure_time(self): + pass + + def set_exposure_time(self, exposure_time): + pass + + def get_gain(self): + pass + + def set_gain(self, gain): + pass + + +if __name__ == '__main__': + try: + service = DummyCamera() + service.run() + + except Exception: + import traceback + + print(traceback.format_exc()) + input() + diff --git a/catkit2/services/flir_camera/flir_camera.py b/catkit2/services/flir_camera/flir_camera.py index df4bcb5c6..c68a5ce34 100644 --- a/catkit2/services/flir_camera/flir_camera.py +++ b/catkit2/services/flir_camera/flir_camera.py @@ -7,6 +7,7 @@ from catkit2.testbed.service import Service from catkit2.testbed.tracing import trace_interval + def _create_property(flir_property_name, read_only=False, stopped_acquisition=True): def getter(self): 
with self.mutex: @@ -32,6 +33,7 @@ def setter(self, value): return property(getter, setter) + def _create_enum_property(flir_property_name, enum_name, stopped_acquisition=True): def getter(self): with self.mutex: @@ -63,6 +65,7 @@ def setter(self, value): return property(getter, setter) + class FlirCamera(Service): NUM_FRAMES_IN_BUFFER = 20 diff --git a/catkit2/services/flir_camera_v2/flir_camera_v2.py b/catkit2/services/flir_camera_v2/flir_camera_v2.py new file mode 100644 index 000000000..4af58898d --- /dev/null +++ b/catkit2/services/flir_camera_v2/flir_camera_v2.py @@ -0,0 +1,180 @@ +import threading +from PySpin import PySpin +from contextlib import nullcontext + +from catkit2.base_services.camera import CameraService, StoppedAcquisition +from catkit2.testbed.tracing import trace_interval + + +def make_getter(flir_property_name): + def getter(self): + with self.mutex: + return getattr(self.cam, flir_property_name).GetValue() + + return getter + + +def make_setter(flir_property_name, requires_stopped_acquisition=True): + def setter(self, value): + if requires_stopped_acquisition: + context_manager = StoppedAcquisition(self) + else: + context_manager = nullcontext() + + with context_manager: + with self.mutex: + getattr(self.cam, flir_property_name).SetValue(value) + + return setter + + +def make_enum_getter(flir_property_name, enum_values): + def getter(self): + with self.mutex: + value = getattr(self.cam, flir_property_name).GetValue() + + # Reverse search in enum dictionary. 
+ for key, val in enum_values.items(): + if value == val: + return key + + return getter + + +def make_enum_setter(flir_property_name, enum_name, requires_stopped_acquisition=True): + def setter(self, value): + value = getattr(self, enum_name)[value] + + if requires_stopped_acquisition: + context_manager = StoppedAcquisition(self) + else: + context_manager = nullcontext() + + with context_manager: + with self.mutex: + getattr(self.cam, flir_property_name).SetValue(value) + + return setter + + +class FlirCamera(CameraService): + pixel_format_enum = { + 'mono8': PySpin.PixelFormat_Mono8, + 'mono12p': PySpin.PixelFormat_Mono12p, + 'mono16': PySpin.PixelFormat_Mono16 + } + + adc_bit_depth_enum = { + '8bit': PySpin.AdcBitDepth_Bit8, + '10bit': PySpin.AdcBitDepth_Bit10, + '12bit': PySpin.AdcBitDepth_Bit12, + '14bit': PySpin.AdcBitDepth_Bit14 + } + + def __init__(self): + super().__init__('flir_camera_v2') + + self.serial_number = self.config['serial_number'] + + # Create lock for camera access + self.mutex = threading.Lock() + + def open(self): + self.system = PySpin.System.GetInstance() + self.cam_list = self.system.GetCameras() + self.cam = self.cam_list.GetBySerial(str(self.serial_number)) + + self.cam.Init() + + # Make sure that the camera is stopped. 
+ self.cam.BeginAcquisition() + self.cam.EndAcquisition() + + # Turn off indicator led + self.cam.DeviceIndicatorMode.SetValue(PySpin.DeviceIndicatorMode_ErrorStatus) + + # Set standard exposure settings + self.cam.ExposureAuto.SetValue(PySpin.ExposureAuto_Off) + self.cam.ExposureMode.SetValue(PySpin.ExposureMode_Timed) + self.cam.GainAuto.SetValue(PySpin.GainAuto_Off) + self.cam.GammaEnable.SetValue(False) + self.cam.BlackLevelClampingEnable.SetValue(True) + self.cam.BlackLevel.SetValue(1 / 255 * 100) + self.cam.AcquisitionMode.SetValue(PySpin.AcquisitionMode_Continuous) + self.cam.TLStream.StreamBufferHandlingMode.SetValue(PySpin.StreamBufferHandlingMode_NewestOnly) + + self.pixel_format = self.config['pixel_format'] + self.adc_bit_depth = self.config['adc_bit_depth'] + + super().open() + + def close(self): + super().close() + + self.cam.DeInit() + self.cam = None + self.cam_list.Clear() + + self.system.ReleaseInstance() + self.system = None + + def start_acquisition(self): + self.cam.BeginAcquisition() + + def end_acquisition(self): + self.cam.EndAcquisition() + + def capture_image(self): + while self.should_be_acquiring.is_set() and not self.should_shut_down: + try: + with self.mutex: + image_result = self.cam.GetNextImage(10) + except PySpin.SpinnakerException as e: + if e.errorcode == -1011: + # The timeout was triggered. Nothing to worry about. + continue + elif e.errorcode == -1010: + # The camera is not streaming anymore. + return + raise + + try: + if image_result.IsIncomplete(): + continue + + img = image_result.Convert(self.pixel_format).GetNDArray().astype(self.PIXEL_DTYPE, copy=False) + + # Submit image to datastream. 
+ self.images.submit_data(img) + + finally: + image_result.Release() + + get_roi_width = make_getter('Width') + set_roi_width = make_setter('Width') + + get_roi_height = make_getter('Height') + set_roi_height = make_setter('Height') + + get_roi_offset_x = make_getter('OffsetX') + set_roi_offset_x = make_setter('OffsetX', requires_stopped_acquisition=False) + + get_roi_offset_y = make_getter('OffsetY') + set_roi_offset_y = make_setter('OffsetY', requires_stopped_acquisition=False) + + get_sensor_width = make_getter('SensorWidth') + get_sensor_height = make_getter('SensorHeight') + + get_exposure_time = make_getter('ExposureTime') + set_exposure_time = make_setter('ExposureTime', requires_stopped_acquisition=False) + + get_gain = make_getter('Gain') + set_gain = make_setter('Gain', requires_stopped_acquisition=False) + + get_temperature = make_getter('DeviceTemperature') + + get_pixel_format = make_enum_getter('PixelFormat', pixel_format_enum) + set_pixel_format = make_enum_setter('PixelFormat', pixel_format_enum) + + get_adc_bit_depth = make_enum_getter('AdcBitDepth', adc_bit_depth_enum) + set_adc_bit_depth = make_enum_setter('AdcBitDepth', adc_bit_depth_enum) diff --git a/catkit2/services/hamamatsu_camera/hamamatsu_camera.py b/catkit2/services/hamamatsu_camera/hamamatsu_camera.py index 41ba926da..4a390d4fd 100644 --- a/catkit2/services/hamamatsu_camera/hamamatsu_camera.py +++ b/catkit2/services/hamamatsu_camera/hamamatsu_camera.py @@ -29,10 +29,12 @@ class CoolerMode(Enum): on = 2.0 # target temperature = -20 deg max = 4.0 # target temperature = -31 deg + class FanStatus(Enum): off = 1.0 on = 2.0 + def _create_property(hamamatsu_property_name, read_only=False, stopped_acquisition=True): def getter(self): with self.mutex: diff --git a/catkit2/services/hamamatsu_camera_v2/hamamatsu_camera_v2.py b/catkit2/services/hamamatsu_camera_v2/hamamatsu_camera_v2.py new file mode 100644 index 000000000..789ea802a --- /dev/null +++ 
b/catkit2/services/hamamatsu_camera_v2/hamamatsu_camera_v2.py @@ -0,0 +1,283 @@ +""" +This module contains a service for Hamamatsu digital cameras. + +This service is a wrapper around the DCAM-SDK4. +It provides a simple interface to control the camera and acquire images. +""" +from enum import Enum +import os +import sys +import threading +import numpy as np +from catkit2.base_services.camera import CameraService + +try: + sdk_path = os.environ.get('CATKIT_DCAM_SDK_PATH') + if sdk_path is not None: + sys.path.append(sdk_path) + + import dcam +except ImportError: + print('To use Hamamatsu cameras, you need to set the CATKIT_DCAM_SDK_PATH environment variable.') + raise + + +class CoolerMode(Enum): + off = 1.0 + on = 2.0 # target temperature = -20 deg + max = 4.0 # target temperature = -31 deg + + +class FanStatus(Enum): + off = 1.0 + on = 2.0 + + +class HamamatsuCamera(CameraService): + # Stopped acquisition flags for base class + width_requires_stopped_acquisition = True + height_requires_stopped_acquisition = True + offset_x_requires_stopped_acquisition = True + offset_y_requires_stopped_acquisition = True + exposure_time_requires_stopped_acquisition = False + gain_requires_stopped_acquisition = True + + def __init__(self): + """ + Create a new HamamatsuCamera service. 
+ """ + super().__init__('hamamatsu_camera_v2') + self.cam = None + + # Create lock for camera access + self.mutex = threading.Lock() + + def open(self): + # Dictionary to store the pixel format and the corresponding numpy dtype and dcam pixel format + self.pixel_formats = { + "Mono8": dcam.DCAM_PIXELTYPE.MONO8, + "Mono16": dcam.DCAM_PIXELTYPE.MONO16, + } + self.current_pixel_format = None + + if dcam.Dcamapi.init() is False: + raise RuntimeError(f'Dcamapi.init() fails with error {dcam.Dcamapi.lasterr()}') + + camera_id = self.config.get('camera_id', 0) + self.cam = dcam.Dcam(camera_id) + self.log.info('Using camera with ID %s', camera_id) + if self.cam.dev_open() is False: + raise RuntimeError(f'Dcam.dev_open() fails with error {self.cam.lasterr()}') + + # Read ROI of full sensor before ROI is adapted. + self._sensor_width = int(self.cam.prop_getvalue(dcam.DCAM_IDPROP.IMAGE_WIDTH)) + self._sensor_height = int(self.cam.prop_getvalue(dcam.DCAM_IDPROP.IMAGE_HEIGHT)) + + # Set subarray mode to on so that it checks subarray compatibility when picking ROI + self.cam.prop_setvalue(dcam.DCAM_IDPROP.SUBARRAYMODE, 2.0) + + binning = self.config.get('binning', 1) + self.cam.prop_setvalue(dcam.DCAM_IDPROP.BINNING, binning) + + defect_correction = 2.0 if self.config.get('defect_correction', True) else 1.0 + self.cam.prop_setvalue(dcam.DCAM_IDPROP.DEFECTCORRECT_MODE, defect_correction) + + self.hot_pixel_correction = self.config.get('hot_pixel_correction', 'standard') + if self.hot_pixel_correction == "standard": + hot_pixel_correction = 1.0 + elif self.hot_pixel_correction == "minimum": + hot_pixel_correction = 2.0 + elif self.hot_pixel_correction == "aggressive": + hot_pixel_correction = 3.0 + else: + raise ValueError(f'Invalid hot pixel correction: {self.hot_pixel_correction}, must be one of ["standard", "minimum", "aggressive"]') + self.cam.prop_setvalue(dcam.DCAM_IDPROP.HOTPIXELCORRECT_LEVEL, hot_pixel_correction) + + self.camera_mode = self.config.get('camera_mode', 
"standard") + if self.camera_mode == "ultraquiet": + camera_mode = 1.0 + elif self.camera_mode == "standard": + camera_mode = 2.0 + else: + raise ValueError(f'Invalid camera mode: {self.camera_mode}, must be one of ["ultraquiet", "standard"]') + self.cam.prop_setvalue(dcam.DCAM_IDPROP.READOUTSPEED, camera_mode) + + self.current_pixel_format = self.config.get('pixel_format', 'Mono16') + if self.current_pixel_format not in self.pixel_formats: + raise ValueError(f'Invalid pixel format: {self.current_pixel_format}, must be one of {str(list(self.pixel_formats.keys()))}') + self.log.info('Using pixel format: %s', self.current_pixel_format) + self.cam.prop_setvalue(dcam.DCAM_IDPROP.IMAGE_PIXELTYPE, self.pixel_formats[self.current_pixel_format]) + + def make_property_helper(property_name, read_only=False, dtype=None): + if dtype is None: + dtype = '' + + def getter(): + return getattr(self, property_name) + + if read_only: + self.make_property(property_name, getter, type=dtype) + return + + def setter(value): + setattr(self, property_name, value) + + self.make_property(property_name, getter, setter, type=dtype) + + self.critical_temperature = self.config.get('critical_temperature', 28.0) + self.cooler_mode = self.config.get('cooling_mode', 'on') + self.fan_status = self.config.get('fan_status', 'off') + + make_property_helper('brightness', read_only=True) + make_property_helper('fan_status') + make_property_helper('cooler_mode') + + super().open() + + def close(self): + super().close() + + self.cooler_mode = 'on' + self.cam.dev_close() + self.cam = None + + dcam.Dcamapi.uninit() + + def start_acquisition(self): + if self.cam.buf_alloc(self.NUM_FRAMES_IN_BUFFER) is False: + raise RuntimeError(f'Dcam.buf_alloc() fails with error {self.cam.lasterr()}') + if self.cam.cap_start() is False: + raise RuntimeError(f'Dcam.cap_start() fails with error {self.cam.lasterr()}') + + def end_acquisition(self): + self.cam.cap_stop() + self.cam.buf_release() + + def capture_image(self): + 
timeout_millisec = 2000 + if self.cam.lasterr().is_timeout(): + self.log.warning('Timeout while waiting for frame') + if self.cam.wait_capevent_frameready(timeout_millisec) is False: + raise RuntimeError( + f'Dcam.wait_capevent_frameready({timeout_millisec}) fails with error {self.cam.lasterr()}') + + img = self.cam.buf_getlastframedata() + + return img + + def get_roi_width(self): + with self.mutex: + return self.cam.prop_getvalue(getattr(dcam.DCAM_IDPROP, 'SUBARRAYHSIZE')) + + def set_roi_width(self, width): + with self.mutex: + self.cam.prop_setvalue(getattr(dcam.DCAM_IDPROP, 'SUBARRAYHSIZE'), width) + + def get_roi_height(self): + with self.mutex: + return self.cam.prop_getvalue(getattr(dcam.DCAM_IDPROP, 'SUBARRAYVSIZE')) + + def set_roi_height(self, height): + with self.mutex: + self.cam.prop_setvalue(getattr(dcam.DCAM_IDPROP, 'SUBARRAYVSIZE'), height) + + def get_roi_offset_x(self): + with self.mutex: + return self.cam.prop_getvalue(getattr(dcam.DCAM_IDPROP, 'SUBARRAYVPOS')) + + def set_roi_offset_x(self, offset_x): + with self.mutex: + self.cam.prop_setvalue(getattr(dcam.DCAM_IDPROP, 'SUBARRAYVPOS'), offset_x) + + def get_roi_offset_y(self): + with self.mutex: + return self.cam.prop_getvalue(getattr(dcam.DCAM_IDPROP, 'SUBARRAYHPOS')) + + def set_roi_offset_y(self, offset_y): + with self.mutex: + self.cam.prop_setvalue(getattr(dcam.DCAM_IDPROP, 'SUBARRAYHPOS'), offset_y) + + def get_sensor_width(self): + return self._sensor_width + + def get_sensor_height(self): + return self._sensor_height + + def get_exposure_time(self): + with self.mutex: + return self.cam.prop_getvalue(getattr(dcam.DCAM_IDPROP, 'EXPOSURETIME')) * 1e6 + + def set_exposure_time(self, exposure_time): + with self.mutex: + self.cam.prop_setvalue(getattr(dcam.DCAM_IDPROP, 'EXPOSURETIME'), exposure_time / 1e6) + + def get_gain(self): + with self.mutex: + return self.cam.prop_getvalue(getattr(dcam.DCAM_IDPROP, 'CONTRASTGAIN')) + + def set_gain(self, gain): + with self.mutex: + 
self.cam.prop_setvalue(getattr(dcam.DCAM_IDPROP, 'CONTRASTGAIN'), gain) + + def get_temperature(self): + """ + Get the temperature of the camera. + + This function gets the temperature of the camera. + + Returns + ------- + float: + The temperature of the camera in degrees Celsius. + """ + return self.cam.prop_getvalue(dcam.DCAM_IDPROP.SENSORTEMPERATURE) + + def monitor_temperature(self): + """ + Monitor the temperature of the camera. + + This function is a separate thread that monitors the temperature of + the camera and submits the data to the temperature data stream. + """ + while not self.should_shut_down: + temperature = self.get_temperature() + self.temperature.submit_data(np.array([temperature])) + + if temperature > self.critical_temperature and self.is_acquiring.get(): + self.log.warning(f'Camera temperature = {temperature} > {self.critical_temperature} degrees.') + self.log.warning('Stopping acquisition and start fan.') + self.fan_status = 'on' + self.cooler_mode = 'on' + self.end_acquisition() + + self.sleep(0.1) + + @property + def brightness(self): + with self.mutex: + return self.cam.prop_getvalue(getattr(dcam.DCAM_IDPROP, 'SENSITIVITY')) + + @property + def fan_status(self): + with self.mutex: + return FanStatus(self.cam.prop_getvalue(getattr(dcam.DCAM_IDPROP, 'SENSORCOOLERFAN'))).name + + @fan_status.setter + def fan_status(self, status): + with self.mutex: + self.cam.prop_setvalue(getattr(dcam.DCAM_IDPROP, 'SENSORCOOLERFAN'), FanStatus[status].value) + + @property + def cooler_mode(self): + with self.mutex: + return CoolerMode(self.cam.prop_getvalue(getattr(dcam.DCAM_IDPROP, 'SENSORCOOLER'))).name + + @cooler_mode.setter + def cooler_mode(self, mode): + with self.mutex: + self.cam.prop_setvalue(getattr(dcam.DCAM_IDPROP, 'SENSORCOOLER'), CoolerMode[mode].value) + + +if __name__ == '__main__': + service = HamamatsuCamera() + service.run() diff --git a/catkit2/services/zwo_camera/zwo_camera.py b/catkit2/services/zwo_camera/zwo_camera.py index 
40ec1201e..0beb21c8f 100644 --- a/catkit2/services/zwo_camera/zwo_camera.py +++ b/catkit2/services/zwo_camera/zwo_camera.py @@ -51,9 +51,6 @@ def __init__(self): self.should_be_acquiring = threading.Event() self.should_be_acquiring.set() - # Create lock for camera access - self.mutex = threading.Lock() - def open(self): # Attempt to find USB camera. num_cameras = zwoasi.get_num_cameras() @@ -100,7 +97,6 @@ def open(self): self.camera.set_control_value(controls[c]['ControlType'], controls[c]['DefaultValue']) print('Bandwidth defaults', self.camera.get_controls()['BandWidth']) - print('Bandwidth before:', self.camera.get_control_value(zwoasi.ASI_BANDWIDTHOVERLOAD)) self._max_bandwidth = self.config.get('max_bandwidth', True) @@ -346,6 +342,7 @@ def max_bandwidth(self, use_max): else: self.camera.set_control_value(zwoasi.ASI_BANDWIDTHOVERLOAD, self.camera.get_controls()['BandWidth']['MinValue']) + if __name__ == '__main__': service = ZwoCamera() service.run() diff --git a/catkit2/services/zwo_camera_v2/zwo_camera_v2.py b/catkit2/services/zwo_camera_v2/zwo_camera_v2.py new file mode 100644 index 000000000..b864d6667 --- /dev/null +++ b/catkit2/services/zwo_camera_v2/zwo_camera_v2.py @@ -0,0 +1,257 @@ +import os +import numpy as np +import zwoasi + +from catkit2.base_services.camera import CameraService, StoppedAcquisition + + +try: + __ZWO_ASI_LIB = 'ZWO_ASI_LIB' + __env_filename = os.getenv(__ZWO_ASI_LIB) + + if not __env_filename: + raise OSError(f"Environment variable '{__ZWO_ASI_LIB}' doesn't exist. 
Create and point to ASICamera2 lib")
+
+    if not os.path.exists(__env_filename):
+        raise OSError(f"File not found: '{__ZWO_ASI_LIB}' -> '{__env_filename}'")
+
+    zwoasi.init(__env_filename)
+except Exception as error:
+    # Modules have no __qualname__ attribute; using it here would raise
+    # AttributeError inside the handler and mask the original load failure.
+    raise ImportError(f"Failed to load {__ZWO_ASI_LIB} library backend to {zwoasi.__name__}") from error
+
+
+class ZwoCamera(CameraService):
+    # Stopped acquisition flags for base class
+    width_requires_stopped_acquisition = True
+    height_requires_stopped_acquisition = True
+    offset_x_requires_stopped_acquisition = False
+    offset_y_requires_stopped_acquisition = False
+    exposure_time_requires_stopped_acquisition = False
+    gain_requires_stopped_acquisition = False
+
+    def __init__(self):
+        super().__init__('zwo_camera_v2')
+
+    def open(self):
+        # Attempt to find USB camera.
+        num_cameras = zwoasi.get_num_cameras()
+        if num_cameras == 0:
+            raise RuntimeError("Not a single ZWO camera is connected.")
+
+        expected_device_name = self.config['device_name']
+        expected_device_id = self.config.get('device_id', None)
+
+        for i in range(num_cameras):
+            device_name = zwoasi._get_camera_property(i)['Name']
+
+            if not device_name.startswith(expected_device_name):
+                continue
+
+            if expected_device_id is None:
+                camera_index = i
+                break
+            else:
+                zwoasi._open_camera(i)
+                try:
+                    device_id = zwoasi._get_id(i)
+                    if device_id == str(expected_device_id):
+                        camera_index = i
+                        break
+                except Exception as e:
+                    raise RuntimeError(f'Impossible to read camera id for camera {expected_device_name}. It probably doesn\'t support an id.') from e
+                finally:
+                    zwoasi._close_camera(i)
+
+        else:
+            raise RuntimeError(f'Camera {expected_device_name} with id {expected_device_id} not found.')
+
+        # Create a camera object using the zwoasi library.
+        self.camera = zwoasi.Camera(camera_index)
+
+        # Get all camera controls.
+        controls = self.camera.get_controls()
+
+        # Restore all controls to default values, in case any other application modified them.
+ for c in controls: + # Some control values such as Temperature and CoolerPowerPerc are not writable for zwo cool models + if ('IsWritable' in controls[c]) and controls[c]['IsWritable']: + self.camera.set_control_value(controls[c]['ControlType'], controls[c]['DefaultValue']) + + print('Bandwidth defaults', self.camera.get_controls()['BandWidth']) + print('Bandwidth before:', self.camera.get_control_value(zwoasi.ASI_BANDWIDTHOVERLOAD)) + + self._max_bandwidth = self.config.get('max_bandwidth', True) + + # Max USB bandwidth gives highest FPS performance at small ROI - however it sometimes causes the service to crash with large frame sizes + if self._max_bandwidth: + self.camera.set_control_value(zwoasi.ASI_BANDWIDTHOVERLOAD, self.camera.get_controls()['BandWidth']['MaxValue']) + else: + self.camera.set_control_value(zwoasi.ASI_BANDWIDTHOVERLOAD, self.camera.get_controls()['BandWidth']['MinValue']) + + print('Bandwidth after:', self.camera.get_control_value(zwoasi.ASI_BANDWIDTHOVERLOAD)) + + try: + # Force any single exposure to be halted + self.camera.stop_video_capture() + self.camera.stop_exposure() + except Exception: + # Catch and hide exceptions that get thrown if the camera was already stopped. + pass + + self.device_name = device_name + + # Set image format to be RAW16, although camera is only 12-bit. + self.camera.set_image_type(zwoasi.ASI_IMG_RAW16) + + # Get exposure discretization parameters. 
+ self.exposure_time_step_size = self.config.get('exposure_time_step_size', 1) + self.exposure_time_offset_correction = self.config.get('exposure_time_offset_correction', 0) + self.exposure_time_base_step = self.config.get('exposure_time_base_step', 1) + + def make_property_helper(name, read_only=False, requires_stopped_acquisition=False): + if read_only: + self.make_property(name, lambda: getattr(self, name)) + else: + if requires_stopped_acquisition: + def setter(val): + with StoppedAcquisition(self): + setattr(self, name, val) + else: + def setter(val): + setattr(self, name, val) + + self.make_property(name, lambda: getattr(self, name), setter) + + # Set up general camera properties. + make_property_helper('brightness') + make_property_helper('device_name', read_only=True) + make_property_helper('max_bandwidth') + + super().open() + + def close(self): + super().close() + + self.camera.close() + + def start_acquisition(self): + self.camera.start_video_capture() + + def end_acquisition(self): + self.camera.stop_video_capture() + + def capture_image(self): + timeout = 10000 # ms + img = self.camera.capture_video_frame(timeout=timeout) + + return img.astype('float32') + + def get_roi_width(self): + width, height, bins, image_type = self.camera.get_roi_format() + + return width + + def set_roi_width(self, width): + roi_format = self.camera.get_roi_format() + offset_x, offset_y = self.camera.get_roi_start_position() + + roi_format[0] = width + + self.camera.set_roi_format(*roi_format) + self.camera.set_roi_start_position(offset_x, offset_y) + + def get_roi_height(self): + width, height, bins, image_type = self.camera.get_roi_format() + + return height + + def set_roi_height(self, height): + roi_format = self.camera.get_roi_format() + offset_x, offset_y = self.camera.get_roi_start_position() + + roi_format[1] = height + + self.camera.set_roi_format(*roi_format) + self.camera.set_roi_start_position(offset_x, offset_y) + + def get_roi_offset_x(self): + offset_x, _ = 
self.camera.get_roi_start_position() + + return offset_x + + def set_roi_offset_x(self, offset_x): + _, offset_y = self.camera.get_roi_start_position() + + self.camera.set_roi_start_position(offset_x, offset_y) + + def get_roi_offset_y(self): + _, offset_y = self.camera.get_roi_start_position() + + return offset_y + + def set_roi_offset_y(self, offset_y): + offset_x, _ = self.camera.get_roi_start_position() + + self.camera.set_roi_start_position(offset_x, offset_y) + + def get_sensor_width(self): + cam_info = self.camera.get_camera_property() + + return cam_info['MaxWidth'] + + def get_sensor_height(self): + cam_info = self.camera.get_camera_property() + + return cam_info['MaxHeight'] + + def get_exposure_time(self): + exposure_time, auto = self.camera.get_control_value(zwoasi.ASI_EXPOSURE) + return exposure_time - self.exposure_time_offset_correction + + def set_exposure_time(self, exposure_time): + exposure_time += self.exposure_time_offset_correction + exposure_time = np.round((exposure_time - self.exposure_time_base_step) / self.exposure_time_step_size) + exposure_time = np.maximum(exposure_time, 0) + exposure_time = exposure_time * self.exposure_time_step_size + self.exposure_time_base_step + + self.camera.set_control_value(zwoasi.ASI_EXPOSURE, int(exposure_time)) + + def get_gain(self): + gain, _ = self.camera.get_control_value(zwoasi.ASI_GAIN) + + return gain + + def set_gain(self, gain): + self.camera.set_control_value(zwoasi.ASI_GAIN, int(gain)) + + def get_temperature(self): + temperature_times_ten, _ = self.camera.get_control_value(zwoasi.ASI_TEMPERATURE) + + return temperature_times_ten / 10 + + @property + def brightness(self): + brightness, auto = self.camera.get_control_value(zwoasi.ASI_BRIGHTNESS) + return brightness + + @brightness.setter + def brightness(self, brightness): + self.camera.set_control_value(zwoasi.ASI_BRIGHTNESS, int(brightness)) + + @property + def max_bandwidth(self): + return self._max_bandwidth + + @max_bandwidth.setter + def 
max_bandwidth(self, use_max): + # max USB bandwidth allows for maximum frame rates from ZWO camera + # however, some models have issues reading out large frame sizes when max value is set + if use_max: + self.camera.set_control_value(zwoasi.ASI_BANDWIDTHOVERLOAD, self.camera.get_controls()['BandWidth']['MaxValue']) + else: + self.camera.set_control_value(zwoasi.ASI_BANDWIDTHOVERLOAD, self.camera.get_controls()['BandWidth']['MinValue']) + + +if __name__ == '__main__': + service = ZwoCamera() + service.run() diff --git a/pyproject.toml b/pyproject.toml index e6838e4ea..6bec17f68 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,6 +46,7 @@ dummy_camera = "catkit2.services.dummy_camera.dummy_camera" empty_service = "catkit2.services.empty_service.empty_service" flir_camera = "catkit2.services.flir_camera.flir_camera" hamamatsu_camera = "catkit2.services.hamamatsu_camera.hamamatsu_camera" +hamamatsu_camera_v2 = "catkit2.services.hamamatsu_camera_v2.hamamatsu_camera_v2" newport_picomotor = "catkit2.services.newport_picomotor.newport_picomotor" newport_picomotor_sim = "catkit2.services.newport_picomotor_sim.newport_picomotor_sim" newport_xps_q8 = "catkit2.services.newport_xps_q8.newport_xps_q8" @@ -88,6 +89,7 @@ watchdog = "catkit2.services.watchdog.watchdog" web_power_switch = "catkit2.services.web_power_switch.web_power_switch" web_power_switch_sim = "catkit2.services.web_power_switch_sim.web_power_switch_sim" zwo_camera = "catkit2.services.zwo_camera.zwo_camera" +zwo_camera_v2 = "catkit2.services.zwo_camera_v2.zwo_camera_v2" [project.entry-points."catkit2.proxies"] camera = "catkit2.testbed.proxies.camera:CameraProxy"