revert: Revert controllerSoftware to commit 6a21816e42

This commit is contained in:
Tempest 2025-12-02 21:54:09 +07:00
parent 413590d1a2
commit c9c8cb7df7
10 changed files with 494 additions and 1292 deletions

View File

@ -1,4 +1,4 @@
from flask import Flask, render_template, request, jsonify, Response from flask import Flask, render_template, request, jsonify
import asyncio import asyncio
from bleak import BleakScanner, BleakClient from bleak import BleakScanner, BleakClient
import threading import threading
@ -7,8 +7,6 @@ import json
import sys import sys
import signal import signal
import os import os
import cv2
from vision import VisionSystem
# ================================================================================================= # =================================================================================================
# APP CONFIGURATION # APP CONFIGURATION
@ -16,17 +14,15 @@ from vision import VisionSystem
# Set to True to run without a physical BLE device for testing purposes. # Set to True to run without a physical BLE device for testing purposes.
# Set to False to connect to the actual lamp matrix. # Set to False to connect to the actual lamp matrix.
DEBUG_MODE = True DEBUG_MODE = False
# --- BLE Device Configuration (Ignored in DEBUG_MODE) --- # --- BLE Device Configuration (Ignored in DEBUG_MODE) ---
DEVICE_NAME = "Pupilometer LED Billboard" DEVICE_NAME = "Pupilometer LED Billboard"
global ble_client global ble_client
global ble_characteristics global ble_characteristics
global ble_connection_status
ble_client = None ble_client = None
ble_characteristics = None ble_characteristics = None
ble_event_loop = None # Will be initialized if not in debug mode ble_event_loop = None # Will be initialized if not in debug mode
ble_connection_status = False
# ================================================================================================= # =================================================================================================
# BLE HELPER FUNCTIONS (Used in LIVE mode) # BLE HELPER FUNCTIONS (Used in LIVE mode)
@ -75,7 +71,6 @@ SPIRAL_MAP_5x5 = create_spiral_map(5)
async def set_full_matrix_on_ble(colorSeries): async def set_full_matrix_on_ble(colorSeries):
global ble_client global ble_client
global ble_characteristics global ble_characteristics
global ble_connection_status
if not ble_client or not ble_client.is_connected: if not ble_client or not ble_client.is_connected:
print("BLE client not connected. Attempting to reconnect...") print("BLE client not connected. Attempting to reconnect...")
@ -125,7 +120,6 @@ async def set_full_matrix_on_ble(colorSeries):
async def connect_to_ble_device(): async def connect_to_ble_device():
global ble_client global ble_client
global ble_characteristics global ble_characteristics
global ble_connection_status
print(f"Scanning for device: {DEVICE_NAME}...") print(f"Scanning for device: {DEVICE_NAME}...")
devices = await BleakScanner.discover() devices = await BleakScanner.discover()
@ -133,7 +127,6 @@ async def connect_to_ble_device():
if not target_device: if not target_device:
print(f"Device '{DEVICE_NAME}' not found.") print(f"Device '{DEVICE_NAME}' not found.")
ble_connection_status = False
return False return False
print(f"Found device: {target_device.name} ({target_device.address})") print(f"Found device: {target_device.name} ({target_device.address})")
@ -151,15 +144,12 @@ async def connect_to_ble_device():
] ]
ble_characteristics = sorted(characteristics, key=lambda char: char.handle) ble_characteristics = sorted(characteristics, key=lambda char: char.handle)
print(f"Found {len(ble_characteristics)} characteristics for lamps.") print(f"Found {len(ble_characteristics)} characteristics for lamps.")
ble_connection_status = True
return True return True
else: else:
print(f"Failed to connect to {target_device.name}") print(f"Failed to connect to {target_device.name}")
ble_connection_status = False
return False return False
except Exception as e: except Exception as e:
print(f"An error occurred during BLE connection: {e}") print(f"An error occurred during BLE connection: {e}")
ble_connection_status = False
return False return False
# ================================================================================================= # =================================================================================================
# COLOR MIXING # COLOR MIXING
@ -265,58 +255,14 @@ def set_matrix():
print(f"Getting current lamp matrix info: {lamp_matrix}") print(f"Getting current lamp matrix info: {lamp_matrix}")
@app.route('/ble_status')
def ble_status():
global ble_connection_status
if DEBUG_MODE:
return jsonify(connected=True)
return jsonify(connected=ble_connection_status)
@app.route('/vision/pupil_data')
def get_pupil_data():
"""
Endpoint to get the latest pupil segmentation data from the vision system.
"""
if vision_system:
data = vision_system.get_pupil_data()
return jsonify(success=True, data=data)
return jsonify(success=False, message="Vision system not initialized"), 500
def gen_frames():
"""Generator function for video streaming."""
while True:
frame = vision_system.get_annotated_frame()
if frame is not None:
ret, buffer = cv2.imencode('.jpg', frame)
frame = buffer.tobytes()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
@app.route('/video_feed')
def video_feed():
"""Video streaming route."""
return Response(gen_frames(),
mimetype='multipart/x-mixed-replace; boundary=frame')
# ================================================================================================= # =================================================================================================
# APP STARTUP # APP STARTUP
# ================================================================================================= # =================================================================================================
vision_system = None
def signal_handler(signum, frame): def signal_handler(signum, frame):
print("Received shutdown signal, gracefully shutting down...") print("Received shutdown signal, gracefully shutting down...")
global ble_connection_status
# Stop the vision system
if vision_system:
print("Stopping vision system...")
vision_system.stop()
print("Vision system stopped.")
if not DEBUG_MODE and ble_client and ble_client.is_connected: if not DEBUG_MODE and ble_client and ble_client.is_connected:
print("Disconnecting BLE client...") print("Disconnecting BLE client...")
ble_connection_status = False
disconnect_future = asyncio.run_coroutine_threadsafe(ble_client.disconnect(), ble_event_loop) disconnect_future = asyncio.run_coroutine_threadsafe(ble_client.disconnect(), ble_event_loop)
try: try:
# Wait for the disconnect to complete with a timeout # Wait for the disconnect to complete with a timeout
@ -339,16 +285,6 @@ if __name__ == '__main__':
signal.signal(signal.SIGINT, signal_handler) signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler) signal.signal(signal.SIGTERM, signal_handler)
# Initialize and start the Vision System
try:
vision_config = {"camera_id": 0, "model_name": "yolov8n-seg.pt"}
vision_system = VisionSystem(config=vision_config)
vision_system.start()
except Exception as e:
print(f"Failed to initialize or start Vision System: {e}")
vision_system = None
if not DEBUG_MODE: if not DEBUG_MODE:
print("Starting BLE event loop in background thread...") print("Starting BLE event loop in background thread...")
ble_event_loop = asyncio.new_event_loop() ble_event_loop = asyncio.new_event_loop()

View File

@ -1,250 +0,0 @@
import sys
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib
import pyds
import threading
import numpy as np
try:
from pypylon import pylon
except ImportError:
print("pypylon is not installed. DeepStreamBackend will not be able to get frames from Basler camera.")
pylon = None
class DeepStreamPipeline:
    """
    A class to manage the DeepStream pipeline for pupil segmentation.

    Frames are pulled from a Basler camera (via pypylon) on a background
    thread and pushed into a GStreamer pipeline
    (appsrc -> nvvideoconvert -> nvinfer -> appsink); detection metadata is
    harvested from a pad probe on the inference element.
    """
    def __init__(self, config):
        # Config dict is stored but not otherwise read in this class.
        self.config = config
        Gst.init(None)
        self.pipeline = None
        self.loop = GLib.MainLoop()
        # Latest detection result ({"bounding_box": ..., "confidence": ...}) or None.
        self.pupil_data = None
        # Latest frame surface captured in the pad probe, or None.
        self.annotated_frame = None
        self.camera = None
        self.frame_feeder_thread = None
        self.is_running = False
        print("DeepStreamPipeline initialized.")
    def _frame_feeder_thread(self, appsrc):
        """
        Thread function to feed frames from the Basler camera to the appsrc element.

        Runs until is_running is cleared, the camera stops grabbing, or an
        error occurs while retrieving a frame.
        """
        while self.is_running:
            if not self.camera or not self.camera.IsGrabbing():
                print("Camera not ready, stopping frame feeder.")
                break
            try:
                # 5000 ms timeout; raises on timeout per TimeoutHandling_ThrowException.
                grab_result = self.camera.RetrieveResult(5000, pylon.TimeoutHandling_ThrowException)
                if grab_result.GrabSucceeded():
                    frame = grab_result.Array
                    # Create a Gst.Buffer
                    # NOTE(review): len(frame) on a 2-D numpy array is the row
                    # count, not the byte size — confirm buffer sizing is correct.
                    buf = Gst.Buffer.new_allocate(None, len(frame), None)
                    buf.fill(0, frame)
                    # Push the buffer into the appsrc
                    appsrc.emit('push-buffer', buf)
                else:
                    print(f"Error grabbing frame: {grab_result.ErrorCode}")
            except Exception as e:
                print(f"An error occurred in frame feeder thread: {e}")
                break
            finally:
                # Release the grab result if it was obtained this iteration.
                if 'grab_result' in locals() and grab_result:
                    grab_result.Release()
    def bus_call(self, bus, message, loop):
        """
        Callback function for handling messages from the GStreamer bus.

        Quits the GLib main loop on end-of-stream or error; warnings are
        only logged. Always returns True to keep the watch installed.
        """
        t = message.type
        if t == Gst.MessageType.EOS:
            sys.stdout.write("End-of-stream\n")
            self.is_running = False
            loop.quit()
        elif t == Gst.MessageType.WARNING:
            err, debug = message.parse_warning()
            sys.stderr.write("Warning: %s: %s\n" % (err, debug))
        elif t == Gst.MessageType.ERROR:
            err, debug = message.parse_error()
            sys.stderr.write("Error: %s: %s\n" % (err, debug))
            self.is_running = False
            loop.quit()
        return True
    def pgie_sink_pad_buffer_probe(self, pad, info, u_data):
        """
        Probe callback function for the sink pad of the pgie element.

        Walks the batch metadata attached to the buffer, stores the frame
        surface in self.annotated_frame and the last object's bounding box
        and confidence in self.pupil_data.
        """
        gst_buffer = info.get_buffer()
        if not gst_buffer:
            print("Unable to get GstBuffer ")
            return Gst.PadProbeReturn.OK
        # Retrieve batch metadata from the gst_buffer
        # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the address of gst_buffer as input, which is a ptr.
        batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
        l_frame = batch_meta.frame_meta_list
        while l_frame is not None:
            try:
                # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
                frame_meta = pyds.glist_get_data(l_frame)
            except StopIteration:
                break
            # Get frame as numpy array
            self.annotated_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
            l_obj = frame_meta.obj_meta_list
            while l_obj is not None:
                try:
                    # Casting l_obj.data to pyds.NvDsObjectMeta
                    obj_meta = pyds.glist_get_data(l_obj)
                except StopIteration:
                    break
                # Access and process object metadata
                rect_params = obj_meta.rect_params
                top = rect_params.top
                left = rect_params.left
                width = rect_params.width
                height = rect_params.height
                # NOTE(review): each detection overwrites the previous one, so
                # only the last object per frame survives in pupil_data.
                self.pupil_data = {
                    "bounding_box": [left, top, left + width, top + height],
                    "confidence": obj_meta.confidence
                }
                print(f"Pupil detected: {self.pupil_data}")
                try:
                    l_obj = l_obj.next
                except StopIteration:
                    break
            try:
                l_frame = l_frame.next
            except StopIteration:
                break
        return Gst.PadProbeReturn.OK
    def start(self):
        """
        Builds and starts the DeepStream pipeline.

        Opens the Basler camera, assembles appsrc -> nvvideoconvert ->
        nvinfer -> appsink, installs the metadata probe, and launches the
        frame-feeder thread. Returns early (logging to stderr) on any
        construction failure.
        """
        if not pylon:
            raise ImportError("pypylon is not installed. Cannot start DeepStreamPipeline with Basler camera.")
        # Initialize camera
        try:
            self.camera = pylon.InstantCamera(pylon.TlFactory.GetInstance().CreateFirstDevice())
            self.camera.Open()
            self.camera.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
            print("DeepStreamPipeline: Basler camera opened and started grabbing.")
        except Exception as e:
            print(f"DeepStreamPipeline: Error opening Basler camera: {e}")
            return
        self.pipeline = Gst.Pipeline()
        if not self.pipeline:
            sys.stderr.write(" Unable to create Pipeline \n")
            return
        source = Gst.ElementFactory.make("appsrc", "app-source")
        pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
        sink = Gst.ElementFactory.make("appsink", "app-sink")
        videoconvert = Gst.ElementFactory.make("nvvideoconvert", "nv-videoconvert")
        # Set appsrc properties
        # TODO: Set caps based on camera properties
        caps = Gst.Caps.from_string("video/x-raw,format=GRAY8,width=1280,height=720,framerate=30/1")
        source.set_property("caps", caps)
        # NOTE(review): "format" is a GstFormat enum; confirm the string nick
        # "time" maps to Gst.Format.TIME through PyGObject as intended.
        source.set_property("format", "time")
        pgie.set_property('config-file-path', "pgie_yolov10_config.txt")
        # Set appsink properties
        # NOTE(review): emit-signals is enabled but no "new-sample" handler is
        # connected; samples are only observed via the pgie pad probe.
        sink.set_property("emit-signals", True)
        sink.set_property("max-buffers", 1)
        sink.set_property("drop", True)
        self.pipeline.add(source)
        self.pipeline.add(videoconvert)
        self.pipeline.add(pgie)
        self.pipeline.add(sink)
        if not source.link(videoconvert):
            sys.stderr.write(" Unable to link source to videoconvert \n")
            return
        if not videoconvert.link(pgie):
            sys.stderr.write(" Unable to link videoconvert to pgie \n")
            return
        if not pgie.link(sink):
            sys.stderr.write(" Unable to link pgie to sink \n")
            return
        pgie_sink_pad = pgie.get_static_pad("sink")
        if not pgie_sink_pad:
            sys.stderr.write(" Unable to get sink pad of pgie \n")
            return
        pgie_sink_pad.add_probe(Gst.PadProbeType.BUFFER, self.pgie_sink_pad_buffer_probe, 0)
        bus = self.pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect("message", self.bus_call, self.loop)
        self.is_running = True
        self.frame_feeder_thread = threading.Thread(target=self._frame_feeder_thread, args=(source,))
        self.frame_feeder_thread.start()
        print("Starting pipeline...")
        self.pipeline.set_state(Gst.State.PLAYING)
        print("DeepStreamPipeline started.")
    def stop(self):
        """
        Stops the DeepStream pipeline.

        Signals the feeder thread to exit, joins it, tears the pipeline
        down to NULL, and closes the Basler camera.
        """
        self.is_running = False
        if self.frame_feeder_thread:
            self.frame_feeder_thread.join()
        if self.pipeline:
            self.pipeline.set_state(Gst.State.NULL)
        print("DeepStreamPipeline stopped.")
        if self.camera and self.camera.IsGrabbing():
            self.camera.StopGrabbing()
        if self.camera and self.camera.IsOpen():
            self.camera.Close()
        print("DeepStreamPipeline: Basler camera closed.")
    def get_data(self):
        """
        Retrieves data from the pipeline.

        Returns the most recent pupil detection dict, or None if nothing
        has been detected yet.
        """
        return self.pupil_data
    def get_annotated_frame(self):
        """
        Retrieves the annotated frame from the pipeline.

        Returns the most recent frame surface captured by the probe, or
        None before the first frame.
        """
        return self.annotated_frame
if __name__ == '__main__':
    # Standalone smoke-run: build the pipeline with an empty config, start
    # it, and spin the GLib main loop until interrupted.
    config = {}
    pipeline = DeepStreamPipeline(config)
    pipeline.start()
    # Run the GLib main loop in the main thread
    try:
        pipeline.loop.run()
    except KeyboardInterrupt:
        print("Interrupted by user.")
    # NOTE(review): original indentation was lost; stop() is placed after the
    # try/except so cleanup runs on both normal exit and Ctrl-C — confirm.
    pipeline.stop()

View File

@ -1,18 +0,0 @@
[property]
gpu-id=0
net-scale-factor=0.00392156862745098
#onnx-file=yolov10.onnx
model-engine-file=model.engine
#labelfile-path=labels.txt
batch-size=1
process-mode=1
model-color-format=0
network-mode=0
num-detected-classes=1
gie-unique-id=1
output-blob-names=output0
[class-attrs-all]
pre-cluster-threshold=0.2
eps=0.2
group-threshold=1

View File

@ -1,314 +0,0 @@
// State for the entire 5x5 matrix, storing {ww, cw, blue} for each lamp
var lampMatrixState = Array(5).fill(null).map(() => Array(5).fill({ww: 0, cw: 0, blue: 0}));
var selectedLamps = [];
// Function to calculate a visual RGB color from the three light values using a proper additive model
// Compute a display hex colour for a lamp by additively mixing the three
// light channels (warm white, cool white, blue) against fixed basis colours
// taken from the slider track styling. Inputs are 0-255 channel intensities.
function calculateRgb(ww, cw, blue) {
    // Each source contributes its basis colour scaled by its normalized weight.
    const sources = [
        { weight: ww,   base: [255, 192, 128] },  // warm white
        { weight: cw,   base: [192, 224, 255] },  // cool white
        { weight: blue, base: [0, 0, 255] }       // blue
    ];
    const channels = [0, 1, 2].map(function (ch) {
        const mixed = sources.reduce(
            (sum, src) => sum + (src.weight / 255) * src.base[ch],
            0
        );
        // Clamp to 255 and round to an integer channel value.
        return Math.min(255, Math.round(mixed));
    });
    const toHex = (v) => ('0' + v.toString(16)).slice(-2);
    return '#' + channels.map(toHex).join('');
}
// Sync one lamp's DOM element with its colour state: set the background
// colour and an outer glow when the lamp is on, or an inset shadow when
// all channels are zero.
function updateLampUI(lamp, colorState) {
    var newColor = calculateRgb(colorState.ww, colorState.cw, colorState.blue);
    var lampElement = $(`.lamp[data-row="${lamp.row}"][data-col="${lamp.col}"]`);
    lampElement.css('background-color', newColor);
    if (newColor === '#000000') {
        // Fully dark: mark the lamp "off" with a recessed look.
        lampElement.removeClass('on');
        lampElement.css('box-shadow', `inset 0 0 5px rgba(0,0,0,0.5)`);
    } else {
        lampElement.addClass('on');
        lampElement.css('box-shadow', `0 0 15px ${newColor}, 0 0 25px ${newColor}`);
    }
}
// Function to update the UI and send the full matrix state to the backend
// Serialize the whole 5x5 lampMatrixState and POST it to /set_matrix.
// On success, refresh either the entire grid (region update) or only the
// lamps listed in lampsToUpdate.
function sendFullMatrixUpdate(lampsToUpdate, isRegionUpdate = false) {
    // Deep-copy the state into plain {ww, cw, blue} objects for JSON.
    var fullMatrixData = lampMatrixState.map(row => row.map(lamp => ({
        ww: lamp.ww,
        cw: lamp.cw,
        blue: lamp.blue
    })));
    $.ajax({
        url: '/set_matrix',
        type: 'POST',
        contentType: 'application/json',
        data: JSON.stringify({ matrix: fullMatrixData }),
        success: function(response) {
            if (response.success) {
                if (isRegionUpdate) {
                    // On a region button click, update the entire matrix UI
                    for (var r = 0; r < 5; r++) {
                        for (var c = 0; c < 5; c++) {
                            updateLampUI({row: r, col: c}, lampMatrixState[r][c]);
                        }
                    }
                } else {
                    // Otherwise, just update the lamps that changed
                    lampsToUpdate.forEach(function(lamp) {
                        updateLampUI(lamp, lampMatrixState[lamp.row][lamp.col]);
                    });
                }
            }
            // NOTE(review): success=false and transport errors are silently
            // ignored, leaving the UI out of sync with the backend.
        }
    });
}
// Push the given channel values into both the range sliders and their
// paired number inputs. `prefix` selects the control group (e.g. 'center-').
function updateSliders(ww, cw, blue, prefix = '') {
    const channels = { ww: ww, cw: cw, blue: blue };
    Object.keys(channels).forEach(function (name) {
        $(`#${prefix}${name}-slider`).val(channels[name]);
        $(`#${prefix}${name}-number`).val(channels[name]);
    });
}
// Page bootstrap: builds the region lookup table, wires the slider and
// region-select handlers, and starts the periodic BLE-status and
// pupil-data polls.
$(document).ready(function() {
    // Named regions of the 5x5 grid, each a list of {row, col} cells.
    var regionMaps = {
        'Upper': [
            {row: 0, col: 0}, {row: 0, col: 1}, {row: 0, col: 2}, {row: 0, col: 3}, {row: 0, col: 4},
            {row: 1, col: 0}, {row: 1, col: 1}, {row: 1, col: 2}, {row: 1, col: 3}, {row: 1, col: 4},
        ],
        'Lower': [
            {row: 3, col: 0}, {row: 3, col: 1}, {row: 3, col: 2}, {row: 3, col: 3}, {row: 3, col: 4},
            {row: 4, col: 0}, {row: 4, col: 1}, {row: 4, col: 2}, {row: 4, col: 3}, {row: 4, col: 4},
        ],
        'Left': [
            {row: 0, col: 0}, {row: 1, col: 0}, {row: 2, col: 0}, {row: 3, col: 0}, {row: 4, col: 0},
            {row: 0, col: 1}, {row: 1, col: 1}, {row: 2, col: 1}, {row: 3, col: 1}, {row: 4, col: 1},
        ],
        'Right': [
            {row: 0, col: 3}, {row: 1, col: 3}, {row: 2, col: 3}, {row: 3, col: 3}, {row: 4, col: 3},
            {row: 0, col: 4}, {row: 1, col: 4}, {row: 2, col: 4}, {row: 3, col: 4}, {row: 4, col: 4},
        ],
        'Inner ring': [
            {row: 1, col: 1}, {row: 1, col: 2}, {row: 1, col: 3},
            {row: 2, col: 1}, {row: 2, col: 3},
            {row: 3, col: 1}, {row: 3, col: 2}, {row: 3, col: 3}
        ],
        'Outer ring': [
            {row: 0, col: 0}, {row: 0, col: 1}, {row: 0, col: 2}, {row: 0, col: 3}, {row: 0, col: 4},
            {row: 1, col: 0}, {row: 1, col: 4},
            {row: 2, col: 0}, {row: 2, col: 4},
            {row: 3, col: 0}, {row: 3, col: 4},
            {row: 4, col: 0}, {row: 4, col: 1}, {row: 4, col: 2}, {row: 4, col: 3}, {row: 4, col: 4},
        ],
        // NOTE(review): 'All' already omits {row: 2, col: 2} in the literal,
        // so the filter below is redundant (but harmless).
        'All': [
            {row: 0, col: 0}, {row: 0, col: 1}, {row: 0, col: 2}, {row: 0, col: 3}, {row: 0, col: 4},
            {row: 1, col: 0}, {row: 1, col: 1}, {row: 1, col: 2}, {row: 1, col: 3}, {row: 1, col: 4},
            {row: 2, col: 0}, {row: 2, col: 1}, {row: 2, col: 3}, {row: 2, col: 4},
            {row: 3, col: 0}, {row: 3, col: 1}, {row: 3, col: 2}, {row: 3, col: 3}, {row: 3, col: 4},
            {row: 4, col: 0}, {row: 4, col: 1}, {row: 4, col: 2}, {row: 4, col: 3}, {row: 4, col: 4},
        ]
    };
    // Exclude the center lamp from the 'All' region
    var allRegionWithoutCenter = regionMaps['All'].filter(lamp => !(lamp.row === 2 && lamp.col === 2));
    regionMaps['All'] = allRegionWithoutCenter;
    // Initialize lampMatrixState from the initial HTML colors
    // NOTE(review): the rendered background-color's R/G/B components are
    // stored directly as ww/cw/blue (and as strings) — these are display
    // values, not channel intensities; verify this initialisation is intended.
    $('.lamp').each(function() {
        var row = $(this).data('row');
        var col = $(this).data('col');
        var color = $(this).css('background-color');
        var rgb = color.match(/\d+/g);
        lampMatrixState[row][col] = {
            ww: rgb[0], cw: rgb[1], blue: rgb[2]
        };
    });
    // Region selection: reset all non-center lamps, then paint the chosen
    // region with the current slider values and push the full state.
    $('#region-select').on('change', function() {
        var region = $(this).val();
        // Toggle the inactive state of the control panel based on selection
        if (region) {
            $('.control-panel').removeClass('inactive-control');
        } else {
            $('.control-panel').addClass('inactive-control');
        }
        var newlySelectedLamps = regionMaps[region];
        // Clear selected class from all lamps
        $('.lamp').removeClass('selected');
        // Get the current slider values to use as the new default
        var ww = parseInt($('#ww-slider').val());
        var cw = parseInt($('#cw-slider').val());
        var blue = parseInt($('#blue-slider').val());
        // Reset all lamps except the center to black in our state
        var lampsToUpdate = [];
        var centerLampState = lampMatrixState[2][2];
        lampMatrixState = Array(5).fill(null).map(() => Array(5).fill({ww: 0, cw: 0, blue: 0}));
        lampMatrixState[2][2] = centerLampState; // Preserve center lamp state
        // Set newly selected lamps to the current slider values
        selectedLamps = newlySelectedLamps;
        selectedLamps.forEach(function(lamp) {
            $(`.lamp[data-row="${lamp.row}"][data-col="${lamp.col}"]`).addClass('selected');
            lampMatrixState[lamp.row][lamp.col] = {ww: ww, cw: cw, blue: blue};
        });
        if (selectedLamps.length > 0) {
            // Update sliders to reflect the state of the first selected lamp
            var firstLamp = selectedLamps[0];
            var firstLampState = lampMatrixState[firstLamp.row][firstLamp.col];
            updateSliders(firstLampState.ww, firstLampState.cw, firstLampState.blue, '');
        }
        // Send the full matrix state
        sendFullMatrixUpdate(lampsToUpdate, true);
    });
    // Event listener for the region sliders and number inputs
    $('.region-slider-group input').on('input', function() {
        if (selectedLamps.length === 0) return;
        var target = $(this);
        var originalVal = target.val();
        var value = parseInt(originalVal, 10);
        // Clamp value
        if (isNaN(value) || value < 0) { value = 0; }
        if (value > 255) { value = 255; }
        if (target.is('[type="number"]') && value.toString() !== originalVal) {
            target.val(value);
        }
        var id = target.attr('id');
        // Mirror the edited control onto its slider/number twin.
        if (target.is('[type="range"]')) {
            $(`#${id.replace('-slider', '-number')}`).val(value);
        } else if (target.is('[type="number"]')) {
            $(`#${id.replace('-number', '-slider')}`).val(value);
        }
        var ww = parseInt($('#ww-slider').val());
        var cw = parseInt($('#cw-slider').val());
        var blue = parseInt($('#blue-slider').val());
        var lampsToUpdate = [];
        // Apply the new channel values to every lamp in the selected region.
        selectedLamps.forEach(function(lamp) {
            lampMatrixState[lamp.row][lamp.col] = {ww: ww, cw: cw, blue: blue};
            lampsToUpdate.push(lamp);
        });
        sendFullMatrixUpdate(lampsToUpdate);
    });
    // Event listener for the center lamp sliders and number inputs
    $('.center-slider-group input').on('input', function() {
        var target = $(this);
        var originalVal = target.val();
        var value = parseInt(originalVal, 10);
        // Clamp value
        if (isNaN(value) || value < 0) { value = 0; }
        if (value > 255) { value = 255; }
        if (target.is('[type="number"]') && value.toString() !== originalVal) {
            target.val(value);
        }
        var id = target.attr('id');
        // Mirror the edited control onto its slider/number twin.
        if (target.is('[type="range"]')) {
            $(`#${id.replace('-slider', '-number')}`).val(value);
        } else if (target.is('[type="number"]')) {
            $(`#${id.replace('-number', '-slider')}`).val(value);
        }
        var ww = parseInt($('#center-ww-slider').val());
        var cw = parseInt($('#center-cw-slider').val());
        var blue = parseInt($('#center-blue-slider').val());
        var centerLamp = {row: 2, col: 2};
        lampMatrixState[centerLamp.row][centerLamp.col] = {ww: ww, cw: cw, blue: blue};
        sendFullMatrixUpdate([centerLamp]);
    });
    // Initial check to set the inactive state
    if (!$('#region-select').val()) {
        $('.control-panel').addClass('inactive-control');
    }
    // Poll the backend for BLE link status and reflect it in #ble-status.
    function checkBleStatus() {
        $.ajax({
            url: '/ble_status',
            type: 'GET',
            success: function(response) {
                var statusElement = $('#ble-status');
                if (response.connected) {
                    statusElement.text('BLE Connected');
                    statusElement.css('color', 'lightgreen');
                } else {
                    statusElement.text('BLE Disconnected');
                    statusElement.css('color', 'red');
                }
            },
            error: function() {
                var statusElement = $('#ble-status');
                statusElement.text('Reconnecting...');
                statusElement.css('color', 'orange');
            }
        });
    }
    setInterval(checkBleStatus, 2000);
    checkBleStatus(); // Initial check
    // Poll the vision endpoint and draw the latest pupil on the canvas.
    function getPupilData() {
        $.ajax({
            url: '/vision/pupil_data',
            type: 'GET',
            success: function(response) {
                if (response.success && response.data) {
                    var pupilData = response.data;
                    var pupilPosition = pupilData.pupil_position;
                    var pupilDiameter = pupilData.pupil_diameter;
                    // Update text fields
                    $('#pupil-center').text(`(${pupilPosition[0]}, ${pupilPosition[1]})`);
                    $('#pupil-area').text(pupilDiameter);
                    // Draw on canvas
                    // NOTE(review): position is halved but diameter is not
                    // rescaled to the canvas — confirm the intended mapping.
                    var canvas = $('#pupil-canvas')[0];
                    var ctx = canvas.getContext('2d');
                    ctx.clearRect(0, 0, canvas.width, canvas.height);
                    ctx.beginPath();
                    ctx.arc(pupilPosition[0] / 2, pupilPosition[1] / 2, pupilDiameter / 2, 0, 2 * Math.PI);
                    ctx.fillStyle = 'red';
                    ctx.fill();
                }
            }
        });
    }
    setInterval(getPupilData, 500); // Fetch data every 500ms
});

View File

@ -23,35 +23,6 @@ body {
align-items: flex-start; align-items: flex-start;
gap: 40px; gap: 40px;
} }
#vision-system {
display: flex;
flex-direction: column;
align-items: center;
}
#pupil-detection {
margin-bottom: 20px;
text-align: center;
}
#pupil-canvas {
border: 1px solid #ccc;
background-color: #f0f0f0;
}
#pupil-data p {
margin: 5px 0;
}
#video-feed {
text-align: center;
}
#video-feed img {
border: 1px solid #ccc;
}
.matrix-grid { .matrix-grid {
display: grid; display: grid;
grid-template-columns: repeat(5, 70px); grid-template-columns: repeat(5, 70px);
@ -178,14 +149,3 @@ input.blue::-webkit-slider-runnable-track { background: linear-gradient(to right
align-items: center; align-items: center;
} }
} }
#ble-status {
position: fixed;
top: 10px;
right: 10px;
font-size: 16px;
color: #fff;
background-color: #333;
padding: 5px 10px;
border-radius: 5px;
}

View File

@ -4,11 +4,271 @@
<title>Lamp Matrix Control</title> <title>Lamp Matrix Control</title>
<link rel="stylesheet" href="{{ url_for('static', filename='style.css') }}"> <link rel="stylesheet" href="{{ url_for('static', filename='style.css') }}">
<script src="https://code.jquery.com/jquery-3.6.0.min.js"></script> <script src="https://code.jquery.com/jquery-3.6.0.min.js"></script>
<script src="{{ url_for('static', filename='script.js') }}"></script> <script>
// State for the entire 5x5 matrix, storing {ww, cw, blue} for each lamp
var lampMatrixState = Array(5).fill(null).map(() => Array(5).fill({ww: 0, cw: 0, blue: 0}));
var selectedLamps = [];
// Function to calculate a visual RGB color from the three light values using a proper additive model
// Compute a display hex colour for a lamp by additively mixing the three
// light channels (warm white, cool white, blue) against fixed basis colours
// taken from the slider track styling. Inputs are 0-255 channel intensities.
function calculateRgb(ww, cw, blue) {
    // Each source contributes its basis colour scaled by its normalized weight.
    const sources = [
        { weight: ww,   base: [255, 192, 128] },  // warm white
        { weight: cw,   base: [192, 224, 255] },  // cool white
        { weight: blue, base: [0, 0, 255] }       // blue
    ];
    const channels = [0, 1, 2].map(function (ch) {
        const mixed = sources.reduce(
            (sum, src) => sum + (src.weight / 255) * src.base[ch],
            0
        );
        // Clamp to 255 and round to an integer channel value.
        return Math.min(255, Math.round(mixed));
    });
    const toHex = (v) => ('0' + v.toString(16)).slice(-2);
    return '#' + channels.map(toHex).join('');
}
// Sync one lamp's DOM element with its colour state: set the background
// colour and an outer glow when the lamp is on, or an inset shadow when
// all channels are zero.
function updateLampUI(lamp, colorState) {
    var newColor = calculateRgb(colorState.ww, colorState.cw, colorState.blue);
    var lampElement = $(`.lamp[data-row="${lamp.row}"][data-col="${lamp.col}"]`);
    lampElement.css('background-color', newColor);
    if (newColor === '#000000') {
        // Fully dark: mark the lamp "off" with a recessed look.
        lampElement.removeClass('on');
        lampElement.css('box-shadow', `inset 0 0 5px rgba(0,0,0,0.5)`);
    } else {
        lampElement.addClass('on');
        lampElement.css('box-shadow', `0 0 15px ${newColor}, 0 0 25px ${newColor}`);
    }
}
// Function to update the UI and send the full matrix state to the backend
// Serialize the whole 5x5 lampMatrixState and POST it to /set_matrix.
// On success, refresh either the entire grid (region update) or only the
// lamps listed in lampsToUpdate.
function sendFullMatrixUpdate(lampsToUpdate, isRegionUpdate = false) {
    // Deep-copy the state into plain {ww, cw, blue} objects for JSON.
    var fullMatrixData = lampMatrixState.map(row => row.map(lamp => ({
        ww: lamp.ww,
        cw: lamp.cw,
        blue: lamp.blue
    })));
    $.ajax({
        url: '/set_matrix',
        type: 'POST',
        contentType: 'application/json',
        data: JSON.stringify({ matrix: fullMatrixData }),
        success: function(response) {
            if (response.success) {
                if (isRegionUpdate) {
                    // On a region button click, update the entire matrix UI
                    for (var r = 0; r < 5; r++) {
                        for (var c = 0; c < 5; c++) {
                            updateLampUI({row: r, col: c}, lampMatrixState[r][c]);
                        }
                    }
                } else {
                    // Otherwise, just update the lamps that changed
                    lampsToUpdate.forEach(function(lamp) {
                        updateLampUI(lamp, lampMatrixState[lamp.row][lamp.col]);
                    });
                }
            }
            // NOTE(review): success=false and transport errors are silently
            // ignored, leaving the UI out of sync with the backend.
        }
    });
}
// Push the given channel values into both the range sliders and their
// paired number inputs. `prefix` selects the control group (e.g. 'center-').
function updateSliders(ww, cw, blue, prefix = '') {
    const channels = { ww: ww, cw: cw, blue: blue };
    Object.keys(channels).forEach(function (name) {
        $(`#${prefix}${name}-slider`).val(channels[name]);
        $(`#${prefix}${name}-number`).val(channels[name]);
    });
}
$(document).ready(function() {
var regionMaps = {
'Upper': [
{row: 0, col: 0}, {row: 0, col: 1}, {row: 0, col: 2}, {row: 0, col: 3}, {row: 0, col: 4},
{row: 1, col: 0}, {row: 1, col: 1}, {row: 1, col: 2}, {row: 1, col: 3}, {row: 1, col: 4},
],
'Lower': [
{row: 3, col: 0}, {row: 3, col: 1}, {row: 3, col: 2}, {row: 3, col: 3}, {row: 3, col: 4},
{row: 4, col: 0}, {row: 4, col: 1}, {row: 4, col: 2}, {row: 4, col: 3}, {row: 4, col: 4},
],
'Left': [
{row: 0, col: 0}, {row: 1, col: 0}, {row: 2, col: 0}, {row: 3, col: 0}, {row: 4, col: 0},
{row: 0, col: 1}, {row: 1, col: 1}, {row: 2, col: 1}, {row: 3, col: 1}, {row: 4, col: 1},
],
'Right': [
{row: 0, col: 3}, {row: 1, col: 3}, {row: 2, col: 3}, {row: 3, col: 3}, {row: 4, col: 3},
{row: 0, col: 4}, {row: 1, col: 4}, {row: 2, col: 4}, {row: 3, col: 4}, {row: 4, col: 4},
],
'Inner ring': [
{row: 1, col: 1}, {row: 1, col: 2}, {row: 1, col: 3},
{row: 2, col: 1}, {row: 2, col: 3},
{row: 3, col: 1}, {row: 3, col: 2}, {row: 3, col: 3}
],
'Outer ring': [
{row: 0, col: 0}, {row: 0, col: 1}, {row: 0, col: 2}, {row: 0, col: 3}, {row: 0, col: 4},
{row: 1, col: 0}, {row: 1, col: 4},
{row: 2, col: 0}, {row: 2, col: 4},
{row: 3, col: 0}, {row: 3, col: 4},
{row: 4, col: 0}, {row: 4, col: 1}, {row: 4, col: 2}, {row: 4, col: 3}, {row: 4, col: 4},
],
'All': [
{row: 0, col: 0}, {row: 0, col: 1}, {row: 0, col: 2}, {row: 0, col: 3}, {row: 0, col: 4},
{row: 1, col: 0}, {row: 1, col: 1}, {row: 1, col: 2}, {row: 1, col: 3}, {row: 1, col: 4},
{row: 2, col: 0}, {row: 2, col: 1}, {row: 2, col: 3}, {row: 2, col: 4},
{row: 3, col: 0}, {row: 3, col: 1}, {row: 3, col: 2}, {row: 3, col: 3}, {row: 3, col: 4},
{row: 4, col: 0}, {row: 4, col: 1}, {row: 4, col: 2}, {row: 4, col: 3}, {row: 4, col: 4},
]
};
// Exclude the center lamp from the 'All' region
var allRegionWithoutCenter = regionMaps['All'].filter(lamp => !(lamp.row === 2 && lamp.col === 2));
regionMaps['All'] = allRegionWithoutCenter;
// Initialize lampMatrixState from the initial HTML colors
$('.lamp').each(function() {
var row = $(this).data('row');
var col = $(this).data('col');
var color = $(this).css('background-color');
var rgb = color.match(/\d+/g);
lampMatrixState[row][col] = {
ww: rgb[0], cw: rgb[1], blue: rgb[2]
};
});
// Region selector: highlight the chosen lamps, repaint the matrix state,
// and push the full state to the device.
$('#region-select').on('change', function() {
    var region = $(this).val();
    // Toggle the inactive state of the control panel based on selection
    if (region) {
        $('.control-panel').removeClass('inactive-control');
    } else {
        $('.control-panel').addClass('inactive-control');
    }
    // Guard: the empty placeholder option has no entry in regionMaps
    // (the original crashed in forEach with undefined).
    var newlySelectedLamps = regionMaps[region] || [];
    // Clear selected class from all lamps
    $('.lamp').removeClass('selected');
    // Get the current slider values to use as the new default
    var ww = parseInt($('#ww-slider').val());
    var cw = parseInt($('#cw-slider').val());
    var blue = parseInt($('#blue-slider').val());
    // Reset all lamps except the center to black in our state.
    // Each cell gets its OWN object: the original Array(5).fill({...})
    // aliased one shared object across every column of a row, so writing
    // one lamp silently mutated its whole row.
    var lampsToUpdate = [];
    var centerLampState = lampMatrixState[2][2];
    lampMatrixState = Array.from({length: 5}, function() {
        return Array.from({length: 5}, function() {
            return {ww: 0, cw: 0, blue: 0};
        });
    });
    lampMatrixState[2][2] = centerLampState; // Preserve center lamp state
    // Set newly selected lamps to the current slider values
    selectedLamps = newlySelectedLamps;
    selectedLamps.forEach(function(lamp) {
        $(`.lamp[data-row="${lamp.row}"][data-col="${lamp.col}"]`).addClass('selected');
        lampMatrixState[lamp.row][lamp.col] = {ww: ww, cw: cw, blue: blue};
    });
    if (selectedLamps.length > 0) {
        // Update sliders to reflect the state of the first selected lamp
        var firstLamp = selectedLamps[0];
        var firstLampState = lampMatrixState[firstLamp.row][firstLamp.col];
        updateSliders(firstLampState.ww, firstLampState.cw, firstLampState.blue, '');
    }
    // Send the full matrix state (second argument flags a full-matrix send;
    // lampsToUpdate stays empty here because every lamp changed).
    sendFullMatrixUpdate(lampsToUpdate, true);
});
// Region sliders / number inputs: apply the current channel values to
// every lamp in the active selection and push the update.
$('.region-slider-group input').on('input', function() {
    if (selectedLamps.length === 0) return;
    var input = $(this);
    var raw = input.val();
    var value = parseInt(raw, 10);
    // Clamp to the valid 0-255 channel range.
    if (isNaN(value) || value < 0) {
        value = 0;
    } else if (value > 255) {
        value = 255;
    }
    // Write the clamped value back into a number box that held junk.
    if (input.is('[type="number"]') && value.toString() !== raw) {
        input.val(value);
    }
    // Mirror the change onto the paired slider/number control.
    var id = input.attr('id');
    if (input.is('[type="range"]')) {
        $('#' + id.replace('-slider', '-number')).val(value);
    } else if (input.is('[type="number"]')) {
        $('#' + id.replace('-number', '-slider')).val(value);
    }
    var ww = parseInt($('#ww-slider').val());
    var cw = parseInt($('#cw-slider').val());
    var blue = parseInt($('#blue-slider').val());
    var lampsToUpdate = selectedLamps.map(function(lamp) {
        lampMatrixState[lamp.row][lamp.col] = {ww: ww, cw: cw, blue: blue};
        return lamp;
    });
    sendFullMatrixUpdate(lampsToUpdate);
});
// Centre-lamp sliders / number inputs: drive only lamp (2,2).
$('.center-slider-group input').on('input', function() {
    var input = $(this);
    var raw = input.val();
    var value = parseInt(raw, 10);
    // Clamp to the valid 0-255 channel range.
    if (isNaN(value) || value < 0) {
        value = 0;
    } else if (value > 255) {
        value = 255;
    }
    // Reflect the clamped value back into a number box that held junk.
    if (input.is('[type="number"]') && value.toString() !== raw) {
        input.val(value);
    }
    // Keep the paired slider/number control in sync.
    var id = input.attr('id');
    if (input.is('[type="range"]')) {
        $('#' + id.replace('-slider', '-number')).val(value);
    } else if (input.is('[type="number"]')) {
        $('#' + id.replace('-number', '-slider')).val(value);
    }
    var ww = parseInt($('#center-ww-slider').val());
    var cw = parseInt($('#center-cw-slider').val());
    var blue = parseInt($('#center-blue-slider').val());
    var centerLamp = {row: 2, col: 2};
    lampMatrixState[centerLamp.row][centerLamp.col] = {ww: ww, cw: cw, blue: blue};
    sendFullMatrixUpdate([centerLamp]);
});
// On load, disable the control panel when no region is pre-selected.
var initialRegion = $('#region-select').val();
if (!initialRegion) {
    $('.control-panel').addClass('inactive-control');
}
});
</script>
</head>
<body>
<div class="container">
<div id="ble-status"></div>
<h1>Lamp Matrix Control</h1>
<div class="region-control">
<label for="region-select">Select Region:</label>
@ -76,20 +336,6 @@
</div>
</div>
</div>
<div id="vision-system">
<div id="pupil-detection">
<h2>Pupil Detection</h2>
<canvas id="pupil-canvas" width="300" height="300"></canvas>
<div id="pupil-data">
<p>Center: <span id="pupil-center">(x, y)</span></p>
<p>Area: <span id="pupil-area">0</span></p>
</div>
</div>
<div id="video-feed">
<h2>Camera Feed</h2>
<img src="{{ url_for('video_feed') }}" width="640" height="480">
</div>
</div>
</div>
</div>
</body>

View File

@ -1,358 +0,0 @@
import sys
import platform
import os
import numpy as np
import cv2
import logging
from ultralytics import YOLO # New import
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
class VisionSystem:
    """
    Facade for the pupil-segmentation vision system.

    Selects a platform-appropriate backend once at construction time and
    forwards every operation to it.
    """

    def __init__(self, config):
        # Work on a copy so the caller's dict is never mutated.
        self.config = config.copy()
        self.config.setdefault('model_name', 'yolov8n-seg.pt')  # Set default model
        # 'model_path' always mirrors the chosen model name.
        self.config['model_path'] = self.config['model_name']
        self._backend = self._initialize_backend()

    def _initialize_backend(self):
        """Pick and construct the backend for the current environment and OS."""
        # Test environments never touch real hardware.
        if os.environ.get("PUPILOMETER_ENV") == "test":
            logging.info("PUPILOMETER_ENV is set to 'test'. Initializing Mock backend.")
            return MockBackend(self.config)
        os_name = platform.system()
        if os_name in ("Linux", "Windows"):
            logging.info(f"Operating system is {os_name}. Attempting to initialize DeepStream backend.")
            try:
                # Probe for a usable GStreamer/DeepStream installation.
                import gi
                gi.require_version('Gst', '1.0')
                from gi.repository import Gst
                Gst.init(None)
                logging.info("DeepStream (GStreamer) is available.")
                return DeepStreamBackend(self.config)
            except (ImportError, ValueError) as e:
                logging.warning(f"Could not initialize DeepStreamBackend: {e}. Falling back to PythonBackend.")
                return PythonBackend(self.config)
        if os_name == "Darwin":
            logging.info("Operating system is macOS. Initializing Python backend.")
            return PythonBackend(self.config)
        logging.error(f"Unsupported operating system: {os_name}")
        raise NotImplementedError(f"Unsupported operating system: {os_name}")

    def start(self):
        """Start the active backend."""
        self._backend.start()

    def stop(self):
        """Stop the active backend."""
        self._backend.stop()

    def get_pupil_data(self):
        """Return the latest pupil segmentation data from the backend."""
        return self._backend.get_pupil_data()

    def get_annotated_frame(self):
        """Return the latest annotated frame from the backend."""
        return self._backend.get_annotated_frame()
class MockBackend:
    """Hardware-free backend that returns canned data for tests."""

    def __init__(self, config):
        self.config = config
        logging.info("MockBackend initialized.")

    def start(self):
        # Nothing to acquire; logged for parity with the real backends.
        logging.info("MockBackend started.")

    def stop(self):
        logging.info("MockBackend stopped.")

    def get_pupil_data(self):
        """Return a fixed, easily recognisable pupil-data payload."""
        logging.info("Getting pupil data from MockBackend.")
        return {
            "pupil_position": (123, 456),
            "pupil_diameter": 789,
            "info": "mock_data",
        }

    def get_annotated_frame(self):
        """Return a black 640x480 placeholder frame with a caption."""
        canvas = np.zeros((480, 640, 3), np.uint8)
        cv2.putText(canvas, "Mock Camera Feed", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
        return canvas
class DeepStreamBackend:
    """Pupil-segmentation backend driven by a DeepStream pipeline (Jetson/Windows)."""

    def __init__(self, config):
        """
        Build the underlying DeepStream pipeline.

        Args:
            config (dict): Configuration parameters for the pipeline.
        """
        # Imported lazily so platforms without DeepStream can still load this module.
        from deepstream_pipeline import DeepStreamPipeline
        self.config = config
        self.pipeline = DeepStreamPipeline(config)
        logging.info("DeepStreamBackend initialized.")

    def start(self):
        """Start the pipeline."""
        self.pipeline.start()
        logging.info("DeepStreamBackend started.")

    def stop(self):
        """Stop the pipeline."""
        self.pipeline.stop()
        logging.info("DeepStreamBackend stopped.")

    def get_pupil_data(self):
        """Return the latest pupil data from the pipeline."""
        return self.pipeline.get_data()

    def get_annotated_frame(self):
        """Return the latest annotated frame from the pipeline."""
        return self.pipeline.get_annotated_frame()
class PythonBackend:
    """
    A class to handle pupil segmentation on macOS using pypylon and Ultralytics YOLO models.
    """
    def __init__(self, config):
        """
        Initializes the PythonBackend.
        Args:
            config (dict): A dictionary containing configuration parameters
                such as 'model_path'.
        """
        self.config = config
        self.camera = None  # pypylon InstantCamera, created in start()
        self.model = None  # Ultralytics YOLO model
        self.annotated_frame = None  # last BGR frame with detections drawn, set by get_pupil_data()
        self.conf_threshold = 0.25  # Confidence threshold for object detection
        self.iou_threshold = 0.45  # IoU threshold for Non-Maximum Suppression
        # Load the YOLO model (e.g., yolov8n-seg.pt)
        try:
            # Model weights are resolved relative to this source file.
            model_full_path = os.path.join(os.path.dirname(__file__), self.config['model_path'])
            self.model = YOLO(model_full_path)
            logging.info(f"PythonBackend: Ultralytics YOLO model loaded from {model_full_path}.")
            # Dynamically get class names from the model
            self.class_names = self.model.names
        except Exception as e:
            logging.error(f"PythonBackend: Error loading Ultralytics YOLO model: {e}")
            self.model = None
            self.class_names = []  # Fallback to empty list
        logging.info("PythonBackend initialized.")
    def start(self):
        """
        Initializes the Basler camera.

        Failures leave self.camera as None; get_pupil_data() checks for that.
        """
        try:
            from pypylon import pylon
        except ImportError:
            raise ImportError("pypylon is not installed. Cannot start PythonBackend.")
        try:
            # Initialize the camera
            self.camera = pylon.InstantCamera(pylon.TlFactory.GetInstance().CreateFirstDevice())
            self.camera.Open()
            # Start grabbing continuously
            self.camera.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
            logging.info("PythonBackend: Basler camera opened and started grabbing.")
        except Exception as e:
            logging.error(f"PythonBackend: Error opening Basler camera: {e}")
            self.camera = None
        logging.info("PythonBackend started.")
    def stop(self):
        """
        Releases the camera resources.
        """
        if self.camera and self.camera.IsGrabbing():
            self.camera.StopGrabbing()
            logging.info("PythonBackend: Basler camera stopped grabbing.")
        if self.camera and self.camera.IsOpen():
            self.camera.Close()
            logging.info("PythonBackend: Basler camera closed.")
        logging.info("PythonBackend stopped.")
    def get_pupil_data(self):
        """
        Grabs a frame from the camera, runs inference using Ultralytics YOLO, and returns pupil data.

        Returns a dict (possibly empty when nothing is detected) on a
        successful grab, or None when the camera/model is unavailable or
        an error occurs. Also refreshes self.annotated_frame as a side effect.
        """
        if not self.camera or not self.camera.IsGrabbing():
            logging.warning("PythonBackend: Camera not ready.")
            return None
        if not self.model:
            logging.warning("PythonBackend: YOLO model not loaded.")
            return None
        grab_result = None
        try:
            from pypylon import pylon
            import cv2
            import numpy as np
            grab_result = self.camera.RetrieveResult(5000, pylon.TimeoutHandling_ThrowException)
            if grab_result.GrabSucceeded():
                image_np = grab_result.Array  # This is typically a grayscale image from Basler
                # Convert grayscale to BGR if necessary for YOLO (YOLO expects 3 channels)
                if len(image_np.shape) == 2:
                    image_bgr = cv2.cvtColor(image_np, cv2.COLOR_GRAY2BGR)
                else:
                    image_bgr = image_np
                # Run inference with Ultralytics YOLO
                results = self.model.predict(source=image_bgr, conf=self.conf_threshold, iou=self.iou_threshold, verbose=False)
                pupil_data = {}
                self.annotated_frame = image_bgr.copy()  # Start with original image for annotation
                if results and len(results[0].boxes) > 0:  # Check if any detections are made
                    # Assuming we are interested in the largest or most confident pupil
                    # For simplicity, let's process the first detection
                    result = results[0]  # Results for the first (and only) image
                    # Extract bounding box
                    box = result.boxes.xyxy[0].cpu().numpy().astype(int)  # xyxy format
                    x1, y1, x2, y2 = box
                    # Extract confidence and class ID
                    confidence = result.boxes.conf[0].cpu().numpy().item()
                    class_id = int(result.boxes.cls[0].cpu().numpy().item())
                    class_name = self.class_names[class_id]
                    # Calculate pupil position (center of bounding box)
                    pupil_center_x = (x1 + x2) // 2
                    pupil_center_y = (y1 + y2) // 2
                    # Calculate pupil diameter (average of width and height of bounding box)
                    pupil_diameter = (x2 - x1 + y2 - y1) // 2
                    pupil_data = {
                        "pupil_position": (pupil_center_x, pupil_center_y),
                        "pupil_diameter": pupil_diameter,
                        "class_name": class_name,
                        "confidence": confidence,
                        "bounding_box": box.tolist()  # Convert numpy array to list for JSON serialization
                    }
                    # Extract and draw segmentation mask
                    if result.masks:
                        # Get the mask for the first detection, upsampled to original image size
                        mask_np = result.masks.data[0].cpu().numpy()  # Raw mask data
                        # Resize mask to original image dimensions if necessary (ultralytics usually returns scaled masks)
                        mask_resized = cv2.resize(mask_np, (image_bgr.shape[1], image_bgr.shape[0]), interpolation=cv2.INTER_LINEAR)
                        binary_mask = (mask_resized > 0.5).astype(np.uint8) * 255  # Threshold to binary
                        # Draw bounding box
                        color = (0, 255, 0)  # Green for pupil detection
                        cv2.rectangle(self.annotated_frame, (x1, y1), (x2, y2), color, 2)
                        # Create a colored mask overlay
                        mask_color = np.array([0, 255, 0], dtype=np.uint8)  # Green color for mask
                        colored_mask_overlay = np.zeros_like(self.annotated_frame, dtype=np.uint8)
                        colored_mask_overlay[binary_mask > 0] = mask_color
                        self.annotated_frame = cv2.addWeighted(self.annotated_frame, 1, colored_mask_overlay, 0.5, 0)
                        # Draw label
                        label = f"{class_name}: {confidence:.2f}"
                        cv2.putText(self.annotated_frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
                else:
                    logging.info("No objects detected by YOLO model.")
                return pupil_data
            else:
                logging.error(f"PythonBackend: Error grabbing frame: {grab_result.ErrorCode} {grab_result.ErrorDescription}")
                return None
        except Exception as e:
            logging.error(f"PythonBackend: An error occurred during frame grabbing or inference: {e}")
            return None
        finally:
            # Always release the grab buffer back to pypylon.
            if grab_result:
                grab_result.Release()
    def get_annotated_frame(self):
        """
        Returns the latest annotated frame.

        May be None before the first successful get_pupil_data() call.
        """
        return self.annotated_frame
# The if __name__ == '__main__': block should be outside the class
if __name__ == '__main__':
    # Example usage
    # Ensure 'yolov8n-seg.pt' is in src/controllerSoftware for this example to run
    config = {"camera_id": 0, "model_path": "yolov8n-seg.pt"}
    try:
        vision = VisionSystem(config)
        vision.start()
        # One-shot demo: a real application would poll this in a loop.
        data = vision.get_pupil_data()
        if data:
            logging.info(f"Received pupil data: {data}")
        else:
            logging.info("No pupil data received.")
        # Get and show the annotated frame
        frame = vision.get_annotated_frame()
        if frame is not None:
            cv2.imshow("Annotated Frame", frame)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
        vision.stop()
    except NotImplementedError as e:
        logging.error(e)
    except Exception as e:
        logging.error(f"An error occurred: {e}")