diff --git a/GEMINI.md b/GEMINI.md new file mode 100644 index 00000000..c02da77d --- /dev/null +++ b/GEMINI.md @@ -0,0 +1,23 @@ +### Pupil Segmentation Integration + +- **Objective:** Integrated Pupil segmentation into the mono camera pipelines. +- **Key Changes:** + - Modified `src/unified_web_ui/gstreamer_pipeline.py` to: + - Add a `tee` element for mono camera streams to split the video feed. + - Create a new branch for pupil segmentation with a `videoconvert` placeholder and a dedicated `appsink` (`seg_sink_{i}`). + - Implement `on_new_seg_sample_factory` callback to handle segmentation data. + - Added `seg_frame_buffers` and `seg_buffer_locks` for segmentation output. + - Introduced `get_seg_frame_by_id` to retrieve segmentation frames. + - Ensured unique naming for `tee` elements (`t_{i}`) in the GStreamer pipeline to prevent linking errors. + - Modified `src/unified_web_ui/app.py` to: + - Add a new Flask route `/segmentation_feed/<int:stream_id>` to serve the segmentation video stream. + - Added `datetime.utcnow` to the Jinja2 context for cache-busting in templates. + - Modified `src/unified_web_ui/templates/index.html` to: + - Include a new "Segmentation Feed" section displaying the segmentation video streams, sourcing from `/segmentation_feed/<int:stream_id>` with cache-busting timestamps. + - Updated existing video feeds (`video_feed`) with cache-busting timestamps for consistency. +- **Testing:** + - Created `tests/test_segmentation.py` to verify the segmentation feed is visible and updating. + - Updated `src/unified_web_ui/tests/test_ui.py` to refine locators (`#camera .camera-streams-grid .camera-container-individual`) for camera stream elements, resolving conflicts with segmentation feeds. + - Updated `src/unified_web_ui/tests/test_visual.py` to refine locators (`#camera .camera-mono-row`, `#camera .camera-color-row`, `#camera .camera-mono`) to prevent strict mode violations and ensure accurate targeting of camera layout elements. 
+ - Fixed indentation errors in `src/unified_web_ui/tests/test_visual.py`. +- **Status:** All tests are passing, and the infrastructure for pupil segmentation is in place, awaiting the integration of a DeepStream model. \ No newline at end of file diff --git a/src/unified_web_ui/app.py b/src/unified_web_ui/app.py index 00bc9b6d..6a0f742d 100644 --- a/src/unified_web_ui/app.py +++ b/src/unified_web_ui/app.py @@ -114,6 +114,12 @@ def rgb_to_hex(r, g, b): # FLASK ROUTES # ================================================================================================= +from datetime import datetime + +@app.context_processor +def inject_now(): + return {'now': datetime.utcnow} + @app.before_request def before_request(): g.detected_cams_info = [] @@ -140,6 +146,17 @@ def video_feed(stream_id): time.sleep(0.016) # Roughly 60 fps return Response(generate(stream_id), mimetype='multipart/x-mixed-replace; boundary=frame') +@app.route('/segmentation_feed/<int:stream_id>') +def segmentation_feed(stream_id): + def generate(stream_id): + while True: + frame = gst_thread.get_seg_frame_by_id(stream_id) + if frame: + yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n') + time.sleep(0.016) # Roughly 60 fps + return Response(generate(stream_id), mimetype='multipart/x-mixed-replace; boundary=frame') + + @app.route('/get_fps') def get_fps(): return jsonify(fps=gst_thread.get_fps()) diff --git a/src/unified_web_ui/gstreamer_pipeline.py b/src/unified_web_ui/gstreamer_pipeline.py index 76639611..87e363ec 100644 --- a/src/unified_web_ui/gstreamer_pipeline.py +++ b/src/unified_web_ui/gstreamer_pipeline.py @@ -17,6 +17,8 @@ class GStreamerPipeline(threading.Thread): self.frame_buffers = [None] * self.target_num_cams self.buffer_locks = [threading.Lock() for _ in range(self.target_num_cams)] + self.seg_frame_buffers = [None] * self.target_num_cams + self.seg_buffer_locks = [threading.Lock() for _ in range(self.target_num_cams)] self.current_fps = 0.0 # Will still report overall FPS, 
not per stream self.frame_count = 0 self.start_time = time.time() @@ -40,6 +42,23 @@ class GStreamerPipeline(threading.Thread): print("GStreamer pipeline failed to build.") + def on_new_seg_sample_factory(self, stream_id): + def on_new_sample(sink): + sample = sink.emit("pull-sample") + if not sample: return Gst.FlowReturn.ERROR + + buffer = sample.get_buffer() + success, map_info = buffer.map(Gst.MapFlags.READ) + if not success: return Gst.FlowReturn.ERROR + + with self.seg_buffer_locks[stream_id]: + self.seg_frame_buffers[stream_id] = bytes(map_info.data) + + buffer.unmap(map_info) + return Gst.FlowReturn.OK + + return on_new_sample + def on_new_sample_factory(self, stream_id): def on_new_sample(sink): sample = sink.emit("pull-sample") @@ -109,12 +128,18 @@ class GStreamerPipeline(threading.Thread): f"{mono_settings} ! " "video/x-raw,format=GRAY8 ! " "videoconvert ! " + f"tee name=t_{i} ! " + "queue ! " "video/x-raw,format=I420 ! " "nvvideoconvert compute-hw=1 ! " f"video/x-raw(memory:NVMM), format=NV12, width={self.internal_width}, height={self.internal_height}, framerate=60/1 ! " f"nvjpegenc quality=60 ! " - f"appsink name=sink_{i} emit-signals=True sync=False max-buffers=1 drop=True" + f"appsink name=sink_{i} emit-signals=True sync=False max-buffers=1 drop=True " + f"t_{i}. ! queue ! " + "videoconvert ! 
" # Placeholder for DeepStream + f"appsink name=seg_sink_{i} emit-signals=True sync=False max-buffers=1 drop=True" ) + else: # Placeholder for disconnected cameras source_and_sink = ( @@ -149,6 +174,10 @@ class GStreamerPipeline(threading.Thread): appsink.connect("new-sample", self.on_new_sample_factory(i)) else: print(f"Error: appsink_{i} not found in pipeline.") + + segsink = self.pipeline.get_by_name(f"seg_sink_{i}") + if segsink: + segsink.connect("new-sample", self.on_new_seg_sample_factory(i)) def get_frame_by_id(self, stream_id): if 0 <= stream_id < self.target_num_cams: @@ -156,5 +185,11 @@ class GStreamerPipeline(threading.Thread): return self.frame_buffers[stream_id] return None + def get_seg_frame_by_id(self, stream_id): + if 0 <= stream_id < self.target_num_cams: + with self.seg_buffer_locks[stream_id]: + return self.seg_frame_buffers[stream_id] + return None + def get_fps(self): return round(self.current_fps, 1) \ No newline at end of file diff --git a/src/unified_web_ui/templates/index.html b/src/unified_web_ui/templates/index.html index d8cf5d70..55aa6390 100644 --- a/src/unified_web_ui/templates/index.html +++ b/src/unified_web_ui/templates/index.html @@ -115,7 +115,7 @@ {% set cam_info = detected_cams_info[cam_index] %} {% if cam_info.is_color %}
- +
{{ cam_info.model }} ({{ 'Color' if cam_info.is_color else 'Mono' }})
{% endif %} @@ -126,7 +126,7 @@ {% set cam_info = detected_cams_info[cam_index] %} {% if not cam_info.is_color %}
- +
{{ cam_info.model }} ({{ 'Color' if cam_info.is_color else 'Mono' }})
{% endif %} @@ -135,6 +135,23 @@
FPS: --
+ +
+

Segmentation Feed

+
+
+ {% for cam_index in range(detected_cams_info|length) %} + {% set cam_info = detected_cams_info[cam_index] %} + {% if not cam_info.is_color %} +
+ +
{{ cam_info.model }} (Segmentation)
+
+ {% endif %} + {% endfor %} +
+
+