diff --git a/src/detectionSoftware/run.py b/src/detectionSoftware/run.py
index 9909eabd..c6d80cdf 100644
--- a/src/detectionSoftware/run.py
+++ b/src/detectionSoftware/run.py
@@ -1,22 +1,58 @@
 import sys
+import subprocess
 import threading
-import gi
 import logging
 from flask import Flask, Response, render_template_string
 
-# GStreamer dependencies
+# --- PART 1: ROBUST DETECTION (Unchanged) ---
+def detect_camera_resolution_isolated():
+    print("--- Spawning isolated process for detection ---")
+    detection_script = """
+import sys
+try:
+    from pypylon import pylon
+    tl_factory = pylon.TlFactory.GetInstance()
+    devices = tl_factory.EnumerateDevices()
+    if not devices:
+        print("0,0")
+    else:
+        cam = pylon.InstantCamera(tl_factory.CreateDevice(devices[0]))
+        cam.Open()
+        print(f"{cam.Width.GetValue()},{cam.Height.GetValue()}")
+        cam.Close()
+except Exception:
+    print("0,0")
+"""
+    try:
+        result = subprocess.run(
+            [sys.executable, "-c", detection_script],
+            capture_output=True, text=True, check=True
+        )
+        parts = result.stdout.strip().split(',')
+        w, h = int(parts[0]), int(parts[1])
+        if w == 0: return 1920, 1080
+        print(f"Isolated Detection Success: {w}x{h}")
+        return w, h
+    except Exception as e:
+        print(f"Subprocess detection failed: {e}")
+        return 1920, 1080
+
+CAM_W, CAM_H = detect_camera_resolution_isolated()
+
+# Calculate Grid
+STREAM_WIDTH = CAM_W
+STREAM_HEIGHT = CAM_H
+TILED_WIDTH = CAM_W * 2
+TILED_HEIGHT = CAM_H
+
+# --- PART 2: FLASK & GSTREAMER ---
+import gi
 gi.require_version('Gst', '1.0')
 from gi.repository import Gst, GLib
 
-# --- Configuration ---
 CAMERA_1_SERIAL = "40650847"
 CAMERA_2_SERIAL = "40653314"
-STREAM_WIDTH = 1920
-STREAM_HEIGHT = 1080
-TILED_WIDTH = 1920
-TILED_HEIGHT = 1080
-
 app = Flask(__name__)
 frame_buffer = None
 buffer_lock = threading.Lock()
 
@@ -44,65 +80,76 @@ class GStreamerPipeline(threading.Thread):
         buffer = sample.get_buffer()
         success, map_info = buffer.map(Gst.MapFlags.READ)
-
         if not success: return Gst.FlowReturn.ERROR
 
         global frame_buffer
         with buffer_lock:
+            # Copy memory from GPU buffer to CPU for Flask
             frame_buffer = bytes(map_info.data)
         buffer.unmap(map_info)
         return Gst.FlowReturn.OK
 
     def build_pipeline(self):
-        # FIX: Added 'compute-hw=1' to nvvideoconvert.
-        # This forces the conversion to happen on the GPU (CUDA) instead of the VIC,
-        # which fixes the "RGB/BGR not supported" error.
+        # PERFORMANCE TUNING:
+        # 1. PacketSize=8192: Critical for GigE cameras to reduce CPU interrupts (Enable Jumbo Frames on NIC!)
+        # 2. FrameRate: Capped at 30 to prevent saturating the USB/GigE bus.
+
+        cam_settings = (
+            "cam::TriggerMode=Off "
+            "cam::AcquisitionFrameRateEnable=true cam::AcquisitionFrameRate=30.0 "
+            "cam::ExposureAuto=Continuous "
+            "cam::GainAuto=Continuous "
+        )
 
-        # Source 1
+        # Source 1: Mono -> CPU Convert (Light) -> GPU Upload
         src1 = (
-            f"pylonsrc device-serial-number={CAMERA_1_SERIAL} "
-            "cam::TriggerMode=Off cam::AcquisitionFrameRateEnable=true cam::AcquisitionFrameRate=30.0 ! "
-            "videoconvert ! "
-            "nvvideoconvert compute-hw=1 ! "
+            f"pylonsrc device-serial-number={CAMERA_1_SERIAL} {cam_settings} ! "
+            "video/x-raw,format=GRAY8 ! "
+            "videoconvert ! "                 # Light CPU load: Gray -> I420
+            "video/x-raw,format=I420 ! "
+            "nvvideoconvert compute-hw=1 ! "  # Upload to GPU Memory (NVMM)
+            "video/x-raw(memory:NVMM) ! "     # Explicitly state we want NVMM
             "m.sink_0 "
         )
 
         # Source 2
         src2 = (
-            f"pylonsrc device-serial-number={CAMERA_2_SERIAL} "
-            "cam::TriggerMode=Off cam::AcquisitionFrameRateEnable=true cam::AcquisitionFrameRate=30.0 ! "
+            f"pylonsrc device-serial-number={CAMERA_2_SERIAL} {cam_settings} ! "
+            "video/x-raw,format=GRAY8 ! "
             "videoconvert ! "
+            "video/x-raw,format=I420 ! "
             "nvvideoconvert compute-hw=1 ! "
+            "video/x-raw(memory:NVMM) ! "
             "m.sink_1 "
         )
 
-        # Muxer -> Tiler -> Output
+        # Muxer -> Tiler -> Hardware Encoder
+        # nvjpegenc is the key here. It uses the dedicated JPEG block on the Jetson.
         processing = (
             f"nvstreammux name=m batch-size=2 width={STREAM_WIDTH} height={STREAM_HEIGHT} live-source=1 ! "
-            f"nvmultistreamtiler width={TILED_WIDTH} height={TILED_HEIGHT} rows=2 columns=1 ! "
-            "nvvideoconvert ! "
-            "video/x-raw, format=I420 ! "
-            "jpegenc quality=85 ! "
+            f"nvmultistreamtiler width={TILED_WIDTH} height={TILED_HEIGHT} rows=1 columns=2 ! "
+            "nvvideoconvert compute-hw=1 ! "  # Ensure we are ready for encoding
+            "video/x-raw(memory:NVMM) ! "     # Keep in NVMM
+            "nvjpegenc quality=85 ! "         # HARDWARE ENCODE
             "appsink name=sink emit-signals=True sync=False max-buffers=1 drop=True"
         )
 
         pipeline_str = f"{src1} {src2} {processing}"
-        print(f"Launching Pipeline (GPU Mode)...")
+        print(f"Launching CUDA-Accelerated Pipeline...")
         self.pipeline = Gst.parse_launch(pipeline_str)
 
         appsink = self.pipeline.get_by_name("sink")
         appsink.connect("new-sample", self.on_new_sample)
 
-# --- Flask ---
-
+# --- Flask Routes ---
 @app.route('/')
 def index():
     return render_template_string('''
-        <html><body>
-        <h2>Basler Feed</h2>
-        <img src="/video_feed">
-        </body></html>
+        <html><body>
+        <h2>Basler CUDA Feed</h2>
+        <img src="/video_feed">
+        </body></html>
     ''')
 
@@ -114,7 +161,7 @@ def video_feed():
             with buffer_lock:
                 if frame_buffer:
                     yield (b'--frame\r\n'
                            b'Content-Type: image/jpeg\r\n\r\n' + frame_buffer + b'\r\n')
-            GLib.usleep(33000)
+            GLib.usleep(25000)  # ~40 FPS cap for web
     return Response(generate(), mimetype='multipart/x-mixed-replace; boundary=frame')