diff --git a/src/detectionSoftware/run.py b/src/detectionSoftware/run.py index c6d80cdf..6f357b58 100644 --- a/src/detectionSoftware/run.py +++ b/src/detectionSoftware/run.py @@ -1,10 +1,24 @@ import sys import subprocess import threading -import logging -from flask import Flask, Response, render_template_string +import os +import shutil +import time +import glob +from flask import Flask, Response, render_template_string, send_from_directory -# --- PART 1: ROBUST DETECTION (Unchanged) --- +# --- CONFIGURATION --- +HLS_DIR = "/tmp/hls_stream" +HLS_PLAYLIST = "stream.m3u8" +CAMERA_1_SERIAL = "40650847" +CAMERA_2_SERIAL = "40653314" + +# Ensure clean HLS directory +if os.path.exists(HLS_DIR): + shutil.rmtree(HLS_DIR) +os.makedirs(HLS_DIR) + +# --- PART 1: ROBUST DETECTION --- def detect_camera_resolution_isolated(): print("--- Spawning isolated process for detection ---") detection_script = """ @@ -39,23 +53,61 @@ except Exception: CAM_W, CAM_H = detect_camera_resolution_isolated() -# Calculate Grid +# --- RESOLUTION LOGIC --- STREAM_WIDTH = CAM_W STREAM_HEIGHT = CAM_H -TILED_WIDTH = CAM_W * 2 -TILED_HEIGHT = CAM_H + +# FIX: We calculate the Full "Virtual" Width +full_width = CAM_W * 2 +full_height = CAM_H + +# FIX: Then we scale it down to something the Encoder (and Browser) can handle. +# Target width: 1920 (Standard HD width). +# We calculate height to maintain aspect ratio. 
+TILED_WIDTH = 1920 +scale_factor = TILED_WIDTH / full_width +TILED_HEIGHT = int(full_height * scale_factor) + +# Ensure height is even (required for YUV420) +if TILED_HEIGHT % 2 != 0: TILED_HEIGHT += 1 + +print(f"Resolution Map: Input {STREAM_WIDTH}x{STREAM_HEIGHT} -> Tiled Output {TILED_WIDTH}x{TILED_HEIGHT}") + # --- PART 2: FLASK & GSTREAMER --- import gi gi.require_version('Gst', '1.0') from gi.repository import Gst, GLib -CAMERA_1_SERIAL = "40650847" -CAMERA_2_SERIAL = "40653314" - app = Flask(__name__) -frame_buffer = None -buffer_lock = threading.Lock() + +class PlaylistGenerator(threading.Thread): + def __init__(self): + super().__init__() + self.daemon = True + + def run(self): + while True: + time.sleep(1.0) + files = sorted(glob.glob(os.path.join(HLS_DIR, "*.ts")), key=os.path.getmtime) + + if len(files) > 6: + for f in files[:-6]: + try: os.remove(f) + except: pass + files = files[-6:] + + if not files: continue + + content = "#EXTM3U\n#EXT-X-VERSION:3\n#EXT-X-TARGETDURATION:2\n#EXT-X-MEDIA-SEQUENCE:0\n" + for f in files: + filename = os.path.basename(f) + content += "#EXTINF:2.000000,\n" + filename + "\n" + + with open(os.path.join(HLS_DIR, "temp.m3u8"), "w") as f: + f.write(content) + os.rename(os.path.join(HLS_DIR, "temp.m3u8"), os.path.join(HLS_DIR, HLS_PLAYLIST)) + class GStreamerPipeline(threading.Thread): def __init__(self): @@ -74,27 +126,7 @@ class GStreamerPipeline(threading.Thread): finally: self.pipeline.set_state(Gst.State.NULL) - def on_new_sample(self, sink): - sample = sink.emit("pull-sample") - if not sample: return Gst.FlowReturn.ERROR - - buffer = sample.get_buffer() - success, map_info = buffer.map(Gst.MapFlags.READ) - if not success: return Gst.FlowReturn.ERROR - - global frame_buffer - with buffer_lock: - # Copy memory from GPU buffer to CPU for Flask - frame_buffer = bytes(map_info.data) - - buffer.unmap(map_info) - return Gst.FlowReturn.OK - def build_pipeline(self): - # PERFORMANCE TUNING: - # 1. 
PacketSize=8192: Critical for GigE cameras to reduce CPU interrupts (Enable Jumbo Frames on NIC!) - # 2. FrameRate: Capped at 30 to prevent saturating the USB/GigE bus. - cam_settings = ( "cam::TriggerMode=Off " "cam::AcquisitionFrameRateEnable=true cam::AcquisitionFrameRate=30.0 " @@ -102,18 +134,16 @@ class GStreamerPipeline(threading.Thread): "cam::GainAuto=Continuous " ) - # Source 1: Mono -> CPU Convert (Light) -> GPU Upload src1 = ( f"pylonsrc device-serial-number={CAMERA_1_SERIAL} {cam_settings} ! " "video/x-raw,format=GRAY8 ! " - "videoconvert ! " # Light CPU load: Gray -> I420 + "videoconvert ! " "video/x-raw,format=I420 ! " - "nvvideoconvert compute-hw=1 ! " # Upload to GPU Memory (NVMM) - "video/x-raw(memory:NVMM) ! " # Explicitly state we want NVMM + "nvvideoconvert compute-hw=1 ! " + "video/x-raw(memory:NVMM) ! " "m.sink_0 " ) - # Source 2 src2 = ( f"pylonsrc device-serial-number={CAMERA_2_SERIAL} {cam_settings} ! " "video/x-raw,format=GRAY8 ! " @@ -124,49 +154,82 @@ class GStreamerPipeline(threading.Thread): "m.sink_1 " ) - # Muxer -> Tiler -> Hardware Encoder - # nvjpegenc is the key here. It uses the dedicated JPEG block on the Jetson. + # Processing + # The Tiler accepts the full resolution inputs, but OUTPUTS the scaled-down resolution (TILED_WIDTH). + # This is extremely efficient because the scaling happens on the GPU. processing = ( f"nvstreammux name=m batch-size=2 width={STREAM_WIDTH} height={STREAM_HEIGHT} live-source=1 ! " f"nvmultistreamtiler width={TILED_WIDTH} height={TILED_HEIGHT} rows=1 columns=2 ! " - "nvvideoconvert compute-hw=1 ! " # Ensure we are ready for encoding - "video/x-raw(memory:NVMM) ! " # Keep in NVMM - "nvjpegenc quality=85 ! " # HARDWARE ENCODE - "appsink name=sink emit-signals=True sync=False max-buffers=1 drop=True" + "nvvideoconvert compute-hw=1 ! " + "video/x-raw(memory:NVMM) ! " + f"nvv4l2h264enc bitrate=5000000 profile=2 preset-level=1 ! " # Reduced bitrate for 1080p width + "h264parse config-interval=1 ! 
" + "queue ! " + f"splitmuxsink location={HLS_DIR}/segment%05d.ts muxer=mpegtsmux max-size-time=2000000000 max-files=10" ) pipeline_str = f"{src1} {src2} {processing}" - print(f"Launching CUDA-Accelerated Pipeline...") + print(f"Launching Final Scaled Pipeline...") self.pipeline = Gst.parse_launch(pipeline_str) - - appsink = self.pipeline.get_by_name("sink") - appsink.connect("new-sample", self.on_new_sample) # --- Flask Routes --- + @app.route('/') def index(): return render_template_string(''' - -

Basler CUDA Feed

- - + + + + + + +

Basler AVC Feed

+ + + + ''') -@app.route('/video_feed') -def video_feed(): - def generate(): - while True: - with buffer_lock: - if frame_buffer: - yield (b'--frame\r\n' - b'Content-Type: image/jpeg\r\n\r\n' + frame_buffer + b'\r\n') - GLib.usleep(25000) # ~40 FPS cap for web - - return Response(generate(), mimetype='multipart/x-mixed-replace; boundary=frame') +@app.route('/hls/<path:filename>') +def serve_hls(filename): + return send_from_directory(HLS_DIR, filename) if __name__ == "__main__": gst_thread = GStreamerPipeline() gst_thread.daemon = True gst_thread.start() + + pl_thread = PlaylistGenerator() + pl_thread.start() + + print("Waiting for HLS segments...") app.run(host='0.0.0.0', port=5000, debug=False, threaded=True)