Changed to adaptive camera count

This commit is contained in:
Tempest 2025-12-01 11:55:51 +07:00
parent 4a46b12c05
commit d12931641b

View File

@@ -5,68 +5,101 @@ import time
import gc
from flask import Flask, Response, render_template_string
# --- PART 1: ROBUST DETECTION ---
def detect_camera_config_isolated():
# Runs in a separate process to prevent driver locking
# --- PART 1: ADAPTIVE DETECTION ---
def scan_connected_cameras():
"""
Returns a list of serials ['400...', '400...'] and their config.
"""
print("--- Scanning for Basler Cameras ---")
detection_script = """
import sys
try:
from pypylon import pylon
tl_factory = pylon.TlFactory.GetInstance()
devices = tl_factory.EnumerateDevices()
if not devices:
print("0,0,0")
print("NONE")
else:
# Collect all serials
serials = [d.GetSerialNumber() for d in devices]
# Open the first one just to check capabilities/resolution
cam = pylon.InstantCamera(tl_factory.CreateDevice(devices[0]))
cam.Open()
# Check Binning support
supported = 0
try:
# Check if Binning is supported
cam.BinningHorizontal.Value = 2
cam.BinningVertical.Value = 2
w = cam.Width.GetValue()
h = cam.Height.GetValue()
cam.BinningHorizontal.Value = 1
cam.BinningVertical.Value = 1
print(f"{w},{h},1")
supported = 1
except:
print(f"{cam.Width.GetValue()},{cam.Height.GetValue()},0")
pass
w = cam.Width.GetValue()
h = cam.Height.GetValue()
cam.Close()
except Exception:
print("0,0,0")
# Output format: SERIAL1,SERIAL2|WIDTH|HEIGHT|BINNING_SUPPORTED
print(f"{','.join(serials)}|{w}|{h}|{supported}")
except Exception as e:
print(f"ERROR:{e}")
"""
try:
result = subprocess.run([sys.executable, "-c", detection_script], capture_output=True, text=True)
parts = result.stdout.strip().split(',')
w, h, supported = int(parts[0]), int(parts[1]), int(parts[2])
if w == 0: return 1920, 1080, False
return w, h, (supported == 1)
except: return 1920, 1080, False
output = result.stdout.strip()
if "NONE" in output or "ERROR" in output or not output:
print("No cameras detected!")
return [], 1920, 1080, False
CAM_W, CAM_H, BINNING_SUPPORTED = detect_camera_config_isolated()
# Parse output
parts = output.split('|')
serials_list = parts[0].split(',')
w = int(parts[1])
h = int(parts[2])
binning = (parts[3] == '1')
print(f"Found {len(serials_list)} cameras: {serials_list}")
return serials_list, w, h, binning
except Exception as e:
print(f"Scanner failed: {e}")
return [], 1920, 1080, False
# --- STABILITY CONFIGURATION ---
# We limit the internal processing resolution to 1280x960 (or 720p).
# This prevents the "Failed in mem copy" error by keeping buffers small.
# Run Scan
DETECTED_SERIALS, CAM_W, CAM_H, BINNING_SUPPORTED = scan_connected_cameras()
NUM_CAMS = len(DETECTED_SERIALS)
# --- DYNAMIC RESOLUTION ---
INTERNAL_WIDTH = 1280
scale = INTERNAL_WIDTH / CAM_W
INTERNAL_HEIGHT = int(CAM_H * scale)
if INTERNAL_HEIGHT % 2 != 0: INTERNAL_HEIGHT += 1
TILED_WIDTH = 1280
scale_tiled = TILED_WIDTH / (INTERNAL_WIDTH * 2)
TILED_HEIGHT = int(INTERNAL_HEIGHT * scale_tiled)
if TILED_HEIGHT % 2 != 0: TILED_HEIGHT += 1
# Web Tiling Logic
WEB_WIDTH = 1280
if NUM_CAMS > 0:
# If 1 camera: Output is 1280x960
# If 2 cameras: Output is 1280x(Height scaled for 2-wide)
total_source_width = INTERNAL_WIDTH * NUM_CAMS
scale_tiled = WEB_WIDTH / total_source_width
WEB_HEIGHT = int(INTERNAL_HEIGHT * scale_tiled)
if WEB_HEIGHT % 2 != 0: WEB_HEIGHT += 1
else:
WEB_HEIGHT = 720 # Fallback
print(f"STABILITY MODE: Input {CAM_W}x{CAM_H} -> Pre-Scale {INTERNAL_WIDTH}x{INTERNAL_HEIGHT}")
print(f"ADAPTIVE MODE: Found {NUM_CAMS} Cams -> Layout {NUM_CAMS}x1 -> Web {WEB_WIDTH}x{WEB_HEIGHT}")
# --- FLASK & GSTREAMER ---
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib
CAMERA_1_SERIAL = "40650847"
CAMERA_2_SERIAL = "40653314"
app = Flask(__name__)
frame_buffer = None
buffer_lock = threading.Lock()
@@ -104,7 +137,20 @@ class GStreamerPipeline(threading.Thread):
return Gst.FlowReturn.OK
def build_pipeline(self):
# Settings optimized for USB3 stability
# Handle 0 Cameras gracefully (Placeholder)
if NUM_CAMS == 0:
print("Launching Placeholder Pipeline (No Cameras)...")
# Uses 'videotestsrc' to generate a test pattern so the web UI works
pipeline_str = (
f"videotestsrc pattern=smpte ! video/x-raw,width={WEB_WIDTH},height={WEB_HEIGHT},framerate=30/1 ! "
"jpegenc ! appsink name=sink emit-signals=True sync=False max-buffers=1 drop=True"
)
self.pipeline = Gst.parse_launch(pipeline_str)
appsink = self.pipeline.get_by_name("sink")
appsink.connect("new-sample", self.on_new_sample)
return
# Settings
settings = (
"cam::TriggerMode=Off "
"cam::AcquisitionFrameRateEnable=true cam::AcquisitionFrameRate=60.0 "
@@ -116,49 +162,41 @@ class GStreamerPipeline(threading.Thread):
if BINNING_SUPPORTED:
settings += "cam::BinningHorizontal=2 cam::BinningVertical=2 "
# --- PRE-SCALER ---
# Converts to NV12 and scales down immediately to save RAM
# Pre-scaler (Crucial for stability)
pre_scale = (
"nvvideoconvert compute-hw=1 ! "
"video/x-raw(memory:NVMM), format=NV12, "
f"width={INTERNAL_WIDTH}, height={INTERNAL_HEIGHT} ! "
)
src1 = (
f"pylonsrc device-serial-number={CAMERA_1_SERIAL} {settings} ! "
"video/x-raw,format=GRAY8 ! "
"videoconvert ! "
"video/x-raw,format=I420 ! "
"nvvideoconvert compute-hw=1 ! "
"video/x-raw(memory:NVMM) ! "
f"{pre_scale}"
"m.sink_0 "
)
# 1. GENERATE SOURCES DYNAMICALLY
sources_str = ""
for i, serial in enumerate(DETECTED_SERIALS):
sources_str += (
f"pylonsrc device-serial-number={serial} {settings} ! "
"video/x-raw,format=GRAY8 ! "
"videoconvert ! "
"video/x-raw,format=I420 ! "
"nvvideoconvert compute-hw=1 ! "
"video/x-raw(memory:NVMM) ! "
f"{pre_scale}"
f"m.sink_{i} " # Link to the correct pad (0, 1, 2...)
)
src2 = (
f"pylonsrc device-serial-number={CAMERA_2_SERIAL} {settings} ! "
"video/x-raw,format=GRAY8 ! "
"videoconvert ! "
"video/x-raw,format=I420 ! "
"nvvideoconvert compute-hw=1 ! "
"video/x-raw(memory:NVMM) ! "
f"{pre_scale}"
"m.sink_1 "
)
# Muxer -> Tiler -> MJPEG Encode
# 2. CONFIGURE MUXER & TILER
# Batch size MUST match number of cameras
processing = (
f"nvstreammux name=m batch-size=2 width={INTERNAL_WIDTH} height={INTERNAL_HEIGHT} live-source=1 ! "
f"nvmultistreamtiler width={TILED_WIDTH} height={TILED_HEIGHT} rows=1 columns=2 ! "
f"nvstreammux name=m batch-size={NUM_CAMS} width={INTERNAL_WIDTH} height={INTERNAL_HEIGHT} live-source=1 ! "
f"nvmultistreamtiler width={WEB_WIDTH} height={WEB_HEIGHT} rows=1 columns={NUM_CAMS} ! "
"nvvideoconvert compute-hw=1 ! "
"video/x-raw(memory:NVMM) ! "
f"nvjpegenc quality=60 ! "
"appsink name=sink emit-signals=True sync=False max-buffers=1 drop=True"
)
pipeline_str = f"{src1} {src2} {processing}"
pipeline_str = f"{sources_str} {processing}"
print(f"Launching Pipeline...")
print(f"Launching ADAPTIVE Pipeline ({NUM_CAMS} Cameras)...")
self.pipeline = Gst.parse_launch(pipeline_str)
appsink = self.pipeline.get_by_name("sink")
@@ -169,15 +207,17 @@ class GStreamerPipeline(threading.Thread):
def index():
return render_template_string('''
<html><body style="background:#111; color:white; text-align:center;">
<h1>Basler Stable Feed</h1>
<h1>Basler Feed ({{ num }} Cameras)</h1>
{% if num == 0 %}
<h2 style="color:red">NO CAMERAS DETECTED</h2>
{% endif %}
<img src="{{ url_for('video_feed') }}" style="border: 2px solid #4CAF50; width:95%;">
</body></html>
''')
''', num=NUM_CAMS)
@app.route('/video_feed')
def video_feed():
def generate():
# FIX: Local counter variable initialized here
count = 0
while True:
with buffer_lock:
@@ -185,18 +225,13 @@ def video_feed():
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame_buffer + b'\r\n')
time.sleep(0.016)
# FIX: Increment and check local counter
count += 1
if count % 200 == 0:
gc.collect()
if count % 200 == 0: gc.collect()
return Response(generate(), mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == "__main__":
# Initial cleanup
subprocess.run([sys.executable, "-c", "import gc; gc.collect()"])
gst_thread = GStreamerPipeline()
gst_thread.daemon = True
gst_thread.start()