Stable Dual Camera setup
This commit is contained in:
parent
d11288165b
commit
4a46b12c05
@ -1,26 +1,13 @@
|
||||
import sys
|
||||
import subprocess
|
||||
import threading
|
||||
import os
|
||||
import shutil
|
||||
import time
|
||||
import glob
|
||||
from flask import Flask, Response, render_template_string, send_from_directory
|
||||
|
||||
# --- CONFIGURATION ---
|
||||
HLS_DIR = "/tmp/hls_stream"
|
||||
HLS_PLAYLIST = "stream.m3u8"
|
||||
CAMERA_1_SERIAL = "40650847"
|
||||
CAMERA_2_SERIAL = "40653314"
|
||||
|
||||
# Ensure clean HLS directory
|
||||
if os.path.exists(HLS_DIR):
|
||||
shutil.rmtree(HLS_DIR)
|
||||
os.makedirs(HLS_DIR)
|
||||
import gc
|
||||
from flask import Flask, Response, render_template_string
|
||||
|
||||
# --- PART 1: ROBUST DETECTION ---
|
||||
def detect_camera_resolution_isolated():
|
||||
print("--- Spawning isolated process for detection ---")
|
||||
def detect_camera_config_isolated():
|
||||
# Runs in a separate process to prevent driver locking
|
||||
detection_script = """
|
||||
import sys
|
||||
try:
|
||||
@ -28,86 +15,61 @@ try:
|
||||
tl_factory = pylon.TlFactory.GetInstance()
|
||||
devices = tl_factory.EnumerateDevices()
|
||||
if not devices:
|
||||
print("0,0")
|
||||
print("0,0,0")
|
||||
else:
|
||||
cam = pylon.InstantCamera(tl_factory.CreateDevice(devices[0]))
|
||||
cam.Open()
|
||||
print(f"{cam.Width.GetValue()},{cam.Height.GetValue()}")
|
||||
try:
|
||||
# Check if Binning is supported
|
||||
cam.BinningHorizontal.Value = 2
|
||||
cam.BinningVertical.Value = 2
|
||||
w = cam.Width.GetValue()
|
||||
h = cam.Height.GetValue()
|
||||
cam.BinningHorizontal.Value = 1
|
||||
cam.BinningVertical.Value = 1
|
||||
print(f"{w},{h},1")
|
||||
except:
|
||||
print(f"{cam.Width.GetValue()},{cam.Height.GetValue()},0")
|
||||
cam.Close()
|
||||
except Exception:
|
||||
print("0,0")
|
||||
print("0,0,0")
|
||||
"""
|
||||
try:
|
||||
result = subprocess.run(
|
||||
[sys.executable, "-c", detection_script],
|
||||
capture_output=True, text=True, check=True
|
||||
)
|
||||
result = subprocess.run([sys.executable, "-c", detection_script], capture_output=True, text=True)
|
||||
parts = result.stdout.strip().split(',')
|
||||
w, h = int(parts[0]), int(parts[1])
|
||||
if w == 0: return 1920, 1080
|
||||
print(f"Isolated Detection Success: {w}x{h}")
|
||||
return w, h
|
||||
except Exception as e:
|
||||
print(f"Subprocess detection failed: {e}")
|
||||
return 1920, 1080
|
||||
w, h, supported = int(parts[0]), int(parts[1]), int(parts[2])
|
||||
if w == 0: return 1920, 1080, False
|
||||
return w, h, (supported == 1)
|
||||
except: return 1920, 1080, False
|
||||
|
||||
CAM_W, CAM_H = detect_camera_resolution_isolated()
|
||||
CAM_W, CAM_H, BINNING_SUPPORTED = detect_camera_config_isolated()
|
||||
|
||||
# --- RESOLUTION LOGIC ---
|
||||
STREAM_WIDTH = CAM_W
|
||||
STREAM_HEIGHT = CAM_H
|
||||
# --- STABILITY CONFIGURATION ---
|
||||
# We limit the internal processing resolution to 1280x960 (or 720p).
|
||||
# This prevents the "Failed in mem copy" error by keeping buffers small.
|
||||
INTERNAL_WIDTH = 1280
|
||||
scale = INTERNAL_WIDTH / CAM_W
|
||||
INTERNAL_HEIGHT = int(CAM_H * scale)
|
||||
if INTERNAL_HEIGHT % 2 != 0: INTERNAL_HEIGHT += 1
|
||||
|
||||
# FIX: We calculate the Full "Virtual" Width
|
||||
full_width = CAM_W * 2
|
||||
full_height = CAM_H
|
||||
|
||||
# FIX: Then we scale it down to something the Encoder (and Browser) can handle.
|
||||
# Target width: 1920 (Standard HD width).
|
||||
# We calculate height to maintain aspect ratio.
|
||||
TILED_WIDTH = 1920
|
||||
scale_factor = TILED_WIDTH / full_width
|
||||
TILED_HEIGHT = int(full_height * scale_factor)
|
||||
|
||||
# Ensure height is even (required for YUV420)
|
||||
TILED_WIDTH = 1280
|
||||
scale_tiled = TILED_WIDTH / (INTERNAL_WIDTH * 2)
|
||||
TILED_HEIGHT = int(INTERNAL_HEIGHT * scale_tiled)
|
||||
if TILED_HEIGHT % 2 != 0: TILED_HEIGHT += 1
|
||||
|
||||
print(f"Resolution Map: Input {STREAM_WIDTH}x{STREAM_HEIGHT} -> Tiled Output {TILED_WIDTH}x{TILED_HEIGHT}")
|
||||
print(f"STABILITY MODE: Input {CAM_W}x{CAM_H} -> Pre-Scale {INTERNAL_WIDTH}x{INTERNAL_HEIGHT}")
|
||||
|
||||
|
||||
# --- PART 2: FLASK & GSTREAMER ---
|
||||
# --- FLASK & GSTREAMER ---
|
||||
import gi
|
||||
gi.require_version('Gst', '1.0')
|
||||
from gi.repository import Gst, GLib
|
||||
|
||||
CAMERA_1_SERIAL = "40650847"
|
||||
CAMERA_2_SERIAL = "40653314"
|
||||
|
||||
app = Flask(__name__)
|
||||
|
||||
class PlaylistGenerator(threading.Thread):
    """Daemon thread that maintains a rolling HLS playlist.

    Once per second it lists the ``.ts`` segments in HLS_DIR, deletes all but
    the six newest, and rewrites the playlist via a temp-file-then-rename so
    clients never observe a half-written manifest.
    """

    def __init__(self):
        super().__init__()
        # Daemon: must not keep the interpreter alive at shutdown.
        self.daemon = True

    def run(self):
        while True:
            time.sleep(1.0)
            segments = sorted(glob.glob(os.path.join(HLS_DIR, "*.ts")),
                              key=os.path.getmtime)

            # Keep only the six most recent segments on disk.
            if len(segments) > 6:
                for stale in segments[:-6]:
                    try:
                        os.remove(stale)
                    except OSError:
                        # Best-effort cleanup: the encoder or a previous pass
                        # may already have removed the file.
                        pass
                segments = segments[-6:]

            if not segments:
                continue

            content = "#EXTM3U\n#EXT-X-VERSION:3\n#EXT-X-TARGETDURATION:2\n#EXT-X-MEDIA-SEQUENCE:0\n"
            for seg in segments:
                content += "#EXTINF:2.000000,\n" + os.path.basename(seg) + "\n"

            # Write to a temp file first; os.rename is atomic on POSIX, so
            # HTTP readers always see either the old or the new playlist.
            tmp_path = os.path.join(HLS_DIR, "temp.m3u8")
            with open(tmp_path, "w") as fh:
                fh.write(content)
            os.rename(tmp_path, os.path.join(HLS_DIR, HLS_PLAYLIST))
|
||||
|
||||
frame_buffer = None
|
||||
buffer_lock = threading.Lock()
|
||||
|
||||
class GStreamerPipeline(threading.Thread):
|
||||
def __init__(self):
|
||||
@ -126,110 +88,116 @@ class GStreamerPipeline(threading.Thread):
|
||||
finally:
|
||||
self.pipeline.set_state(Gst.State.NULL)
|
||||
|
||||
def on_new_sample(self, sink):
    """Appsink callback: copy the newest encoded frame into the shared buffer.

    Runs on the GStreamer streaming thread. The bytes are published through
    the module-level frame_buffer under buffer_lock for the Flask side.
    Returns Gst.FlowReturn.OK on success, .ERROR if no sample or map failure.
    """
    sample = sink.emit("pull-sample")
    if not sample:
        return Gst.FlowReturn.ERROR

    buffer = sample.get_buffer()
    success, map_info = buffer.map(Gst.MapFlags.READ)
    if not success:
        return Gst.FlowReturn.ERROR

    global frame_buffer
    try:
        # Copy out of the mapped region so it can be unmapped immediately.
        with buffer_lock:
            frame_buffer = bytes(map_info.data)
    finally:
        # Robustness fix: guarantee the unmap even if the copy raises,
        # otherwise the GStreamer buffer would leak its mapping.
        buffer.unmap(map_info)
    return Gst.FlowReturn.OK
|
||||
|
||||
def build_pipeline(self):
|
||||
cam_settings = (
|
||||
# Settings optimized for USB3 stability
|
||||
settings = (
|
||||
"cam::TriggerMode=Off "
|
||||
"cam::AcquisitionFrameRateEnable=true cam::AcquisitionFrameRate=30.0 "
|
||||
"cam::ExposureAuto=Continuous "
|
||||
"cam::AcquisitionFrameRateEnable=true cam::AcquisitionFrameRate=60.0 "
|
||||
"cam::ExposureAuto=Off "
|
||||
"cam::ExposureTime=20000.0 "
|
||||
"cam::GainAuto=Continuous "
|
||||
"cam::DeviceLinkThroughputLimitMode=Off "
|
||||
)
|
||||
if BINNING_SUPPORTED:
|
||||
settings += "cam::BinningHorizontal=2 cam::BinningVertical=2 "
|
||||
|
||||
# --- PRE-SCALER ---
|
||||
# Converts to NV12 and scales down immediately to save RAM
|
||||
pre_scale = (
|
||||
"nvvideoconvert compute-hw=1 ! "
|
||||
"video/x-raw(memory:NVMM), format=NV12, "
|
||||
f"width={INTERNAL_WIDTH}, height={INTERNAL_HEIGHT} ! "
|
||||
)
|
||||
|
||||
src1 = (
|
||||
f"pylonsrc device-serial-number={CAMERA_1_SERIAL} {cam_settings} ! "
|
||||
f"pylonsrc device-serial-number={CAMERA_1_SERIAL} {settings} ! "
|
||||
"video/x-raw,format=GRAY8 ! "
|
||||
"videoconvert ! "
|
||||
"video/x-raw,format=I420 ! "
|
||||
"nvvideoconvert compute-hw=1 ! "
|
||||
"video/x-raw(memory:NVMM) ! "
|
||||
f"{pre_scale}"
|
||||
"m.sink_0 "
|
||||
)
|
||||
|
||||
src2 = (
|
||||
f"pylonsrc device-serial-number={CAMERA_2_SERIAL} {cam_settings} ! "
|
||||
f"pylonsrc device-serial-number={CAMERA_2_SERIAL} {settings} ! "
|
||||
"video/x-raw,format=GRAY8 ! "
|
||||
"videoconvert ! "
|
||||
"video/x-raw,format=I420 ! "
|
||||
"nvvideoconvert compute-hw=1 ! "
|
||||
"video/x-raw(memory:NVMM) ! "
|
||||
f"{pre_scale}"
|
||||
"m.sink_1 "
|
||||
)
|
||||
|
||||
# Processing
|
||||
# The Tiler accepts the full resolution inputs, but OUTPUTS the scaled-down resolution (TILED_WIDTH).
|
||||
# This is extremely efficient because the scaling happens on the GPU.
|
||||
# Muxer -> Tiler -> MJPEG Encode
|
||||
processing = (
|
||||
f"nvstreammux name=m batch-size=2 width={STREAM_WIDTH} height={STREAM_HEIGHT} live-source=1 ! "
|
||||
f"nvstreammux name=m batch-size=2 width={INTERNAL_WIDTH} height={INTERNAL_HEIGHT} live-source=1 ! "
|
||||
f"nvmultistreamtiler width={TILED_WIDTH} height={TILED_HEIGHT} rows=1 columns=2 ! "
|
||||
"nvvideoconvert compute-hw=1 ! "
|
||||
"video/x-raw(memory:NVMM) ! "
|
||||
f"nvv4l2h264enc bitrate=5000000 profile=2 preset-level=1 ! " # Reduced bitrate for 1080p width
|
||||
"h264parse config-interval=1 ! "
|
||||
"queue ! "
|
||||
f"splitmuxsink location={HLS_DIR}/segment%05d.ts muxer=mpegtsmux max-size-time=2000000000 max-files=10"
|
||||
f"nvjpegenc quality=60 ! "
|
||||
"appsink name=sink emit-signals=True sync=False max-buffers=1 drop=True"
|
||||
)
|
||||
|
||||
pipeline_str = f"{src1} {src2} {processing}"
|
||||
|
||||
print(f"Launching Final Scaled Pipeline...")
|
||||
print(f"Launching Pipeline...")
|
||||
self.pipeline = Gst.parse_launch(pipeline_str)
|
||||
|
||||
appsink = self.pipeline.get_by_name("sink")
|
||||
appsink.connect("new-sample", self.on_new_sample)
|
||||
|
||||
# --- Flask Routes ---
|
||||
|
||||
@app.route('/')
|
||||
def index():
|
||||
return render_template_string('''
|
||||
<html>
|
||||
<head>
|
||||
<script src="https://cdn.jsdelivr.net/npm/hls.js@latest"></script>
|
||||
<style>body { background: #111; text-align: center; color: white; }</style>
|
||||
</head>
|
||||
<body>
|
||||
<h1>Basler AVC Feed</h1>
|
||||
<video id="video" controls autoplay muted style="width: 90%; border: 2px solid #4CAF50;"></video>
|
||||
<script>
|
||||
var video = document.getElementById('video');
|
||||
var videoSrc = "/hls/stream.m3u8";
|
||||
|
||||
function loadStream() {
|
||||
if (Hls.isSupported()) {
|
||||
var hls = new Hls({
|
||||
maxBufferLength: 5,
|
||||
liveSyncDurationCount: 3,
|
||||
lowLatencyMode: true
|
||||
});
|
||||
hls.loadSource(videoSrc);
|
||||
hls.attachMedia(video);
|
||||
hls.on(Hls.Events.MANIFEST_PARSED, function() { video.play(); });
|
||||
hls.on(Hls.Events.ERROR, function (event, data) {
|
||||
if (data.fatal) hls.startLoad();
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
var checkExist = setInterval(function() {
|
||||
fetch(videoSrc).then(function(response) {
|
||||
if (response.status == 200) {
|
||||
clearInterval(checkExist);
|
||||
loadStream();
|
||||
}
|
||||
});
|
||||
}, 1000);
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
<html><body style="background:#111; color:white; text-align:center;">
|
||||
<h1>Basler Stable Feed</h1>
|
||||
<img src="{{ url_for('video_feed') }}" style="border: 2px solid #4CAF50; width:95%;">
|
||||
</body></html>
|
||||
''')
|
||||
|
||||
@app.route('/hls/<path:filename>')
def serve_hls(filename):
    """Serve HLS playlist and segment files straight out of HLS_DIR."""
    hls_root = HLS_DIR
    return send_from_directory(hls_root, filename)
|
||||
@app.route('/video_feed')
def video_feed():
    """MJPEG endpoint: multipart response that streams the latest JPEG frame."""
    def generate():
        count = 0
        while True:
            # Snapshot the frame under the lock, but perform the (potentially
            # slow) network yield OUTSIDE it — otherwise a slow client would
            # block the GStreamer on_new_sample callback on buffer_lock.
            with buffer_lock:
                frame = frame_buffer
            if frame:
                yield (b'--frame\r\n'
                       b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
            time.sleep(0.016)  # ~60 fps pacing for the push loop

            # Local counter: periodic GC keeps long-lived streaming
            # generators from accumulating reference cycles.
            count += 1
            if count % 200 == 0:
                gc.collect()

    return Response(generate(), mimetype='multipart/x-mixed-replace; boundary=frame')
|
||||
|
||||
if __name__ == "__main__":
    # Reclaim import-time garbage in THIS process before streaming starts.
    # (The previous code spawned a child interpreter just to run gc.collect(),
    # which could not affect this process's memory at all.)
    gc.collect()

    gst_thread = GStreamerPipeline()
    gst_thread.daemon = True
    gst_thread.start()

    pl_thread = PlaylistGenerator()
    pl_thread.start()

    print("Waiting for HLS segments...")
    app.run(host='0.0.0.0', port=5000, debug=False, threaded=True)
|
||||
|
||||
58
src/detectionSoftware/test.py
Normal file
58
src/detectionSoftware/test.py
Normal file
@ -0,0 +1,58 @@
|
||||
from pypylon import pylon
|
||||
import time
|
||||
import sys
|
||||
|
||||
# --- Camera exposure / FPS diagnostic ---
# Connects to the first Basler camera found, resets it to factory defaults,
# lets auto exposure/gain settle, then reports whether the measured exposure
# time physically allows a 30 FPS capture rate.
try:
    # Get the Transport Layer Factory and enumerate attached cameras.
    tl_factory = pylon.TlFactory.GetInstance()
    devices = tl_factory.EnumerateDevices()

    if not devices:
        print("No cameras found!")
        sys.exit(1)

    print(f"Found {len(devices)} cameras. Checking Camera 1...")

    # Connect to the first camera.
    cam = pylon.InstantCamera(tl_factory.CreateDevice(devices[0]))
    cam.Open()
    try:
        # 1. Reset to factory defaults so earlier sessions can't skew readings.
        print("Resetting to Defaults...")
        cam.UserSetSelector.Value = "Default"
        cam.UserSetLoad.Execute()

        # 2. Enable auto exposure/gain so the camera adapts to current light.
        print("Enabling Auto Exposure & Gain...")
        cam.ExposureAuto.Value = "Continuous"
        cam.GainAuto.Value = "Continuous"

        # 3. Give the auto algorithms time to converge before reading back.
        print("Waiting 3 seconds for auto-adjustment...")
        for remaining in range(3, 0, -1):
            print(f"{remaining}...")
            time.sleep(1)

        # 4. Read back the settled values.
        current_exposure = cam.ExposureTime.GetValue()  # microseconds (us)
        current_fps_readout = cam.ResultingFrameRate.GetValue()

        print("-" * 30)
        print(f"REPORT FOR SERIAL: {cam.GetDeviceInfo().GetSerialNumber()}")
        print("-" * 30)
        print(f"Current Exposure Time: {current_exposure:.1f} us ({current_exposure/1000:.1f} ms)")
        # Guard a zero reading so the report can't die on ZeroDivisionError.
        if current_exposure > 0:
            print(f"Theoretical Max FPS: {1000000 / current_exposure:.1f} FPS")
        print(f"Camera Internal FPS: {current_fps_readout:.1f} FPS")
        print("-" * 30)

        # 33 ms is the longest exposure compatible with 30 FPS (1/30 s).
        if current_exposure > 33000:
            print("⚠️ PROBLEM FOUND: Exposure is > 33ms.")
            print("   This physically prevents the camera from reaching 30 FPS.")
            print("   Solution: Add more light or limit AutoExposureUpperLimit.")
        else:
            print("✅ Exposure looks fast enough for 30 FPS.")
    finally:
        # Always release the camera, even if a read fails mid-report,
        # so the next run doesn't find the device locked.
        cam.Close()

except Exception as e:
    print(f"Error: {e}")
|
||||
Loading…
Reference in New Issue
Block a user