fix: Optimize multi-dongle inference for proper parallel processing

- Enable USB timeout (5000ms) for stable communication
- Increase send thread queue timeout from 0.1s to 1.0s for better blocking
- Update WebcamInferenceRunner to use async pattern (non-blocking)
- Add non-blocking put_input option so capture never stalls (frames are dropped when the input queue is full)
- Improve thread stopping mechanism with better cleanup

These changes follow the Kneron official example pattern and should
enable proper parallel processing across multiple dongles.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
Masonmason 2025-07-24 10:39:20 +08:00
parent cb9dff10a9
commit bc92761a83

View File

@ -289,9 +289,9 @@ class MultiDongle:
sys.exit(1)
# setting timeout of the usb communication with the device
# print('[Set Device Timeout]')
# kp.core.set_timeout(device_group=self.device_group, milliseconds=5000)
# print(' - Success')
print('[Set Device Timeout]')
kp.core.set_timeout(device_group=self.device_group, milliseconds=5000)
print(' - Success')
# if self.upload_fw:
try:
@ -417,19 +417,19 @@ class MultiDongle:
try:
# Get image and format from the input queue
# Blocks until an item is available or stop event is set/timeout occurs
# Block until an item is available
try:
# Use get with timeout or check stop event in a loop
# This pattern allows thread to check stop event while waiting on queue
item = self._input_queue.get(block=True, timeout=0.1)
# Check if this is our sentinel value
item = self._input_queue.get(block=True, timeout=1.0)
# Check if this is our sentinel value to stop
if item is None:
continue
break
# Now safely unpack the tuple
image_data, image_format_enum = item
except queue.Empty:
# If queue is empty after timeout, check stop event and continue loop
# Check stop event and continue if timeout
if self._stop_event.is_set():
break
continue
# Configure and send the image
@ -501,24 +501,26 @@ class MultiDongle:
print("Stopping threads...")
self._stop_event.set()
# Clear queues to unblock threads
while not self._input_queue.empty():
try:
self._input_queue.get_nowait()
except queue.Empty:
break
# Signal send thread to wake up
self._input_queue.put(None)
# Signal send thread to stop by putting sentinel value
try:
self._input_queue.put(None, timeout=1.0)
except queue.Full:
# Clear queue if it's full and try again
while not self._input_queue.empty():
try:
self._input_queue.get_nowait()
except queue.Empty:
break
self._input_queue.put(None)
# Join threads with timeout
for thread, name in [(self._send_thread, "Send"), (self._receive_thread, "Receive")]:
if thread and thread.is_alive():
thread.join(timeout=2.0)
thread.join(timeout=3.0)
if thread.is_alive():
print(f"Warning: {name} thread didn't stop cleanly")
def put_input(self, image: Union[str, np.ndarray], format: str, target_size: Tuple[int, int] = None):
def put_input(self, image: Union[str, np.ndarray], format: str, target_size: Tuple[int, int] = None, block: bool = True, timeout: float = None):
"""
Put an image into the input queue with flexible preprocessing
"""
@ -539,7 +541,13 @@ class MultiDongle:
else:
raise ValueError(f"Unsupported format: {format}")
self._input_queue.put((image_data, image_format_enum))
try:
self._input_queue.put((image_data, image_format_enum), block=block, timeout=timeout)
except queue.Full:
if not block:
print("Warning: Input queue is full, dropping frame")
else:
raise
def get_output(self, timeout: float = None):
"""
@ -687,12 +695,12 @@ class WebcamInferenceRunner:
self.display_fps_start_time = time.time()
self.display_frame_counter += 1
# Preprocess and send frame
# Preprocess and send frame (non-blocking)
processed_frame = self.multidongle.preprocess_frame(frame, self.image_format)
self.multidongle.put_input(processed_frame, self.image_format)
self.multidongle.put_input(processed_frame, self.image_format, block=False)
# Get inference result
prob, result = self.multidongle.get_latest_inference_result()
# Try to get inference result (non-blocking)
prob, result = self.multidongle.get_latest_inference_result(timeout=0.001)
if prob is not None:
# Track inference FPS
if self.inference_fps_start_time is None: