Merge branch 'main' of github.com:HuangMason320/cluster4npu

This commit is contained in:
HuangMason320 2025-07-24 12:53:13 +08:00
commit f9ba162c81

View File

@ -23,6 +23,8 @@ import sys
import json import json
import threading import threading
import traceback import traceback
import io
import contextlib
from typing import Dict, Any, List, Optional from typing import Dict, Any, List, Optional
from PyQt5.QtWidgets import ( from PyQt5.QtWidgets import (
QDialog, QVBoxLayout, QHBoxLayout, QLabel, QTextEdit, QPushButton, QDialog, QVBoxLayout, QHBoxLayout, QLabel, QTextEdit, QPushButton,
@ -54,6 +56,55 @@ except ImportError as e:
PIPELINE_AVAILABLE = False PIPELINE_AVAILABLE = False
class StdoutCapture:
    """Context manager that tees ``sys.stdout``/``sys.stderr`` to a signal emitter.

    While active, everything written to stdout/stderr still reaches the
    original streams (so output keeps appearing in the terminal), every
    chunk is additionally accumulated in ``captured_output`` for later
    inspection, and any non-blank chunk is forwarded to ``signal_emitter``
    (e.g. a pyqtSignal's ``emit``) so a GUI widget can display it live.
    The original streams are always restored on exit, even on exception.
    """

    def __init__(self, signal_emitter):
        # Callable invoked with each non-blank chunk of text (e.g. signal.emit).
        self.signal_emitter = signal_emitter
        self.original_stdout = None
        self.original_stderr = None
        # Accumulates a copy of everything written while the manager is active.
        self.captured_output = io.StringIO()

    def __enter__(self):
        self.original_stdout = sys.stdout
        self.original_stderr = sys.stderr

        class _TeeWriter:
            """File-like proxy: writes through to the original stream, records
            a copy in the shared buffer, and forwards non-blank text to the
            emitter without recursing if the emitter itself prints."""

            def __init__(self, original, captured, emitter):
                self.original = original
                self.captured = captured
                self.emitter = emitter
                self._emitting = False  # guards against re-entrant emits

            def write(self, text):
                # Preserve normal terminal behaviour.
                self.original.write(text)
                self.original.flush()
                # Record a copy (bug fix: the buffer was allocated but the
                # original implementation never wrote to it).
                self.captured.write(text)
                # Only forward substantial (non-whitespace) messages, and only
                # when not already inside an emit, to prevent infinite recursion
                # should the emitter print something itself.
                if text.strip() and not self._emitting:
                    self._emitting = True
                    try:
                        self.emitter(text)
                    finally:
                        self._emitting = False
                # Conform to the TextIO.write contract by returning the count.
                return len(text)

            def flush(self):
                self.original.flush()

        # Replace both streams with tee writers sharing one capture buffer.
        sys.stdout = _TeeWriter(self.original_stdout, self.captured_output, self.signal_emitter)
        sys.stderr = _TeeWriter(self.original_stderr, self.captured_output, self.signal_emitter)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always restore the original streams, even if the body raised.
        sys.stdout = self.original_stdout
        sys.stderr = self.original_stderr
        return False  # never suppress exceptions
class DeploymentWorker(QThread): class DeploymentWorker(QThread):
"""Worker thread for pipeline deployment to avoid blocking UI.""" """Worker thread for pipeline deployment to avoid blocking UI."""
@ -67,6 +118,7 @@ class DeploymentWorker(QThread):
frame_updated = pyqtSignal('PyQt_PyObject') # For live view frame_updated = pyqtSignal('PyQt_PyObject') # For live view
result_updated = pyqtSignal(dict) # For inference results result_updated = pyqtSignal(dict) # For inference results
terminal_output = pyqtSignal(str) # For terminal output in GUI terminal_output = pyqtSignal(str) # For terminal output in GUI
stdout_captured = pyqtSignal(str) # For captured stdout/stderr
def __init__(self, pipeline_data: Dict[str, Any]): def __init__(self, pipeline_data: Dict[str, Any]):
super().__init__() super().__init__()
@ -123,8 +175,10 @@ class DeploymentWorker(QThread):
self.deployment_completed.emit(True, "Pipeline configuration prepared successfully. Dongle system not available for actual deployment.") self.deployment_completed.emit(True, "Pipeline configuration prepared successfully. Dongle system not available for actual deployment.")
return return
# Create InferencePipeline instance # Create InferencePipeline instance with stdout capture
try: try:
# Capture all stdout/stderr during pipeline operations
with StdoutCapture(self.stdout_captured.emit):
pipeline = converter.create_inference_pipeline(config) pipeline = converter.create_inference_pipeline(config)
self.progress_updated.emit(80, "Initializing workflow orchestrator...") self.progress_updated.emit(80, "Initializing workflow orchestrator...")
@ -144,13 +198,12 @@ class DeploymentWorker(QThread):
self.orchestrator.set_result_callback(combined_result_callback) self.orchestrator.set_result_callback(combined_result_callback)
self.orchestrator.start() self.orchestrator.start()
self.progress_updated.emit(100, "Pipeline deployed successfully!") self.progress_updated.emit(100, "Pipeline deployed successfully!")
self.deployment_completed.emit(True, f"Pipeline '{config.pipeline_name}' deployed with {len(config.stage_configs)} stages") self.deployment_completed.emit(True, f"Pipeline '{config.pipeline_name}' deployed with {len(config.stage_configs)} stages")
# Keep running until stop is requested # Keep running until stop is requested with continued stdout capture
while not self.should_stop: while not self.should_stop:
self.msleep(100) # Sleep for 100ms and check again self.msleep(100) # Sleep for 100ms and check again
@ -657,6 +710,7 @@ Stage Configurations:
self.deployment_worker.frame_updated.connect(self.update_live_view) self.deployment_worker.frame_updated.connect(self.update_live_view)
self.deployment_worker.result_updated.connect(self.update_inference_results) self.deployment_worker.result_updated.connect(self.update_inference_results)
self.deployment_worker.terminal_output.connect(self.update_terminal_output) self.deployment_worker.terminal_output.connect(self.update_terminal_output)
self.deployment_worker.stdout_captured.connect(self.update_terminal_output)
self.deployment_worker.start() self.deployment_worker.start()