feat: Add real-time inference results display to deployment UI
- Add result callback mechanism to WorkflowOrchestrator
- Implement result_updated signal in DeploymentWorker
- Create detailed inference results display with timestamps and formatted output
- Support both tuple and dict result formats
- Add auto-scrolling results panel with history management
- Connect pipeline results to Live View tab for real-time monitoring

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
parent e97fd7a025
commit e6c9817a98
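The bullets above describe a callback-to-signal bridge: the orchestrator thread hands each result dict to result_callback, DeploymentWorker registers result_updated.emit as that callback, and a slot on the dialog formats the dict into the Live View tab. The sketch below is a minimal, self-contained illustration of that pattern only; it assumes PyQt5 and uses invented stand-in names (Worker, view) rather than code from this repository.

import sys
import time

from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5.QtWidgets import QApplication, QTextEdit


class Worker(QThread):
    # dict payloads cross the thread boundary through a queued signal,
    # mirroring result_updated = pyqtSignal(dict) in the diff below
    result_updated = pyqtSignal(dict)

    def run(self):
        for _ in range(3):
            # Stand-in for the orchestrator invoking its result callback;
            # the diff wires this up as set_result_callback(self.result_updated.emit)
            self.result_updated.emit({
                'timestamp': time.time(),
                'pipeline_id': 'demo',
                'stage_results': {'classifier': (0.97, 'OK')},
            })
            time.sleep(0.2)


app = QApplication(sys.argv)
view = QTextEdit()  # stands in for the Live View results panel
worker = Worker()
# The connected slot runs on the GUI thread, so it may touch widgets safely
worker.result_updated.connect(lambda d: view.append(str(d['stage_results'])))
worker.start()
view.show()
app.exec_()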
@@ -31,6 +31,7 @@ class WorkflowOrchestrator:
         self.running = False
         self._stop_event = threading.Event()
         self.frame_callback = None
+        self.result_callback = None

     def start(self):
         """
@@ -94,6 +95,12 @@ class WorkflowOrchestrator:
         Sets the callback function for frame updates.
         """
         self.frame_callback = callback

+    def set_result_callback(self, callback):
+        """
+        Sets the callback function for inference results.
+        """
+        self.result_callback = callback
+
     def _create_data_source(self) -> Optional[Any]:
         """
@@ -157,6 +164,10 @@ class WorkflowOrchestrator:
                 self.pipeline.pipeline_name,
                 format=self.output_config.get('format', 'json').lower()
             )
+
+            # Also call the result callback if set
+            if self.result_callback:
+                self.result_callback(result_dict)
         except Exception as e:
             print(f"Error handling result: {e}")

@@ -65,6 +65,7 @@ class DeploymentWorker(QThread):
     deployment_completed = pyqtSignal(bool, str)  # success, message
     error_occurred = pyqtSignal(str)
     frame_updated = pyqtSignal('PyQt_PyObject')  # For live view
+    result_updated = pyqtSignal(dict)  # For inference results

     def __init__(self, pipeline_data: Dict[str, Any]):
         super().__init__()
@@ -131,6 +132,7 @@ class DeploymentWorker(QThread):
             # Create and start the orchestrator
             self.orchestrator = WorkflowOrchestrator(pipeline, config.input_config, config.output_config)
             self.orchestrator.set_frame_callback(self.frame_updated.emit)
+            self.orchestrator.set_result_callback(self.result_updated.emit)
             self.orchestrator.start()

             self.progress_updated.emit(100, "Pipeline deployed successfully!")
@@ -525,6 +527,7 @@ Stage Configurations:
         self.deployment_worker.deployment_completed.connect(self.on_deployment_completed)
         self.deployment_worker.error_occurred.connect(self.on_deployment_error)
         self.deployment_worker.frame_updated.connect(self.update_live_view)
+        self.deployment_worker.result_updated.connect(self.update_inference_results)

         self.deployment_worker.start()

@@ -634,6 +637,54 @@ Stage Configurations:
         except Exception as e:
             print(f"Error updating live view: {e}")

+    def update_inference_results(self, result_dict):
+        """Update the inference results display."""
+        try:
+            import json
+            from datetime import datetime
+
+            # Format the results for display
+            timestamp = datetime.fromtimestamp(result_dict.get('timestamp', 0)).strftime("%H:%M:%S.%f")[:-3]
+            stage_results = result_dict.get('stage_results', {})
+
+            result_text = f"[{timestamp}] Pipeline ID: {result_dict.get('pipeline_id', 'Unknown')}\n"
+
+            # Display results from each stage
+            for stage_id, result in stage_results.items():
+                result_text += f"  {stage_id}:\n"
+                if isinstance(result, tuple) and len(result) == 2:
+                    # Handle tuple results (probability, result_string)
+                    probability, result_string = result
+                    result_text += f"    Result: {result_string}\n"
+                    result_text += f"    Probability: {probability:.3f}\n"
+                elif isinstance(result, dict):
+                    # Handle dict results
+                    for key, value in result.items():
+                        if key == 'probability':
+                            result_text += f"    Probability: {value:.3f}\n"
+                        else:
+                            result_text += f"    {key}: {value}\n"
+                else:
+                    result_text += f"    {result}\n"
+
+            result_text += "-" * 50 + "\n"
+
+            # Append to results display (keep last 100 lines)
+            current_text = self.results_text.toPlainText()
+            lines = current_text.split('\n')
+            if len(lines) > 100:
+                lines = lines[-50:]  # Keep last 50 lines
+                current_text = '\n'.join(lines)
+
+            self.results_text.setPlainText(current_text + result_text)
+
+            # Auto-scroll to bottom
+            scrollbar = self.results_text.verticalScrollBar()
+            scrollbar.setValue(scrollbar.maximum())
+
+        except Exception as e:
+            print(f"Error updating inference results: {e}")
+
     def apply_theme(self):
         """Apply consistent theme to the dialog."""
         self.setStyleSheet("""
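As a reference for the tuple and dict result formats handled by update_inference_results above, here is a hypothetical payload (values invented for illustration) together with roughly the text the method would append to the results panel:

# Hypothetical payload showing the two stage-result shapes handled above;
# the stage names and values are invented for illustration only.
result_dict = {
    'timestamp': 1700000000.123,
    'pipeline_id': 'pipeline-01',
    'stage_results': {
        'detector': (0.942, 'person'),                      # tuple: (probability, result_string)
        'postprocess': {'count': 3, 'probability': 0.87},   # dict: arbitrary key/value pairs
    },
}
# Rendered into the results panel roughly as:
# [HH:MM:SS.mmm] Pipeline ID: pipeline-01
#   detector:
#     Result: person
#     Probability: 0.942
#   postprocess:
#     count: 3
#     Probability: 0.870
# --------------------------------------------------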