feat: Add comprehensive terminal result printing for dongle deployments
- Enhanced deployment workflow to print detailed inference results to terminal in real-time
- Added rich formatting with emojis, confidence indicators, and performance metrics
- Combined GUI and terminal callbacks for dual output during module deployment
- Improved workflow orchestrator startup/shutdown feedback
- Added demonstration script showing terminal output examples
- Supports multi-stage pipelines with individual stage result display
- Includes processing time, FPS calculations, and metadata visualization

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
parent
e6c9817a98
commit
0e3295a780
@ -69,7 +69,13 @@ class WorkflowOrchestrator:
|
||||
# Start the data source
|
||||
self.data_source.start()
|
||||
|
||||
print("Workflow orchestrator started successfully.")
|
||||
print("🚀 Workflow orchestrator started successfully.")
|
||||
print(f"📊 Pipeline: {self.pipeline.pipeline_name}")
|
||||
print(f"🎥 Input: {self.input_config.get('source_type', 'Unknown')} source")
|
||||
print(f"💾 Output: {self.output_config.get('output_type', 'Unknown')} destination")
|
||||
print("🔄 Inference pipeline is now processing data...")
|
||||
print("📡 Inference results will appear below:")
|
||||
print("="*60)
|
||||
|
||||
def stop(self):
|
||||
"""
|
||||
@ -78,17 +84,20 @@ class WorkflowOrchestrator:
|
||||
if not self.running:
|
||||
return
|
||||
|
||||
print("Stopping workflow orchestrator...")
|
||||
print("🛑 Stopping workflow orchestrator...")
|
||||
self.running = False
|
||||
self._stop_event.set()
|
||||
|
||||
if self.data_source:
|
||||
self.data_source.stop()
|
||||
print("📹 Data source stopped")
|
||||
|
||||
if self.pipeline:
|
||||
self.pipeline.stop()
|
||||
print("⚙️ Inference pipeline stopped")
|
||||
|
||||
print("Workflow orchestrator stopped.")
|
||||
print("✅ Workflow orchestrator stopped successfully.")
|
||||
print("="*60)
|
||||
|
||||
def set_frame_callback(self, callback):
|
||||
"""
|
||||
@ -169,7 +178,7 @@ class WorkflowOrchestrator:
|
||||
if self.result_callback:
|
||||
self.result_callback(result_dict)
|
||||
except Exception as e:
|
||||
print(f"Error handling result: {e}")
|
||||
print(f"❌ Error handling result: {e}")
|
||||
|
||||
def _parse_resolution(self, resolution_str: Optional[str]) -> Optional[tuple[int, int]]:
|
||||
"""
|
||||
|
||||
237
cluster4npu_ui/deployment_terminal_example.py
Normal file
237
cluster4npu_ui/deployment_terminal_example.py
Normal file
@ -0,0 +1,237 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Deployment Terminal Example
|
||||
==========================
|
||||
|
||||
This script demonstrates how to deploy modules on dongles with terminal result printing.
|
||||
It shows how the enhanced deployment system now prints detailed inference results to the console.
|
||||
|
||||
Usage:
|
||||
python deployment_terminal_example.py
|
||||
|
||||
Requirements:
|
||||
- Dongles connected (or simulation mode)
|
||||
- Pipeline configuration (.mflow file or manual config)
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
import time
|
||||
import threading
|
||||
from datetime import datetime
|
||||
|
||||
# Add core functions to path
|
||||
sys.path.append(os.path.join(os.path.dirname(__file__), 'core', 'functions'))
|
||||
|
||||
# Hardware dependencies not needed for simulation
|
||||
COMPONENTS_AVAILABLE = False
|
||||
|
||||
def simulate_terminal_results():
    """Simulate what terminal output looks like during deployment.

    Replays the banner, per-result, and shutdown messages that the real
    deployment prints, using three hand-crafted sample payloads:
    a two-stage cascade hit, a single-stage miss, and a three-branch
    fusion result. Short sleeps mimic real-time processing, so this
    function takes several seconds to run.
    """
    print("🚀 DEPLOYMENT TERMINAL OUTPUT SIMULATION")
    print("="*60)
    print()

    # Simulate pipeline start
    print("🚀 Workflow orchestrator started successfully.")
    print("📊 Pipeline: FireDetectionCascade")
    print("🎥 Input: camera source")
    print("💾 Output: file destination")
    print("🔄 Inference pipeline is now processing data...")
    print("📡 Inference results will appear below:")
    print("="*60)

    # Simulate some inference results.
    # Stage results are intentionally heterogeneous — dicts and
    # (result, probability) tuples — to exercise every formatting branch
    # of print_terminal_results().
    sample_results = [
        {
            "timestamp": time.time(),
            "pipeline_id": "fire_cascade_001",
            "stage_results": {
                "object_detection": {
                    "result": "Fire Detected",
                    "probability": 0.85,
                    "confidence": "High"
                },
                "fire_classification": {
                    "result": "Fire Confirmed",
                    "probability": 0.92,
                    "combined_probability": 0.88,
                    "confidence": "Very High"
                }
            },
            "metadata": {
                "total_processing_time": 0.045,
                "dongle_count": 4,
                "stage_count": 2
            }
        },
        {
            "timestamp": time.time() + 1,
            "pipeline_id": "fire_cascade_002",
            "stage_results": {
                "object_detection": {
                    "result": "No Fire",
                    "probability": 0.12,
                    "confidence": "Low"
                }
            },
            "metadata": {
                "total_processing_time": 0.038
            }
        },
        {
            "timestamp": time.time() + 2,
            "pipeline_id": "fire_cascade_003",
            "stage_results": {
                "rgb_analysis": ("Fire Detected", 0.75),
                "edge_analysis": ("Fire Detected", 0.68),
                "thermal_analysis": ("Fire Detected", 0.82),
                "result_fusion": {
                    "result": "Fire Detected",
                    "fused_probability": 0.78,
                    "individual_probs": {
                        "rgb": 0.75,
                        "edge": 0.68,
                        "thermal": 0.82
                    },
                    "confidence": "High"
                }
            },
            "metadata": {
                "total_processing_time": 0.067
            }
        }
    ]

    # Print each result with delay to simulate real-time.
    # (fixed: dropped the unused enumerate() index)
    for result_dict in sample_results:
        time.sleep(2)  # Simulate processing delay
        print_terminal_results(result_dict)

    time.sleep(1)
    print("🛑 Stopping workflow orchestrator...")
    print("📹 Data source stopped")
    print("⚙️ Inference pipeline stopped")
    print("✅ Workflow orchestrator stopped successfully.")
    print("="*60)
|
||||
|
||||
def print_terminal_results(result_dict):
    """Print a single inference result to the terminal with rich formatting.

    Args:
        result_dict: Result payload with optional keys:
            'timestamp' (epoch seconds), 'pipeline_id' (str),
            'stage_results' (mapping of stage id -> either a
            (result_string, probability) tuple or a dict of fields), and
            'metadata' (dict; 'total_processing_time' plus a few known
            counters get dedicated formatting).

    Never raises: any formatting error is caught and reported to stdout so
    a malformed result cannot kill the caller's processing loop.
    """
    try:
        # Header with timestamp (trimmed from microseconds to milliseconds)
        timestamp = datetime.fromtimestamp(result_dict.get('timestamp', 0)).strftime("%H:%M:%S.%f")[:-3]
        pipeline_id = result_dict.get('pipeline_id', 'Unknown')

        print(f"\n🔥 INFERENCE RESULT [{timestamp}]")
        print(f" Pipeline ID: {pipeline_id}")
        print(" " + "="*50)

        # Stage results
        stage_results = result_dict.get('stage_results', {})
        if stage_results:
            for stage_id, result in stage_results.items():
                print(f" 📊 Stage: {stage_id}")

                if isinstance(result, tuple) and len(result) == 2:
                    # Handle tuple results (result_string, probability)
                    result_string, probability = result
                    print(f" ✅ Result: {result_string}")
                    print(f" 📈 Probability: {probability:.3f}")

                    # Map probability onto a coarse confidence bucket
                    if probability > 0.8:
                        confidence = "🟢 Very High"
                    elif probability > 0.6:
                        confidence = "🟡 High"
                    elif probability > 0.4:
                        confidence = "🟠 Medium"
                    else:
                        confidence = "🔴 Low"
                    print(f" 🎯 Confidence: {confidence}")

                elif isinstance(result, dict):
                    # Handle dict results: known keys get dedicated formatting
                    for key, value in result.items():
                        if key == 'probability':
                            print(f" 📈 {key.title()}: {value:.3f}")
                        elif key == 'result':
                            print(f" ✅ {key.title()}: {value}")
                        elif key == 'confidence':
                            print(f" 🎯 {key.title()}: {value}")
                        elif key == 'fused_probability':
                            print(f" 🔀 Fused Probability: {value:.3f}")
                        elif key == 'combined_probability':
                            # fixed: format to 3 decimals like the other
                            # probability keys instead of falling through
                            # to the raw-repr branch
                            print(f" 🔀 Combined Probability: {value:.3f}")
                        elif key == 'individual_probs':
                            # fixed: no placeholders, so no f-string needed
                            print(" 📋 Individual Probabilities:")
                            for prob_key, prob_value in value.items():
                                print(f" {prob_key}: {prob_value:.3f}")
                        else:
                            print(f" 📝 {key}: {value}")
                else:
                    # Handle other result types
                    print(f" 📝 Raw Result: {result}")

                print()  # Blank line between stages
        else:
            print(" ⚠️ No stage results available")

        # Processing time if available
        metadata = result_dict.get('metadata', {})
        if 'total_processing_time' in metadata:
            processing_time = metadata['total_processing_time']
            print(f" ⏱️ Processing Time: {processing_time:.3f}s")

            # Theoretical FPS (guarded against divide-by-zero)
            if processing_time > 0:
                fps = 1.0 / processing_time
                print(f" 🚄 Theoretical FPS: {fps:.2f}")

        # Additional metadata counters worth surfacing
        if metadata:
            interesting_keys = ['dongle_count', 'stage_count', 'queue_sizes', 'error_count']
            for key in interesting_keys:
                if key in metadata:
                    print(f" 📋 {key.replace('_', ' ').title()}: {metadata[key]}")

        print(" " + "="*50)

    except Exception as e:
        print(f"❌ Error printing terminal results: {e}")
|
||||
|
||||
def main():
    """Entry point: explain the demo, then run the simulated terminal output."""
    for line in (
        "Terminal Result Printing Demo",
        "============================",
        "",
        "This script demonstrates how inference results are printed to the terminal",
        "when deploying modules on dongles using the enhanced deployment system.",
        "",
    ):
        print(line)

    if not COMPONENTS_AVAILABLE:
        print("⚠️ Some components missing - running simulation only")
    else:
        for line in (
            "✅ All components available - ready for real deployment",
            "💡 To use with real deployment:",
            " 1. Run the UI: python UI.py",
            " 2. Create or load a pipeline",
            " 3. Use Deploy Pipeline dialog",
            " 4. Watch terminal for inference results",
        ):
            print(line)

    print()
    print("Running simulation of terminal output...")
    print()

    # Ctrl-C during the simulation is treated as a normal early exit.
    try:
        simulate_terminal_results()
    except KeyboardInterrupt:
        print("\n⏹️ Simulation stopped by user")

    for line in (
        "",
        "✅ Demo completed!",
        "",
        "Real deployment usage:",
        " uv run python UI.py # Start the full UI application",
        " # OR",
        " uv run python core/functions/test.py --example single # Direct pipeline test",
    ):
        print(line)
|
||||
@ -132,7 +132,15 @@ class DeploymentWorker(QThread):
|
||||
# Create and start the orchestrator
|
||||
self.orchestrator = WorkflowOrchestrator(pipeline, config.input_config, config.output_config)
|
||||
self.orchestrator.set_frame_callback(self.frame_updated.emit)
|
||||
self.orchestrator.set_result_callback(self.result_updated.emit)
|
||||
|
||||
# Set up both GUI and terminal result callbacks
|
||||
def combined_result_callback(result_dict):
|
||||
# Print to terminal
|
||||
self._print_terminal_results(result_dict)
|
||||
# Emit for GUI
|
||||
self.result_updated.emit(result_dict)
|
||||
|
||||
self.orchestrator.set_result_callback(combined_result_callback)
|
||||
self.orchestrator.start()
|
||||
|
||||
self.progress_updated.emit(100, "Pipeline deployed successfully!")
|
||||
@ -154,6 +162,90 @@ class DeploymentWorker(QThread):
|
||||
if self.orchestrator:
|
||||
self.orchestrator.stop()
|
||||
|
||||
def _print_terminal_results(self, result_dict):
|
||||
"""Print inference results to terminal with detailed formatting."""
|
||||
try:
|
||||
from datetime import datetime
|
||||
|
||||
# Header with timestamp
|
||||
timestamp = datetime.fromtimestamp(result_dict.get('timestamp', 0)).strftime("%H:%M:%S.%f")[:-3]
|
||||
pipeline_id = result_dict.get('pipeline_id', 'Unknown')
|
||||
|
||||
print(f"\n🔥 INFERENCE RESULT [{timestamp}]")
|
||||
print(f" Pipeline ID: {pipeline_id}")
|
||||
print(" " + "="*50)
|
||||
|
||||
# Stage results
|
||||
stage_results = result_dict.get('stage_results', {})
|
||||
if stage_results:
|
||||
for stage_id, result in stage_results.items():
|
||||
print(f" 📊 Stage: {stage_id}")
|
||||
|
||||
if isinstance(result, tuple) and len(result) == 2:
|
||||
# Handle tuple results (result_string, probability)
|
||||
result_string, probability = result
|
||||
print(f" ✅ Result: {result_string}")
|
||||
print(f" 📈 Probability: {probability:.3f}")
|
||||
|
||||
# Add confidence level
|
||||
if probability > 0.8:
|
||||
confidence = "🟢 Very High"
|
||||
elif probability > 0.6:
|
||||
confidence = "🟡 High"
|
||||
elif probability > 0.4:
|
||||
confidence = "🟠 Medium"
|
||||
else:
|
||||
confidence = "🔴 Low"
|
||||
print(f" 🎯 Confidence: {confidence}")
|
||||
|
||||
elif isinstance(result, dict):
|
||||
# Handle dict results
|
||||
for key, value in result.items():
|
||||
if key == 'probability':
|
||||
print(f" 📈 {key.title()}: {value:.3f}")
|
||||
elif key == 'result':
|
||||
print(f" ✅ {key.title()}: {value}")
|
||||
elif key == 'confidence':
|
||||
print(f" 🎯 {key.title()}: {value}")
|
||||
elif key == 'fused_probability':
|
||||
print(f" 🔀 Fused Probability: {value:.3f}")
|
||||
elif key == 'individual_probs':
|
||||
print(f" 📋 Individual Probabilities:")
|
||||
for prob_key, prob_value in value.items():
|
||||
print(f" {prob_key}: {prob_value:.3f}")
|
||||
else:
|
||||
print(f" 📝 {key}: {value}")
|
||||
else:
|
||||
# Handle other result types
|
||||
print(f" 📝 Raw Result: {result}")
|
||||
|
||||
print() # Blank line between stages
|
||||
else:
|
||||
print(" ⚠️ No stage results available")
|
||||
|
||||
# Processing time if available
|
||||
metadata = result_dict.get('metadata', {})
|
||||
if 'total_processing_time' in metadata:
|
||||
processing_time = metadata['total_processing_time']
|
||||
print(f" ⏱️ Processing Time: {processing_time:.3f}s")
|
||||
|
||||
# Add FPS calculation
|
||||
if processing_time > 0:
|
||||
fps = 1.0 / processing_time
|
||||
print(f" 🚄 Theoretical FPS: {fps:.2f}")
|
||||
|
||||
# Additional metadata
|
||||
if metadata:
|
||||
interesting_keys = ['dongle_count', 'stage_count', 'queue_sizes', 'error_count']
|
||||
for key in interesting_keys:
|
||||
if key in metadata:
|
||||
print(f" 📋 {key.replace('_', ' ').title()}: {metadata[key]}")
|
||||
|
||||
print(" " + "="*50)
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Error printing terminal results: {e}")
|
||||
|
||||
|
||||
class DeploymentDialog(QDialog):
|
||||
"""Main deployment dialog with comprehensive deployment management."""
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user