#!/usr/bin/env python3
"""
Deployment Terminal Example
===========================

This script demonstrates how to deploy modules on dongles with terminal result printing.
It shows how the enhanced deployment system now prints detailed inference results to the console.

Usage:
    python deployment_terminal_example.py

Requirements:
- Dongles connected (or simulation mode)
- Pipeline configuration (.mflow file or manual config)
"""

import sys
import os
import time
import threading  # NOTE(review): imported but never used in this file — confirm before removing
from datetime import datetime

# Make the project's core function modules importable when run as a script.
sys.path.append(os.path.join(os.path.dirname(__file__), 'core', 'functions'))

# Always False in this example: hardware dependencies are not needed,
# so the demo runs purely in simulation mode (see main()).
COMPONENTS_AVAILABLE = False
def simulate_terminal_results(delay=2.0):
    """Simulate what terminal output looks like during deployment.

    Prints a mocked-up version of the workflow orchestrator's console
    output, including three representative inference-result payloads
    rendered via print_terminal_results(): a two-stage cascade hit, a
    single-stage miss, and a multi-sensor fusion result.

    Args:
        delay: Seconds to sleep between simulated results (default 2.0
            to mimic real-time processing; pass 0 for instant output).
    """
    print("🚀 DEPLOYMENT TERMINAL OUTPUT SIMULATION")
    print("="*60)
    print()

    # Simulated pipeline start-up banner.
    print("🚀 Workflow orchestrator started successfully.")
    print("📊 Pipeline: FireDetectionCascade")
    print("🎥 Input: camera source")
    print("💾 Output: file destination")
    print("🔄 Inference pipeline is now processing data...")
    print("📡 Inference results will appear below:")
    print("="*60)

    # Representative inference-result payloads in the shape that
    # print_terminal_results() understands.
    sample_results = [
        {
            "timestamp": time.time(),
            "pipeline_id": "fire_cascade_001",
            "stage_results": {
                "object_detection": {
                    "result": "Fire Detected",
                    "probability": 0.85,
                    "confidence": "High"
                },
                "fire_classification": {
                    "result": "Fire Confirmed",
                    "probability": 0.92,
                    "combined_probability": 0.88,
                    "confidence": "Very High"
                }
            },
            "metadata": {
                "total_processing_time": 0.045,
                "dongle_count": 4,
                "stage_count": 2
            }
        },
        {
            "timestamp": time.time() + 1,
            "pipeline_id": "fire_cascade_002",
            "stage_results": {
                "object_detection": {
                    "result": "No Fire",
                    "probability": 0.12,
                    "confidence": "Low"
                }
            },
            "metadata": {
                "total_processing_time": 0.038
            }
        },
        {
            "timestamp": time.time() + 2,
            "pipeline_id": "fire_cascade_003",
            "stage_results": {
                # Tuple-form stage results: (result_string, probability).
                "rgb_analysis": ("Fire Detected", 0.75),
                "edge_analysis": ("Fire Detected", 0.68),
                "thermal_analysis": ("Fire Detected", 0.82),
                "result_fusion": {
                    "result": "Fire Detected",
                    "fused_probability": 0.78,
                    "individual_probs": {
                        "rgb": 0.75,
                        "edge": 0.68,
                        "thermal": 0.82
                    },
                    "confidence": "High"
                }
            },
            "metadata": {
                "total_processing_time": 0.067
            }
        }
    ]

    # Print each result with a delay to simulate real-time arrival.
    # (The original used enumerate() but never used the index.)
    for result_dict in sample_results:
        time.sleep(delay)
        print_terminal_results(result_dict)

    # Shutdown banner; cap the final pause at 1s as the original did.
    time.sleep(min(delay, 1))
    print("🛑 Stopping workflow orchestrator...")
    print("📹 Data source stopped")
    print("⚙️ Inference pipeline stopped")
    print("✅ Workflow orchestrator stopped successfully.")
    print("="*60)
def print_terminal_results(result_dict):
    """Print inference results to terminal with detailed formatting.

    Renders a single inference-result payload as a human-readable,
    emoji-annotated console report. Expected payload keys (all optional):
    'timestamp' (epoch seconds), 'pipeline_id' (str), 'stage_results'
    (dict of stage id -> tuple/dict/other), and 'metadata' (dict; may
    include 'total_processing_time' and counters).

    Never raises: any formatting error is caught and reported on stdout
    so a malformed payload cannot kill the deployment loop.
    """
    try:
        # Header with millisecond-precision local timestamp.
        timestamp = datetime.fromtimestamp(result_dict.get('timestamp', 0)).strftime("%H:%M:%S.%f")[:-3]
        pipeline_id = result_dict.get('pipeline_id', 'Unknown')

        print(f"\n🔥 INFERENCE RESULT [{timestamp}]")
        print(f" Pipeline ID: {pipeline_id}")
        print(" " + "="*50)

        # Per-stage results; each stage may be a (result, prob) tuple,
        # a dict of named fields, or anything else (printed raw).
        stage_results = result_dict.get('stage_results', {})
        if stage_results:
            for stage_id, result in stage_results.items():
                print(f" 📊 Stage: {stage_id}")

                if isinstance(result, tuple) and len(result) == 2:
                    _print_tuple_stage(result)
                elif isinstance(result, dict):
                    _print_dict_stage(result)
                else:
                    # Unknown result type: show it verbatim.
                    print(f" 📝 Raw Result: {result}")

                print()  # Blank line between stages
        else:
            print(" ⚠️ No stage results available")

        _print_metadata(result_dict.get('metadata', {}))

        print(" " + "="*50)

    except Exception as e:
        # Best-effort printer: swallow formatting errors so a bad
        # payload never interrupts the caller's processing loop.
        print(f"❌ Error printing terminal results: {e}")


def _print_tuple_stage(result):
    """Print a (result_string, probability) stage result with a derived confidence label."""
    result_string, probability = result
    print(f" ✅ Result: {result_string}")
    print(f" 📈 Probability: {probability:.3f}")

    # Map the probability onto a qualitative confidence level.
    if probability > 0.8:
        confidence = "🟢 Very High"
    elif probability > 0.6:
        confidence = "🟡 High"
    elif probability > 0.4:
        confidence = "🟠 Medium"
    else:
        confidence = "🔴 Low"
    print(f" 🎯 Confidence: {confidence}")


def _print_dict_stage(result):
    """Print a dict-form stage result, giving known keys dedicated formatting."""
    for key, value in result.items():
        if key == 'probability':
            print(f" 📈 {key.title()}: {value:.3f}")
        elif key == 'result':
            print(f" ✅ {key.title()}: {value}")
        elif key == 'confidence':
            print(f" 🎯 {key.title()}: {value}")
        elif key == 'fused_probability':
            print(f" 🔀 Fused Probability: {value:.3f}")
        elif key == 'individual_probs':
            # Per-sensor probabilities feeding the fused value.
            print(" 📋 Individual Probabilities:")
            for prob_key, prob_value in value.items():
                print(f" {prob_key}: {prob_value:.3f}")
        else:
            # Any other field is shown generically.
            print(f" 📝 {key}: {value}")


def _print_metadata(metadata):
    """Print processing time, derived FPS, and selected metadata counters."""
    if 'total_processing_time' in metadata:
        processing_time = metadata['total_processing_time']
        print(f" ⏱️ Processing Time: {processing_time:.3f}s")

        # Theoretical throughput if every frame took this long.
        if processing_time > 0:
            fps = 1.0 / processing_time
            print(f" 🚄 Theoretical FPS: {fps:.2f}")

    if metadata:
        interesting_keys = ['dongle_count', 'stage_count', 'queue_sizes', 'error_count']
        for key in interesting_keys:
            if key in metadata:
                print(f" 📋 {key.replace('_', ' ').title()}: {metadata[key]}")
def main():
    """Demo entry point: describe the feature, then run the simulated terminal output."""
    intro_lines = [
        "Terminal Result Printing Demo",
        "============================",
        "",
        "This script demonstrates how inference results are printed to the terminal",
        "when deploying modules on dongles using the enhanced deployment system.",
        "",
    ]
    for line in intro_lines:
        print(line)

    # Report whether real deployment is possible or only simulation.
    if not COMPONENTS_AVAILABLE:
        print("⚠️ Some components missing - running simulation only")
    else:
        print("✅ All components available - ready for real deployment")
        print("💡 To use with real deployment:")
        print(" 1. Run the UI: python UI.py")
        print(" 2. Create or load a pipeline")
        print(" 3. Use Deploy Pipeline dialog")
        print(" 4. Watch terminal for inference results")

    print()
    print("Running simulation of terminal output...")
    print()

    # Ctrl-C cleanly ends the simulation instead of dumping a traceback.
    try:
        simulate_terminal_results()
    except KeyboardInterrupt:
        print("\n⏹️ Simulation stopped by user")

    outro_lines = [
        "",
        "✅ Demo completed!",
        "",
        "Real deployment usage:",
        " uv run python UI.py # Start the full UI application",
        " # OR",
        " uv run python core/functions/test.py --example single # Direct pipeline test",
    ]
    for line in outro_lines:
        print(line)
# Script entry point: run the demo only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()