#!/usr/bin/env python3
"""
Deployment Terminal Example
===========================

This script demonstrates how to deploy modules on dongles with terminal
result printing. It shows how the enhanced deployment system now prints
detailed inference results to the console.

Usage:
    python deployment_terminal_example.py

Requirements:
    - Dongles connected (or simulation mode)
    - Pipeline configuration (.mflow file or manual config)
"""

import sys
import os
import time
import threading
from datetime import datetime

# Add core functions to path
sys.path.append(os.path.join(os.path.dirname(__file__), 'core', 'functions'))

# Hardware dependencies not needed for simulation
COMPONENTS_AVAILABLE = False


def simulate_terminal_results():
    """Simulate what terminal output looks like during deployment."""
    print("šŸš€ DEPLOYMENT TERMINAL OUTPUT SIMULATION")
    print("=" * 60)
    print()

    # Simulate pipeline start
    print("šŸš€ Workflow orchestrator started successfully.")
    print("šŸ“Š Pipeline: FireDetectionCascade")
    print("šŸŽ„ Input: camera source")
    print("šŸ’¾ Output: file destination")
    print("šŸ”„ Inference pipeline is now processing data...")
    print("šŸ“” Inference results will appear below:")
    print("=" * 60)

    # Simulate some inference results
    sample_results = [
        {
            "timestamp": time.time(),
            "pipeline_id": "fire_cascade_001",
            "stage_results": {
                "object_detection": {
                    "result": "Fire Detected",
                    "probability": 0.85,
                    "confidence": "High"
                },
                "fire_classification": {
                    "result": "Fire Confirmed",
                    "probability": 0.92,
                    "combined_probability": 0.88,
                    "confidence": "Very High"
                }
            },
            "metadata": {
                "total_processing_time": 0.045,
                "dongle_count": 4,
                "stage_count": 2
            }
        },
        {
            "timestamp": time.time() + 1,
            "pipeline_id": "fire_cascade_002",
            "stage_results": {
                "object_detection": {
                    "result": "No Fire",
                    "probability": 0.12,
                    "confidence": "Low"
                }
            },
            "metadata": {
                "total_processing_time": 0.038
            }
        },
        {
            "timestamp": time.time() + 2,
            "pipeline_id": "fire_cascade_003",
            "stage_results": {
                "rgb_analysis": ("Fire Detected", 0.75),
                "edge_analysis": ("Fire Detected", 0.68),
                "thermal_analysis": ("Fire Detected", 0.82),
                "result_fusion": {
                    "result": "Fire Detected",
                    "fused_probability": 0.78,
                    "individual_probs": {
                        "rgb": 0.75,
                        "edge": 0.68,
                        "thermal": 0.82
                    },
                    "confidence": "High"
                }
            },
            "metadata": {
                "total_processing_time": 0.067
            }
        }
    ]

    # Print each result with a delay to simulate real-time output
    for i, result_dict in enumerate(sample_results):
        time.sleep(2)  # Simulate processing delay
        print_terminal_results(result_dict)

    time.sleep(1)
    print("šŸ›‘ Stopping workflow orchestrator...")
    print("šŸ“¹ Data source stopped")
    print("āš™ļø Inference pipeline stopped")
    print("āœ… Workflow orchestrator stopped successfully.")
    print("=" * 60)


def print_terminal_results(result_dict):
    """Print inference results to terminal with detailed formatting."""
    try:
        # Header with timestamp
        timestamp = datetime.fromtimestamp(result_dict.get('timestamp', 0)).strftime("%H:%M:%S.%f")[:-3]
        pipeline_id = result_dict.get('pipeline_id', 'Unknown')

        print(f"\nšŸ”„ INFERENCE RESULT [{timestamp}]")
        print(f"   Pipeline ID: {pipeline_id}")
        print("   " + "=" * 50)

        # Stage results
        stage_results = result_dict.get('stage_results', {})
        if stage_results:
            for stage_id, result in stage_results.items():
                print(f"   šŸ“Š Stage: {stage_id}")

                if isinstance(result, tuple) and len(result) == 2:
                    # Handle tuple results (result_string, probability)
                    result_string, probability = result
                    print(f"      āœ… Result: {result_string}")
                    print(f"      šŸ“ˆ Probability: {probability:.3f}")

                    # Add confidence level
                    if probability > 0.8:
                        confidence = "🟢 Very High"
                    elif probability > 0.6:
                        confidence = "🟔 High"
                    elif probability > 0.4:
                        confidence = "🟠 Medium"
                    else:
                        confidence = "šŸ”“ Low"
                    print(f"      šŸŽÆ Confidence: {confidence}")

                elif isinstance(result, dict):
                    # Handle dict results
                    for key, value in result.items():
                        if key == 'probability':
                            print(f"      šŸ“ˆ {key.title()}: {value:.3f}")
                        elif key == 'result':
                            print(f"      āœ… {key.title()}: {value}")
                        elif key == 'confidence':
                            print(f"      šŸŽÆ {key.title()}: {value}")
                        elif key == 'fused_probability':
                            print(f"      šŸ”€ Fused Probability: {value:.3f}")
                        elif key == 'individual_probs':
                            print("      šŸ“‹ Individual Probabilities:")
                            for prob_key, prob_value in value.items():
                                print(f"         {prob_key}: {prob_value:.3f}")
                        else:
                            print(f"      šŸ“ {key}: {value}")

                else:
                    # Handle other result types
                    print(f"      šŸ“ Raw Result: {result}")

                print()  # Blank line between stages
        else:
            print("   āš ļø No stage results available")

        # Processing time if available
        metadata = result_dict.get('metadata', {})
        if 'total_processing_time' in metadata:
            processing_time = metadata['total_processing_time']
            print(f"   ā±ļø Processing Time: {processing_time:.3f}s")

            # Add FPS calculation
            if processing_time > 0:
                fps = 1.0 / processing_time
                print(f"   šŸš„ Theoretical FPS: {fps:.2f}")

        # Additional metadata
        if metadata:
            interesting_keys = ['dongle_count', 'stage_count', 'queue_sizes', 'error_count']
            for key in interesting_keys:
                if key in metadata:
                    print(f"   šŸ“‹ {key.replace('_', ' ').title()}: {metadata[key]}")

        print("   " + "=" * 50)

    except Exception as e:
        print(f"āŒ Error printing terminal results: {e}")


def main():
    """Main function to demonstrate terminal result printing."""
    print("Terminal Result Printing Demo")
    print("=============================")
    print()
    print("This script demonstrates how inference results are printed to the terminal")
    print("when deploying modules on dongles using the enhanced deployment system.")
    print()

    if COMPONENTS_AVAILABLE:
        print("āœ… All components available - ready for real deployment")
        print("šŸ’” To use with real deployment:")
        print("   1. Run the UI: python UI.py")
        print("   2. Create or load a pipeline")
        print("   3. Use Deploy Pipeline dialog")
        print("   4. Watch terminal for inference results")
    else:
        print("āš ļø Some components missing - running simulation only")

    print()
    print("Running simulation of terminal output...")
    print()

    try:
        simulate_terminal_results()
    except KeyboardInterrupt:
        print("\nā¹ļø Simulation stopped by user")

    print()
    print("āœ… Demo completed!")
    print()
    print("Real deployment usage:")
    print("  uv run python UI.py                                     # Start the full UI application")
    print("  # OR")
    print("  uv run python core/functions/test.py --example single   # Direct pipeline test")


if __name__ == "__main__":
    main()