cluster4npu/tests/test_result_formatting_fix.py
HuangMason320 ccd7cdd6b9 feat: Reorganize test scripts and improve YOLOv5 postprocessing
- Move test scripts to tests/ directory for better organization
- Add improved YOLOv5 postprocessing with reference implementation
- Update gitignore to exclude *.mflow files and include main.spec
- Add debug capabilities and coordinate scaling improvements
- Enhance multi-series support with proper validation
- Add AGENTS.md documentation and example utilities

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-09-11 19:23:59 +08:00

173 lines
6.9 KiB
Python

#!/usr/bin/env python3
"""
Test script to verify result formatting fixes for string probability values
"""
import sys
import os
# Add UI dialogs to path
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(current_dir)
sys.path.append(os.path.join(parent_dir, 'ui', 'dialogs'))
def test_probability_formatting():
"""Test that probability formatting handles both numeric and string values"""
print("Testing probability formatting fixes...")
# Test cases with different probability value types
test_cases = [
# Numeric probability (should work with :.3f)
{"probability": 0.85, "result_string": "Fire", "expected_error": False},
# String probability that can be converted to float
{"probability": "0.75", "result_string": "Fire", "expected_error": False},
# String probability that cannot be converted to float
{"probability": "High", "result_string": "Fire", "expected_error": False},
# None probability
{"probability": None, "result_string": "No result", "expected_error": False},
# Dict result with numeric probability
{"dict_result": {"probability": 0.65, "class_name": "Fire"}, "expected_error": False},
# Dict result with string probability
{"dict_result": {"probability": "Medium", "class_name": "Fire"}, "expected_error": False},
]
all_passed = True
for i, case in enumerate(test_cases, 1):
print(f"\nTest case {i}:")
try:
if "dict_result" in case:
# Test dict formatting
result = case["dict_result"]
for key, value in result.items():
if key == 'probability':
try:
prob_value = float(value)
formatted = f" Probability: {prob_value:.3f}"
print(f" Dict probability formatted: {formatted}")
except (ValueError, TypeError):
formatted = f" Probability: {value}"
print(f" Dict probability (as string): {formatted}")
else:
formatted = f" {key}: {value}"
print(f" Dict {key}: {formatted}")
else:
# Test tuple formatting
probability = case["probability"]
result_string = case["result_string"]
print(f" Testing probability: {probability} (type: {type(probability)})")
# Test the formatting logic
try:
prob_value = float(probability)
formatted_prob = f" Probability: {prob_value:.3f}"
print(f" Formatted as float: {formatted_prob}")
except (ValueError, TypeError):
formatted_prob = f" Probability: {probability}"
print(f" Formatted as string: {formatted_prob}")
formatted_result = f" Result: {result_string}"
print(f" Formatted result: {formatted_result}")
print(f" ✓ Test case {i} passed")
except Exception as e:
print(f" ✗ Test case {i} failed: {e}")
if not case["expected_error"]:
all_passed = False
return all_passed
def test_terminal_results_mock():
"""Mock test of the terminal results formatting logic"""
print("\n" + "="*50)
print("Testing terminal results formatting logic...")
# Mock result dictionary with various probability types
mock_result_dict = {
'timestamp': 1234567890.123,
'pipeline_id': 'test-pipeline',
'stage_results': {
'stage1': (0.85, "Fire Detected"), # Numeric probability
'stage2': ("High", "Object Found"), # String probability
'stage3': {"probability": 0.65, "result": "Classification"}, # Dict with numeric
'stage4': {"probability": "Medium", "result": "Detection"} # Dict with string
}
}
try:
# Simulate the formatting logic
from datetime import datetime
timestamp = datetime.fromtimestamp(mock_result_dict.get('timestamp', 0)).strftime("%H:%M:%S.%f")[:-3]
pipeline_id = mock_result_dict.get('pipeline_id', 'Unknown')
output_lines = []
output_lines.append(f"\nINFERENCE RESULT [{timestamp}]")
output_lines.append(f" Pipeline ID: {pipeline_id}")
output_lines.append(" " + "="*50)
stage_results = mock_result_dict.get('stage_results', {})
for stage_id, result in stage_results.items():
output_lines.append(f" Stage: {stage_id}")
if isinstance(result, tuple) and len(result) == 2:
probability, result_string = result
output_lines.append(f" Result: {result_string}")
# Test the safe formatting
try:
prob_value = float(probability)
output_lines.append(f" Probability: {prob_value:.3f}")
except (ValueError, TypeError):
output_lines.append(f" Probability: {probability}")
elif isinstance(result, dict):
for key, value in result.items():
if key == 'probability':
try:
prob_value = float(value)
output_lines.append(f" {key.title()}: {prob_value:.3f}")
except (ValueError, TypeError):
output_lines.append(f" {key.title()}: {value}")
else:
output_lines.append(f" {key.title()}: {value}")
output_lines.append("")
formatted_output = "\n".join(output_lines)
print("Formatted terminal output:")
print(formatted_output)
print("✓ Terminal formatting test passed")
return True
except Exception as e:
print(f"✗ Terminal formatting test failed: {e}")
return False
if __name__ == "__main__":
print("Running result formatting fix tests...")
try:
test1_passed = test_probability_formatting()
test2_passed = test_terminal_results_mock()
if test1_passed and test2_passed:
print("\n🎉 All formatting fix tests passed! The format string errors should be resolved.")
else:
print("\n❌ Some tests failed. Please check the output above.")
except Exception as e:
print(f"\n❌ Test suite failed with error: {e}")
import traceback
traceback.print_exc()
sys.exit(1)