"""
|
|
Test Multi-Series Dongle Integration
|
|
|
|
This test script validates the complete multi-series dongle integration
|
|
including the enhanced model node, converter, and pipeline components.
|
|
|
|
Usage:
|
|
python test_multi_series_integration.py
|
|
|
|
This will create a test assets folder structure and validate all components.
|
|
"""
|
|
|
|
import os
import sys
import tempfile
from pathlib import Path

# Add the project root to sys.path so the local core/ and utils/ packages resolve.
project_root = Path(__file__).parent
sys.path.insert(0, str(project_root))


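# Folder layout exercised by the tests below, as created by
# MultiSeriesSetup.create_folder_structure (file names are illustrative):
#
#   Assets/
#   ├── Firmware/
#   │   └── KL<series>/   # per-series scpu/ncpu firmware binaries
#   └── Models/
#       └── KL<series>/   # per-series .nef model files

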
def test_exact_model_node():
    """Test the enhanced ExactModelNode functionality."""
    print("🧪 Testing ExactModelNode...")

    try:
        from core.nodes.exact_nodes import ExactModelNode, NODEGRAPH_AVAILABLE

        if not NODEGRAPH_AVAILABLE:
            print("⚠️ NodeGraphQt not available, testing limited functionality")
            # Test basic instantiation only
            node = ExactModelNode()
            print("✅ ExactModelNode basic instantiation works")
            return True

        # Create node and test properties
        node = ExactModelNode()

        # Test single-series mode (default)
        assert node.get_property('multi_series_mode') is False
        assert node.get_property('dongle_series') == '520'
        assert node.get_property('max_queue_size') == 100

        # Test property display logic
        display_props = node.get_display_properties()
        expected_single_series = [
            'multi_series_mode', 'model_path', 'scpu_fw_path', 'ncpu_fw_path',
            'dongle_series', 'num_dongles', 'port_id', 'upload_fw'
        ]
        assert display_props == expected_single_series

        # Test multi-series mode
        node.set_property('multi_series_mode', True)
        display_props = node.get_display_properties()
        expected_multi_series = [
            'multi_series_mode', 'assets_folder', 'enabled_series',
            'max_queue_size', 'result_buffer_size', 'batch_size',
            'enable_preprocessing', 'enable_postprocessing'
        ]
        assert display_props == expected_multi_series

        # Test inference config generation
        config = node.get_inference_config()
        assert config['multi_series_mode'] is True
        assert 'enabled_series' in config

        # Test hardware requirements
        hw_req = node.get_hardware_requirements()
        assert hw_req['multi_series_mode'] is True

        print("✅ ExactModelNode functionality tests passed")
        return True

    except Exception as e:
        print(f"❌ ExactModelNode test failed: {e}")
        import traceback
        traceback.print_exc()
        return False


def test_multi_series_setup_utility():
    """Test the multi-series setup utility."""
    print("🧪 Testing multi-series setup utility...")

    try:
        from utils.multi_series_setup import MultiSeriesSetup

        # Create a temporary directory for testing
        with tempfile.TemporaryDirectory() as temp_dir:
            # Test folder structure creation
            success = MultiSeriesSetup.create_folder_structure(temp_dir, ['520', '720'])
            assert success, "Failed to create folder structure"

            assets_path = os.path.join(temp_dir, 'Assets')
            assert os.path.exists(assets_path), "Assets folder not created"

            # Check top-level structure
            firmware_path = os.path.join(assets_path, 'Firmware')
            models_path = os.path.join(assets_path, 'Models')
            assert os.path.exists(firmware_path), "Firmware folder not created"
            assert os.path.exists(models_path), "Models folder not created"

            # Check per-series folders
            for series in ['520', '720']:
                series_fw = os.path.join(firmware_path, f'KL{series}')
                series_model = os.path.join(models_path, f'KL{series}')
                assert os.path.exists(series_fw), f"KL{series} firmware folder not created"
                assert os.path.exists(series_model), f"KL{series} models folder not created"

            # Test validation (should fail initially - no files yet)
            is_valid, issues = MultiSeriesSetup.validate_folder_structure(assets_path)
            assert not is_valid, "Validation should fail with empty folders"
            assert len(issues) > 0, "Should have validation issues"

            # Test series listing
            series_info = MultiSeriesSetup.list_available_series(assets_path)
            assert len(series_info) == 0, "Should have no valid series initially"

        print("✅ Multi-series setup utility tests passed")
        return True

    except Exception as e:
        print(f"❌ Multi-series setup utility test failed: {e}")
        import traceback
        traceback.print_exc()
        return False


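# Note: the validation failure exercised above is the expected behavior for a
# freshly created, empty tree. To exercise the passing path manually, drop
# real (or placeholder) files into the per-series folders, e.g.
# Assets/Firmware/KL520/fw_scpu.bin and Assets/Models/KL520/model.nef
# (these file names are illustrative, not required by the utility).

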
def test_multi_series_converter():
    """Test the multi-series MFlow converter."""
    print("🧪 Testing multi-series converter...")

    try:
        from core.functions.multi_series_mflow_converter import MultiSeriesMFlowConverter

        # Create test mflow data
        test_mflow_data = {
            "project_name": "Test Multi-Series Pipeline",
            "description": "Test pipeline with multi-series configuration",
            "nodes": [
                {
                    "id": "input_1",
                    "name": "Input Node",
                    "type": "input_node",
                    "custom": {
                        "source_type": "Camera",
                        "resolution": "640x480"
                    }
                },
                {
                    "id": "model_1",
                    "name": "Multi-Series Model",
                    "type": "model_node",
                    "custom": {
                        "multi_series_mode": True,
                        "assets_folder": "/test/assets",
                        "enabled_series": ["520", "720"],
                        "max_queue_size": 100,
                        "result_buffer_size": 1000
                    }
                },
                {
                    "id": "output_1",
                    "name": "Output Node",
                    "type": "output_node",
                    "custom": {
                        "output_type": "Display"
                    }
                }
            ],
            "connections": [
                {"input_node": "input_1", "output_node": "model_1"},
                {"input_node": "model_1", "output_node": "output_1"}
            ]
        }

        # Test converter instantiation
        converter = MultiSeriesMFlowConverter()

        # Test basic conversion (validation will fail due to missing files,
        # but the mflow data should still parse)
        try:
            config = converter._convert_mflow_to_enhanced_config(test_mflow_data)

            # Check basic structure
            assert config.pipeline_name == "Test Multi-Series Pipeline"
            assert len(config.stage_configs) > 0
            assert config.has_multi_series is True
            assert config.multi_series_count == 1

            print("✅ Multi-series converter basic parsing works")

        except ValueError as e:
            # Expected to fail validation because the assets folder is missing
            if "not found" in str(e):
                print("✅ Multi-series converter correctly validates missing assets")
            else:
                raise

        print("✅ Multi-series converter tests passed")
        return True

    except Exception as e:
        print(f"❌ Multi-series converter test failed: {e}")
        import traceback
        traceback.print_exc()
        return False


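# Note: test_multi_series_converter above calls the converter's private
# _convert_mflow_to_enhanced_config helper directly; this lets the test assert
# on the parsed pipeline config without needing real asset files on disk.

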
def test_pipeline_components():
    """Test multi-series pipeline components."""
    print("🧪 Testing pipeline components...")

    try:
        from core.functions.multi_series_pipeline import (
            MultiSeriesStageConfig,
            MultiSeriesPipelineStage,
            create_multi_series_config_from_model_node
        )

        # Test MultiSeriesStageConfig creation
        config = MultiSeriesStageConfig(
            stage_id="test_stage",
            multi_series_mode=True,
            firmware_paths={"KL520": {"scpu": "test.bin", "ncpu": "test.bin"}},
            model_paths={"KL520": "test.nef"},
            max_queue_size=100
        )

        assert config.stage_id == "test_stage"
        assert config.multi_series_mode is True
        assert config.max_queue_size == 100

        # Test config creation from a model node dict
        model_config = {
            'multi_series_mode': True,
            'node_name': 'test_node',
            'firmware_paths': {"KL520": {"scpu": "test.bin", "ncpu": "test.bin"}},
            'model_paths': {"KL520": "test.nef"},
            'max_queue_size': 50
        }

        stage_config = create_multi_series_config_from_model_node(model_config)
        assert stage_config.multi_series_mode is True
        assert stage_config.stage_id == 'test_node'

        print("✅ Pipeline components tests passed")
        return True

    except Exception as e:
        print(f"❌ Pipeline components test failed: {e}")
        import traceback
        traceback.print_exc()
        return False


def create_test_assets_structure():
    """Create a complete test assets structure for manual testing."""
    print("🏗️ Creating test assets structure...")

    try:
        from utils.multi_series_setup import MultiSeriesSetup

        # The setup utility creates its structure under <project_root>/Assets,
        # so clear any previous run before recreating it.
        assets_full_path = os.path.join(project_root, "Assets")
        if os.path.exists(assets_full_path):
            import shutil
            shutil.rmtree(assets_full_path)

        # Create structure
        success = MultiSeriesSetup.create_folder_structure(
            project_root,
            series_list=['520', '720', '730']
        )

        if success:
            print(f"✅ Test assets structure created at: {assets_full_path}")
            print("\n📋 To complete the setup:")
            print("1. Copy your firmware files to Assets/Firmware/KLxxx/ folders")
            print("2. Copy your model files to Assets/Models/KLxxx/ folders")
            print("3. Run validation: python -m utils.multi_series_setup validate --path Assets")
            print("4. Configure your model node to use the Assets folder")

            return assets_full_path
        else:
            print("❌ Failed to create test assets structure")
            return None

    except Exception as e:
        print(f"❌ Error creating test assets structure: {e}")
        return None


def run_all_tests():
    """Run all integration tests."""
    print("🚀 Starting Multi-Series Dongle Integration Tests\n")

    tests = [
        ("ExactModelNode", test_exact_model_node),
        ("Setup Utility", test_multi_series_setup_utility),
        ("Converter", test_multi_series_converter),
        ("Pipeline Components", test_pipeline_components)
    ]

    results = {}

    for test_name, test_func in tests:
        print(f"\n{'='*50}")
        print(f"Testing: {test_name}")
        print(f"{'='*50}")

        try:
            results[test_name] = test_func()
        except Exception as e:
            print(f"❌ {test_name} test crashed: {e}")
            results[test_name] = False

        print()

    # Print summary
    print(f"\n{'='*50}")
    print("📊 TEST SUMMARY")
    print(f"{'='*50}")

    passed = sum(1 for r in results.values() if r)
    total = len(results)

    for test_name, result in results.items():
        status = "✅ PASS" if result else "❌ FAIL"
        print(f"{test_name:<20} {status}")

    print(f"\nResults: {passed}/{total} tests passed")

    if passed == total:
        print("🎉 All tests passed! Multi-series integration is ready.")

        # Offer to create the test assets structure
        response = input("\n❓ Create test assets structure for manual testing? (y/n): ")
        if response.lower() in ['y', 'yes']:
            create_test_assets_structure()

        return True
    else:
        print("⚠️ Some tests failed. Check the output above for details.")
        return False


if __name__ == "__main__":
|
|
success = run_all_tests()
|
|
sys.exit(0 if success else 1) |