forked from masonhuang/cluster4npu
Phase 1 — Performance Benchmarking: - PerformanceBenchmarker: sequential vs parallel benchmark with injectable runner - PerformanceHistory: JSON-backed benchmark history with regression support - PerformanceDashboard: real-time FPS/latency display widget - BenchmarkDialog: one-click benchmark with 3-phase progress bar Phase 2 — Device Management: - DeviceManager: NPU dongle scan, assign/unassign, load balance recommendation - DeviceManagementPanel: live device status cards with auto-refresh - BottleneckAlert: dataclass for pipeline bottleneck detection Phase 3 — Advanced Features: - OptimizationEngine: 3 optimization rules (rebalance/adjust_queue/add_devices) - TemplateManager: 3 built-in pipeline templates (YOLOv5, fire detection, dual-model) Phase 4 — Report Export: - ReportExporter: PDF (reportlab, optional) and CSV export - ExportReportDialog: format selection + path picker UI 192 unit tests, all passing. Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
180 lines
6.2 KiB
Python
180 lines
6.2 KiB
Python
"""
|
||
tests/unit/test_export_report_dialog.py — ExportReportDialog 單元測試。
|
||
|
||
在無 PyQt5 環境下,使用 conftest.py 中的 Stub 進行測試。
|
||
"""
|
||
from unittest.mock import MagicMock, patch
|
||
import pytest
|
||
|
||
from core.performance.benchmarker import BenchmarkResult
|
||
from core.performance.report_exporter import DeviceSummary, ReportData
|
||
from ui.dialogs.export_report_dialog import ExportReportDialog
|
||
|
||
|
||
# ---------------------------------------------------------------------------
# Fixtures
# ---------------------------------------------------------------------------
|
||
|
||
def _make_benchmark_result(mode: str = "sequential", fps: float = 14.2) -> BenchmarkResult:
    """Fixture helper: a BenchmarkResult filled with fixed sample metrics.

    Only ``mode`` and ``fps`` vary between tests; every other field is a
    deterministic constant so assertions stay stable.
    """
    fields = {
        "mode": mode,
        "fps": fps,
        "avg_latency_ms": 70.4,
        "p95_latency_ms": 95.0,
        "total_frames": 426,
        "timestamp": 1743856222.0,
        "device_config": {"KL720": 1},
        "id": f"benchmark_20260405_143022_{mode}",
    }
    return BenchmarkResult(**fields)
|
||
|
||
|
||
def _make_dialog(
    benchmarker=None,
    history=None,
    device_manager=None,
    dashboard=None,
) -> ExportReportDialog:
    """Build an ExportReportDialog whose unsupplied dependencies are MagicMocks.

    The default mocks are configured so data collection sees empty results:
    an empty benchmark history, empty history records, and no scanned devices.
    """
    if benchmarker is None:
        mock_benchmarker = MagicMock()
        mock_benchmarker.history = []
        benchmarker = mock_benchmarker
    if history is None:
        mock_history = MagicMock()
        mock_history.get_history.return_value = []
        history = mock_history
    if device_manager is None:
        mock_device_manager = MagicMock()
        mock_device_manager.scan_devices.return_value = []
        device_manager = mock_device_manager
    dashboard = MagicMock() if dashboard is None else dashboard

    return ExportReportDialog(
        parent=None,
        benchmarker=benchmarker,
        history=history,
        device_manager=device_manager,
        dashboard=dashboard,
    )
|
||
|
||
|
||
# ---------------------------------------------------------------------------
# 基本建立
# ---------------------------------------------------------------------------
|
||
|
||
class TestExportReportDialogCreation:
    """Basic construction and default state of ExportReportDialog."""

    def test_dialog_can_be_created(self):
        """The dialog should be constructible with default mock dependencies."""
        assert _make_dialog() is not None

    def test_dialog_is_instance_of_qdialog(self):
        """The dialog should subclass QDialog (or its test stub)."""
        from PyQt5.QtWidgets import QDialog
        assert isinstance(_make_dialog(), QDialog)

    def test_dialog_default_format_is_pdf(self):
        """The format selector should default to PDF."""
        assert _make_dialog()._selected_format == "pdf"
|
||
|
||
|
||
# ---------------------------------------------------------------------------
# _collect_report_data
# ---------------------------------------------------------------------------
|
||
|
||
class TestCollectReportData:
    """Behaviour of ExportReportDialog._collect_report_data()."""

    def test_returns_report_data_instance(self):
        """The collected payload should be a ReportData object."""
        report = _make_dialog()._collect_report_data()
        assert isinstance(report, ReportData)

    def test_uses_history_records(self):
        """Records returned by history.get_history() should land in history_records."""
        records = [_make_benchmark_result("parallel")]
        history = MagicMock()
        history.get_history.return_value = records

        report = _make_dialog(history=history)._collect_report_data()

        history.get_history.assert_called_once()
        assert report.history_records == records

    def test_uses_device_manager_scan(self):
        """Collection should invoke device_manager.scan_devices()."""
        device_manager = MagicMock()
        device_manager.scan_devices.return_value = []

        _make_dialog(device_manager=device_manager)._collect_report_data()

        device_manager.scan_devices.assert_called_once()

    def test_handles_history_failure_gracefully(self):
        """A raising history.get_history() should yield empty history_records."""
        history = MagicMock()
        history.get_history.side_effect = Exception("history error")

        report = _make_dialog(history=history)._collect_report_data()

        assert report.history_records == []

    def test_handles_device_manager_failure_gracefully(self):
        """A raising scan_devices() should yield an empty devices list."""
        device_manager = MagicMock()
        device_manager.scan_devices.side_effect = Exception("device error")

        report = _make_dialog(device_manager=device_manager)._collect_report_data()

        assert report.devices == []

    def test_uses_latest_benchmark_from_history_as_parallel_result(self):
        """With benchmark history present, the newest entry becomes parallel_result."""
        newest = _make_benchmark_result("parallel", fps=45.6)
        benchmarker = MagicMock()
        benchmarker.history = [_make_benchmark_result("sequential"), newest]

        report = _make_dialog(benchmarker=benchmarker)._collect_report_data()

        # The dialog is expected to pick the last (index -1) entry.
        assert report.parallel_result == newest

    def test_parallel_result_is_none_when_history_empty(self):
        """With no benchmark history, parallel_result should be None."""
        benchmarker = MagicMock()
        benchmarker.history = []

        report = _make_dialog(benchmarker=benchmarker)._collect_report_data()

        assert report.parallel_result is None

    def test_chart_image_bytes_is_none(self):
        """chart_image_bytes stays None (screenshot integration deferred)."""
        report = _make_dialog()._collect_report_data()
        assert report.chart_image_bytes is None
|
||
|
||
|
||
# ---------------------------------------------------------------------------
# 格式選擇
# ---------------------------------------------------------------------------
|
||
|
||
class TestFormatSelection:
    """Switching the export format via _set_format()."""

    def test_set_format_to_csv(self):
        """Selecting CSV should update _selected_format."""
        dialog = _make_dialog()
        dialog._set_format("csv")
        assert dialog._selected_format == "csv"

    def test_set_format_to_pdf(self):
        """Selecting PDF after CSV should switch the format back."""
        dialog = _make_dialog()
        for fmt in ("csv", "pdf"):
            dialog._set_format(fmt)
        assert dialog._selected_format == "pdf"