feat: implement Phase 1-4 performance visualization and device management

Phase 1 — Performance Benchmarking:
- PerformanceBenchmarker: sequential vs parallel benchmark with injectable runner
- PerformanceHistory: JSON-backed benchmark history with regression support
- PerformanceDashboard: real-time FPS/latency display widget
- BenchmarkDialog: one-click benchmark with 3-phase progress bar

Phase 2 — Device Management:
- DeviceManager: NPU dongle scan, assign/unassign, load balance recommendation
- DeviceManagementPanel: live device status cards with auto-refresh
- BottleneckAlert: dataclass for pipeline bottleneck detection

Phase 3 — Advanced Features:
- OptimizationEngine: 3 optimization rules (rebalance/adjust_queue/add_devices)
- TemplateManager: 3 built-in pipeline templates (YOLOv5, fire detection, dual-model)

Phase 4 — Report Export:
- ReportExporter: PDF (reportlab, optional) and CSV export
- ExportReportDialog: format selection + path picker UI

192 unit tests, all passing.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
abin 2026-04-06 19:32:05 +08:00
parent 5aa374625f
commit 55040733fe
28 changed files with 4917 additions and 0 deletions

core/device/__init__.py

@@ -0,0 +1 @@
"""core.device — device management subpackage."""

core/device/bottleneck.py

@@ -0,0 +1,32 @@
"""
core/device/bottleneck.py
BottleneckAlert dataclass describes a detected pipeline bottleneck.
Integration with InferencePipeline is deferred to a later phase.
This module only defines the data structure.
"""
from dataclasses import dataclass
@dataclass
class BottleneckAlert:
"""Describes a detected pipeline bottleneck in a single Stage.
Attributes
----------
stage_id:
The pipeline Stage that is experiencing the bottleneck.
queue_fill_rate:
Input queue utilisation as a fraction in [0.0, 1.0].
suggested_action:
Human-readable suggestion (e.g. "Add more Dongles to this stage").
severity:
Either ``"warning"`` (fill_rate > 0.8) or ``"critical"``
(fill_rate > 0.95).
"""
stage_id: str
queue_fill_rate: float
suggested_action: str
severity: str # "warning" | "critical"
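
A minimal construction sketch (the values are illustrative, chosen to land in the warning band):

    from core.device.bottleneck import BottleneckAlert

    alert = BottleneckAlert(
        stage_id="stage_detect",
        queue_fill_rate=0.92,
        suggested_action="Add more Dongles to this stage",
        severity="warning",  # 0.8 < 0.92 <= 0.95, so not yet "critical"
    )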

core/device/device_manager.py

@@ -0,0 +1,217 @@
"""
core/device/device_manager.py
DeviceManager manages NPU Dongle discovery, health, and assignment.
Design:
- scan_devices() calls the Kneron KP SDK but accepts an injectable kp_api
parameter so tests can supply a Mock without real hardware.
- DongleSeriesSpec constants are inlined here to avoid a circular import
from core.functions.Multidongle.
"""
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Dict, List, Optional
# ---------------------------------------------------------------------------
# GOPS table (mirrors DongleSeriesSpec in Multidongle.py)
# ---------------------------------------------------------------------------
_PRODUCT_ID_TO_SERIES: Dict[int, str] = {
0x100: "KL520",
0x720: "KL720",
0x630: "KL630",
0x730: "KL730",
}
_SERIES_GOPS: Dict[str, int] = {
"KL520": 2,
"KL720": 28,
"KL630": 400,
"KL730": 1600,
}
# ---------------------------------------------------------------------------
# Data classes
# ---------------------------------------------------------------------------
@dataclass
class DeviceInfo:
"""Snapshot of a single NPU Dongle's state."""
device_id: str # unique id, e.g. "usb-<port_id>"
series: str # "KL520" | "KL720" | ...
product_id: int # raw USB product ID
status: str # "online" | "offline" | "busy"
gops: int # compute capacity
assigned_stage: Optional[str] # currently assigned stage ID, or None
current_fps: float # live inference throughput
utilization_pct: float # 0.0–100.0
@dataclass
class DeviceHealth:
"""Health snapshot of a single NPU Dongle."""
device_id: str
temperature_celsius: Optional[float] # None if SDK does not support it
error_count: int
last_error: Optional[str]
uptime_seconds: float
# ---------------------------------------------------------------------------
# DeviceManager
# ---------------------------------------------------------------------------
class DeviceManager:
"""Manages NPU Dongle discovery, health queries, and stage assignment.
Parameters
----------
kp_api:
Kneron KP SDK module reference. Pass ``None`` to import the real
``kp`` module at runtime, or inject a Mock in tests.
"""
def __init__(self, kp_api=None) -> None:
if kp_api is None:
import kp as _kp # real SDK (requires hardware)
self._kp = _kp
else:
self._kp = kp_api
# Known devices, populated by scan_devices()
self._devices: Dict[str, DeviceInfo] = {}
# stage assignments: {device_id: stage_id}
self._assignments: Dict[str, str] = {}
# ------------------------------------------------------------------
# Public API
# ------------------------------------------------------------------
def scan_devices(self) -> List[DeviceInfo]:
"""Scan for connected Kneron Dongles and update internal state.
Returns
-------
List[DeviceInfo]
All currently connected devices, each with status "online".
"""
try:
descriptors = self._kp.core.scan_devices()
except Exception:
return []
if not descriptors or descriptors.device_descriptor_number == 0:
return []
found: Dict[str, DeviceInfo] = {}
for desc in descriptors.device_descriptor_list:
try:
port_id = desc.usb_port_id
product_id = desc.product_id
device_id = f"usb-{port_id}"
series = _PRODUCT_ID_TO_SERIES.get(product_id, "Unknown")
gops = _SERIES_GOPS.get(series, 0)
assigned = self._assignments.get(device_id)
info = DeviceInfo(
device_id=device_id,
series=series,
product_id=product_id,
status="online",
gops=gops,
assigned_stage=assigned,
current_fps=0.0,
utilization_pct=0.0,
)
found[device_id] = info
except Exception:
continue
self._devices = found
return list(self._devices.values())
def get_device_health(self, device_id: str) -> DeviceHealth:
"""Return a health snapshot for the given device.
Temperature is returned as ``None`` because the current KP SDK
version does not expose thermal sensors.
"""
return DeviceHealth(
device_id=device_id,
temperature_celsius=None,
error_count=0,
last_error=None,
uptime_seconds=0.0,
)
def assign_device(self, device_id: str, stage_id: str) -> bool:
"""Assign *device_id* to *stage_id*.
Returns
-------
bool
``False`` if the device is unknown or already assigned to a
different stage; ``True`` on success.
"""
device = self._devices.get(device_id)
if device is None or device.status == "offline":
return False
existing_stage = self._assignments.get(device_id)
if existing_stage is not None and existing_stage != stage_id:
return False # already assigned to a different stage
self._assignments[device_id] = stage_id
self._devices[device_id].assigned_stage = stage_id
return True
def unassign_device(self, device_id: str) -> bool:
"""Release *device_id* from its current stage assignment.
Returns
-------
bool
``False`` if the device is unknown; ``True`` on success.
"""
if device_id not in self._devices:
return False
self._assignments.pop(device_id, None)
self._devices[device_id].assigned_stage = None
return True
def get_load_balance_recommendation(
self, stages: List[str]
) -> Dict[str, str]:
"""Recommend device-to-stage assignment by GOPS (descending).
Higher-GOPS devices are assigned to earlier stages. Stages with
no available device are mapped to an empty string.
Parameters
----------
stages:
Ordered list of stage IDs (first stage has highest priority).
Returns
-------
Dict[str, str]
``{stage_id: device_id}``; device_id is "" if unavailable.
"""
available = sorted(
self._devices.values(),
key=lambda d: d.gops,
reverse=True,
)
recommendation: Dict[str, str] = {}
for i, stage_id in enumerate(stages):
if i < len(available):
recommendation[stage_id] = available[i].device_id
else:
recommendation[stage_id] = ""
return recommendation
def get_device_statistics(self) -> Dict[str, DeviceInfo]:
"""Return a snapshot of all known devices keyed by device_id."""
return dict(self._devices)
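
A minimal sketch of exercising DeviceManager without hardware through the injectable kp_api seam; SimpleNamespace and MagicMock stand in for the real SDK objects:

    from types import SimpleNamespace
    from unittest.mock import MagicMock
    from core.device.device_manager import DeviceManager

    desc = SimpleNamespace(usb_port_id=3, product_id=0x720)
    fake_kp = MagicMock()
    fake_kp.core.scan_devices.return_value = SimpleNamespace(
        device_descriptor_number=1, device_descriptor_list=[desc],
    )

    manager = DeviceManager(kp_api=fake_kp)
    devices = manager.scan_devices()
    assert devices[0].device_id == "usb-3"
    assert devices[0].series == "KL720" and devices[0].gops == 28
    assert manager.assign_device("usb-3", "stage_detect")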

core/optimization/__init__.py

@@ -0,0 +1 @@
"""core/optimization — Pipeline 優化建議模組。"""

core/optimization/engine.py

@@ -0,0 +1,248 @@
"""
core/optimization/engine.py
OptimizationEngine 分析 Pipeline 執行統計產生可執行的優化建議
設計重點
- analyze_pipeline 接受來自 InferencePipeline.get_pipeline_statistics() stats 字典
- 三條優化規則rebalance_devicesadjust_queueadd_devices各自獨立
可個別觸發不互斥
- apply_suggestion rebalance_devices 呼叫 device_manager.assign_device
其他類型add_devicesadjust_queue需要人工操作僅記錄 log 後回傳 True
- predict_performance 使用保守係數 0.6 的啟發式估算
"""
from __future__ import annotations
import logging
import uuid
from dataclasses import dataclass, field
from typing import Any, Dict, List, Tuple
logger = logging.getLogger(__name__)
# 優化規則閾值
_QUEUE_FILL_THRESHOLD = 0.70 # queue_fill_rate > 此值觸發 rebalance_devices
_TIME_RATIO_THRESHOLD = 2.0 # max/min avg_processing_time > 此值觸發 adjust_queue
_UTILIZATION_THRESHOLD = 85.0 # 所有裝置 utilization_pct > 此值觸發 add_devices
_CONSERVATIVE_FACTOR = 0.6 # predict_performance 的保守係數
@dataclass
class OptimizationSuggestion:
"""A single optimization suggestion.
Attributes:
suggestion_id: unique identifier (a UUID string)
type: suggestion type, "rebalance_devices" | "adjust_queue" | "add_devices"
description: human-readable explanation that avoids technical jargon
estimated_improvement_pct: estimated improvement percentage (0.0–100.0)
confidence: confidence level, "high" | "medium" | "low"
action_params: parameter dict needed to apply the suggestion
"""
suggestion_id: str
type: str
description: str
estimated_improvement_pct: float
confidence: str
action_params: Dict[str, Any]
class OptimizationEngine:
"""Analyzes pipeline runtime statistics and produces optimization suggestions."""
# ------------------------------------------------------------------
# Public API
# ------------------------------------------------------------------
def analyze_pipeline(
self,
stats: Dict[str, Any],
) -> List[OptimizationSuggestion]:
"""Analyze pipeline runtime statistics and produce a list of optimization suggestions.
Parameters:
stats: dict from InferencePipeline.get_pipeline_statistics();
see the module docstring for the format
Returns:
a possibly empty list of OptimizationSuggestion
"""
stages: Dict[str, Any] = stats.get("stages", {})
devices: Dict[str, Any] = stats.get("devices", {})
suggestions: List[OptimizationSuggestion] = []
suggestions.extend(self._check_rebalance_devices(stages))
suggestions.extend(self._check_adjust_queue(stages))
suggestions.extend(self._check_add_devices(devices))
return suggestions
def predict_performance(
self,
config: List[Any],
available_devices: List[Any],
) -> Dict[str, float]:
"""以啟發式方法估算 Pipeline 效能。
公式
estimated_fps = sum(device.gops for d in available_devices) / num_stages * 0.6
estimated_latency_ms = 1000 / estimated_fps
confidence_range = (estimated_fps * 0.8, estimated_fps * 1.2)
參數
config: Stage 設定列表每個元素代表一個 Stage
available_devices: DeviceInfo 物件列表具備 gops 屬性
回傳
包含 estimated_fpsestimated_latency_msconfidence_range 的字典
"""
num_stages = len(config)
total_gops = sum(getattr(d, "gops", 0) for d in available_devices)
if num_stages == 0 or total_gops == 0:
return {
"estimated_fps": 0.0,
"estimated_latency_ms": 0.0,
"confidence_range": (0.0, 0.0),
}
estimated_fps = total_gops / num_stages * _CONSERVATIVE_FACTOR
estimated_latency_ms = 1000.0 / estimated_fps
confidence_range = (estimated_fps * 0.8, estimated_fps * 1.2)
return {
"estimated_fps": estimated_fps,
"estimated_latency_ms": estimated_latency_ms,
"confidence_range": confidence_range,
}
def apply_suggestion(
self,
suggestion: OptimizationSuggestion,
device_manager: Any,
) -> bool:
"""執行優化建議。
- rebalance_devices呼叫 device_manager.assign_device 並回傳其結果
- add_devices / adjust_queue記錄 log需人工操作回傳 True
參數
suggestion: 要執行的優化建議
device_manager: DeviceManager 實例
回傳
執行是否成功
"""
if suggestion.type == "rebalance_devices":
device_id = suggestion.action_params.get("device_id", "")
stage_id = suggestion.action_params.get("stage_id", "")
success = device_manager.assign_device(device_id, stage_id)
if success:
logger.info(
"Reassigned device %s to stage %s", device_id, stage_id
)
else:
logger.warning(
"Could not assign device %s to stage %s", device_id, stage_id
)
return success
if suggestion.type in ("add_devices", "adjust_queue"):
logger.info(
"Optimization suggestion [%s]: %s (manual action required)",
suggestion.type,
suggestion.description,
)
return True
logger.warning("Unknown suggestion type: %s", suggestion.type)
return False
# ------------------------------------------------------------------
# Internal rule implementations
# ------------------------------------------------------------------
def _check_rebalance_devices(
self, stages: Dict[str, Any]
) -> List[OptimizationSuggestion]:
"""Rule 1: queue_fill_rate > 0.70 → suggest reassigning devices."""
suggestions = []
for stage_id, stage_data in stages.items():
fill_rate: float = stage_data.get("queue_fill_rate", 0.0)
if fill_rate > _QUEUE_FILL_THRESHOLD:
pct = round((fill_rate - _QUEUE_FILL_THRESHOLD) / _QUEUE_FILL_THRESHOLD * 100, 1)
suggestions.append(
OptimizationSuggestion(
suggestion_id=str(uuid.uuid4()),
type="rebalance_devices",
description=(
f"Queue utilization of {stage_id} is high ({fill_rate:.0%}); "
"consider assigning a higher-capacity device to this stage to reduce backlog."
),
estimated_improvement_pct=min(pct, 40.0),
confidence="medium",
action_params={"stage_id": stage_id, "device_id": ""},
)
)
return suggestions
def _check_adjust_queue(
self, stages: Dict[str, Any]
) -> List[OptimizationSuggestion]:
"""Rule 2: max/min avg_processing_time ratio > 2.0 → suggest adjusting queue sizes."""
if len(stages) < 2:
return []
times = {
sid: data.get("avg_processing_time", 0.0)
for sid, data in stages.items()
}
max_time = max(times.values())
min_time = min(times.values())
if min_time <= 0 or max_time / min_time <= _TIME_RATIO_THRESHOLD:
return []
ratio = max_time / min_time
return [
OptimizationSuggestion(
suggestion_id=str(uuid.uuid4()),
type="adjust_queue",
description=(
f"Stage processing times differ by a factor of {ratio:.1f}; "
"consider adjusting queue sizes to balance throughput across stages."
),
estimated_improvement_pct=min((ratio - 2.0) * 10.0, 30.0),
confidence="low",
action_params={"max_stage": max(times, key=times.get), "ratio": ratio},
)
]
def _check_add_devices(
self, devices: Dict[str, Any]
) -> List[OptimizationSuggestion]:
"""Rule 3: every Dongle above 85% utilization → suggest adding more Dongles."""
if not devices:
return []
utilizations = [
data.get("utilization_pct", 0.0) for data in devices.values()
]
if not all(u > _UTILIZATION_THRESHOLD for u in utilizations):
return []
avg_util = sum(utilizations) / len(utilizations)
return [
OptimizationSuggestion(
suggestion_id=str(uuid.uuid4()),
type="add_devices",
description=(
f"Average utilization across all devices has reached {avg_util:.1f}%; "
"the system is near saturation, so consider adding more NPU devices."
),
estimated_improvement_pct=min((avg_util - 85.0) * 2.0, 50.0),
confidence="high",
action_params={"current_avg_utilization": avg_util},
)
]
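
A minimal sketch of driving the engine with a hand-built stats dict whose keys mirror the format described above (values are illustrative). For predict_performance, two KL720 dongles (28 GOPS each) across two stages would estimate 56 / 2 * 0.6 = 16.8 FPS.

    from core.optimization.engine import OptimizationEngine

    engine = OptimizationEngine()
    stats = {
        "stages": {"stage_0": {"queue_fill_rate": 0.85, "avg_processing_time": 12.0}},
        "devices": {"usb-1": {"utilization_pct": 92.0}},
    }
    suggestions = engine.analyze_pipeline(stats)
    # 0.85 > 0.70 fires rebalance_devices; 92% > 85% on every device fires
    # add_devices; adjust_queue stays silent because it needs at least two stages.
    assert {s.type for s in suggestions} == {"rebalance_devices", "add_devices"}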

core/performance/__init__.py

@@ -0,0 +1,23 @@
"""
core/performance 效能測試與歷史記錄模組
提供 Benchmark 執行結果儲存與回歸分析功能
使用範例
from core.performance import (
PerformanceBenchmarker,
BenchmarkConfig,
BenchmarkResult,
PerformanceHistory,
)
"""
from .benchmarker import BenchmarkConfig, BenchmarkResult, PerformanceBenchmarker
from .history import PerformanceHistory
__all__ = [
"BenchmarkConfig",
"BenchmarkResult",
"PerformanceBenchmarker",
"PerformanceHistory",
]

core/performance/benchmarker.py

@@ -0,0 +1,247 @@
"""
core/performance/benchmarker.py 效能基準測試模組
提供 BenchmarkConfigBenchmarkResult 資料結構
以及 PerformanceBenchmarker 執行單/多裝置效能測試並計算加速倍數
設計重點
- 實際推論呼叫透過 inference_runner callable 注入
方便在沒有硬體的環境下進行單元測試注入 Mock
- 純計算邏輯calculate_speedup 可直接測試無需 Mock
使用範例測試環境
config = BenchmarkConfig(pipeline_config=[], test_input_source="test.mp4")
benchmarker = PerformanceBenchmarker()
def mock_runner(frame_data):
return {"result": "ok"}
seq = benchmarker.run_sequential_benchmark(config, inference_runner=mock_runner)
par = benchmarker.run_parallel_benchmark(config, inference_runner=mock_runner)
speedup = benchmarker.calculate_speedup(seq, par)
"""
import time
import statistics
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List, Optional, Tuple
@dataclass
class BenchmarkConfig:
"""Benchmark run configuration.
Attributes:
pipeline_config: list of pipeline Stage configs (from the UI)
test_input_source: test input source (video file path or camera index)
test_duration_seconds: test duration, excluding the warmup phase
warmup_frames: number of warmup frames, excluded from the statistics
"""
pipeline_config: List[Any]
test_input_source: str
test_duration_seconds: float = 30.0
warmup_frames: int = 50
@dataclass
class BenchmarkResult:
"""Result of a single benchmark run.
Attributes:
mode: run mode, 'sequential' (single device) or 'parallel' (multi-device)
fps: frames per second
avg_latency_ms: average inference latency in milliseconds
p95_latency_ms: 95th-percentile latency in milliseconds
total_frames: total frames processed during the run (excluding warmup)
timestamp: Unix timestamp of the run start
device_config: device allocation config, e.g. {"KL520": 1}
id: unique identifier, filled in by PerformanceHistory.record()
"""
mode: str
fps: float
avg_latency_ms: float
p95_latency_ms: float
total_frames: int
timestamp: float
device_config: Dict[str, Any]
id: Optional[str] = field(default=None)
class PerformanceBenchmarker:
"""Runs single-device vs. multi-device benchmarks and computes the speedup factor.
Designed for testability (testability-first):
- run_sequential_benchmark / run_parallel_benchmark accept an inference_runner
  parameter so tests can inject a Mock instead of real hardware.
- calculate_speedup is a pure function operating directly on BenchmarkResult.
Attributes:
device_config: device configuration info, copied into BenchmarkResult.device_config
"""
def __init__(self, device_config: Optional[Dict[str, Any]] = None):
"""Initialize the PerformanceBenchmarker.
Parameters:
device_config: device configuration, e.g. {"KL520": 1}; defaults to an empty dict
"""
self.device_config: Dict[str, Any] = device_config or {}
# ------------------------------------------------------------------
# Public API
# ------------------------------------------------------------------
def run_sequential_benchmark(
self,
config: BenchmarkConfig,
inference_runner: Optional[Callable[[Any], Any]] = None,
) -> BenchmarkResult:
"""Run the benchmark in single-device (sequential) mode.
Parameters:
config: benchmark configuration
inference_runner: inference function with signature ``(frame_data: Any) -> Any``;
if None, a no-op runner is used (architecture validation only)
Returns:
a BenchmarkResult with mode='sequential'
"""
runner = inference_runner or self._default_runner
return self._run_benchmark(config, runner, mode="sequential")
def run_parallel_benchmark(
self,
config: BenchmarkConfig,
inference_runner: Optional[Callable[[Any], Any]] = None,
) -> BenchmarkResult:
"""Run the benchmark in multi-device (parallel) mode.
Parameters:
config: benchmark configuration
inference_runner: inference function with signature ``(frame_data: Any) -> Any``;
if None, a no-op runner is used (architecture validation only)
Returns:
a BenchmarkResult with mode='parallel'
"""
runner = inference_runner or self._default_runner
return self._run_benchmark(config, runner, mode="parallel")
def calculate_speedup(
self,
seq: BenchmarkResult,
par: BenchmarkResult,
) -> float:
"""Compute the parallel-over-sequential speedup factor.
Formula: par.fps / seq.fps
Parameters:
seq: BenchmarkResult from sequential mode
par: BenchmarkResult from parallel mode
Returns:
the speedup factor (float)
Raises:
ValueError: if seq.fps <= 0 (to avoid division by zero)
"""
if seq.fps <= 0:
raise ValueError(
f"Sequential-mode FPS must be greater than 0; got {seq.fps}"
)
return par.fps / seq.fps
def run_full_benchmark(
self,
config: BenchmarkConfig,
inference_runner: Optional[Callable[[Any], Any]] = None,
) -> Tuple[BenchmarkResult, BenchmarkResult, float]:
"""Run the full benchmark: sequential → parallel → speedup.
Sequence:
1. Run the sequential benchmark
2. Run the parallel benchmark
3. Compute the speedup factor
Parameters:
config: benchmark configuration
inference_runner: inference function (a Mock can be injected)
Returns:
Tuple[BenchmarkResult, BenchmarkResult, float]
(sequential_result, parallel_result, speedup)
"""
seq_result = self.run_sequential_benchmark(config, inference_runner)
par_result = self.run_parallel_benchmark(config, inference_runner)
speedup = self.calculate_speedup(seq_result, par_result)
return seq_result, par_result, speedup
# ------------------------------------------------------------------
# Internal implementation
# ------------------------------------------------------------------
def _run_benchmark(
self,
config: BenchmarkConfig,
runner: Callable[[Any], Any],
mode: str,
) -> BenchmarkResult:
"""Shared benchmark execution logic.
Flow:
1. Warmup (warmup_frames, excluded from statistics)
2. Measured run (test_duration_seconds)
3. Compute FPS, average latency, and p95 latency
Parameters:
config: benchmark configuration
runner: inference function
mode: 'sequential' or 'parallel'
Returns:
BenchmarkResult
"""
# Warmup phase
for _ in range(config.warmup_frames):
runner(None)
# Measured run
latencies: List[float] = []
test_start = time.time()
while time.time() - test_start < config.test_duration_seconds:
frame_start = time.time()
runner(None)
frame_end = time.time()
latencies.append((frame_end - frame_start) * 1000.0)  # convert to milliseconds
total_frames = len(latencies)
elapsed = time.time() - test_start
# Compute statistics
if total_frames == 0:
fps = 0.0
avg_latency_ms = 0.0
p95_latency_ms = 0.0
else:
fps = total_frames / elapsed if elapsed > 0 else 0.0
avg_latency_ms = statistics.mean(latencies)
sorted_latencies = sorted(latencies)
p95_index = int(len(sorted_latencies) * 0.95)
p95_latency_ms = sorted_latencies[min(p95_index, len(sorted_latencies) - 1)]
return BenchmarkResult(
mode=mode,
fps=fps,
avg_latency_ms=avg_latency_ms,
p95_latency_ms=p95_latency_ms,
total_frames=total_frames,
timestamp=test_start,
device_config=dict(self.device_config),
)
@staticmethod
def _default_runner(frame_data: Any) -> Any:
"""Default inference runner (no-op; architecture validation only)."""
return None
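
A worked sketch of the statistics computed in _run_benchmark, with hypothetical latencies in milliseconds:

    latencies = [10.0, 11.0, 12.0, 10.5, 50.0]    # 5 measured frames, warmup excluded
    avg = sum(latencies) / len(latencies)         # 18.7 ms
    s = sorted(latencies)                         # [10.0, 10.5, 11.0, 12.0, 50.0]
    p95 = s[min(int(len(s) * 0.95), len(s) - 1)]  # int(4.75) = 4 -> 50.0 ms
    # fps = total_frames / elapsed wall-clock time, e.g. 5 frames in 0.1 s -> 50.0 FPS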

core/performance/history.py

@@ -0,0 +1,233 @@
"""
core/performance/history.py Benchmark 歷史記錄模組
提供 PerformanceHistory 類別負責
- BenchmarkResult JSON 格式持久化到本地磁碟
- 依條件limit / mode查詢歷史記錄
- 產生兩次測試間的回歸比較報告
儲存格式範例
{
"records": [
{
"id": "benchmark_20260405_143022",
"mode": "parallel",
"fps": 45.2,
"avg_latency_ms": 22.1,
"p95_latency_ms": 35.0,
"total_frames": 1356,
"timestamp": 1743856222.0,
"device_config": {"KL720": 2}
}
]
}
"""
import json
import logging
import os
import time
from datetime import datetime
from typing import Any, Dict, List, Optional
from .benchmarker import BenchmarkResult
logger = logging.getLogger(__name__)
class PerformanceHistory:
"""Local benchmark history manager.
Attributes:
storage_path: full path of the JSON storage file;
defaults to ``~/.cluster4npu/benchmark_history.json``
"""
DEFAULT_STORAGE_PATH = os.path.join(
os.path.expanduser("~"), ".cluster4npu", "benchmark_history.json"
)
def __init__(self, storage_path: str = DEFAULT_STORAGE_PATH):
"""Initialize PerformanceHistory.
Creates the storage directory if it does not exist.
Parameters:
storage_path: path of the JSON storage file
"""
self.storage_path = storage_path
self._ensure_storage_directory()
# ------------------------------------------------------------------
# Public API
# ------------------------------------------------------------------
def record(self, result: BenchmarkResult) -> None:
"""Record one BenchmarkResult and persist it to JSON.
This method:
1. generates a unique id for the result (if it has none yet)
2. writes the id back to result.id
3. appends the record to the JSON store
Parameters:
result: the BenchmarkResult to record
"""
data = self._load_raw()
# Generate a unique id
record_id = self._generate_id(result)
result.id = record_id
record_dict = self._result_to_dict(result)
data["records"].append(record_dict)
self._save_raw(data)
def get_history(
self,
limit: int = 50,
mode: Optional[str] = None,
) -> List[BenchmarkResult]:
"""Query history records.
Returns records newest-first (reverse chronological).
Parameters:
limit: maximum number of records to return (default 50)
mode: if given, only return records matching this mode ('sequential' or 'parallel')
Returns:
List[BenchmarkResult], newest records first
"""
data = self._load_raw()
records = data.get("records", [])
# Filter by mode
if mode is not None:
records = [r for r in records if r.get("mode") == mode]
# Newest first (descending by timestamp)
records = sorted(records, key=lambda r: r.get("timestamp", 0), reverse=True)
# Apply the limit
records = records[:limit]
return [self._dict_to_result(r) for r in records]
def get_regression_report(
self,
baseline_id: str,
compare_id: str,
) -> Dict[str, Any]:
"""Compare two runs and produce a regression report.
Parameters:
baseline_id: id of the baseline run
compare_id: id of the run to compare against it
Returns:
a dict with the following keys:
- baseline: BenchmarkResult (baseline)
- compare: BenchmarkResult (comparison target)
- fps_change_pct: FPS change in percent (positive is an improvement)
- avg_latency_change_pct: average-latency change in percent (negative is an improvement)
- p95_latency_change_pct: P95-latency change in percent (negative is an improvement)
Raises:
ValueError: if either id is missing from the history
"""
data = self._load_raw()
all_records = {r["id"]: r for r in data.get("records", [])}
if baseline_id not in all_records:
raise ValueError(f"Baseline run id not found: {baseline_id}")
if compare_id not in all_records:
raise ValueError(f"Comparison run id not found: {compare_id}")
baseline = self._dict_to_result(all_records[baseline_id])
compare = self._dict_to_result(all_records[compare_id])
def pct_change(old: float, new: float) -> float:
"""Compute the relative change in percent."""
if old == 0:
return 0.0
return (new - old) / old * 100.0
return {
"baseline": baseline,
"compare": compare,
"fps_change_pct": pct_change(baseline.fps, compare.fps),
"avg_latency_change_pct": pct_change(
baseline.avg_latency_ms, compare.avg_latency_ms
),
"p95_latency_change_pct": pct_change(
baseline.p95_latency_ms, compare.p95_latency_ms
),
}
# ------------------------------------------------------------------
# Internal implementation
# ------------------------------------------------------------------
def _ensure_storage_directory(self) -> None:
"""Create the storage directory if it does not exist."""
parent_dir = os.path.dirname(self.storage_path)
if parent_dir:
os.makedirs(parent_dir, exist_ok=True)
def _load_raw(self) -> Dict[str, Any]:
"""Read raw data from the JSON file. Returns an empty structure if the file is missing or corrupt."""
if not os.path.exists(self.storage_path):
return {"records": []}
try:
with open(self.storage_path, "r", encoding="utf-8") as f:
return json.load(f)
except json.JSONDecodeError as e:
logger.warning("History JSON file is corrupt; falling back to an empty structure: %s", e)
return {"records": []}
except (IOError, OSError) as e:
logger.warning("Could not read the history file; falling back to an empty structure: %s", e)
return {"records": []}
def _save_raw(self, data: Dict[str, Any]) -> None:
"""Write the data to the JSON file."""
with open(self.storage_path, "w", encoding="utf-8") as f:
json.dump(data, f, ensure_ascii=False, indent=2)
@staticmethod
def _generate_id(result: BenchmarkResult) -> str:
"""Generate a unique identifier from the timestamp.
Format: ``benchmark_YYYYMMDD_HHMMSSffffff``
"""
dt = datetime.fromtimestamp(result.timestamp)
return dt.strftime("benchmark_%Y%m%d_%H%M%S%f")
@staticmethod
def _result_to_dict(result: BenchmarkResult) -> Dict[str, Any]:
"""將 BenchmarkResult 轉換為可序列化的字典。"""
return {
"id": result.id,
"mode": result.mode,
"fps": result.fps,
"avg_latency_ms": result.avg_latency_ms,
"p95_latency_ms": result.p95_latency_ms,
"total_frames": result.total_frames,
"timestamp": result.timestamp,
"device_config": result.device_config,
}
@staticmethod
def _dict_to_result(data: Dict[str, Any]) -> BenchmarkResult:
"""將字典轉換回 BenchmarkResult。"""
return BenchmarkResult(
id=data.get("id"),
mode=data["mode"],
fps=data["fps"],
avg_latency_ms=data["avg_latency_ms"],
p95_latency_ms=data["p95_latency_ms"],
total_frames=data["total_frames"],
timestamp=data["timestamp"],
device_config=data.get("device_config", {}),
)
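
A minimal record/query round-trip sketch against a throwaway storage path:

    import os
    import tempfile
    import time
    from core.performance import BenchmarkResult, PerformanceHistory

    history = PerformanceHistory(
        storage_path=os.path.join(tempfile.mkdtemp(), "history.json"))
    history.record(BenchmarkResult(
        mode="parallel", fps=45.2, avg_latency_ms=22.1, p95_latency_ms=35.0,
        total_frames=1356, timestamp=time.time(), device_config={"KL720": 2}))
    latest = history.get_history(limit=1, mode="parallel")[0]
    assert latest.id is not None and latest.fps == 45.2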

core/performance/report_exporter.py

@@ -0,0 +1,428 @@
"""
core/performance/report_exporter.py 效能報告匯出模組
提供 DeviceSummaryReportData 資料結構與 ReportExporter 主類別
支援將 Benchmark 結果匯出為 PDF需要 reportlab CSV標準庫
設計重點
- ReportExporter 不依賴 PyQt5只依賴 reportlab 與標準庫
- reportlab try/except ImportError 保護若未安裝export_pdf() 拋出 ImportError
- export_csv() 只用標準庫 csv永遠可用
- 無狀態設計stateless每次匯出建立新實例或直接呼叫靜態方法
"""
from __future__ import annotations
import csv
import io
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, List, Optional
# ---------------------------------------------------------------------------
# reportlab availability flag
# ---------------------------------------------------------------------------
try:
from reportlab.platypus import SimpleDocTemplate # noqa: F401
_REPORTLAB_AVAILABLE = True
except ImportError:
_REPORTLAB_AVAILABLE = False
# ---------------------------------------------------------------------------
# Data structures
# ---------------------------------------------------------------------------
@dataclass
class DeviceSummary:
"""Summary info for a single device, from DeviceManager."""
device_id: str
product_name: str # e.g. "KL720"
firmware_version: str
is_active: bool
@dataclass
class ReportData:
"""
All data needed for a report. The caller (the UI layer) collects it from the
various modules and passes it to ReportExporter.
A pure data container, decoupled from the UI / SDK for easy unit testing.
"""
# Basic report info
report_title: str = "Performance Test Report"
generated_at: float = field(default_factory=time.time) # UNIX timestamp
pipeline_name: str = "" # from the .mflow file name, or user-provided
# Benchmark results (from PerformanceBenchmarker.run_full_benchmark())
sequential_result: Optional[Any] = None # BenchmarkResult
parallel_result: Optional[Any] = None # BenchmarkResult
speedup: Optional[float] = None # par.fps / seq.fps
# History records (from PerformanceHistory.get_history())
history_records: List[Any] = field(default_factory=list) # List[BenchmarkResult]
# Device info (from DeviceManager.get_all_devices())
devices: List[DeviceSummary] = field(default_factory=list)
# Chart screenshot (captured by the UI layer before export)
chart_image_bytes: Optional[bytes] = None # PNG bytes, from PerformanceDashboard
# ---------------------------------------------------------------------------
# ReportExporter
# ---------------------------------------------------------------------------
class ReportExporter:
"""
Serializes ReportData into PDF or CSV files.
Stateless design: create a new instance per export, or call the static methods directly.
"""
# ------------------------------------------------------------------
# PDF export
# ------------------------------------------------------------------
def export_pdf(
self,
data: ReportData,
output_path: "str | Path",
) -> Path:
"""
Export the full performance report as a PDF.
Returns the path the file was actually written to.
If the parent directory of output_path does not exist, it is created.
Raises:
ImportError: if reportlab is not installed (with an install hint)
"""
if not _REPORTLAB_AVAILABLE:
raise ImportError(
"reportlab is required for PDF export. Install with: pip install reportlab>=4.0.0"
)
try:
from reportlab.platypus import (
SimpleDocTemplate,
Table,
TableStyle,
Paragraph,
Spacer,
Image,
)
from reportlab.lib.pagesizes import A4
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib import colors
from reportlab.lib.units import mm
import reportlab # noqa: F401 (confirm it is installed)
except ImportError as e:
raise ImportError(
f"reportlab is not installed. Run: pip install reportlab>=4.0.0\nOriginal error: {e}"
) from e
output_path = Path(output_path)
output_path.parent.mkdir(parents=True, exist_ok=True)
doc = SimpleDocTemplate(
str(output_path),
pagesize=A4,
rightMargin=20 * mm,
leftMargin=20 * mm,
topMargin=20 * mm,
bottomMargin=20 * mm,
)
story: list = []
styles = getSampleStyleSheet()
# Cover (built from Paragraphs; a cover canvas callback is hard to test reliably without a GUI)
self._build_cover_paragraphs(story, data, styles, Paragraph, Spacer)
# Benchmark results table
self._build_benchmark_table(story, data, styles, Table, TableStyle, Paragraph, Spacer, colors)
# Trend chart
self._build_trend_chart(story, data, styles, Paragraph, Spacer, Image)
# History table
self._build_history_table(story, data, styles, Table, TableStyle, Paragraph, Spacer, colors)
# Device info
self._build_device_info(story, data, styles, Paragraph, Spacer)
doc.build(story)
return output_path
def _build_cover_page(self, canvas, data: ReportData) -> None:
"""Draw the cover (report title, generation time, pipeline name, device count) as a canvas-callback variant."""
canvas.saveState()
canvas.setFont("Helvetica-Bold", 24)
canvas.drawCentredString(
canvas._pagesize[0] / 2,
canvas._pagesize[1] * 0.65,
data.report_title,
)
canvas.setFont("Helvetica", 12)
canvas.drawCentredString(
canvas._pagesize[0] / 2,
canvas._pagesize[1] * 0.58,
f"Generated: {self._get_timestamp_str(data.generated_at)}",
)
if data.pipeline_name:
canvas.drawCentredString(
canvas._pagesize[0] / 2,
canvas._pagesize[1] * 0.53,
f"Pipeline: {data.pipeline_name}",
)
canvas.drawCentredString(
canvas._pagesize[0] / 2,
canvas._pagesize[1] * 0.48,
f"Device count: {len(data.devices)}",
)
canvas.restoreState()
def _build_cover_paragraphs(self, story, data, styles, Paragraph, Spacer) -> None:
"""Build the cover content as Paragraph flowables (embedded in the story flow)."""
story.append(Spacer(1, 60))
story.append(Paragraph(data.report_title, styles["Title"]))
story.append(Spacer(1, 12))
story.append(Paragraph(
f"Generated: {self._get_timestamp_str(data.generated_at)}",
styles["Normal"],
))
if data.pipeline_name:
story.append(Paragraph(f"Pipeline: {data.pipeline_name}", styles["Normal"]))
story.append(Paragraph(f"Device count: {len(data.devices)}", styles["Normal"]))
story.append(Spacer(1, 30))
def _build_benchmark_table(
self, story, data, styles=None,
Table=None, TableStyle=None, Paragraph=None, Spacer=None, colors=None,
) -> None:
"""
Build the benchmark comparison table (a reportlab Table).
Columns: Metric / Sequential / Parallel / Diff (%)
Metrics: FPS, avg latency (ms), P95 latency (ms), total frames
"""
if Paragraph is None:
return
story.append(Paragraph("Benchmark Results", styles["Heading1"]))
story.append(Spacer(1, 8))
seq = data.sequential_result
par = data.parallel_result
if seq is None or par is None:
story.append(Paragraph("No benchmark data", styles["Normal"]))
story.append(Spacer(1, 12))
return
def diff_pct(a, b):
if a and a != 0:
return f"{(b - a) / a * 100:+.1f}%"
return ""
table_data = [
["Metric", "Sequential", "Parallel", "Diff (%)"],
["FPS", f"{seq.fps:.1f}", f"{par.fps:.1f}", diff_pct(seq.fps, par.fps)],
["Avg latency (ms)", f"{seq.avg_latency_ms:.1f}", f"{par.avg_latency_ms:.1f}", diff_pct(seq.avg_latency_ms, par.avg_latency_ms)],
["P95 latency (ms)", f"{seq.p95_latency_ms:.1f}", f"{par.p95_latency_ms:.1f}", diff_pct(seq.p95_latency_ms, par.p95_latency_ms)],
["Total frames", str(seq.total_frames), str(par.total_frames), ""],
]
if data.speedup is not None:
table_data.append(["Speedup", "", f"{data.speedup:.2f}x", ""])
t = Table(table_data)
t.setStyle(TableStyle([
("BACKGROUND", (0, 0), (-1, 0), colors.grey),
("TEXTCOLOR", (0, 0), (-1, 0), colors.whitesmoke),
("ALIGN", (0, 0), (-1, -1), "CENTER"),
("FONTNAME", (0, 0), (-1, 0), "Helvetica-Bold"),
("GRID", (0, 0), (-1, -1), 0.5, colors.black),
]))
story.append(t)
story.append(Spacer(1, 20))
def _build_trend_chart(
self, story, data, styles=None,
Paragraph=None, Spacer=None, Image=None,
) -> None:
"""
If data.chart_image_bytes is not None, embed the chart PNG in the PDF;
otherwise insert a "no chart data" notice.
"""
if Paragraph is None:
return
story.append(Paragraph("Performance Trend", styles["Heading1"]))
story.append(Spacer(1, 8))
if data.chart_image_bytes is not None:
img_buf = io.BytesIO(data.chart_image_bytes)
img = Image(img_buf, width=400, height=200)
story.append(img)
else:
story.append(Paragraph("(no chart data)", styles["Normal"]))
story.append(Spacer(1, 20))
def _build_history_table(
self, story, data, styles=None,
Table=None, TableStyle=None, Paragraph=None, Spacer=None, colors=None,
) -> None:
"""
Build the history table (at most 20 rows; truncation is noted below the table).
Columns: test time / mode / FPS / avg latency (ms) / P95 latency (ms)
"""
if Paragraph is None:
return
story.append(Paragraph("History", styles["Heading1"]))
story.append(Spacer(1, 8))
records = data.history_records[:20]
truncated = len(data.history_records) > 20
table_data = [["Test time", "Mode", "FPS", "Avg latency (ms)", "P95 latency (ms)"]]
for r in records:
table_data.append([
self._get_timestamp_str(r.timestamp),
r.mode,
f"{r.fps:.1f}",
f"{r.avg_latency_ms:.1f}",
f"{r.p95_latency_ms:.1f}",
])
if not records:
table_data.append(["(no records)", "", "", "", ""])
t = Table(table_data)
t.setStyle(TableStyle([
("BACKGROUND", (0, 0), (-1, 0), colors.grey),
("TEXTCOLOR", (0, 0), (-1, 0), colors.whitesmoke),
("ALIGN", (0, 0), (-1, -1), "CENTER"),
("FONTNAME", (0, 0), (-1, 0), "Helvetica-Bold"),
("GRID", (0, 0), (-1, -1), 0.5, colors.black),
]))
story.append(t)
if truncated:
story.append(Spacer(1, 6))
story.append(Paragraph(
f"(showing the latest 20 of {len(data.history_records)} records)",
styles["Normal"],
))
story.append(Spacer(1, 20))
def _build_device_info(
self, story, data, styles=None,
Paragraph=None, Spacer=None,
) -> None:
"""List the devices connected during the test: device ID, model, firmware version, active state."""
if Paragraph is None:
return
story.append(Paragraph("Device Info", styles["Heading1"]))
story.append(Spacer(1, 8))
if not data.devices:
story.append(Paragraph("(no device info)", styles["Normal"]))
else:
for dev in data.devices:
status = "active" if dev.is_active else "inactive"
story.append(Paragraph(
f"Device {dev.device_id}: {dev.product_name}, firmware {dev.firmware_version}, {status}",
styles["Normal"],
))
story.append(Spacer(1, 12))
# ------------------------------------------------------------------
# CSV export
# ------------------------------------------------------------------
def export_csv(
self,
data: ReportData,
output_path: "str | Path",
) -> Path:
"""
Export the benchmark results and history records as CSV.
The CSV contains two logical sections separated by a blank row:
1. Benchmark summary (sequential vs. parallel comparison)
2. History records (one row per BenchmarkResult)
Returns the path the file was actually written to.
Raises:
ValueError: if sequential_result or parallel_result is None
"""
if data.sequential_result is None or data.parallel_result is None:
raise ValueError(
"export_csv() requires both sequential_result and parallel_result, but one of them is None."
)
output_path = Path(output_path)
output_path.parent.mkdir(parents=True, exist_ok=True)
seq = data.sequential_result
par = data.parallel_result
def diff_pct(a, b):
if a and a != 0:
return f"{(b - a) / a * 100:+.1f}%"
return ""
with output_path.open("w", newline="", encoding="utf-8") as f:
writer = csv.writer(f)
# Section 1: benchmark summary
writer.writerow(["section", "metric", "sequential", "parallel", "diff_pct"])
writer.writerow([
"benchmark_summary", "fps",
f"{seq.fps:.1f}", f"{par.fps:.1f}",
diff_pct(seq.fps, par.fps),
])
writer.writerow([
"benchmark_summary", "avg_latency_ms",
f"{seq.avg_latency_ms:.1f}", f"{par.avg_latency_ms:.1f}",
diff_pct(seq.avg_latency_ms, par.avg_latency_ms),
])
writer.writerow([
"benchmark_summary", "p95_latency_ms",
f"{seq.p95_latency_ms:.1f}", f"{par.p95_latency_ms:.1f}",
diff_pct(seq.p95_latency_ms, par.p95_latency_ms),
])
writer.writerow([
"benchmark_summary", "total_frames",
str(seq.total_frames), str(par.total_frames),
"",
])
speedup_val = f"{data.speedup:.2f}x" if data.speedup is not None else ""
writer.writerow([
"benchmark_summary", "speedup",
"", speedup_val,
"",
])
# Blank row separator
writer.writerow([])
# Section 2: history records
writer.writerow(["id", "timestamp", "mode", "fps", "avg_latency_ms", "p95_latency_ms", "total_frames"])
for r in data.history_records:
writer.writerow([
r.id or "",
self._get_timestamp_str(r.timestamp),
r.mode,
f"{r.fps:.1f}",
f"{r.avg_latency_ms:.1f}",
f"{r.p95_latency_ms:.1f}",
str(r.total_frames),
])
return output_path
# ------------------------------------------------------------------
# Helpers
# ------------------------------------------------------------------
@staticmethod
def _get_timestamp_str(ts: float) -> str:
"""Format a UNIX timestamp as 'YYYY-MM-DD HH:MM:SS' (local time)."""
import time as _time
local = _time.localtime(ts)
return _time.strftime("%Y-%m-%d %H:%M:%S", local)
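
A minimal CSV-export sketch; seq and par are assumed to be BenchmarkResult objects from an earlier run_full_benchmark() call:

    from core.performance.report_exporter import ReportData, ReportExporter

    data = ReportData(
        pipeline_name="demo_pipeline",
        sequential_result=seq,           # BenchmarkResult from a prior benchmark
        parallel_result=par,
        speedup=par.fps / seq.fps,
    )
    path = ReportExporter().export_csv(data, "reports/benchmark.csv")
    # The parent directory is created on demand; path is the file actually written.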

core/templates/__init__.py

@@ -0,0 +1 @@
"""core/templates — Pipeline 設定範本模組。"""

core/templates/manager.py

@@ -0,0 +1,182 @@
"""
core/templates/manager.py
TemplateManager 提供常見使用情境的預設 Pipeline 範本
設計重點
- 三個內建範本yolov5_detectionfire_detectiondual_model_cascade以常數定義
- save_as_template 將自訂範本儲存於記憶體in-memory不持久化到磁碟
- load_template 先查內建範本再查自訂範本找不到時拋出 ValueError
- nodes/connections 格式與 .mflow JSON 相同idtype 為必要欄位
"""
from __future__ import annotations
import time
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional
@dataclass
class PipelineTemplate:
"""A single pipeline template.
Attributes:
template_id: unique identifier; built-in templates use semantic names,
custom templates start with custom_
name: display name, e.g. "YOLOv5 Object Detection"
description: template description
nodes: list of node definitions, same format as .mflow; each node has at least id and type
connections: list of connection definitions; each connection has from and to
"""
template_id: str
name: str
description: str
nodes: List[Dict[str, Any]]
connections: List[Dict[str, Any]]
# ---------------------------------------------------------------------------
# Built-in template definitions
# ---------------------------------------------------------------------------
_BUILTIN_TEMPLATES: List[PipelineTemplate] = [
PipelineTemplate(
template_id="yolov5_detection",
name="YOLOv5 物件偵測",
description="標準 YOLOv5 物件偵測流程:輸入影像經前處理後送入模型,後處理輸出邊界框結果。",
nodes=[
{"id": "input_0", "type": "Input", "label": "Input"},
{"id": "preprocess_0", "type": "Preprocess", "label": "Preprocess"},
{"id": "model_0", "type": "Model", "label": "Model"},
{"id": "postprocess_0","type": "Postprocess", "label": "Postprocess"},
{"id": "output_0", "type": "Output", "label": "Output"},
],
connections=[
{"from": "input_0", "to": "preprocess_0"},
{"from": "preprocess_0", "to": "model_0"},
{"from": "model_0", "to": "postprocess_0"},
{"from": "postprocess_0", "to": "output_0"},
],
),
PipelineTemplate(
template_id="fire_detection",
name="火焰偵測分類",
description="火焰偵測流程:影像直接送入模型推論,後處理輸出火焰偵測結果(無前處理節點)。",
nodes=[
{"id": "input_0", "type": "Input", "label": "Input"},
{"id": "model_0", "type": "Model", "label": "Model"},
{"id": "postprocess_0","type": "Postprocess", "label": "Postprocess"},
{"id": "output_0", "type": "Output", "label": "Output"},
],
connections=[
{"from": "input_0", "to": "model_0"},
{"from": "model_0", "to": "postprocess_0"},
{"from": "postprocess_0", "to": "output_0"},
],
),
PipelineTemplate(
template_id="dual_model_cascade",
name="雙模型串接",
description=(
"兩個模型串接的複合推論流程:第一個模型的輸出結果經後處理後,"
"作為第二個模型的輸入,適合先偵測後分類的使用情境。"
),
nodes=[
{"id": "input_0", "type": "Input", "label": "Input"},
{"id": "model_0", "type": "Model", "label": "Model 1"},
{"id": "postprocess_0", "type": "Postprocess", "label": "Postprocess 1"},
{"id": "model_1", "type": "Model", "label": "Model 2"},
{"id": "postprocess_1", "type": "Postprocess", "label": "Postprocess 2"},
{"id": "output_0", "type": "Output", "label": "Output"},
],
connections=[
{"from": "input_0", "to": "model_0"},
{"from": "model_0", "to": "postprocess_0"},
{"from": "postprocess_0", "to": "model_1"},
{"from": "model_1", "to": "postprocess_1"},
{"from": "postprocess_1", "to": "output_0"},
],
),
]
# Fast-lookup dict keyed by template_id
_BUILTIN_BY_ID: Dict[str, PipelineTemplate] = {
t.template_id: t for t in _BUILTIN_TEMPLATES
}
# ---------------------------------------------------------------------------
# TemplateManager
# ---------------------------------------------------------------------------
class TemplateManager:
"""Manages built-in and custom pipeline templates.
Custom templates live in memory; each TemplateManager instance keeps its own set.
"""
def __init__(self) -> None:
# Custom template dict: {template_id: PipelineTemplate}
self._custom: Dict[str, PipelineTemplate] = {}
# ------------------------------------------------------------------
# Public API
# ------------------------------------------------------------------
def get_builtin_templates(self) -> List[PipelineTemplate]:
"""Return the list of all built-in templates (3 in total).
Returns:
a list of PipelineTemplate, excluding custom templates
"""
return list(_BUILTIN_TEMPLATES)
def load_template(self, template_id: str) -> PipelineTemplate:
"""Load a template by template_id.
Lookup order: built-in templates, then custom templates.
Parameters:
template_id: unique template identifier
Returns:
the matching PipelineTemplate
Raises:
ValueError: if template_id does not match any template
"""
if template_id in _BUILTIN_BY_ID:
return _BUILTIN_BY_ID[template_id]
if template_id in self._custom:
return self._custom[template_id]
raise ValueError(f"Template {template_id} not found")
def save_as_template(
self,
pipeline_config: Dict[str, Any],
name: str,
description: str,
) -> PipelineTemplate:
"""Save a pipeline configuration as a new custom template.
Parameters:
pipeline_config: dict containing nodes and connections lists
name: template display name
description: template description
Returns:
the newly created PipelineTemplate (template_id starts with custom_)
"""
safe_name = name.lower().replace(" ", "_")
template_id = f"custom_{safe_name}_{int(time.time() * 1000)}"
template = PipelineTemplate(
template_id=template_id,
name=name,
description=description,
nodes=list(pipeline_config.get("nodes", [])),
connections=list(pipeline_config.get("connections", [])),
)
self._custom[template_id] = template
return template
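
A minimal sketch of the lookup and save paths:

    from core.templates.manager import TemplateManager

    manager = TemplateManager()
    template = manager.load_template("yolov5_detection")
    custom = manager.save_as_template(
        {"nodes": template.nodes, "connections": template.connections},
        name="My Detection",
        description="Copy of the YOLOv5 template",
    )
    assert custom.template_id.startswith("custom_my_detection_")
    assert manager.load_template(custom.template_id) is custom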

tests/unit/conftest.py

@@ -0,0 +1,295 @@
"""
pytest conftest.py 單元測試環境設定
此測試環境沒有 Kneron NPU 硬體也沒有 PyQt5 GUI 函式庫
為了能夠測試純 Python core/ ui/ 模組
在收集測試前預先注入 Mock 模組避免 import 時觸發硬體/GUI 初始化
UI 元件測試需要 QWidget 等基底類別可被正常繼承與多次實例化
因此使用輕量 Stub 取代 MagicMock 作為 PyQt5 Widget 基底
"""
import sys
from unittest.mock import MagicMock
def _install_mock(name: str) -> None:
"""Install an empty MagicMock in place of the module if it is not already present."""
if name not in sys.modules:
sys.modules[name] = MagicMock()
# Kneron KP SDK (requires hardware drivers)
_install_mock("kp")
# NumPy (may not be installed)
try:
import numpy # noqa: F401
except ImportError:
_install_mock("numpy")
# OpenCV (may not be installed)
_install_mock("cv2")
# NodeGraphQt (depends on PyQt5)
_install_mock("NodeGraphQt")
_install_mock("NodeGraphQt.constants")
_install_mock("NodeGraphQt.base")
_install_mock("NodeGraphQt.base.node")
# ---------------------------------------------------------------------------
# PyQt5 stubs: allow QWidget/QDialog subclasses to be inherited normally and
# instantiated multiple times. Lightweight Python classes are used instead of
# MagicMock, avoiding side_effect exhaustion when inheriting from MagicMock.
# ---------------------------------------------------------------------------
class _StubQObject:
"""Base stub for all Qt objects."""
def __init__(self, *args, **kwargs):
pass
class _StubQWidget(_StubQObject):
"""QWidget stub: subclassable and instantiable multiple times, with no-op implementations of common QWidget methods."""
def setLayout(self, layout):
pass
def setParent(self, parent):
pass
def show(self):
pass
def hide(self):
pass
def setVisible(self, visible: bool):
pass
def setEnabled(self, enabled: bool):
pass
def isEnabled(self) -> bool:
return True
def setObjectName(self, name: str):
pass
def setStyleSheet(self, style: str):
pass
def setMinimumWidth(self, w: int):
pass
def setMinimumHeight(self, h: int):
pass
def setMaximumWidth(self, w: int):
pass
def setMaximumHeight(self, h: int):
pass
def resize(self, *args):
pass
def setWindowTitle(self, title: str):
pass
def setSizePolicy(self, *args):
pass
def update(self):
pass
def repaint(self):
pass
def close(self):
pass
def font(self):
return MagicMock()
def setFont(self, font):
pass
class _StubQDialog(_StubQWidget):
"""QDialog Stub。"""
Accepted = 1
Rejected = 0
def exec_(self):
return self.Accepted
def accept(self):
pass
def reject(self):
pass
class _StubQLabel(_StubQWidget):
"""QLabel Stub追蹤 setText 呼叫,可在測試中驗證顯示文字。"""
def __init__(self, text: str = "", parent=None):
super().__init__(parent)
self._text = text
self.setText = MagicMock(side_effect=self._set_text)
def _set_text(self, text: str) -> None:
self._text = text
def text(self) -> str:
return self._text
class _StubLayout(_StubQObject):
"""QLayout Stub忽略所有 add* 呼叫。"""
def addWidget(self, *args, **kwargs):
pass
def addLayout(self, *args, **kwargs):
pass
def addStretch(self, *args, **kwargs):
pass
def setSpacing(self, *args, **kwargs):
pass
def setContentsMargins(self, *args, **kwargs):
pass
class _StubQVBoxLayout(_StubLayout):
pass
class _StubQHBoxLayout(_StubLayout):
pass
class _StubQProgressBar(_StubQWidget):
def __init__(self, parent=None):
super().__init__(parent)
self._value = 0
self._maximum = 100
self._minimum = 0
self.setValue = MagicMock(side_effect=self._set_value)
def _set_value(self, v: int) -> None:
self._value = v
def value(self) -> int:
return self._value
def setMaximum(self, v: int) -> None:
self._maximum = v
def setMinimum(self, v: int) -> None:
self._minimum = v
class _StubQTableWidget(_StubQWidget):
def __init__(self, *args, **kwargs):
super().__init__()
self.setItem = MagicMock()
self.setHorizontalHeaderLabels = MagicMock()
class _StubQPushButton(_StubQWidget):
def __init__(self, text: str = "", parent=None):
super().__init__(parent)
self._text = text
self._enabled = True
self.clicked = MagicMock()
self.setEnabled = MagicMock(side_effect=self._set_enabled)
def _set_enabled(self, enabled: bool) -> None:
self._enabled = enabled
def isEnabled(self) -> bool:
return self._enabled
def _make_pyqt_signal(*args, **kwargs):
"""pyqtSignal Stub回傳可 connect/emit 的 MagicMock。"""
signal = MagicMock()
signal.connect = MagicMock()
signal.emit = MagicMock()
return signal
def _make_qthread():
"""QThread Stub。"""
class _StubQThread(_StubQObject):
started = MagicMock()
finished = MagicMock()
def start(self):
pass
def isRunning(self):
return False
def wait(self):
pass
def run(self):
pass
def deleteLater(self):
pass
return _StubQThread
# Build the PyQt5.QtWidgets mock module (MagicMock underneath, with key classes overridden)
_qtwidgets_mock = MagicMock()
_qtwidgets_mock.QWidget = _StubQWidget
_qtwidgets_mock.QDialog = _StubQDialog
_qtwidgets_mock.QLabel = _StubQLabel
_qtwidgets_mock.QVBoxLayout = _StubQVBoxLayout
_qtwidgets_mock.QHBoxLayout = _StubQHBoxLayout
_qtwidgets_mock.QProgressBar = _StubQProgressBar
_qtwidgets_mock.QTableWidget = _StubQTableWidget
_qtwidgets_mock.QPushButton = _StubQPushButton
_qtwidgets_mock.QSizePolicy = MagicMock()
_qtwidgets_mock.QTableWidgetItem = MagicMock()
_qtwidgets_mock.QHeaderView = MagicMock()
_qtwidgets_mock.QMessageBox = MagicMock()
_qtwidgets_mock.QApplication = MagicMock()
_qtwidgets_mock.QGroupBox = _StubQWidget
_qtwidgets_mock.QFrame = _StubQWidget
_qtwidgets_mock.QScrollArea = _StubQWidget
_qtwidgets_mock.QSpinBox = _StubQWidget
_qtwidgets_mock.QComboBox = _StubQWidget
_qtwidgets_mock.QCheckBox = _StubQWidget
# Build the PyQt5.QtCore mock module
_qtcore_mock = MagicMock()
_qtcore_mock.pyqtSignal = _make_pyqt_signal
_qtcore_mock.QThread = _make_qthread()
_qtcore_mock.Qt = MagicMock()
_qtcore_mock.QTimer = MagicMock()
_qtcore_mock.QObject = _StubQObject
# Build the PyQt5.QtGui mock module
_qtgui_mock = MagicMock()
# Build the top-level PyQt5 mock
_pyqt5_mock = MagicMock()
_pyqt5_mock.QtWidgets = _qtwidgets_mock
_pyqt5_mock.QtCore = _qtcore_mock
_pyqt5_mock.QtGui = _qtgui_mock
sys.modules["PyQt5"] = _pyqt5_mock
sys.modules["PyQt5.QtWidgets"] = _qtwidgets_mock
sys.modules["PyQt5.QtCore"] = _qtcore_mock
sys.modules["PyQt5.QtGui"] = _qtgui_mock
sys.modules["PyQt5.QtChart"] = MagicMock()
# pyqtgraph (optional)
_install_mock("pyqtgraph")

@@ -0,0 +1,134 @@
"""
BenchmarkDialog 的單元測試
測試策略
- PyQt5 CI 環境中不可用透過 conftest.py Stub 注入繞過 import
- 測試驗證 BenchmarkDialog 的行為邏輯
- 對話框可正常建立
- pipeline_config 為空時開始按鈕被禁用
- show_result 正確顯示加速倍數文字
- update_progress 更新進度條值
"""
import pytest
from unittest.mock import MagicMock
# ---------------------------------------------------------------------------
# Test: BenchmarkDialog can be created
# ---------------------------------------------------------------------------
class TestBenchmarkDialogInit:
def should_be_importable(self):
"""The BenchmarkDialog module should be importable (even with PyQt5 stubbed)."""
from ui.dialogs.benchmark_dialog import BenchmarkDialog
assert BenchmarkDialog is not None
def should_instantiate_with_valid_config(self):
"""Given a non-empty pipeline_config, BenchmarkDialog should be created normally."""
from ui.dialogs.benchmark_dialog import BenchmarkDialog
stage_config = MagicMock()
dialog = BenchmarkDialog(parent=None, pipeline_config=[stage_config])
assert dialog is not None
def should_instantiate_with_empty_config(self):
"""With an empty pipeline_config, BenchmarkDialog should still be created (no exception)."""
from ui.dialogs.benchmark_dialog import BenchmarkDialog
dialog = BenchmarkDialog(parent=None, pipeline_config=[])
assert dialog is not None
# ---------------------------------------------------------------------------
# Test: start button is disabled when pipeline_config is empty
# ---------------------------------------------------------------------------
class TestStartButtonDisabledWhenEmptyConfig:
def should_disable_start_button_when_pipeline_config_is_empty(self):
"""When pipeline_config is empty, start_button should be disabled."""
from ui.dialogs.benchmark_dialog import BenchmarkDialog
dialog = BenchmarkDialog(parent=None, pipeline_config=[])
assert dialog.start_button.isEnabled() is False
def should_enable_start_button_when_pipeline_config_has_stages(self):
"""When pipeline_config has stages, start_button should be enabled."""
from ui.dialogs.benchmark_dialog import BenchmarkDialog
stage_config = MagicMock()
dialog = BenchmarkDialog(parent=None, pipeline_config=[stage_config])
assert dialog.start_button.isEnabled() is True
def should_show_info_label_when_pipeline_config_is_empty(self):
"""When pipeline_config is empty, an informational label should be shown."""
from ui.dialogs.benchmark_dialog import BenchmarkDialog
dialog = BenchmarkDialog(parent=None, pipeline_config=[])
assert hasattr(dialog, "info_label")
# ---------------------------------------------------------------------------
# Test: show_result displays the speedup factor
# ---------------------------------------------------------------------------
class TestShowResult:
def should_display_speedup_text_with_x_suffix(self):
"""After show_result, the speedup_label text should contain the factor and an 'x'."""
from ui.dialogs.benchmark_dialog import BenchmarkDialog
stage = MagicMock()
dialog = BenchmarkDialog(parent=None, pipeline_config=[stage])
seq_result = MagicMock()
par_result = MagicMock()
dialog.show_result(seq_result, par_result, speedup=3.2)
call_arg = dialog.speedup_label.setText.call_args[0][0]
assert "3.2" in call_arg
assert "x" in call_arg.lower() or "X" in call_arg
def should_display_faster_in_speedup_text(self):
"""After show_result, the speedup_label text should contain 'FASTER' or 'faster'."""
from ui.dialogs.benchmark_dialog import BenchmarkDialog
stage = MagicMock()
dialog = BenchmarkDialog(parent=None, pipeline_config=[stage])
seq_result = MagicMock()
par_result = MagicMock()
dialog.show_result(seq_result, par_result, speedup=2.5)
call_arg = dialog.speedup_label.setText.call_args[0][0]
assert "FASTER" in call_arg or "faster" in call_arg
def should_store_seq_result(self):
"""After show_result, seq_result should be stored on the dialog."""
from ui.dialogs.benchmark_dialog import BenchmarkDialog
stage = MagicMock()
dialog = BenchmarkDialog(parent=None, pipeline_config=[stage])
seq_result = MagicMock()
par_result = MagicMock()
dialog.show_result(seq_result, par_result, speedup=1.8)
assert dialog.seq_result is seq_result
def should_store_par_result(self):
"""After show_result, par_result should be stored on the dialog."""
from ui.dialogs.benchmark_dialog import BenchmarkDialog
stage = MagicMock()
dialog = BenchmarkDialog(parent=None, pipeline_config=[stage])
seq_result = MagicMock()
par_result = MagicMock()
dialog.show_result(seq_result, par_result, speedup=1.8)
assert dialog.par_result is par_result
# ---------------------------------------------------------------------------
# Test: update_progress updates the progress bar
# ---------------------------------------------------------------------------
class TestUpdateProgress:
def should_update_progress_bar_value(self):
"""update_progress should set the progress bar to the given value."""
from ui.dialogs.benchmark_dialog import BenchmarkDialog
stage = MagicMock()
dialog = BenchmarkDialog(parent=None, pipeline_config=[stage])
dialog.progress_bar.setValue.reset_mock()
dialog.update_progress("warmup", 42)
dialog.progress_bar.setValue.assert_called_once_with(42)
def should_store_current_phase(self):
"""update_progress should store the current phase name."""
from ui.dialogs.benchmark_dialog import BenchmarkDialog
stage = MagicMock()
dialog = BenchmarkDialog(parent=None, pipeline_config=[stage])
dialog.update_progress("sequential", 70)
assert dialog.current_phase == "sequential"

@@ -0,0 +1,282 @@
"""
PerformanceBenchmarker 的單元測試
測試策略
- BenchmarkConfig / BenchmarkResult 資料結構驗證
- calculate_speedup() 純計算邏輯
- run_sequential_benchmark() / run_parallel_benchmark() 透過注入的
inference_runner callable 進行 Mock不需要實際硬體
- run_full_benchmark() 整合流程
"""
import time
import pytest
from unittest.mock import MagicMock, patch
from core.performance.benchmarker import (
BenchmarkConfig,
BenchmarkResult,
PerformanceBenchmarker,
)
# ---------------------------------------------------------------------------
# Helpers: build test data structures
# ---------------------------------------------------------------------------
def make_config(**kwargs) -> BenchmarkConfig:
"""Build a BenchmarkConfig for tests, with sensible defaults."""
defaults = dict(
pipeline_config=[],
test_duration_seconds=1.0,
warmup_frames=2,
test_input_source="test_video.mp4",
)
defaults.update(kwargs)
return BenchmarkConfig(**defaults)
def make_result(mode: str = "sequential", fps: float = 30.0) -> BenchmarkResult:
"""Build a BenchmarkResult for tests."""
avg_latency_ms = (1000.0 / fps) if fps > 0 else 0.0
return BenchmarkResult(
mode=mode,
fps=fps,
avg_latency_ms=avg_latency_ms,
p95_latency_ms=avg_latency_ms * 1.5,
total_frames=int(fps * 30),
timestamp=time.time(),
device_config={"KL520": 1},
)
# ---------------------------------------------------------------------------
# Test: the BenchmarkConfig data structure
# ---------------------------------------------------------------------------
class TestBenchmarkConfig:
def should_have_default_duration_30_seconds(self):
"""test_duration_seconds should default to 30.0."""
config = BenchmarkConfig(
pipeline_config=[],
test_input_source="video.mp4",
)
assert config.test_duration_seconds == 30.0
def should_have_default_warmup_50_frames(self):
"""warmup_frames should default to 50."""
config = BenchmarkConfig(
pipeline_config=[],
test_input_source="video.mp4",
)
assert config.warmup_frames == 50
def should_allow_custom_duration(self):
"""test_duration_seconds should be customizable."""
config = BenchmarkConfig(
pipeline_config=[],
test_input_source="video.mp4",
test_duration_seconds=10.0,
)
assert config.test_duration_seconds == 10.0
# ---------------------------------------------------------------------------
# Test: the BenchmarkResult data structure
# ---------------------------------------------------------------------------
class TestBenchmarkResult:
def should_store_all_required_fields(self):
"""BenchmarkResult should store every field the spec requires."""
ts = time.time()
result = BenchmarkResult(
mode="parallel",
fps=45.2,
avg_latency_ms=22.1,
p95_latency_ms=35.0,
total_frames=1356,
timestamp=ts,
device_config={"KL720": 2},
)
assert result.mode == "parallel"
assert result.fps == pytest.approx(45.2)
assert result.avg_latency_ms == pytest.approx(22.1)
assert result.p95_latency_ms == pytest.approx(35.0)
assert result.total_frames == 1356
assert result.timestamp == pytest.approx(ts)
assert result.device_config == {"KL720": 2}
def should_accept_sequential_mode(self):
"""The mode field should accept 'sequential'."""
result = make_result(mode="sequential")
assert result.mode == "sequential"
def should_accept_parallel_mode(self):
"""The mode field should accept 'parallel'."""
result = make_result(mode="parallel")
assert result.mode == "parallel"
# ---------------------------------------------------------------------------
# Test: calculate_speedup (pure computation, no external dependencies)
# ---------------------------------------------------------------------------
class TestCalculateSpeedup:
def should_return_ratio_of_parallel_to_sequential_fps(self):
"""calculate_speedup should return par.fps / seq.fps."""
benchmarker = PerformanceBenchmarker()
seq = make_result(mode="sequential", fps=20.0)
par = make_result(mode="parallel", fps=60.0)
speedup = benchmarker.calculate_speedup(seq, par)
assert speedup == pytest.approx(3.0)
def should_return_one_when_same_fps(self):
"""Identical FPS should give a speedup of 1.0."""
benchmarker = PerformanceBenchmarker()
result = make_result(fps=30.0)
speedup = benchmarker.calculate_speedup(result, result)
assert speedup == pytest.approx(1.0)
def should_raise_when_sequential_fps_is_zero(self):
"""A seq.fps of 0 should raise ValueError (avoiding division by zero)."""
benchmarker = PerformanceBenchmarker()
seq = make_result(fps=0.0)
par = make_result(fps=30.0)
with pytest.raises(ValueError):
benchmarker.calculate_speedup(seq, par)
# ---------------------------------------------------------------------------
# 測試run_sequential_benchmarkMock inference_runner
# ---------------------------------------------------------------------------
class TestRunSequentialBenchmark:
def should_return_benchmark_result_with_sequential_mode(self):
"""run_sequential_benchmark() 應回傳 mode='sequential' 的 BenchmarkResult。"""
benchmarker = PerformanceBenchmarker()
config = make_config(warmup_frames=1, test_duration_seconds=0.1)
# Mock inference_runner每次呼叫模擬 10ms 推論
def fake_runner(frame_data):
time.sleep(0.01)
return {"result": "ok"}
result = benchmarker.run_sequential_benchmark(config, inference_runner=fake_runner)
assert isinstance(result, BenchmarkResult)
assert result.mode == "sequential"
def should_report_positive_fps(self):
"""FPS 應大於 0。"""
benchmarker = PerformanceBenchmarker()
config = make_config(warmup_frames=1, test_duration_seconds=0.1)
def fake_runner(frame_data):
time.sleep(0.01)
return {}
result = benchmarker.run_sequential_benchmark(config, inference_runner=fake_runner)
assert result.fps > 0
def should_report_positive_latency(self):
"""avg_latency_ms 和 p95_latency_ms 應大於 0。"""
benchmarker = PerformanceBenchmarker()
config = make_config(warmup_frames=1, test_duration_seconds=0.1)
def fake_runner(frame_data):
time.sleep(0.01)
return {}
result = benchmarker.run_sequential_benchmark(config, inference_runner=fake_runner)
assert result.avg_latency_ms > 0
assert result.p95_latency_ms > 0
def should_count_frames_excluding_warmup(self):
"""total_frames 不應包含暖機幀數。"""
benchmarker = PerformanceBenchmarker()
call_times = []
def fake_runner(frame_data):
call_times.append(time.time())
time.sleep(0.005)
return {}
config = make_config(warmup_frames=3, test_duration_seconds=0.1)
result = benchmarker.run_sequential_benchmark(config, inference_runner=fake_runner)
# warmup frames are not counted in total_frames
assert result.total_frames < len(call_times)
assert result.total_frames > 0
def should_use_device_config_from_benchmarker(self):
"""BenchmarkResult.device_config 應由 PerformanceBenchmarker 填寫。"""
benchmarker = PerformanceBenchmarker(device_config={"KL520": 1})
config = make_config(warmup_frames=1, test_duration_seconds=0.05)
def fake_runner(frame_data):
return {}
result = benchmarker.run_sequential_benchmark(config, inference_runner=fake_runner)
assert result.device_config == {"KL520": 1}
# ---------------------------------------------------------------------------
# 測試run_parallel_benchmarkMock inference_runner
# ---------------------------------------------------------------------------
class TestRunParallelBenchmark:
def should_return_benchmark_result_with_parallel_mode(self):
"""run_parallel_benchmark() 應回傳 mode='parallel' 的 BenchmarkResult。"""
benchmarker = PerformanceBenchmarker()
config = make_config(warmup_frames=1, test_duration_seconds=0.1)
def fake_runner(frame_data):
time.sleep(0.01)
return {}
result = benchmarker.run_parallel_benchmark(config, inference_runner=fake_runner)
assert isinstance(result, BenchmarkResult)
assert result.mode == "parallel"
# ---------------------------------------------------------------------------
# 測試run_full_benchmark
# ---------------------------------------------------------------------------
class TestRunFullBenchmark:
def should_return_tuple_of_seq_par_speedup(self):
"""run_full_benchmark() 應回傳 (BenchmarkResult, BenchmarkResult, float)。"""
benchmarker = PerformanceBenchmarker()
config = make_config(warmup_frames=1, test_duration_seconds=0.05)
def fast_runner(frame_data):
time.sleep(0.005)
return {}
seq_result, par_result, speedup = benchmarker.run_full_benchmark(
config, inference_runner=fast_runner
)
assert isinstance(seq_result, BenchmarkResult)
assert isinstance(par_result, BenchmarkResult)
assert isinstance(speedup, float)
assert seq_result.mode == "sequential"
assert par_result.mode == "parallel"
def should_calculate_speedup_consistently(self):
"""speedup 應與 calculate_speedup(seq, par) 的結果一致。"""
benchmarker = PerformanceBenchmarker()
config = make_config(warmup_frames=1, test_duration_seconds=0.05)
def fake_runner(frame_data):
time.sleep(0.005)
return {}
seq_result, par_result, speedup = benchmarker.run_full_benchmark(
config, inference_runner=fake_runner
)
expected_speedup = benchmarker.calculate_speedup(seq_result, par_result)
assert speedup == pytest.approx(expected_speedup)
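
The speedup tests above pin down a small pure core. A minimal sketch of what would satisfy them, inferred only from these assertions (the function name mirrors the tested API, but the body is illustrative, not the shipped implementation):

# Illustrative sketch only, inferred from the tests above, not the shipped code.
def calculate_speedup(seq_fps: float, par_fps: float) -> float:
    """Return the parallel/sequential FPS ratio; raise on a zero baseline."""
    if seq_fps == 0.0:
        raise ValueError("sequential FPS is zero; speedup is undefined")
    return par_fps / seq_fps

assert calculate_speedup(20.0, 60.0) == 3.0  # mirrors the 3.0x test case above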

View File

@ -0,0 +1,43 @@
"""
tests/unit/test_bottleneck.py
Unit tests for the BottleneckAlert dataclass.
TDD: Red phase tests written before implementation.
"""
import pytest
from core.device.bottleneck import BottleneckAlert
class TestBottleneckAlert:
def test_fields_accessible(self):
alert = BottleneckAlert(
stage_id="stage-1",
queue_fill_rate=0.85,
suggested_action="Add more Dongles to this stage",
severity="warning",
)
assert alert.stage_id == "stage-1"
assert alert.queue_fill_rate == 0.85
assert alert.suggested_action == "Add more Dongles to this stage"
assert alert.severity == "warning"
def test_severity_critical(self):
alert = BottleneckAlert(
stage_id="stage-2",
queue_fill_rate=0.95,
suggested_action="Urgent: add Dongles",
severity="critical",
)
assert alert.severity == "critical"
def test_dataclass_equality(self):
a = BottleneckAlert("s1", 0.9, "action", "warning")
b = BottleneckAlert("s1", 0.9, "action", "warning")
assert a == b
def test_dataclass_inequality(self):
a = BottleneckAlert("s1", 0.9, "action", "warning")
b = BottleneckAlert("s1", 0.5, "action", "warning")
assert a != b
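
The fixtures above tag a 0.85 fill rate as "warning" and 0.95 as "critical". A hypothetical classifier consistent with those fixtures; classify_severity and its exact thresholds are assumptions, not part of the tested API:

# Hypothetical helper; thresholds are assumptions chosen to match the fixtures above.
def classify_severity(queue_fill_rate: float) -> str:
    if queue_fill_rate >= 0.95:
        return "critical"
    if queue_fill_rate >= 0.8:
        return "warning"
    return "ok"

assert classify_severity(0.85) == "warning" and classify_severity(0.95) == "critical"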

View File

@ -0,0 +1,106 @@
"""
tests/unit/test_device_management_panel.py
Unit tests for DeviceManagementPanel QWidget.
TDD: Red phase tests written before implementation.
Uses conftest.py Stubs for PyQt5 so no display hardware is needed.
"""
from unittest.mock import MagicMock, patch
import pytest
from core.device.device_manager import DeviceInfo, DeviceManager
from ui.components.device_management_panel import DeviceManagementPanel
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _make_device_manager(devices=None):
"""Return a DeviceManager-like mock with controllable scan_devices()."""
mgr = MagicMock(spec=DeviceManager)
if devices is None:
devices = [
DeviceInfo(
device_id="usb-1",
series="KL520",
product_id=0x100,
status="online",
gops=2,
assigned_stage=None,
current_fps=15.0,
utilization_pct=50.0,
)
]
mgr.scan_devices.return_value = devices
mgr.get_device_statistics.return_value = {d.device_id: d for d in devices}
mgr.get_load_balance_recommendation.return_value = {}
return mgr
# ---------------------------------------------------------------------------
# Panel instantiation
# ---------------------------------------------------------------------------
class TestDeviceManagementPanelInit:
def test_panel_creates_without_error(self):
mgr = _make_device_manager()
panel = DeviceManagementPanel(device_manager=mgr)
assert panel is not None
def test_panel_has_auto_balance_button(self):
mgr = _make_device_manager()
panel = DeviceManagementPanel(device_manager=mgr)
# auto_balance_button must exist
assert hasattr(panel, "auto_balance_button")
def test_auto_balance_button_text(self):
mgr = _make_device_manager()
panel = DeviceManagementPanel(device_manager=mgr)
assert panel.auto_balance_button._text == "Auto Balance"
# ---------------------------------------------------------------------------
# refresh()
# ---------------------------------------------------------------------------
class TestDeviceManagementPanelRefresh:
def test_refresh_calls_scan_devices(self):
mgr = _make_device_manager()
panel = DeviceManagementPanel(device_manager=mgr)
mgr.scan_devices.reset_mock()
panel.refresh()
mgr.scan_devices.assert_called_once()
def test_refresh_updates_known_devices(self):
mgr = _make_device_manager()
panel = DeviceManagementPanel(device_manager=mgr)
panel.refresh()
# After refresh, panel should have device data accessible
assert len(panel._devices) == 1
assert panel._devices[0].device_id == "usb-1"
def test_refresh_with_no_devices_sets_empty_list(self):
mgr = _make_device_manager(devices=[])
panel = DeviceManagementPanel(device_manager=mgr)
panel.refresh()
assert panel._devices == []
# ---------------------------------------------------------------------------
# set_auto_refresh()
# ---------------------------------------------------------------------------
class TestSetAutoRefresh:
def test_set_auto_refresh_stores_interval(self):
mgr = _make_device_manager()
panel = DeviceManagementPanel(device_manager=mgr)
panel.set_auto_refresh(interval_ms=3000)
assert panel._auto_refresh_interval_ms == 3000
def test_set_auto_refresh_default_interval(self):
mgr = _make_device_manager()
panel = DeviceManagementPanel(device_manager=mgr)
panel.set_auto_refresh()
assert panel._auto_refresh_interval_ms == 2000
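
The auto-refresh tests only pin down the stored interval and its 2000 ms default. A minimal sketch of that behavior alone; a real panel would presumably also drive refresh() from a timer, which these tests deliberately do not cover:

# Minimal sketch of only the stored-interval behavior these tests require.
class _AutoRefreshSketch:
    DEFAULT_INTERVAL_MS = 2000  # matches the tested default

    def set_auto_refresh(self, interval_ms: int = DEFAULT_INTERVAL_MS) -> None:
        # A real panel would also (re)start a timer bound to refresh() here.
        self._auto_refresh_interval_ms = interval_ms

panel = _AutoRefreshSketch()
panel.set_auto_refresh()
assert panel._auto_refresh_interval_ms == 2000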

View File

@ -0,0 +1,291 @@
"""
tests/unit/test_device_manager.py
Unit tests for DeviceManager, DeviceInfo, DeviceHealth.
TDD: Red phase tests written before implementation.
"""
from unittest.mock import MagicMock
import pytest
from core.device.device_manager import DeviceInfo, DeviceHealth, DeviceManager
# ---------------------------------------------------------------------------
# Fixtures
# ---------------------------------------------------------------------------
def _make_mock_kp_api(devices):
"""Build a minimal kp API mock whose scan_devices() returns a descriptor list."""
descriptor_list = MagicMock()
descriptor_list.device_descriptor_number = len(devices)
mock_descs = []
for d in devices:
desc = MagicMock()
desc.usb_port_id = d["port_id"]
desc.product_id = d["product_id"]
desc.kn_number = d.get("kn_number", 0)
mock_descs.append(desc)
descriptor_list.device_descriptor_list = mock_descs
kp_api = MagicMock()
kp_api.core.scan_devices.return_value = descriptor_list
return kp_api
@pytest.fixture
def two_device_kp():
"""Mock kp API returning one KL520 and one KL720."""
return _make_mock_kp_api([
{"port_id": 1, "product_id": 0x100}, # KL520
{"port_id": 2, "product_id": 0x720}, # KL720
])
@pytest.fixture
def empty_kp():
"""Mock kp API returning no devices."""
descriptor_list = MagicMock()
descriptor_list.device_descriptor_number = 0
descriptor_list.device_descriptor_list = []
kp_api = MagicMock()
kp_api.core.scan_devices.return_value = descriptor_list
return kp_api
# ---------------------------------------------------------------------------
# DeviceInfo dataclass
# ---------------------------------------------------------------------------
class TestDeviceInfo:
def test_fields_accessible(self):
info = DeviceInfo(
device_id="usb-1",
series="KL520",
product_id=0x100,
status="online",
gops=2,
assigned_stage=None,
current_fps=0.0,
utilization_pct=0.0,
)
assert info.device_id == "usb-1"
assert info.series == "KL520"
assert info.product_id == 0x100
assert info.status == "online"
assert info.gops == 2
assert info.assigned_stage is None
assert info.current_fps == 0.0
assert info.utilization_pct == 0.0
# ---------------------------------------------------------------------------
# DeviceHealth dataclass
# ---------------------------------------------------------------------------
class TestDeviceHealth:
def test_fields_accessible(self):
health = DeviceHealth(
device_id="usb-1",
temperature_celsius=None,
error_count=0,
last_error=None,
uptime_seconds=120.0,
)
assert health.device_id == "usb-1"
assert health.temperature_celsius is None
assert health.error_count == 0
assert health.last_error is None
assert health.uptime_seconds == 120.0
# ---------------------------------------------------------------------------
# DeviceManager.scan_devices
# ---------------------------------------------------------------------------
class TestScanDevices:
def test_returns_list_of_device_info(self, two_device_kp):
mgr = DeviceManager(kp_api=two_device_kp)
devices = mgr.scan_devices()
assert isinstance(devices, list)
assert len(devices) == 2
assert all(isinstance(d, DeviceInfo) for d in devices)
def test_kl520_properties(self, two_device_kp):
mgr = DeviceManager(kp_api=two_device_kp)
devices = mgr.scan_devices()
kl520 = next(d for d in devices if d.series == "KL520")
assert kl520.product_id == 0x100
assert kl520.gops == 2
assert kl520.status == "online"
def test_kl720_properties(self, two_device_kp):
mgr = DeviceManager(kp_api=two_device_kp)
devices = mgr.scan_devices()
kl720 = next(d for d in devices if d.series == "KL720")
assert kl720.product_id == 0x720
assert kl720.gops == 28
assert kl720.status == "online"
def test_empty_returns_empty_list(self, empty_kp):
mgr = DeviceManager(kp_api=empty_kp)
devices = mgr.scan_devices()
assert devices == []
def test_device_id_uses_port(self, two_device_kp):
mgr = DeviceManager(kp_api=two_device_kp)
devices = mgr.scan_devices()
ids = {d.device_id for d in devices}
assert "usb-1" in ids
assert "usb-2" in ids
# ---------------------------------------------------------------------------
# DeviceManager.assign_device / unassign_device
# ---------------------------------------------------------------------------
class TestAssignDevice:
def test_assign_online_device_returns_true(self, two_device_kp):
mgr = DeviceManager(kp_api=two_device_kp)
mgr.scan_devices()
result = mgr.assign_device("usb-1", "stage-A")
assert result is True
def test_assigned_device_shows_stage(self, two_device_kp):
mgr = DeviceManager(kp_api=two_device_kp)
mgr.scan_devices()
mgr.assign_device("usb-1", "stage-A")
devices = mgr.get_device_statistics()
assert devices["usb-1"].assigned_stage == "stage-A"
def test_assign_already_assigned_device_returns_false(self, two_device_kp):
mgr = DeviceManager(kp_api=two_device_kp)
mgr.scan_devices()
mgr.assign_device("usb-1", "stage-A")
result = mgr.assign_device("usb-1", "stage-B")
assert result is False
def test_assign_unknown_device_returns_false(self, two_device_kp):
mgr = DeviceManager(kp_api=two_device_kp)
mgr.scan_devices()
result = mgr.assign_device("usb-99", "stage-A")
assert result is False
def test_unassign_frees_device(self, two_device_kp):
mgr = DeviceManager(kp_api=two_device_kp)
mgr.scan_devices()
mgr.assign_device("usb-1", "stage-A")
result = mgr.unassign_device("usb-1")
assert result is True
devices = mgr.get_device_statistics()
assert devices["usb-1"].assigned_stage is None
def test_unassign_unknown_device_returns_false(self, two_device_kp):
mgr = DeviceManager(kp_api=two_device_kp)
mgr.scan_devices()
result = mgr.unassign_device("usb-99")
assert result is False
def test_reassign_after_unassign_succeeds(self, two_device_kp):
mgr = DeviceManager(kp_api=two_device_kp)
mgr.scan_devices()
mgr.assign_device("usb-1", "stage-A")
mgr.unassign_device("usb-1")
result = mgr.assign_device("usb-1", "stage-B")
assert result is True
def test_should_reject_assignment_for_offline_device(self):
"""assign_device returns False when the device status is offline."""
kp_api = _make_mock_kp_api([{"port_id": 5, "product_id": 0x100}])
mgr = DeviceManager(kp_api=kp_api)
mgr.scan_devices()
mgr._devices["usb-5"].status = "offline"
result = mgr.assign_device("usb-5", "stage-A")
assert result is False
def test_should_allow_reassignment_to_same_stage(self, two_device_kp):
"""Assigning a device to the same stage twice is idempotent and returns True."""
mgr = DeviceManager(kp_api=two_device_kp)
mgr.scan_devices()
mgr.assign_device("usb-1", "stage-A")
result = mgr.assign_device("usb-1", "stage-A")
assert result is True
def test_should_reject_reassignment_to_different_stage(self, two_device_kp):
"""Assigning a device already assigned to a different stage returns False."""
mgr = DeviceManager(kp_api=two_device_kp)
mgr.scan_devices()
mgr.assign_device("usb-1", "stage-A")
result = mgr.assign_device("usb-1", "stage-B")
assert result is False
# ---------------------------------------------------------------------------
# DeviceManager.get_load_balance_recommendation
# ---------------------------------------------------------------------------
class TestLoadBalanceRecommendation:
def test_returns_dict_mapping_stage_to_device(self, two_device_kp):
mgr = DeviceManager(kp_api=two_device_kp)
mgr.scan_devices()
rec = mgr.get_load_balance_recommendation(["stage-A", "stage-B"])
assert isinstance(rec, dict)
assert "stage-A" in rec
assert "stage-B" in rec
def test_high_gops_assigned_to_first_stage(self, two_device_kp):
"""KL720 (28 GOPS) should be recommended for the first stage."""
mgr = DeviceManager(kp_api=two_device_kp)
mgr.scan_devices()
rec = mgr.get_load_balance_recommendation(["stage-A", "stage-B"])
# The device recommended for stage-A should be the higher-gops one
stats = mgr.get_device_statistics()
first_device_id = rec["stage-A"]
assert stats[first_device_id].gops == 28 # KL720
def test_recommendation_with_more_stages_than_devices(self, two_device_kp):
"""Extra stages beyond available devices map to empty string."""
mgr = DeviceManager(kp_api=two_device_kp)
mgr.scan_devices()
rec = mgr.get_load_balance_recommendation(["s1", "s2", "s3"])
assert rec["s3"] == ""
def test_recommendation_with_no_devices(self, empty_kp):
mgr = DeviceManager(kp_api=empty_kp)
mgr.scan_devices()
rec = mgr.get_load_balance_recommendation(["stage-A"])
assert rec["stage-A"] == ""
# ---------------------------------------------------------------------------
# DeviceManager.get_device_health
# ---------------------------------------------------------------------------
class TestGetDeviceHealth:
def test_returns_device_health(self, two_device_kp):
mgr = DeviceManager(kp_api=two_device_kp)
mgr.scan_devices()
health = mgr.get_device_health("usb-1")
assert isinstance(health, DeviceHealth)
assert health.device_id == "usb-1"
assert health.temperature_celsius is None # SDK does not support it
assert health.error_count == 0
# ---------------------------------------------------------------------------
# DeviceManager.get_device_statistics
# ---------------------------------------------------------------------------
class TestGetDeviceStatistics:
def test_returns_all_known_devices(self, two_device_kp):
mgr = DeviceManager(kp_api=two_device_kp)
mgr.scan_devices()
stats = mgr.get_device_statistics()
assert isinstance(stats, dict)
assert "usb-1" in stats
assert "usb-2" in stats
def test_values_are_device_info(self, two_device_kp):
mgr = DeviceManager(kp_api=two_device_kp)
mgr.scan_devices()
stats = mgr.get_device_statistics()
assert all(isinstance(v, DeviceInfo) for v in stats.values())
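
The scan tests fix the translation from USB descriptors to device records: port-derived ids, 0x100 mapping to KL520 at 2 GOPS, and 0x720 to KL720 at 28 GOPS. A condensed sketch of that translation; the table below is a subset covering only the product IDs these tests exercise, and the dict return shape is a stand-in for DeviceInfo:

# Sketch of the descriptor-to-DeviceInfo translation implied by the tests above.
_SERIES_TABLE = {0x100: ("KL520", 2), 0x720: ("KL720", 28)}  # subset for illustration

def describe_device(usb_port_id: int, product_id: int) -> dict:
    series, gops = _SERIES_TABLE.get(product_id, ("unknown", 0))
    return {
        "device_id": f"usb-{usb_port_id}",  # tests require port-derived ids
        "series": series,
        "gops": gops,
        "status": "online",
    }

assert describe_device(2, 0x720) == {
    "device_id": "usb-2", "series": "KL720", "gops": 28, "status": "online"}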

View File

@ -0,0 +1,179 @@
"""
tests/unit/test_export_report_dialog.py — unit tests for ExportReportDialog
Runs without a PyQt5 environment by using the stubs from conftest.py.
"""
from unittest.mock import MagicMock, patch
import pytest
from core.performance.benchmarker import BenchmarkResult
from core.performance.report_exporter import DeviceSummary, ReportData
from ui.dialogs.export_report_dialog import ExportReportDialog
# ---------------------------------------------------------------------------
# Fixtures
# ---------------------------------------------------------------------------
def _make_benchmark_result(mode: str = "sequential", fps: float = 14.2) -> BenchmarkResult:
return BenchmarkResult(
mode=mode,
fps=fps,
avg_latency_ms=70.4,
p95_latency_ms=95.0,
total_frames=426,
timestamp=1743856222.0,
device_config={"KL720": 1},
id=f"benchmark_20260405_143022_{mode}",
)
def _make_dialog(
benchmarker=None,
history=None,
device_manager=None,
dashboard=None,
) -> ExportReportDialog:
"""建立 ExportReportDialog所有依賴預設為 MagicMock。"""
if benchmarker is None:
benchmarker = MagicMock()
benchmarker.history = []
if history is None:
history = MagicMock()
history.get_history.return_value = []
if device_manager is None:
device_manager = MagicMock()
device_manager.scan_devices.return_value = []
if dashboard is None:
dashboard = MagicMock()
return ExportReportDialog(
parent=None,
benchmarker=benchmarker,
history=history,
device_manager=device_manager,
dashboard=dashboard,
)
# ---------------------------------------------------------------------------
# Basic construction
# ---------------------------------------------------------------------------
class TestExportReportDialogCreation:
def test_dialog_can_be_created(self):
"""ExportReportDialog 應可正常建立"""
dialog = _make_dialog()
assert dialog is not None
def test_dialog_is_instance_of_qdialog(self):
"""ExportReportDialog 應繼承自 QDialog或其 Stub"""
from PyQt5.QtWidgets import QDialog
dialog = _make_dialog()
assert isinstance(dialog, QDialog)
def test_dialog_default_format_is_pdf(self):
"""格式選擇預設應為 PDF"""
dialog = _make_dialog()
assert dialog._selected_format == "pdf"
# ---------------------------------------------------------------------------
# _collect_report_data
# ---------------------------------------------------------------------------
class TestCollectReportData:
def test_returns_report_data_instance(self):
"""_collect_report_data() 應回傳 ReportData 型別"""
dialog = _make_dialog()
result = dialog._collect_report_data()
assert isinstance(result, ReportData)
def test_uses_history_records(self):
"""_collect_report_data() 應使用 history.get_history() 的結果"""
history = MagicMock()
records = [_make_benchmark_result("parallel")]
history.get_history.return_value = records
dialog = _make_dialog(history=history)
result = dialog._collect_report_data()
history.get_history.assert_called_once()
assert result.history_records == records
def test_uses_device_manager_scan(self):
"""_collect_report_data() 應呼叫 device_manager.scan_devices()"""
device_manager = MagicMock()
device_manager.scan_devices.return_value = []
dialog = _make_dialog(device_manager=device_manager)
dialog._collect_report_data()
device_manager.scan_devices.assert_called_once()
def test_handles_history_failure_gracefully(self):
"""history.get_history() 拋出例外時,應回傳空的 history_records"""
history = MagicMock()
history.get_history.side_effect = Exception("history error")
dialog = _make_dialog(history=history)
result = dialog._collect_report_data()
assert result.history_records == []
def test_handles_device_manager_failure_gracefully(self):
"""device_manager.scan_devices() 拋出例外時devices 應為空列表"""
device_manager = MagicMock()
device_manager.scan_devices.side_effect = Exception("device error")
dialog = _make_dialog(device_manager=device_manager)
result = dialog._collect_report_data()
assert result.devices == []
def test_uses_latest_benchmark_from_history_as_parallel_result(self):
"""benchmarker.history 有記錄時,應使用最新一筆作為 parallel_result"""
benchmarker = MagicMock()
latest = _make_benchmark_result("parallel", fps=45.6)
benchmarker.history = [_make_benchmark_result("sequential"), latest]
dialog = _make_dialog(benchmarker=benchmarker)
result = dialog._collect_report_data()
# parallel_result should be the latest record (index -1)
assert result.parallel_result == latest
def test_parallel_result_is_none_when_history_empty(self):
"""benchmarker.history 為空時parallel_result 應為 None"""
benchmarker = MagicMock()
benchmarker.history = []
dialog = _make_dialog(benchmarker=benchmarker)
result = dialog._collect_report_data()
assert result.parallel_result is None
def test_chart_image_bytes_is_none(self):
"""chart_image_bytes 應為 None截圖整合留未來"""
dialog = _make_dialog()
result = dialog._collect_report_data()
assert result.chart_image_bytes is None
# ---------------------------------------------------------------------------
# Format selection
# ---------------------------------------------------------------------------
class TestFormatSelection:
def test_set_format_to_csv(self):
"""可將格式設為 CSV"""
dialog = _make_dialog()
dialog._set_format("csv")
assert dialog._selected_format == "csv"
def test_set_format_to_pdf(self):
"""可將格式設回 PDF"""
dialog = _make_dialog()
dialog._set_format("csv")
dialog._set_format("pdf")
assert dialog._selected_format == "pdf"

224
tests/unit/test_history.py Normal file
View File

@ -0,0 +1,224 @@
"""
Unit tests for PerformanceHistory.
Coverage:
- recording BenchmarkResult entries
- querying history with filters (limit / mode)
- regression comparison reports
- persistence (JSON read/write)
"""
import json
import os
import time
import tempfile
import pytest
from core.performance.benchmarker import BenchmarkResult
from core.performance.history import PerformanceHistory
# ---------------------------------------------------------------------------
# Helper functions
# ---------------------------------------------------------------------------
def make_result(mode: str = "sequential", fps: float = 30.0, avg_latency_ms: float = 33.3,
p95_latency_ms: float = 50.0, total_frames: int = 900) -> BenchmarkResult:
"""建立測試用的 BenchmarkResult。"""
return BenchmarkResult(
mode=mode,
fps=fps,
avg_latency_ms=avg_latency_ms,
p95_latency_ms=p95_latency_ms,
total_frames=total_frames,
timestamp=time.time(),
device_config={"KL520": 1},
)
# ---------------------------------------------------------------------------
# Fixture
# ---------------------------------------------------------------------------
@pytest.fixture
def tmp_history(tmp_path):
"""回傳一個使用暫存路徑的 PerformanceHistory 實例。"""
storage_path = str(tmp_path / "benchmark_history.json")
return PerformanceHistory(storage_path=storage_path)
# ---------------------------------------------------------------------------
# Tests: basic recording
# ---------------------------------------------------------------------------
class TestRecord:
def should_record_result_to_storage(self, tmp_history):
"""record() 應將結果寫入 JSON 儲存。"""
result = make_result()
tmp_history.record(result)
records = tmp_history.get_history()
assert len(records) == 1
def should_persist_across_instances(self, tmp_path):
"""record() 應將資料持久化,重新建立實例後仍可讀取。"""
storage_path = str(tmp_path / "benchmark_history.json")
history1 = PerformanceHistory(storage_path=storage_path)
result = make_result(fps=42.0)
history1.record(result)
history2 = PerformanceHistory(storage_path=storage_path)
records = history2.get_history()
assert len(records) == 1
assert records[0].fps == 42.0
def should_assign_unique_id_to_each_record(self, tmp_history):
"""每筆記錄應有唯一的 id。"""
tmp_history.record(make_result())
time.sleep(0.01)
tmp_history.record(make_result())
records = tmp_history.get_history()
ids = [r.id for r in records]
assert len(set(ids)) == 2
def should_store_all_benchmark_fields(self, tmp_history):
"""record() 應完整儲存所有欄位。"""
result = make_result(
mode="parallel",
fps=60.5,
avg_latency_ms=16.5,
p95_latency_ms=25.0,
total_frames=1815,
)
tmp_history.record(result)
saved = tmp_history.get_history()[0]
assert saved.mode == "parallel"
assert saved.fps == pytest.approx(60.5)
assert saved.avg_latency_ms == pytest.approx(16.5)
assert saved.p95_latency_ms == pytest.approx(25.0)
assert saved.total_frames == 1815
# ---------------------------------------------------------------------------
# 測試get_history 查詢
# ---------------------------------------------------------------------------
class TestGetHistory:
def should_return_records_in_reverse_chronological_order(self, tmp_history):
"""get_history() 應以最新優先的順序回傳記錄。"""
base_time = 1000000.0
for i, fps in enumerate([10.0, 20.0, 30.0]):
result = make_result(fps=fps)
result.timestamp = base_time + i  # ensure strictly increasing timestamps
tmp_history.record(result)
records = tmp_history.get_history()
fps_values = [r.fps for r in records]
# newest first: fps=30 (largest timestamp) comes first
assert fps_values == [30.0, 20.0, 10.0]
def should_respect_limit_parameter(self, tmp_history):
"""get_history(limit=N) 應只回傳最新的 N 筆記錄。"""
for i in range(5):
tmp_history.record(make_result(fps=float(i + 1)))
records = tmp_history.get_history(limit=3)
assert len(records) == 3
def should_filter_by_mode(self, tmp_history):
"""get_history(mode='parallel') 應只回傳 parallel 模式的記錄。"""
tmp_history.record(make_result(mode="sequential"))
tmp_history.record(make_result(mode="parallel"))
tmp_history.record(make_result(mode="sequential"))
records = tmp_history.get_history(mode="parallel")
assert len(records) == 1
assert records[0].mode == "parallel"
def should_return_empty_list_when_no_records(self, tmp_history):
"""空儲存應回傳空列表。"""
records = tmp_history.get_history()
assert records == []
def should_apply_limit_after_mode_filter(self, tmp_history):
"""limit 應在 mode 過濾之後套用。"""
for _ in range(4):
tmp_history.record(make_result(mode="sequential"))
for _ in range(4):
tmp_history.record(make_result(mode="parallel"))
records = tmp_history.get_history(limit=2, mode="parallel")
assert len(records) == 2
assert all(r.mode == "parallel" for r in records)
# ---------------------------------------------------------------------------
# Tests: regression report
# ---------------------------------------------------------------------------
class TestGetRegressionReport:
def should_report_fps_improvement(self, tmp_history):
"""get_regression_report() 應計算 FPS 改善百分比。"""
baseline = make_result(fps=30.0, avg_latency_ms=33.3, p95_latency_ms=50.0)
tmp_history.record(baseline)
baseline_id = tmp_history.get_history()[0].id
compare = make_result(fps=45.0, avg_latency_ms=22.2, p95_latency_ms=35.0)
tmp_history.record(compare)
compare_id = tmp_history.get_history()[0].id  # the latest record
report = tmp_history.get_regression_report(baseline_id, compare_id)
assert "fps_change_pct" in report
assert report["fps_change_pct"] == pytest.approx(50.0, rel=1e-2)
def should_report_latency_change(self, tmp_history):
"""get_regression_report() 應計算延遲變化百分比。"""
baseline = make_result(avg_latency_ms=40.0, p95_latency_ms=60.0)
tmp_history.record(baseline)
baseline_id = tmp_history.get_history()[0].id
compare = make_result(avg_latency_ms=20.0, p95_latency_ms=30.0)
tmp_history.record(compare)
compare_id = tmp_history.get_history()[0].id
report = tmp_history.get_regression_report(baseline_id, compare_id)
assert "avg_latency_change_pct" in report
assert report["avg_latency_change_pct"] == pytest.approx(-50.0, rel=1e-2)
def should_raise_error_for_invalid_id(self, tmp_history):
"""無效的 id 應引發 ValueError。"""
with pytest.raises(ValueError):
tmp_history.get_regression_report("nonexistent_baseline", "nonexistent_compare")
# ---------------------------------------------------------------------------
# 測試JSON 檔案格式
# ---------------------------------------------------------------------------
class TestStorageFormat:
def should_produce_valid_json_file(self, tmp_path):
"""儲存的檔案應為合法的 JSON 並符合規格格式。"""
storage_path = str(tmp_path / "benchmark_history.json")
history = PerformanceHistory(storage_path=storage_path)
history.record(make_result(mode="parallel", fps=45.2))
with open(storage_path, "r", encoding="utf-8") as f:
data = json.load(f)
assert "records" in data
assert len(data["records"]) == 1
record = data["records"][0]
for field in ("id", "mode", "fps", "avg_latency_ms", "p95_latency_ms",
"total_frames", "timestamp", "device_config"):
assert field in record, f"missing field: {field}"
def should_create_parent_directory_if_not_exists(self, tmp_path):
"""若父目錄不存在,應自動建立。"""
storage_path = str(tmp_path / "deep" / "nested" / "history.json")
history = PerformanceHistory(storage_path=storage_path)
history.record(make_result())
assert os.path.exists(storage_path)
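
The regression tests above pin down the percentage math: +50% FPS from 30 to 45, and -50% average latency from 40 to 20. A worked sketch of those two formulas; the dict keys mirror the asserted report fields, while the real method additionally resolves records by id:

# Worked sketch of the change-percentage math asserted above.
def regression_report(baseline: dict, compare: dict) -> dict:
    return {
        "fps_change_pct":
            (compare["fps"] - baseline["fps"]) / baseline["fps"] * 100.0,
        "avg_latency_change_pct":
            (compare["avg_latency_ms"] - baseline["avg_latency_ms"])
            / baseline["avg_latency_ms"] * 100.0,
    }

r = regression_report({"fps": 30.0, "avg_latency_ms": 40.0},
                      {"fps": 45.0, "avg_latency_ms": 20.0})
assert round(r["fps_change_pct"]) == 50 and round(r["avg_latency_change_pct"]) == -50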

View File

@ -0,0 +1,364 @@
"""
tests/unit/test_optimization_engine.py
TDD Phase 3.3.1 — unit tests for OptimizationEngine
Coverage:
- the three optimization rules in analyze_pipeline (including boundary values)
- predict_performance calculation logic
- apply_suggestion calling device_manager for rebalance_devices
"""
import pytest
from unittest.mock import MagicMock, call
from core.optimization.engine import OptimizationEngine, OptimizationSuggestion
from core.device.device_manager import DeviceInfo
# ---------------------------------------------------------------------------
# Fixtures
# ---------------------------------------------------------------------------
@pytest.fixture
def engine():
return OptimizationEngine()
def _make_stats(
stage_fill_rates=None,
stage_avg_times=None,
device_utilizations=None,
):
"""建立 analyze_pipeline 接受的 stats 字典。"""
stage_fill_rates = stage_fill_rates or {}
stage_avg_times = stage_avg_times or {}
device_utilizations = device_utilizations or {}
stages = {}
all_stage_ids = set(stage_fill_rates) | set(stage_avg_times)
for sid in all_stage_ids:
stages[sid] = {
"queue_fill_rate": stage_fill_rates.get(sid, 0.0),
"avg_processing_time": stage_avg_times.get(sid, 10.0),
"fps": 30.0,
}
devices = {}
for did, util in device_utilizations.items():
devices[did] = {
"utilization_pct": util,
"series": "KL720",
}
return {"stages": stages, "devices": devices}
def _make_device_info(device_id="usb-1", gops=28, series="KL720"):
return DeviceInfo(
device_id=device_id,
series=series,
product_id=0x720,
status="online",
gops=gops,
assigned_stage=None,
current_fps=0.0,
utilization_pct=0.0,
)
# ---------------------------------------------------------------------------
# analyze_pipeline — rule 1: rebalance_devices
# ---------------------------------------------------------------------------
class TestAnalyzePipelineRebalanceDevices:
"""queue_fill_rate > 0.70 應觸發 rebalance_devices 建議。"""
def test_should_suggest_rebalance_when_fill_rate_above_threshold(self, engine):
stats = _make_stats(stage_fill_rates={"stage_0": 0.71})
suggestions = engine.analyze_pipeline(stats)
types = [s.type for s in suggestions]
assert "rebalance_devices" in types
def test_should_not_suggest_rebalance_when_fill_rate_at_threshold(self, engine):
"""恰好等於 0.70 不觸發(需 > 0.70)。"""
stats = _make_stats(stage_fill_rates={"stage_0": 0.70})
suggestions = engine.analyze_pipeline(stats)
types = [s.type for s in suggestions]
assert "rebalance_devices" not in types
def test_should_not_suggest_rebalance_when_fill_rate_below_threshold(self, engine):
stats = _make_stats(stage_fill_rates={"stage_0": 0.50})
suggestions = engine.analyze_pipeline(stats)
types = [s.type for s in suggestions]
assert "rebalance_devices" not in types
def test_rebalance_suggestion_has_required_fields(self, engine):
stats = _make_stats(stage_fill_rates={"stage_0": 0.85})
suggestions = engine.analyze_pipeline(stats)
rebalance = next(s for s in suggestions if s.type == "rebalance_devices")
assert rebalance.suggestion_id
assert rebalance.description
assert 0.0 <= rebalance.estimated_improvement_pct
assert rebalance.confidence in ("high", "medium", "low")
assert isinstance(rebalance.action_params, dict)
def test_rebalance_action_params_includes_stage_id(self, engine):
stats = _make_stats(stage_fill_rates={"stage_0": 0.85})
suggestions = engine.analyze_pipeline(stats)
rebalance = next(s for s in suggestions if s.type == "rebalance_devices")
assert "stage_id" in rebalance.action_params
# ---------------------------------------------------------------------------
# analyze_pipeline — rule 2: adjust_queue
# ---------------------------------------------------------------------------
class TestAnalyzePipelineAdjustQueue:
"""avg_processing_time 最大/最小比值 > 2.0 應觸發 adjust_queue 建議。"""
def test_should_suggest_adjust_queue_when_ratio_above_threshold(self, engine):
stats = _make_stats(
stage_avg_times={"stage_0": 10.0, "stage_1": 25.0}
)
suggestions = engine.analyze_pipeline(stats)
types = [s.type for s in suggestions]
assert "adjust_queue" in types
def test_should_not_suggest_adjust_queue_when_ratio_at_threshold(self, engine):
"""恰好等於 2.0 不觸發(需 > 2.0)。"""
stats = _make_stats(
stage_avg_times={"stage_0": 10.0, "stage_1": 20.0}
)
suggestions = engine.analyze_pipeline(stats)
types = [s.type for s in suggestions]
assert "adjust_queue" not in types
def test_should_not_suggest_adjust_queue_when_ratio_below_threshold(self, engine):
stats = _make_stats(
stage_avg_times={"stage_0": 10.0, "stage_1": 15.0}
)
suggestions = engine.analyze_pipeline(stats)
types = [s.type for s in suggestions]
assert "adjust_queue" not in types
def test_should_not_suggest_adjust_queue_with_single_stage(self, engine):
"""只有一個 Stage 時無法計算比值,不觸發。"""
stats = _make_stats(stage_avg_times={"stage_0": 100.0})
suggestions = engine.analyze_pipeline(stats)
types = [s.type for s in suggestions]
assert "adjust_queue" not in types
def test_adjust_queue_suggestion_has_required_fields(self, engine):
stats = _make_stats(
stage_avg_times={"stage_0": 10.0, "stage_1": 25.0}
)
suggestions = engine.analyze_pipeline(stats)
adj = next(s for s in suggestions if s.type == "adjust_queue")
assert adj.suggestion_id
assert adj.description
assert adj.confidence in ("high", "medium", "low")
assert isinstance(adj.action_params, dict)
def should_not_suggest_adjust_queue_when_min_processing_time_is_zero(self, engine):
# when a stage's avg_processing_time is 0 the ratio is meaningless, so the rule must not fire
stats = _make_stats(stage_avg_times={"stage_0": 0.0, "stage_1": 50.0})
suggestions = engine.analyze_pipeline(stats)
adjust = [s for s in suggestions if s.type == "adjust_queue"]
assert len(adjust) == 0
# ---------------------------------------------------------------------------
# analyze_pipeline — rule 3: add_devices
# ---------------------------------------------------------------------------
class TestAnalyzePipelineAddDevices:
"""所有 Dongle 使用率 > 85% 應觸發 add_devices 建議。"""
def test_should_suggest_add_devices_when_all_above_threshold(self, engine):
stats = _make_stats(
device_utilizations={"usb-1": 86.0, "usb-2": 90.0}
)
suggestions = engine.analyze_pipeline(stats)
types = [s.type for s in suggestions]
assert "add_devices" in types
def test_should_not_suggest_add_devices_when_one_device_below_threshold(self, engine):
stats = _make_stats(
device_utilizations={"usb-1": 90.0, "usb-2": 80.0}
)
suggestions = engine.analyze_pipeline(stats)
types = [s.type for s in suggestions]
assert "add_devices" not in types
def test_should_not_suggest_add_devices_when_all_at_threshold(self, engine):
"""恰好等於 85% 不觸發(需 > 85%)。"""
stats = _make_stats(
device_utilizations={"usb-1": 85.0, "usb-2": 85.0}
)
suggestions = engine.analyze_pipeline(stats)
types = [s.type for s in suggestions]
assert "add_devices" not in types
def test_should_not_suggest_add_devices_when_no_devices(self, engine):
"""沒有裝置資訊時不觸發。"""
stats = _make_stats(device_utilizations={})
suggestions = engine.analyze_pipeline(stats)
types = [s.type for s in suggestions]
assert "add_devices" not in types
def test_add_devices_suggestion_has_required_fields(self, engine):
stats = _make_stats(
device_utilizations={"usb-1": 90.0, "usb-2": 92.0}
)
suggestions = engine.analyze_pipeline(stats)
add = next(s for s in suggestions if s.type == "add_devices")
assert add.suggestion_id
assert add.description
assert add.confidence in ("high", "medium", "low")
# ---------------------------------------------------------------------------
# analyze_pipeline — empty stats
# ---------------------------------------------------------------------------
class TestAnalyzePipelineEmptyStats:
def test_should_return_empty_list_when_stats_empty(self, engine):
suggestions = engine.analyze_pipeline({"stages": {}, "devices": {}})
assert suggestions == []
# ---------------------------------------------------------------------------
# predict_performance
# ---------------------------------------------------------------------------
class TestPredictPerformance:
"""predict_performance 使用 sum(gops) / num_stages * 0.6 計算 FPS。"""
def test_should_return_expected_fps_with_single_device_single_stage(self, engine):
devices = [_make_device_info(gops=28)]
# estimated_fps = 28 / 1 * 0.6 = 16.8
config = [MagicMock()] # 1 stage
result = engine.predict_performance(config, devices)
assert result["estimated_fps"] == pytest.approx(16.8)
def test_should_return_expected_latency(self, engine):
devices = [_make_device_info(gops=28)]
config = [MagicMock()] # 1 stage
result = engine.predict_performance(config, devices)
# estimated_latency_ms = 1000 / 16.8
assert result["estimated_latency_ms"] == pytest.approx(1000.0 / 16.8, rel=1e-4)
def test_should_return_confidence_range_as_tuple(self, engine):
devices = [_make_device_info(gops=28)]
config = [MagicMock()] # 1 stage
result = engine.predict_performance(config, devices)
low, high = result["confidence_range"]
fps = result["estimated_fps"]
assert low == pytest.approx(fps * 0.8)
assert high == pytest.approx(fps * 1.2)
def test_should_scale_fps_with_multiple_devices(self, engine):
devices = [
_make_device_info("usb-1", gops=28),
_make_device_info("usb-2", gops=28),
]
config = [MagicMock(), MagicMock()] # 2 stages
result = engine.predict_performance(config, devices)
# estimated_fps = (28 + 28) / 2 * 0.6 = 16.8
assert result["estimated_fps"] == pytest.approx(16.8)
def test_should_decrease_fps_with_more_stages(self, engine):
devices = [_make_device_info(gops=28)]
config_1 = [MagicMock()] # 1 stage
config_4 = [MagicMock()] * 4 # 4 stages
result_1 = engine.predict_performance(config_1, devices)
result_4 = engine.predict_performance(config_4, devices)
assert result_4["estimated_fps"] < result_1["estimated_fps"]
def test_should_handle_zero_stages_without_crash(self, engine):
"""num_stages = 0 時回傳 0 FPS不拋錯"""
devices = [_make_device_info(gops=28)]
result = engine.predict_performance([], devices)
assert result["estimated_fps"] == 0.0
def test_should_return_zero_fps_with_no_devices(self, engine):
config = [MagicMock()]
result = engine.predict_performance(config, [])
assert result["estimated_fps"] == 0.0
# ---------------------------------------------------------------------------
# apply_suggestion
# ---------------------------------------------------------------------------
class TestApplySuggestion:
def _make_rebalance_suggestion(self, stage_id="stage_0", device_id="usb-1"):
return OptimizationSuggestion(
suggestion_id="test-001",
type="rebalance_devices",
description="Rebalance test",
estimated_improvement_pct=10.0,
confidence="medium",
action_params={"stage_id": stage_id, "device_id": device_id},
)
def test_should_call_assign_device_for_rebalance_suggestion(self, engine):
dm = MagicMock()
dm.assign_device.return_value = True
suggestion = self._make_rebalance_suggestion("stage_0", "usb-1")
result = engine.apply_suggestion(suggestion, dm)
dm.assign_device.assert_called_once_with("usb-1", "stage_0")
assert result is True
def test_should_return_false_when_assign_device_fails(self, engine):
dm = MagicMock()
dm.assign_device.return_value = False
suggestion = self._make_rebalance_suggestion()
result = engine.apply_suggestion(suggestion, dm)
assert result is False
def test_should_return_true_for_add_devices_without_calling_assign(self, engine):
dm = MagicMock()
suggestion = OptimizationSuggestion(
suggestion_id="test-002",
type="add_devices",
description="Add more dongles",
estimated_improvement_pct=20.0,
confidence="high",
action_params={},
)
result = engine.apply_suggestion(suggestion, dm)
dm.assign_device.assert_not_called()
assert result is True
def test_should_return_true_for_adjust_queue_without_calling_assign(self, engine):
dm = MagicMock()
suggestion = OptimizationSuggestion(
suggestion_id="test-003",
type="adjust_queue",
description="Adjust queue size",
estimated_improvement_pct=5.0,
confidence="low",
action_params={},
)
result = engine.apply_suggestion(suggestion, dm)
dm.assign_device.assert_not_called()
assert result is True
def should_call_assign_device_with_empty_device_id_when_not_populated(self, engine):
# rebalance suggestions produced by analyze_pipeline default device_id to an empty string;
# apply_suggestion should pass it through to device_manager unchanged (predictable behavior)
suggestion = OptimizationSuggestion(
suggestion_id="test",
type="rebalance_devices",
description="test",
estimated_improvement_pct=10.0,
confidence="medium",
action_params={"device_id": "", "stage_id": "stage_0"}
)
mock_dm = MagicMock()
mock_dm.assign_device.return_value = False  # an empty device_id typically returns False
result = engine.apply_suggestion(suggestion, mock_dm)
mock_dm.assign_device.assert_called_once_with("", "stage_0")
# the result depends on assign_device's return value
assert result is False
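
predict_performance's arithmetic is spelled out in the tests: estimated_fps = sum(gops) / num_stages * 0.6, latency = 1000 / fps, and a confidence band of plus or minus 20%. A direct sketch of that formula with the zero-stage and zero-device guards the edge-case tests require; predict is a hypothetical stand-in for the real method:

# Sketch of the prediction formula asserted by TestPredictPerformance.
def predict(gops_list: list, num_stages: int) -> dict:
    if not gops_list or num_stages == 0:
        return {"estimated_fps": 0.0}  # edge cases return 0 FPS, no exception
    fps = sum(gops_list) / num_stages * 0.6
    return {
        "estimated_fps": fps,
        "estimated_latency_ms": 1000.0 / fps,
        "confidence_range": (fps * 0.8, fps * 1.2),
    }

assert abs(predict([28], 1)["estimated_fps"] - 16.8) < 1e-9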

View File

@ -0,0 +1,152 @@
"""
Unit tests for PerformanceDashboard.
Test strategy:
- PyQt5 is unavailable in CI; conftest.py injects mocks to bypass the import
- the tests verify PerformanceDashboard's behavioral logic:
whether update_stats updates the displayed values, reset zeroes them, and set_display_window stores the setting
- real QLabels are replaced with MagicMock; assertions inspect the recorded setText calls
"""
import sys
import pytest
from unittest.mock import MagicMock, patch, call
# ---------------------------------------------------------------------------
# 測試PerformanceDashboard 可以建立
# ---------------------------------------------------------------------------
class TestPerformanceDashboardInit:
def should_be_importable(self):
"""PerformanceDashboard 模組應可匯入(即使 PyQt5 被 Mock"""
from ui.components.performance_dashboard import PerformanceDashboard
assert PerformanceDashboard is not None
def should_instantiate_without_error(self):
"""PerformanceDashboard() 應可無錯誤地建立實例。"""
from ui.components.performance_dashboard import PerformanceDashboard
dashboard = PerformanceDashboard()
assert dashboard is not None
# ---------------------------------------------------------------------------
# 測試update_stats 更新顯示值
# ---------------------------------------------------------------------------
class TestUpdateStats:
def should_store_fps_after_update(self):
"""update_stats 後current_fps 屬性應更新為傳入的值。"""
from ui.components.performance_dashboard import PerformanceDashboard
dashboard = PerformanceDashboard()
dashboard.update_stats({"fps": 30.5, "avg_latency_ms": 10.0, "p95_latency_ms": 15.0})
assert dashboard.current_fps == pytest.approx(30.5)
def should_store_avg_latency_after_update(self):
"""update_stats 後current_avg_latency_ms 屬性應更新。"""
from ui.components.performance_dashboard import PerformanceDashboard
dashboard = PerformanceDashboard()
dashboard.update_stats({"fps": 30.0, "avg_latency_ms": 12.3, "p95_latency_ms": 20.0})
assert dashboard.current_avg_latency_ms == pytest.approx(12.3)
def should_store_p95_latency_after_update(self):
"""update_stats 後current_p95_latency_ms 屬性應更新。"""
from ui.components.performance_dashboard import PerformanceDashboard
dashboard = PerformanceDashboard()
dashboard.update_stats({"fps": 30.0, "avg_latency_ms": 12.0, "p95_latency_ms": 25.7})
assert dashboard.current_p95_latency_ms == pytest.approx(25.7)
def should_call_fps_label_setText(self):
"""update_stats 應對 fps_label 呼叫 setText包含 fps 數值。"""
from ui.components.performance_dashboard import PerformanceDashboard
dashboard = PerformanceDashboard()
dashboard.fps_label.setText.reset_mock()
dashboard.update_stats({"fps": 45.0, "avg_latency_ms": 10.0, "p95_latency_ms": 15.0})
dashboard.fps_label.setText.assert_called_once()
call_arg = dashboard.fps_label.setText.call_args[0][0]
assert "45" in call_arg
def should_call_avg_latency_label_setText(self):
"""update_stats 應對 avg_latency_label 呼叫 setText包含延遲數值。"""
from ui.components.performance_dashboard import PerformanceDashboard
dashboard = PerformanceDashboard()
dashboard.avg_latency_label.setText.reset_mock()
dashboard.update_stats({"fps": 30.0, "avg_latency_ms": 8.5, "p95_latency_ms": 12.0})
dashboard.avg_latency_label.setText.assert_called_once()
call_arg = dashboard.avg_latency_label.setText.call_args[0][0]
assert "8.5" in call_arg or "8" in call_arg
def should_call_p95_latency_label_setText(self):
"""update_stats 應對 p95_latency_label 呼叫 setText包含 p95 數值。"""
from ui.components.performance_dashboard import PerformanceDashboard
dashboard = PerformanceDashboard()
dashboard.p95_latency_label.setText.reset_mock()
dashboard.update_stats({"fps": 30.0, "avg_latency_ms": 8.0, "p95_latency_ms": 19.2})
dashboard.p95_latency_label.setText.assert_called_once()
call_arg = dashboard.p95_latency_label.setText.call_args[0][0]
assert "19" in call_arg
# ---------------------------------------------------------------------------
# 測試reset 歸零
# ---------------------------------------------------------------------------
class TestReset:
def should_reset_fps_to_zero(self):
"""reset() 後 current_fps 應歸零。"""
from ui.components.performance_dashboard import PerformanceDashboard
dashboard = PerformanceDashboard()
dashboard.update_stats({"fps": 55.0, "avg_latency_ms": 5.0, "p95_latency_ms": 8.0})
dashboard.reset()
assert dashboard.current_fps == 0.0
def should_reset_avg_latency_to_zero(self):
"""reset() 後 current_avg_latency_ms 應歸零。"""
from ui.components.performance_dashboard import PerformanceDashboard
dashboard = PerformanceDashboard()
dashboard.update_stats({"fps": 30.0, "avg_latency_ms": 12.0, "p95_latency_ms": 18.0})
dashboard.reset()
assert dashboard.current_avg_latency_ms == 0.0
def should_reset_p95_latency_to_zero(self):
"""reset() 後 current_p95_latency_ms 應歸零。"""
from ui.components.performance_dashboard import PerformanceDashboard
dashboard = PerformanceDashboard()
dashboard.update_stats({"fps": 30.0, "avg_latency_ms": 12.0, "p95_latency_ms": 18.0})
dashboard.reset()
assert dashboard.current_p95_latency_ms == 0.0
def should_call_label_setText_with_zero_on_reset(self):
"""reset() 應對 fps_label 呼叫 setText更新為 0 值。"""
from ui.components.performance_dashboard import PerformanceDashboard
dashboard = PerformanceDashboard()
dashboard.fps_label.setText.reset_mock()
dashboard.reset()
dashboard.fps_label.setText.assert_called_once()
# ---------------------------------------------------------------------------
# 測試set_display_window 儲存設定
# ---------------------------------------------------------------------------
class TestSetDisplayWindow:
def should_store_display_window_seconds(self):
"""set_display_window(120) 後display_window_seconds 應為 120。"""
from ui.components.performance_dashboard import PerformanceDashboard
dashboard = PerformanceDashboard()
dashboard.set_display_window(120)
assert dashboard.display_window_seconds == 120
def should_default_to_60_seconds(self):
"""不傳參數時 display_window_seconds 預設應為 60。"""
from ui.components.performance_dashboard import PerformanceDashboard
dashboard = PerformanceDashboard()
dashboard.set_display_window()
assert dashboard.display_window_seconds == 60
def should_update_display_window_on_second_call(self):
"""連續呼叫 set_display_window 應覆蓋舊值。"""
from ui.components.performance_dashboard import PerformanceDashboard
dashboard = PerformanceDashboard()
dashboard.set_display_window(30)
dashboard.set_display_window(90)
assert dashboard.display_window_seconds == 90
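
The dashboard tests treat update_stats as a store-then-render step against mocked labels. A self-contained sketch of that pattern, with MagicMock standing in for QLabel exactly as conftest.py does for these tests; DashboardSketch is illustrative, not the real widget:

# Sketch of the store-then-render behavior the tests above verify.
from unittest.mock import MagicMock

class DashboardSketch:
    def __init__(self):
        self.fps_label = MagicMock()  # stands in for a QLabel, as in the tests
        self.current_fps = 0.0

    def update_stats(self, stats: dict) -> None:
        self.current_fps = stats["fps"]                          # store the raw value
        self.fps_label.setText(f"FPS: {self.current_fps:.1f}")  # render it

d = DashboardSketch()
d.update_stats({"fps": 45.0})
d.fps_label.setText.assert_called_once_with("FPS: 45.0")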

View File

@ -0,0 +1,250 @@
"""
tests/unit/test_report_exporter.py — unit tests for ReportExporter
Implements the test checklist from TDD 3.4.9.
"""
import csv
import io
import time
from pathlib import Path
from unittest.mock import patch, MagicMock
import pytest
from core.performance.benchmarker import BenchmarkResult
from core.performance.report_exporter import DeviceSummary, ReportData, ReportExporter
# ---------------------------------------------------------------------------
# Fixtures
# ---------------------------------------------------------------------------
def _make_benchmark_result(mode: str = "sequential", fps: float = 14.2) -> BenchmarkResult:
return BenchmarkResult(
mode=mode,
fps=fps,
avg_latency_ms=70.4,
p95_latency_ms=95.0,
total_frames=426,
timestamp=1743856222.0,
device_config={"KL720": 1},
id=f"benchmark_20260405_143022_{mode}",
)
def _make_report_data_with_benchmark() -> ReportData:
seq = _make_benchmark_result("sequential", fps=14.2)
par = _make_benchmark_result("parallel", fps=45.6)
return ReportData(
report_title="Test Report",
pipeline_name="test_pipeline",
sequential_result=seq,
parallel_result=par,
speedup=45.6 / 14.2,
history_records=[seq, par],
)
# ---------------------------------------------------------------------------
# _get_timestamp_str
# ---------------------------------------------------------------------------
class TestGetTimestampStr:
def test_format_is_yyyy_mm_dd_hh_mm_ss(self):
"""_get_timestamp_str 應回傳 'YYYY-MM-DD HH:MM:SS' 格式的字串"""
ts = 1743856222.0
result = ReportExporter._get_timestamp_str(ts)
# verify the format: fixed length of 19, containing '-' and ':'
assert len(result) == 19
assert result[4] == "-"
assert result[7] == "-"
assert result[10] == " "
assert result[13] == ":"
assert result[16] == ":"
def test_all_parts_are_digits(self):
"""timestamp 各欄位均應為數字"""
ts = 1743856222.0
result = ReportExporter._get_timestamp_str(ts)
parts = result.replace("-", "").replace(":", "").replace(" ", "")
assert parts.isdigit()
# ---------------------------------------------------------------------------
# ReportData defaults
# ---------------------------------------------------------------------------
class TestReportDataDefaults:
def test_report_title_is_non_empty(self):
"""ReportData 預設 report_title 應非空"""
data = ReportData()
assert data.report_title
assert len(data.report_title) > 0
def test_generated_at_is_close_to_now(self):
"""ReportData 預設 generated_at 應接近當下時間(誤差 < 5 秒)"""
before = time.time()
data = ReportData()
after = time.time()
assert before <= data.generated_at <= after + 5
def test_history_records_defaults_to_empty_list(self):
"""ReportData 預設 history_records 應為空列表"""
data = ReportData()
assert data.history_records == []
def test_devices_defaults_to_empty_list(self):
"""ReportData 預設 devices 應為空列表"""
data = ReportData()
assert data.devices == []
def test_sequential_result_defaults_to_none(self):
data = ReportData()
assert data.sequential_result is None
def test_parallel_result_defaults_to_none(self):
data = ReportData()
assert data.parallel_result is None
# ---------------------------------------------------------------------------
# export_csv
# ---------------------------------------------------------------------------
class TestExportCsv:
def test_creates_file_at_given_path(self, tmp_path):
"""export_csv() 應在指定路徑建立 CSV 檔案"""
data = _make_report_data_with_benchmark()
output_path = tmp_path / "report.csv"
exporter = ReportExporter()
result = exporter.export_csv(data, output_path)
assert output_path.exists()
assert result == output_path
def test_contains_benchmark_summary_section(self, tmp_path):
"""CSV 應包含完整的 benchmark_summary header 行"""
data = _make_report_data_with_benchmark()
output_path = tmp_path / "report.csv"
exporter = ReportExporter()
exporter.export_csv(data, output_path)
content = output_path.read_text(encoding="utf-8")
assert "section,metric,sequential,parallel,diff_pct" in content
def test_contains_history_section(self, tmp_path):
"""CSV 應包含完整的歷史記錄 header 行"""
data = _make_report_data_with_benchmark()
output_path = tmp_path / "report.csv"
exporter = ReportExporter()
exporter.export_csv(data, output_path)
content = output_path.read_text(encoding="utf-8")
assert "id,timestamp,mode,fps,avg_latency_ms,p95_latency_ms,total_frames" in content
# there are 2 history records; verify the data row count
lines = [l for l in content.splitlines() if l.strip()]
history_data_lines = [l for l in lines if l.startswith("benchmark_2")]
assert len(history_data_lines) == len(data.history_records)
def test_two_sections_separated_by_blank_line(self, tmp_path):
"""CSV 的兩個 header 行之間恰有一行空行"""
data = _make_report_data_with_benchmark()
output_path = tmp_path / "report.csv"
exporter = ReportExporter()
exporter.export_csv(data, output_path)
content = output_path.read_text(encoding="utf-8")
lines = content.splitlines()
summary_header = "section,metric,sequential,parallel,diff_pct"
history_header = "id,timestamp,mode,fps,avg_latency_ms,p95_latency_ms,total_frames"
idx_summary = next(i for i, l in enumerate(lines) if l == summary_header)
idx_history = next(i for i, l in enumerate(lines) if l == history_header)
# between the two header rows, the line immediately before the history header must be blank
assert idx_history > idx_summary + 1
assert lines[idx_history - 1] == ""
def test_no_benchmark_result_raises_value_error(self, tmp_path):
"""sequential_result 或 parallel_result 為 None 時,應拋出 ValueError"""
data = ReportData() # sequential_result=None, parallel_result=None
output_path = tmp_path / "report.csv"
exporter = ReportExporter()
with pytest.raises(ValueError):
exporter.export_csv(data, output_path)
def test_empty_history_produces_only_summary(self, tmp_path):
"""history_records 為空時CSV 只輸出 Benchmark 摘要區塊,歷史記錄表為空"""
seq = _make_benchmark_result("sequential", fps=14.2)
par = _make_benchmark_result("parallel", fps=45.6)
data = ReportData(
sequential_result=seq,
parallel_result=par,
speedup=45.6 / 14.2,
history_records=[],
)
output_path = tmp_path / "report.csv"
exporter = ReportExporter()
exporter.export_csv(data, output_path)
content = output_path.read_text(encoding="utf-8")
assert "benchmark_summary" in content
# no history data rows (rows starting with an id)
data_lines = [l for l in content.splitlines() if l.startswith("benchmark_2")]
assert len(data_lines) == 0
def test_auto_creates_parent_directory(self, tmp_path):
"""若輸出路徑的父目錄不存在export_csv() 應自動建立"""
data = _make_report_data_with_benchmark()
output_path = tmp_path / "subdir" / "report.csv"
exporter = ReportExporter()
exporter.export_csv(data, output_path)
assert output_path.exists()
# ---------------------------------------------------------------------------
# export_pdf
# ---------------------------------------------------------------------------
class TestExportPdf:
def test_creates_file_at_given_path(self, tmp_path):
"""export_pdf() 應在指定路徑建立 PDF 檔案(不驗證內容,只驗證存在)"""
reportlab = pytest.importorskip("reportlab")
data = _make_report_data_with_benchmark()
output_path = tmp_path / "report.pdf"
exporter = ReportExporter()
result = exporter.export_pdf(data, output_path)
assert output_path.exists()
assert result == output_path
def test_auto_creates_parent_directory(self, tmp_path):
"""若輸出路徑的父目錄不存在export_pdf() 應自動建立"""
pytest.importorskip("reportlab")
data = _make_report_data_with_benchmark()
output_path = tmp_path / "subdir" / "report.pdf"
exporter = ReportExporter()
exporter.export_pdf(data, output_path)
assert output_path.exists()
def test_without_chart_image_does_not_raise(self, tmp_path):
"""chart_image_bytes 為 None 時PDF 匯出不應拋出例外"""
pytest.importorskip("reportlab")
data = _make_report_data_with_benchmark()
data.chart_image_bytes = None
output_path = tmp_path / "report.pdf"
exporter = ReportExporter()
# should not raise
exporter.export_pdf(data, output_path)
def test_raises_import_error_when_reportlab_missing(self, tmp_path):
"""reportlab 未安裝時export_pdf() 應拋出 ImportError"""
import core.performance.report_exporter as re_mod
data = _make_report_data_with_benchmark()
output_path = tmp_path / "report.pdf"
exporter = ReportExporter()
with patch.object(re_mod, "_REPORTLAB_AVAILABLE", False):
with pytest.raises(ImportError, match="reportlab"):
exporter.export_pdf(data, output_path)
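
The CSV tests fix a two-section layout: a summary table, a single blank line, then a history table, with parent directories created on demand. A hedged sketch of just that layout; the header strings are copied from the assertions, data rows are elided, and write_csv_layout is a hypothetical name:

# Sketch of the two-section CSV layout asserted by TestExportCsv.
import csv
from pathlib import Path

def write_csv_layout(path: Path) -> None:
    path.parent.mkdir(parents=True, exist_ok=True)  # tests require auto-created parents
    with path.open("w", newline="", encoding="utf-8") as f:
        writer = csv.writer(f)
        writer.writerow(["section", "metric", "sequential", "parallel", "diff_pct"])
        # ... one summary row per metric ...
        writer.writerow([])  # the single blank line between the two sections
        writer.writerow(["id", "timestamp", "mode", "fps", "avg_latency_ms",
                         "p95_latency_ms", "total_frames"])
        # ... one row per history record ...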

View File

@ -0,0 +1,88 @@
"""
Tests for ResultSerializer JSON serialization of inference result objects.
"""
import dataclasses
import pytest
from unittest.mock import MagicMock
from core.functions.result_handler import ResultSerializer
# Minimal stand-ins for the SDK dataclasses (no kp import needed)
@dataclasses.dataclass
class FakeBoundingBox:
x1: int = 0
y1: int = 0
x2: int = 100
y2: int = 100
class_name: str = "fire"
score: float = 0.9
@dataclasses.dataclass
class FakeObjectDetectionResult:
class_count: int = 1
box_count: int = 1
box_list: list = dataclasses.field(default_factory=list)
@dataclasses.dataclass
class FakeClassificationResult:
probability: float = 0.85
class_name: str = "fire"
class_num: int = 0
class TestResultSerializerToJson:
def setup_method(self):
self.serializer = ResultSerializer()
def test_should_serialize_plain_dict(self):
data = {"fps": 30.0, "pipeline_id": "p1"}
result = self.serializer.to_json(data)
assert '"fps"' in result
assert "30.0" in result
def test_should_serialize_dict_containing_dataclass_object(self):
"""Bug reproduction: ObjectDetectionResult in result dict caused TypeError."""
det = FakeObjectDetectionResult(
class_count=1,
box_count=1,
box_list=[FakeBoundingBox()]
)
data = {"stage_results": {"stage_0": det}}
# Should NOT raise TypeError: Object of type FakeObjectDetectionResult is not JSON serializable
result = self.serializer.to_json(data)
assert result is not None
assert "stage_0" in result
def test_should_serialize_dict_containing_classification_result(self):
"""ClassificationResult must also be handled."""
clf = FakeClassificationResult(probability=0.85, class_name="fire")
data = {"stage_results": {"stage_0": clf}}
result = self.serializer.to_json(data)
assert "stage_0" in result
def test_should_serialize_nested_dataclass_in_list(self):
"""box_list inside ObjectDetectionResult contains BoundingBox dataclasses."""
det = FakeObjectDetectionResult(
box_count=1,
box_list=[FakeBoundingBox(x1=10, y1=20, x2=110, y2=120, class_name="fire")]
)
data = {"detections": det}
result = self.serializer.to_json(data)
assert "fire" in result
def test_should_preserve_primitive_values_unchanged(self):
data = {"fps": 45.2, "count": 3, "name": "test", "flag": True}
import json
result = json.loads(self.serializer.to_json(data))
assert result["fps"] == 45.2
assert result["count"] == 3
assert result["name"] == "test"
assert result["flag"] is True
def test_should_handle_none_values(self):
data = {"result": None, "stage": "stage_0"}
result = self.serializer.to_json(data)
assert "null" in result

View File

@ -0,0 +1,231 @@
"""
tests/unit/test_template_manager.py
TDD Phase 3.3.2 — TemplateManager unit tests.
Coverage:
- get_builtin_templates returns the 3 built-in templates
- load_template correctly loads built-in templates
- load_template raises ValueError for an unknown id
- save_as_template creates a new template that load_template can read back
"""
import pytest
from core.templates.manager import TemplateManager, PipelineTemplate
# ---------------------------------------------------------------------------
# Fixtures
# ---------------------------------------------------------------------------
@pytest.fixture
def manager():
return TemplateManager()
# ---------------------------------------------------------------------------
# get_builtin_templates
# ---------------------------------------------------------------------------
class TestGetBuiltinTemplates:
def test_should_return_exactly_three_builtin_templates(self, manager):
templates = manager.get_builtin_templates()
assert len(templates) == 3
def test_should_return_list_of_pipeline_template_instances(self, manager):
templates = manager.get_builtin_templates()
for t in templates:
assert isinstance(t, PipelineTemplate)
def test_should_include_yolov5_detection_template(self, manager):
templates = manager.get_builtin_templates()
ids = [t.template_id for t in templates]
assert "yolov5_detection" in ids
def test_should_include_fire_detection_template(self, manager):
templates = manager.get_builtin_templates()
ids = [t.template_id for t in templates]
assert "fire_detection" in ids
def test_should_include_dual_model_cascade_template(self, manager):
templates = manager.get_builtin_templates()
ids = [t.template_id for t in templates]
assert "dual_model_cascade" in ids
def test_each_template_has_non_empty_name_and_description(self, manager):
templates = manager.get_builtin_templates()
for t in templates:
assert t.name
assert t.description
def test_each_template_has_nodes_list(self, manager):
templates = manager.get_builtin_templates()
for t in templates:
assert isinstance(t.nodes, list)
assert len(t.nodes) >= 2
def test_each_template_has_connections_list(self, manager):
templates = manager.get_builtin_templates()
for t in templates:
assert isinstance(t.connections, list)
# ---------------------------------------------------------------------------
# load_template — built-in templates
# ---------------------------------------------------------------------------
class TestLoadTemplate:
def test_should_load_yolov5_detection_by_id(self, manager):
t = manager.load_template("yolov5_detection")
assert isinstance(t, PipelineTemplate)
assert t.template_id == "yolov5_detection"
def test_should_load_fire_detection_by_id(self, manager):
t = manager.load_template("fire_detection")
assert t.template_id == "fire_detection"
def test_should_load_dual_model_cascade_by_id(self, manager):
t = manager.load_template("dual_model_cascade")
assert t.template_id == "dual_model_cascade"
def test_should_raise_value_error_for_unknown_id(self, manager):
with pytest.raises(ValueError, match="not found"):
manager.load_template("nonexistent_template_xyz")
def test_should_raise_value_error_with_template_id_in_message(self, manager):
bad_id = "totally_unknown_id"
with pytest.raises(ValueError, match=bad_id):
manager.load_template(bad_id)
# ---------------------------------------------------------------------------
# yolov5_detection node structure validation
# ---------------------------------------------------------------------------
class TestYolov5DetectionStructure:
"""Input → Preprocess → Model → Postprocess → Output 順序。"""
def test_should_have_five_nodes(self, manager):
t = manager.load_template("yolov5_detection")
assert len(t.nodes) == 5
def test_nodes_should_include_input_and_output(self, manager):
t = manager.load_template("yolov5_detection")
node_types = [n["type"] for n in t.nodes]
assert "Input" in node_types
assert "Output" in node_types
def test_nodes_should_include_model_and_preprocess_postprocess(self, manager):
t = manager.load_template("yolov5_detection")
node_types = [n["type"] for n in t.nodes]
assert "Model" in node_types
assert "Preprocess" in node_types
assert "Postprocess" in node_types
# ---------------------------------------------------------------------------
# fire_detection node structure validation
# ---------------------------------------------------------------------------
class TestFireDetectionStructure:
"""Input → Model → Postprocess → Output 順序。"""
def test_should_have_four_nodes(self, manager):
t = manager.load_template("fire_detection")
assert len(t.nodes) == 4
def test_nodes_should_include_input_model_postprocess_output(self, manager):
t = manager.load_template("fire_detection")
node_types = [n["type"] for n in t.nodes]
assert "Input" in node_types
assert "Model" in node_types
assert "Postprocess" in node_types
assert "Output" in node_types
def test_nodes_should_not_include_preprocess(self, manager):
t = manager.load_template("fire_detection")
node_types = [n["type"] for n in t.nodes]
assert "Preprocess" not in node_types
# ---------------------------------------------------------------------------
# dual_model_cascade node structure validation
# ---------------------------------------------------------------------------
class TestDualModelCascadeStructure:
"""Input → Model1 → Postprocess1 → Model2 → Postprocess2 → Output 順序。"""
def test_should_have_six_nodes(self, manager):
t = manager.load_template("dual_model_cascade")
assert len(t.nodes) == 6
def test_should_have_two_model_nodes(self, manager):
t = manager.load_template("dual_model_cascade")
model_nodes = [n for n in t.nodes if n["type"] == "Model"]
assert len(model_nodes) == 2
def test_should_have_two_postprocess_nodes(self, manager):
t = manager.load_template("dual_model_cascade")
pp_nodes = [n for n in t.nodes if n["type"] == "Postprocess"]
assert len(pp_nodes) == 2
# ---------------------------------------------------------------------------
# save_as_template
# ---------------------------------------------------------------------------
class TestSaveAsTemplate:
def _sample_config(self):
return {
"nodes": [
{"id": "n1", "type": "Input"},
{"id": "n2", "type": "Output"},
],
"connections": [
{"from": "n1", "to": "n2"},
],
}
def test_should_return_pipeline_template_instance(self, manager):
t = manager.save_as_template(
self._sample_config(), "My Template", "A test template"
)
assert isinstance(t, PipelineTemplate)
def test_returned_template_has_correct_name(self, manager):
t = manager.save_as_template(self._sample_config(), "Custom Pipeline", "desc")
assert t.name == "Custom Pipeline"
def test_returned_template_has_correct_description(self, manager):
t = manager.save_as_template(self._sample_config(), "name", "My description")
assert t.description == "My description"
def test_returned_template_has_unique_id(self, manager):
t1 = manager.save_as_template(self._sample_config(), "T1", "desc")
t2 = manager.save_as_template(self._sample_config(), "T2", "desc")
assert t1.template_id != t2.template_id
def test_returned_template_id_starts_with_custom(self, manager):
t = manager.save_as_template(self._sample_config(), "My Template", "desc")
assert t.template_id.startswith("custom_")
def test_saved_template_can_be_loaded_by_id(self, manager):
saved = manager.save_as_template(self._sample_config(), "Loadable", "desc")
loaded = manager.load_template(saved.template_id)
assert loaded.template_id == saved.template_id
assert loaded.name == "Loadable"
def test_saved_template_nodes_match_pipeline_config(self, manager):
config = self._sample_config()
saved = manager.save_as_template(config, "Node Test", "desc")
assert saved.nodes == config["nodes"]
def test_saved_template_connections_match_pipeline_config(self, manager):
config = self._sample_config()
saved = manager.save_as_template(config, "Conn Test", "desc")
assert saved.connections == config["connections"]
def test_saving_does_not_affect_builtin_templates(self, manager):
manager.save_as_template(self._sample_config(), "Extra", "desc")
builtins = manager.get_builtin_templates()
assert len(builtins) == 3
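# For orientation, the PipelineTemplate shape these tests rely on (assumed
# field layout; see core/templates/manager.py for the authoritative definition):
#
#   @dataclass
#   class PipelineTemplate:
#       template_id: str   # "yolov5_detection" | "fire_detection" | "custom_..."
#       name: str
#       description: str
#       nodes: list        # [{"id": ..., "type": "Input" | "Model" | ...}, ...]
#       connections: list  # [{"from": node_id, "to": node_id}, ...]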

View File

@ -0,0 +1,123 @@
"""
ui/components/device_management_panel.py
DeviceManagementPanel is a QWidget that displays the status of all connected
NPU Dongles and provides manual/automatic assignment controls.
"""
from __future__ import annotations
from typing import List, Optional
from PyQt5.QtCore import QTimer, pyqtSignal
from PyQt5.QtWidgets import (
QHBoxLayout,
QLabel,
QPushButton,
QVBoxLayout,
QWidget,
)
from core.device.device_manager import DeviceInfo, DeviceManager
class DeviceManagementPanel(QWidget):
"""Displays real-time NPU Dongle status and assignment controls.
Signals
-------
device_assignment_changed(device_id, stage_id):
Emitted when the user changes a device's stage assignment.
"""
device_assignment_changed = pyqtSignal(str, str)
def __init__(
self,
device_manager: DeviceManager,
parent: Optional[QWidget] = None,
) -> None:
super().__init__(parent)
self._device_manager = device_manager
self._devices: List[DeviceInfo] = []
self._auto_refresh_interval_ms: int = 0
self._timer: Optional[QTimer] = None
self._setup_ui()
self.refresh()
# ------------------------------------------------------------------
# UI construction
# ------------------------------------------------------------------
def _setup_ui(self) -> None:
layout = QVBoxLayout()
# Toolbar row: Auto Balance button
toolbar = QHBoxLayout()
self.auto_balance_button = QPushButton("Auto Balance")
self.auto_balance_button.clicked.connect(self._on_auto_balance)
toolbar.addWidget(self.auto_balance_button)
toolbar.addStretch()
# Device cards area
self._cards_layout = QVBoxLayout()
self._no_device_label = QLabel("No devices connected")
layout.addLayout(toolbar)
layout.addWidget(self._no_device_label)
layout.addLayout(self._cards_layout)
self.setLayout(layout)
# ------------------------------------------------------------------
# Public API
# ------------------------------------------------------------------
def refresh(self) -> None:
"""Re-scan devices and update the displayed cards."""
self._devices = self._device_manager.scan_devices()
self._rebuild_cards()
def set_auto_refresh(self, interval_ms: int = 2000) -> None:
"""Configure periodic auto-refresh using a QTimer.
Parameters
----------
interval_ms:
Refresh interval in milliseconds; defaults to 2000 ms. Values <= 0 stop any running auto-refresh.
"""
if interval_ms <= 0:
if self._timer is not None:
self._timer.stop()
return
self._auto_refresh_interval_ms = interval_ms
if self._timer is None:
self._timer = QTimer(self)
self._timer.timeout.connect(self.refresh)
self._timer.start(interval_ms)
# ------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------
def _rebuild_cards(self) -> None:
"""Recreate device card widgets from the current device list."""
# Drop stale cards, then build one card per device.
while self._cards_layout.count():
item = self._cards_layout.takeAt(0)
if item.widget() is not None:
item.widget().deleteLater()
self._no_device_label.setVisible(not self._devices)
for device in self._devices:
self._cards_layout.addWidget(QLabel(f"{device.device_id} [{device.series}] {device.status}"))
def _on_auto_balance(self) -> None:
"""Handle Auto Balance button click."""
if not self._devices:
return
stage_ids = [
d.assigned_stage for d in self._devices if d.assigned_stage
]
if not stage_ids:
return
recommendations = self._device_manager.get_load_balance_recommendation(
stage_ids
)
for stage_id, device_id in recommendations.items():
if device_id:
self.device_assignment_changed.emit(device_id, stage_id)
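# Usage sketch (assumes a running QApplication; DeviceManager construction is
# simplified here):
#
#   manager = DeviceManager()
#   panel = DeviceManagementPanel(manager)
#   panel.device_assignment_changed.connect(
#       lambda device_id, stage_id: print(device_id, "->", stage_id)
#   )
#   panel.set_auto_refresh(2000)  # re-scan every 2 seconds
#   panel.show()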

View File

@ -0,0 +1,97 @@
"""
ui/components/performance_dashboard.py
PerformanceDashboard: QWidget showing real-time FPS and latency values.
Draws line charts with pyqtgraph when available; otherwise it degrades to
plain QLabel readouts so a missing import cannot crash the application.
"""
from typing import Any, Dict, Optional
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QHBoxLayout, QLabel, QVBoxLayout, QWidget
try:
import pyqtgraph as pg # type: ignore
_PYQTGRAPH_AVAILABLE = True
except ImportError:
_PYQTGRAPH_AVAILABLE = False
# TODO: Phase 2 - when pyqtgraph is available, show FPS/latency history as line charts
class PerformanceDashboard(QWidget):
"""即時效能儀錶板元件。
顯示當前 FPS平均延遲與 p95 延遲
接受 update_stats(stats) 推送的數據並更新 QLabel 顯示值
"""
update_requested = pyqtSignal(dict)
def __init__(self, parent: Optional[QWidget] = None) -> None:
super().__init__(parent)
# Internal state
self.current_fps: float = 0.0
self.current_avg_latency_ms: float = 0.0
self.current_p95_latency_ms: float = 0.0
self.display_window_seconds: int = 60
# UI elements (dynamic value labels; static labels provide the prefixes)
self.fps_label = QLabel("0.0")
self.avg_latency_label = QLabel("0.0")
self.p95_latency_label = QLabel("0.0")
self._setup_ui()
def _setup_ui(self) -> None:
layout = QVBoxLayout()
fps_row = QHBoxLayout()
fps_row.addWidget(QLabel("FPS:"))
fps_row.addWidget(self.fps_label)
avg_row = QHBoxLayout()
avg_row.addWidget(QLabel("Avg Latency:"))
avg_row.addWidget(self.avg_latency_label)
p95_row = QHBoxLayout()
p95_row.addWidget(QLabel("P95 Latency:"))
p95_row.addWidget(self.p95_latency_label)
layout.addLayout(fps_row)
layout.addLayout(avg_row)
layout.addLayout(p95_row)
self.setLayout(layout)
def update_stats(self, stats: Dict[str, Any]) -> None:
"""接收效能數據並更新顯示。
Args:
stats: 包含 "fps""avg_latency_ms""p95_latency_ms" 的字典
"""
self.current_fps = float(stats.get("fps", 0.0))
self.current_avg_latency_ms = float(stats.get("avg_latency_ms", 0.0))
self.current_p95_latency_ms = float(stats.get("p95_latency_ms", 0.0))
self.fps_label.setText(f"{self.current_fps:.1f} FPS")
self.avg_latency_label.setText(f"{self.current_avg_latency_ms:.1f} ms")
self.p95_latency_label.setText(f"{self.current_p95_latency_ms:.1f} ms")
def reset(self) -> None:
"""清空所有顯示值回到初始狀態0"""
self.current_fps = 0.0
self.current_avg_latency_ms = 0.0
self.current_p95_latency_ms = 0.0
self.fps_label.setText("0.0 FPS")
self.avg_latency_label.setText("0.0 ms")
self.p95_latency_label.setText("0.0 ms")
def set_display_window(self, seconds: int = 60) -> None:
"""設定圖表顯示的時間視窗(秒)。
Args:
seconds: 要顯示的歷史時間範圍預設 60
"""
self.display_window_seconds = seconds
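# Usage sketch (illustrative values; keys mirror update_stats()):
#
#   dashboard = PerformanceDashboard()
#   dashboard.update_stats({
#       "fps": 45.6,
#       "avg_latency_ms": 21.9,
#       "p95_latency_ms": 35.2,
#   })
#   dashboard.reset()  # back to "0.0 FPS" / "0.0 ms"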

View File

@ -0,0 +1,207 @@
"""
ui/dialogs/benchmark_dialog.py
BenchmarkDialog: one-click benchmark QDialog.
Shows a three-phase progress bar (warmup/sequential/parallel), live FPS,
the final speedup factor in a large font, and a sequential-vs-parallel
FPS/latency comparison table.
The benchmark runs on a QThread so the UI does not freeze.
If pipeline_config is empty, a hint is shown and the start button is disabled.
"""
from typing import Any, List, Optional
from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5.QtWidgets import (
QDialog,
QHBoxLayout,
QLabel,
QProgressBar,
QPushButton,
QTableWidget,
QTableWidgetItem,
QVBoxLayout,
QWidget,
)
class _BenchmarkWorker(QThread):
"""在背景執行緒執行 benchmark避免 UI 凍結。"""
progress_updated = pyqtSignal(str, int)
result_ready = pyqtSignal(object, object, float)
error_occurred = pyqtSignal(str)
def __init__(self, benchmarker: Any) -> None:
super().__init__()
self._benchmarker = benchmarker
def run(self) -> None:
try:
seq_result, par_result, speedup = self._benchmarker.run_full_benchmark(
progress_callback=self._on_progress
)
self.result_ready.emit(seq_result, par_result, speedup)
except Exception as exc:
self.error_occurred.emit(str(exc))
def _on_progress(self, phase: str, value: int) -> None:
self.progress_updated.emit(phase, value)
class BenchmarkDialog(QDialog):
"""Benchmark 觸發與結果顯示對話框。
Args:
parent: 父視窗
pipeline_config: 目前的 pipeline Stage 設定列表若為空禁用開始按鈕
"""
def __init__(
self,
parent: Optional[QWidget],
pipeline_config: List[Any],
) -> None:
super().__init__(parent)
self._pipeline_config = pipeline_config
self.seq_result: Optional[Any] = None
self.par_result: Optional[Any] = None
self.current_phase: str = ""
self._worker: Optional[_BenchmarkWorker] = None
self.setWindowTitle("Performance Benchmark")
# UI elements
self.info_label = QLabel("")
self.progress_bar = QProgressBar()
self.progress_bar.setMinimum(0)
self.progress_bar.setMaximum(100)
self.fps_label = QLabel("FPS: —")
self.phase_label = QLabel("")
self.speedup_label = QLabel("")
self.result_table = QTableWidget(2, 3)
self.result_table.setHorizontalHeaderLabels(["模式", "FPS", "Avg Latency (ms)"])
self.start_button = QPushButton("開始 Benchmark")
self.close_button = QPushButton("關閉")
self._setup_ui()
self._apply_initial_state()
def _setup_ui(self) -> None:
layout = QVBoxLayout()
layout.addWidget(self.info_label)
progress_row = QHBoxLayout()
progress_row.addWidget(self.progress_bar)
progress_row.addWidget(self.phase_label)
layout.addLayout(progress_row)
fps_row = QHBoxLayout()
fps_row.addWidget(QLabel("即時 FPS"))
fps_row.addWidget(self.fps_label)
layout.addLayout(fps_row)
layout.addWidget(self.speedup_label)
layout.addWidget(self.result_table)
btn_row = QHBoxLayout()
btn_row.addWidget(self.start_button)
btn_row.addWidget(self.close_button)
layout.addLayout(btn_row)
self.setLayout(layout)
def _apply_initial_state(self) -> None:
if not self._pipeline_config:
self.info_label.setText("尚未設定 Pipeline請先在 Pipeline Editor 中建立 Stage。")
self.start_button.setEnabled(False)
else:
self.info_label.setText(f"已載入 {len(self._pipeline_config)} 個 Stage可開始 Benchmark。")
self.start_button.setEnabled(True)
def start_benchmark(self, benchmarker: Any) -> None:
"""在 QThread 中執行 benchmark避免 UI 凍結。
Args:
benchmarker: PerformanceBenchmarker 實例
"""
self._worker = _BenchmarkWorker(benchmarker)
self._worker.progress_updated.connect(self.update_progress)
self._worker.result_ready.connect(self._on_result_ready)
self._worker.error_occurred.connect(self._on_error)
self._worker.finished.connect(self._worker.deleteLater)
self.start_button.setEnabled(False)
self._worker.start()
def update_progress(self, phase: str, value: int) -> None:
"""更新進度條與當前階段。
Args:
phase: 當前階段名稱"warmup" / "sequential" / "parallel"
value: 進度值0100
"""
_PHASE_LABELS = {
"warmup": "熱機中...",
"sequential": "循序測試...",
"parallel": "平行測試...",
}
self.current_phase = phase
self.progress_bar.setValue(value)
self.phase_label.setText(_PHASE_LABELS.get(phase, phase))
def show_result(
self,
seq_result: Any,
par_result: Any,
speedup: float,
) -> None:
"""顯示 benchmark 結果。
Args:
seq_result: 循序模式的 BenchmarkResult
par_result: 平行模式的 BenchmarkResult
speedup: 加速倍數par.fps / seq.fps
"""
self.seq_result = seq_result
self.par_result = par_result
font = self.speedup_label.font()
font.setPointSize(20)
font.setBold(True)
self.speedup_label.setFont(font)
self.speedup_label.setText(f"{speedup:.1f}x FASTER")
self._populate_table(seq_result, par_result)
def _populate_table(self, seq_result: Any, par_result: Any) -> None:
rows = [
("循序", seq_result),
("平行", par_result),
]
for row_idx, (mode_label, result) in enumerate(rows):
self.result_table.setItem(row_idx, 0, QTableWidgetItem(mode_label))
try:
self.result_table.setItem(row_idx, 1, QTableWidgetItem(f"{result.fps:.1f}"))
self.result_table.setItem(
row_idx, 2, QTableWidgetItem(f"{result.avg_latency_ms:.1f}")
)
except (AttributeError, TypeError):
pass
def _on_result_ready(
self,
seq_result: Any,
par_result: Any,
speedup: float,
) -> None:
self.show_result(seq_result, par_result, speedup)
self._worker = None
self.start_button.setEnabled(True)
def _on_error(self, message: str) -> None:
self.info_label.setText(f"Benchmark 失敗:{message}")
self.progress_bar.setValue(0)
self._worker = None
self.start_button.setEnabled(True)
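# Usage sketch (benchmarker stands in for a PerformanceBenchmarker instance;
# construction details omitted):
#
#   dialog = BenchmarkDialog(parent=None, pipeline_config=stages)
#   dialog.show()
#   dialog.start_benchmark(benchmarker)  # runs on a QThread; the UI stays responsive
#   # progress_updated -> update_progress(), result_ready -> show_result()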

View File

@ -0,0 +1,238 @@
"""
ui/dialogs/export_report_dialog.py — performance report export dialog.
Provides ExportReportDialog(QDialog): the user picks a report format (PDF/CSV)
and a save path, then ReportExporter is invoked to perform the export.
Design notes:
- _collect_report_data() gathers data from each module; every source is guarded with try/except
- No benchmark is run here; the latest history record is used as parallel_result
- chart_image_bytes stays None for now; screenshot integration is deferred
"""
from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional
from PyQt5.QtWidgets import (
QDialog,
QFileDialog,
QHBoxLayout,
QLabel,
QPushButton,
QRadioButton,
QVBoxLayout,
QWidget,
QLineEdit,
QGroupBox,
QProgressBar,
)
from PyQt5.QtCore import Qt
from core.performance.report_exporter import DeviceSummary, ReportData, ReportExporter
if TYPE_CHECKING:
from core.performance.benchmarker import PerformanceBenchmarker
from core.performance.history import PerformanceHistory
class ExportReportDialog(QDialog):
"""
Performance report export dialog.
The user picks a format (PDF / CSV) and a save path, then clicks export;
the dialog calls ReportExporter to produce the file and shows the result.
"""
def __init__(
self,
parent: Optional[QWidget],
benchmarker, # PerformanceBenchmarker | None
history, # PerformanceHistory | None
device_manager, # DeviceManager | None
dashboard, # PerformanceDashboard | None
) -> None:
super().__init__(parent)
self._benchmarker = benchmarker
self._history = history
self._device_manager = device_manager
self._dashboard = dashboard
self._exporter = ReportExporter()
# Default format is PDF
self._selected_format: str = "pdf"
self._setup_ui()
# ------------------------------------------------------------------
# UI construction
# ------------------------------------------------------------------
def _setup_ui(self) -> None:
"""建立對話框 UI。"""
self.setWindowTitle("匯出效能報告")
main_layout = QVBoxLayout()
# Format selection
format_group = QGroupBox("匯出格式")
format_layout = QHBoxLayout()
self._pdf_radio = QRadioButton("PDF")
self._csv_radio = QRadioButton("CSV")
self._pdf_radio.setChecked(True)
self._pdf_radio.clicked.connect(lambda: self._set_format("pdf"))
self._csv_radio.clicked.connect(lambda: self._set_format("csv"))
format_layout.addWidget(self._pdf_radio)
format_layout.addWidget(self._csv_radio)
format_group.setLayout(format_layout)
main_layout.addWidget(format_group)
# Save path
path_layout = QHBoxLayout()
self._path_input = QLineEdit()
self._path_input.setPlaceholderText("儲存路徑…")
self._browse_btn = QPushButton("瀏覽")
self._browse_btn.clicked.connect(self._on_browse)
path_layout.addWidget(self._path_input)
path_layout.addWidget(self._browse_btn)
main_layout.addLayout(path_layout)
# Progress bar
self._progress_bar = QProgressBar()
self._progress_bar.setVisible(False)
main_layout.addWidget(self._progress_bar)
# Export button
self._export_btn = QPushButton("匯出")
self._export_btn.clicked.connect(self._on_export)
main_layout.addWidget(self._export_btn)
# Status label
self._status_label = QLabel("")
main_layout.addWidget(self._status_label)
self.setLayout(main_layout)
# ------------------------------------------------------------------
# 格式設定
# ------------------------------------------------------------------
def _set_format(self, fmt: str) -> None:
"""設定匯出格式('pdf''csv')。"""
self._selected_format = fmt
# ------------------------------------------------------------------
# 事件處理
# ------------------------------------------------------------------
def _on_browse(self) -> None:
"""開啟 QFileDialog 讓使用者選擇儲存路徑。"""
if self._selected_format == "pdf":
file_filter = "PDF 檔案 (*.pdf)"
default_suffix = ".pdf"
else:
file_filter = "CSV 檔案 (*.csv)"
default_suffix = ".csv"
path, _ = QFileDialog.getSaveFileName(
self,
"選擇儲存位置",
f"performance_report{default_suffix}",
file_filter,
)
if path:
self._path_input.setText(path)
def _on_export(self) -> None:
"""執行匯出:收集資料 -> 呼叫 ReportExporter。"""
output_path = self._path_input.text().strip()
if not output_path:
self._status_label.setText("請先指定儲存路徑。")
return
data = self._collect_report_data()
try:
if self._selected_format == "pdf":
result = self._exporter.export_pdf(data, output_path)
else:
result = self._exporter.export_csv(data, output_path)
self._status_label.setText(f"匯出成功:{result}")
except ImportError as e:
self._status_label.setText(f"匯出失敗(缺少函式庫):{e}")
except ValueError as e:
self._status_label.setText(f"匯出失敗(資料不足):{e}")
except Exception as e:
self._status_label.setText(f"匯出失敗:{e}")
# ------------------------------------------------------------------
# Data collection
# ------------------------------------------------------------------
def _collect_report_data(self) -> ReportData:
"""
Collect data from each module and assemble a ReportData.
Every source is guarded with try/except; on failure None / empty values are used.
No benchmark is actually run; the latest history record serves as parallel_result.
"""
# History records; also pick the most recent sequential / parallel entries as results
history_records: list = []
seq_result = None
par_result = None
try:
records = self._history.get_history(limit=20) if self._history else []
history_records = list(records) if records else []
seq_result = next((r for r in history_records if r.mode == "sequential"), None)
par_result = next((r for r in history_records if r.mode == "parallel"), None)
except Exception:
history_records, seq_result, par_result = [], None, None
# Fallback: take the latest entry from benchmarker.history as parallel_result; no new benchmark is run
if par_result is None:
try:
if self._benchmarker is not None:
hist = self._benchmarker.history
if hist:
par_result = hist[-1]
except Exception:
par_result = None
# Device info
devices: List[DeviceSummary] = []
try:
if self._device_manager is not None:
raw_devices = self._device_manager.scan_devices() or []
devices = self._convert_devices(raw_devices)
except Exception:
devices = []
return ReportData(
sequential_result=seq_result,
parallel_result=par_result,
speedup=None,
history_records=history_records,
devices=devices,
chart_image_bytes=None,  # screenshot integration deferred
)
@staticmethod
def _convert_devices(raw_devices: list) -> List[DeviceSummary]:
"""
DeviceManager 回傳的裝置列表轉換為 DeviceSummary 列表
若轉換失敗略過該裝置
"""
result: List[DeviceSummary] = []
for dev in raw_devices:
try:
result.append(DeviceSummary(
device_id=str(getattr(dev, "device_id", getattr(dev, "id", "unknown"))),
product_name=str(getattr(dev, "product_name", getattr(dev, "model", "unknown"))),
firmware_version=str(getattr(dev, "firmware_version", "unknown")),
is_active=bool(getattr(dev, "is_active", True)),
))
except Exception:
continue
return result
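# Wiring sketch (every collaborator may be None; _collect_report_data()
# degrades gracefully through its try/except guards):
#
#   dialog = ExportReportDialog(
#       parent=None,
#       benchmarker=benchmarker,        # or None
#       history=history,                # or None
#       device_manager=device_manager,  # or None
#       dashboard=None,                 # chart capture not integrated yet
#   )
#   dialog.exec_()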