jim800121chen 44711753ae feat(local-tool): 推論功能完整搬入 — flash 模組 + workspace 推論介面
## 後端(Phase 1)
新增 flash 模組(從 edge-ai-platform 搬入):
- server/internal/flash/service.go:StartFlash + 模型相容性檢查 + 晶片 NEF 解析
- server/internal/flash/progress.go:Flash 進度追蹤器
- server/internal/api/ws/flash_ws.go:WebSocket 推送 flash 進度
- device_handler.go:新增 FlashDevice method + flashSvc 欄位
- router.go:新增 POST /api/devices/:id/flash + WS /ws/devices/:id/flash-progress
- main.go:初始化 flash.NewService 並傳入 router

推論/攝影機/MJPEG/inference WebSocket 之前 M1 已搬好,不需改動。
Python bridge (kneron_bridge.py) 與 edge-ai-platform 完全相同,不需改動。

## 前端 store + hooks(Phase 2)
- stores/flash-store.ts(新):Zustand store — startFlash / updateProgress / retryFlash / reset
- hooks/use-flash-progress.ts(新):WebSocket hook 接收 flash 進度

inference-store / camera-store / inference types / use-inference-stream / use-websocket
之前 M1 已搬好,不需改動。

## 前端 UI 元件(Phase 3)
- components/devices/flash-dialog.tsx(新):模型載入對話框 + 硬體相容性檢查
- components/devices/flash-progress.tsx(新):Flash 進度條 + 錯誤重試

camera-inference-view / camera-feed / camera-overlay / source-selector /
inference-panel / performance-metrics / classification-result / confidence-slider /
video-progress / batch-image-thumbnails 之前 M1 已搬好。

## 前端頁面整合(Phase 4)
- workspace/page.tsx:繁中硬編碼、顯示已載入模型名稱
- workspace/[deviceId]/workspace-client.tsx:加入 FlashDialog 按鈕 + 繁中硬編碼
- devices/[id]/device-detail-client.tsx:加入 FlashDialog + 「進入工作區」按鈕(模型已載入才顯示)
- device-card.tsx:已連線 + 模型已載入時顯示「工作區」快捷按鈕

## 使用者操作流程
裝置列表 → 連線 → 管理 → 載入模型 → 進入工作區 → 選攝影機/圖片/影片 → 開始推論 → 看 bounding box / FPS / latency
或:裝置列表 → 工作區(已有模型)→ 直接推論

## 不搬的東西
- cluster/* 全部不搬(已砍 cluster 功能)
- relay / tunnel 相關不搬

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-12 20:07:09 +08:00

182 lines
4.1 KiB
Go

package handlers
import (
"context"
"fmt"
"time"
"visiona-local/server/internal/api/ws"
"visiona-local/server/internal/device"
"visiona-local/server/internal/driver"
"visiona-local/server/internal/flash"
"visiona-local/server/internal/inference"
"github.com/gin-gonic/gin"
)
// DeviceHandler exposes HTTP endpoints for device discovery, connection
// lifecycle, model flashing, and inference start/stop. Long-running
// progress/result streams are pushed out through the WebSocket hub.
type DeviceHandler struct {
	deviceMgr    *device.Manager    // device scanning, lookup, connect/disconnect
	flashSvc     *flash.Service     // model flash tasks (see StartFlash)
	inferenceSvc *inference.Service // per-device inference sessions
	wsHub        *ws.Hub            // room-based broadcast of flash progress / inference results
}
// NewDeviceHandler wires a DeviceHandler to its service dependencies.
func NewDeviceHandler(
	deviceMgr *device.Manager,
	flashSvc *flash.Service,
	inferenceSvc *inference.Service,
	wsHub *ws.Hub,
) *DeviceHandler {
	// Positional literal: field order matches the parameter order above.
	return &DeviceHandler{deviceMgr, flashSvc, inferenceSvc, wsHub}
}
// ScanDevices forces a hardware rescan and returns the devices found.
func (h *DeviceHandler) ScanDevices(c *gin.Context) {
	found := h.deviceMgr.Rescan()
	payload := gin.H{"devices": found}
	c.JSON(200, gin.H{"success": true, "data": payload})
}
// ListDevices returns the currently known devices without rescanning.
func (h *DeviceHandler) ListDevices(c *gin.Context) {
	known := h.deviceMgr.ListDevices()
	payload := gin.H{"devices": known}
	c.JSON(200, gin.H{"success": true, "data": payload})
}
func (h *DeviceHandler) GetDevice(c *gin.Context) {
id := c.Param("id")
session, err := h.deviceMgr.GetDevice(id)
if err != nil {
c.JSON(404, gin.H{
"success": false,
"error": gin.H{"code": "DEVICE_NOT_FOUND", "message": err.Error()},
})
return
}
c.JSON(200, gin.H{"success": true, "data": session.Driver.Info()})
}
// ConnectDevice opens a connection to the device addressed by :id,
// bounded by a 60-second deadline.
func (h *DeviceHandler) ConnectDevice(c *gin.Context) {
	deviceID := c.Param("id")
	// KL520 USB Boot flow can take ~40s: retry connect (3x2s) + firmware
	// load + 5s reboot wait + reconnect retry (3x3s). Use 60s timeout.
	ctx, cancel := context.WithTimeout(c.Request.Context(), 60*time.Second)
	defer cancel()
	// Buffered so the goroutine can always deliver its result and exit,
	// even when we have already answered 504.
	done := make(chan error, 1)
	go func() { done <- h.deviceMgr.Connect(deviceID) }()
	select {
	case <-ctx.Done():
		c.JSON(504, gin.H{
			"success": false,
			"error":   gin.H{"code": "CONNECT_TIMEOUT", "message": fmt.Sprintf("device connect timed out after 60s for %s", deviceID)},
		})
	case err := <-done:
		if err != nil {
			c.JSON(400, gin.H{
				"success": false,
				"error":   gin.H{"code": "CONNECT_FAILED", "message": err.Error()},
			})
			return
		}
		c.JSON(200, gin.H{"success": true})
	}
}
// DisconnectDevice tears down the connection to the device addressed by :id.
func (h *DeviceHandler) DisconnectDevice(c *gin.Context) {
	deviceID := c.Param("id")
	err := h.deviceMgr.Disconnect(deviceID)
	if err == nil {
		c.JSON(200, gin.H{"success": true})
		return
	}
	c.JSON(400, gin.H{
		"success": false,
		"error":   gin.H{"code": "DISCONNECT_FAILED", "message": err.Error()},
	})
}
// FlashDevice starts loading a model onto the device addressed by :id and
// streams flash progress to the "flash:<id>" WebSocket room.
//
// Request body: {"modelId": "..."} — modelId must be non-empty.
// Responds 200 with {"taskId": ...} once the flash task is accepted.
func (h *DeviceHandler) FlashDevice(c *gin.Context) {
	id := c.Param("id")
	var req struct {
		ModelID string `json:"modelId"`
	}
	// ShouldBindJSON succeeds on `{}` or an empty modelId, so the explicit
	// emptiness check is needed to actually enforce "modelId is required".
	if err := c.ShouldBindJSON(&req); err != nil || req.ModelID == "" {
		c.JSON(400, gin.H{
			"success": false,
			"error":   gin.H{"code": "BAD_REQUEST", "message": "modelId is required"},
		})
		return
	}
	taskID, progressCh, err := h.flashSvc.StartFlash(id, req.ModelID)
	if err != nil {
		c.JSON(400, gin.H{
			"success": false,
			"error":   gin.H{"code": "FLASH_FAILED", "message": err.Error()},
		})
		return
	}
	// Forward progress to WebSocket subscribers until the service closes
	// the channel (flash finished or failed), which ends this goroutine.
	go func() {
		room := "flash:" + id
		for progress := range progressCh {
			h.wsHub.BroadcastToRoom(room, progress)
		}
	}()
	c.JSON(200, gin.H{"success": true, "data": gin.H{"taskId": taskID}})
}
// StartInference starts an inference session on the device addressed by :id
// and relays results to the "inference:<id>" WebSocket room.
func (h *DeviceHandler) StartInference(c *gin.Context) {
	deviceID := c.Param("id")
	results := make(chan *driver.InferenceResult, 10)
	if err := h.inferenceSvc.Start(deviceID, results); err != nil {
		c.JSON(400, gin.H{
			"success": false,
			"error":   gin.H{"code": "INFERENCE_ERROR", "message": err.Error()},
		})
		return
	}
	// Relay results to WebSocket subscribers, stamping each with the device
	// ID; the loop ends when the service closes the channel.
	go func() {
		room := "inference:" + deviceID
		for r := range results {
			r.DeviceID = deviceID
			h.wsHub.BroadcastToRoom(room, r)
		}
	}()
	c.JSON(200, gin.H{"success": true})
}
// StopInference stops the inference session on the device addressed by :id.
func (h *DeviceHandler) StopInference(c *gin.Context) {
	deviceID := c.Param("id")
	err := h.inferenceSvc.Stop(deviceID)
	if err == nil {
		c.JSON(200, gin.H{"success": true})
		return
	}
	c.JSON(400, gin.H{
		"success": false,
		"error":   gin.H{"code": "INFERENCE_ERROR", "message": err.Error()},
	})
}