依 R5 五輪決策把 visionA-local 從「Wails 內嵌 Next.js」重構為「Wails
本機伺服器控制台 + 瀏覽器 Web UI」模式(類比 Docker Desktop / Ollama)。
程式碼變動
- M8-1 砍 yt-dlp 全套(後端 resolver / URL handler / 前端 URL tab /
Makefile vendor / installer / bootstrap / CI workflow,-555 行)
- M8-2 砍 Mock 模式全套(driver/mock、mock_camera、Settings runtimeMode、
VISIONA_MOCK 環境變數,-528 行)
- M8-3 ffmpeg 從 GPL 切換到 LGPL 混合方案:Windows/Linux 用 BtbN 現成
LGPL binary,macOS 自 build minimal decoder-only 進 git
(vendor/ffmpeg/macos/ffmpeg 5.7MB + ffprobe 5.6MB,比 GPL 版省 85% 空間)
- M8-4 Wails Server Controller:state machine、log ring buffer 2000 行、
preferences.json atomic write、boot-id、Gin SkipPaths、shutdown 7+1 秒、
notify_*.go 三平台 OS 通知、watchServer 改 Error state 不 os.Exit
- M8-4b 啟動階段管線 R5-E:6 階段進度 event、20s soft / 60s hard timeout、
stage 5/6 skip 規則、sentinel file、RestartStartupSequence 5 步驟
- M8-5 Wails 控制台 vanilla HTML/JS/CSS(9 檔 ~2012 行)取代 M7-B splash:
state 視覺、log panel、startup progress panel、Stage 6 manual CTA
pulse、shutdown modal、Settings、Dark Mode、i18n 中英雙語
- M8-6 上傳影片副檔名擴充(mp4/avi/mov/mpeg/mpg)
- M8-7 Web UI Server Offline Overlay(role=alertdialog + focus trap +
wsEverConnected 容錯 + Page Visibility)
- M8-8 CORS middleware(127.0.0.1/localhost only + suffix attack 防護)+
ws/origin.go 獨立 WebSocket CheckOrigin 避 package cycle
- MAJ-4 server:shutdown-imminent WebSocket broadcast 機制
(/ws/system endpoint + notifyShutdownImminent helper)
- M8-9 Boot-ID + 瀏覽器 tab 自動重連(sessionStorage loop guard)
品質
- ~105+ 新 unit test + race detector (-count=2) 全綠
- 10 個 milestone 全部通過 Reviewer 審查
- 三方 v2 + v2.1 文件(PRD / Design Spec / TDD)+ 交叉互審紀錄
收錄在 .autoflow/
交付前待處理(M8-10)
- 重跑 make payload-macos 把舊 GPL 77MB binary 換成新 LGPL
- 三平台 end-to-end build 驗證
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
133 lines
3.9 KiB
Go
package ws
// hub_broadcast_test.go — MAJ-4 patch: verifies Hub.BroadcastToRoom behavior for the "system" room.
//
// Covers:
//  1. Multiple clients all receive the same message.
//  2. An empty room (no clients) neither panics nor blocks.
//  3. A client whose send channel is full does not block the hub (that client is dropped).
import (
	"encoding/json"
	"testing"
	"time"
)
// makeRegisteredClient 註冊一個 buffered send channel 的 dummy client 到指定 room。
|
||
func makeRegisteredClient(h *Hub, room string, bufSize int) *Client {
|
||
c := &Client{Send: make(chan []byte, bufSize)}
|
||
h.RegisterSync(&Subscription{Client: c, Room: room})
|
||
return c
|
||
}
|
||
|
||
func TestHub_BroadcastToRoom_MultipleClients(t *testing.T) {
|
||
hub := NewHub()
|
||
go hub.Run()
|
||
|
||
c1 := makeRegisteredClient(hub, "system", 4)
|
||
c2 := makeRegisteredClient(hub, "system", 4)
|
||
c3 := makeRegisteredClient(hub, "system", 4)
|
||
|
||
payload := map[string]interface{}{
|
||
"type": "server:shutdown-imminent",
|
||
"reason": "quit",
|
||
"ts": int64(1234567890),
|
||
}
|
||
hub.BroadcastToRoom("system", payload)
|
||
|
||
for i, c := range []*Client{c1, c2, c3} {
|
||
select {
|
||
case msg := <-c.Send:
|
||
var got map[string]interface{}
|
||
if err := json.Unmarshal(msg, &got); err != nil {
|
||
t.Fatalf("client %d bad json: %v", i, err)
|
||
}
|
||
if got["type"] != "server:shutdown-imminent" || got["reason"] != "quit" {
|
||
t.Errorf("client %d wrong payload: %+v", i, got)
|
||
}
|
||
case <-time.After(500 * time.Millisecond):
|
||
t.Fatalf("client %d did not receive broadcast within 500ms", i)
|
||
}
|
||
}
|
||
}
|
||
|
||
func TestHub_BroadcastToRoom_EmptyRoom(t *testing.T) {
|
||
hub := NewHub()
|
||
go hub.Run()
|
||
|
||
// 無 client 註冊在 "system" room → BroadcastToRoom 應該 no-op 且不 panic
|
||
done := make(chan struct{})
|
||
go func() {
|
||
defer close(done)
|
||
hub.BroadcastToRoom("system", map[string]string{"type": "server:shutdown-imminent"})
|
||
}()
|
||
|
||
select {
|
||
case <-done:
|
||
// OK
|
||
case <-time.After(500 * time.Millisecond):
|
||
t.Fatalf("BroadcastToRoom with empty room blocked > 500ms")
|
||
}
|
||
}
|
||
|
||
func TestHub_BroadcastToRoom_FullChannelDoesNotBlock(t *testing.T) {
|
||
hub := NewHub()
|
||
go hub.Run()
|
||
|
||
// buffer = 1 的 slow client,先塞滿 → hub 接著 broadcast 時會打到 default case
|
||
// (select 非 blocking send),hub goroutine 必須仍能繼續處理後續訊息。
|
||
//
|
||
// 同時註冊一個 healthy client 觀察:hub 沒被卡住的證據 = healthy client 仍能收到訊息。
|
||
slow := makeRegisteredClient(hub, "system", 1)
|
||
slow.Send <- []byte("pre-existing") // 塞滿 slow 的 buffer
|
||
|
||
healthy := makeRegisteredClient(hub, "system", 4)
|
||
|
||
// 先量 broadcast 的時間,若 hub 被 slow client 卡住,這行會 block 直到 test timeout
|
||
done := make(chan struct{})
|
||
go func() {
|
||
defer close(done)
|
||
hub.BroadcastToRoom("system", map[string]string{"type": "server:shutdown-imminent"})
|
||
}()
|
||
select {
|
||
case <-done:
|
||
// OK — broadcast 沒 block
|
||
case <-time.After(500 * time.Millisecond):
|
||
t.Fatalf("BroadcastToRoom blocked — slow client 未被 drop")
|
||
}
|
||
|
||
// healthy client 必須收到訊息(證明 hub goroutine 沒被 slow client 卡住)
|
||
select {
|
||
case msg := <-healthy.Send:
|
||
var got map[string]interface{}
|
||
if err := json.Unmarshal(msg, &got); err != nil {
|
||
t.Fatalf("healthy client bad json: %v", err)
|
||
}
|
||
if got["type"] != "server:shutdown-imminent" {
|
||
t.Errorf("healthy client wrong payload: %+v", got)
|
||
}
|
||
case <-time.After(500 * time.Millisecond):
|
||
t.Fatalf("healthy client did not receive broadcast — hub 被 slow client 卡住")
|
||
}
|
||
|
||
// 再次 broadcast 驗證 hub 持續工作(slow 已被 drop,broadcast 仍該回來)
|
||
done2 := make(chan struct{})
|
||
go func() {
|
||
defer close(done2)
|
||
hub.BroadcastToRoom("system", map[string]string{"type": "server:shutdown-imminent", "n": "2"})
|
||
}()
|
||
select {
|
||
case <-done2:
|
||
// OK
|
||
case <-time.After(500 * time.Millisecond):
|
||
t.Fatalf("second BroadcastToRoom blocked")
|
||
}
|
||
|
||
// healthy 應收到第二則
|
||
select {
|
||
case <-healthy.Send:
|
||
case <-time.After(500 * time.Millisecond):
|
||
t.Fatalf("healthy client 未收到第二則")
|
||
}
|
||
}
|