This commit is contained in:
jim800121chen 2026-03-06 17:33:30 +08:00
commit d8128b6c75
109 changed files with 14165 additions and 0 deletions

27
.gitignore vendored Normal file
View File

@ -0,0 +1,27 @@
# Build outputs
/dist/
/frontend/out/
/frontend/.next/
# Embedded frontend (copied from frontend/out/ at build time)
/server/web/out/
!/server/web/out/.gitkeep
# Installer payload & build (staged at build time)
/installer/payload/
!/installer/payload/.gitkeep
/installer/build/
/installer/frontend/wailsjs/
# Test coverage
coverage.out
coverage.html
/frontend/coverage/
# OS
.DS_Store
# IDE
.idea/
.vscode/
*.swp

80
.goreleaser.yaml Normal file
View File

@ -0,0 +1,80 @@
# GoReleaser v2 configuration for the Edge AI Platform server binary.
# The Next.js frontend must be built and staged for go:embed before the
# Go build runs (see the `before` hook).
version: 2

before:
  hooks:
    - make build-frontend build-embed

builds:
  - id: edge-ai-server
    dir: server
    main: ./main.go
    binary: edge-ai-server
    env:
      - CGO_ENABLED=0
    # "notray" disables the system-tray UI, which needs CGO.
    tags:
      - notray
    goos:
      - darwin
      - linux
      - windows
    goarch:
      - amd64
      - arm64
    # arm64 is only shipped for macOS (Apple Silicon).
    ignore:
      - goos: windows
        goarch: arm64
      - goos: linux
        goarch: arm64
    ldflags:
      - -s -w
      - -X main.Version={{.Version}}
      - -X main.BuildTime={{.Date}}

archives:
  - id: default
    builds:
      - edge-ai-server
    format_overrides:
      - goos: windows
        format: zip
    name_template: >-
      edge-ai-platform_{{.Version}}_{{.Os}}_{{.Arch}}
    # Runtime assets shipped next to the binary inside each archive.
    files:
      - src: server/data/models.json
        dst: data/
        strip_parent: true
      - src: server/scripts/kneron_bridge.py
        dst: scripts/
        strip_parent: true
      - src: server/scripts/requirements.txt
        dst: scripts/
        strip_parent: true
      - src: server/scripts/update_kl720_firmware.py
        dst: scripts/
        strip_parent: true
      - src: scripts/kneron_detect.py
        dst: scripts/
        strip_parent: true
      - src: server/scripts/firmware/KL520/*
        dst: firmware/KL520/
        strip_parent: true
      - src: server/scripts/firmware/KL720/*
        dst: firmware/KL720/
        strip_parent: true

checksum:
  name_template: checksums.txt
  algorithm: sha256

changelog:
  sort: asc
  filters:
    exclude:
      - "^docs:"
      - "^test:"
      - "^ci:"

release:
  gitea:
    owner: warrenchen
    name: web_academy_prototype

168
Makefile Normal file
View File

@ -0,0 +1,168 @@
# Edge AI Platform - Makefile
#
# Every target here is a command, not a file, so all of them are declared
# .PHONY — including the internal helpers (dev-server, build-frontend, …)
# that a same-named file could otherwise silently mask.
.PHONY: help dev dev-server dev-frontend dev-mock dev-mock-server mock \
	build build-frontend build-embed build-server build-server-tray build-relay \
	release release-snapshot \
	test test-server test-frontend test-coverage \
	lint lint-server lint-frontend fmt install \
	installer-payload installer installer-dev installer-clean \
	deploy-frontend deploy-frontend-setup deploy-ec2 clean

VERSION ?= v0.1.0
BUILD_TIME := $(shell date -u +%Y-%m-%dT%H:%M:%SZ)

help: ## Show available targets
	@echo "Edge AI Platform - Available targets:"
	@echo ""
	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | \
		awk 'BEGIN {FS = ":.*?## "}; {printf " \033[36m%-18s\033[0m %s\n", $$1, $$2}'

# ── Development ──────────────────────────────────────────
dev: ## Start frontend + backend with REAL hardware (no mock)
	@echo "Starting development servers (real hardware)..."
	@$(MAKE) -j2 dev-server dev-frontend

dev-server:
	cd server && go run main.go --dev

dev-frontend:
	cd frontend && pnpm dev

dev-mock: ## Start frontend + backend with mock devices
	@echo "Starting development servers (mock mode)..."
	@$(MAKE) -j2 dev-mock-server dev-frontend

dev-mock-server:
	cd server && go run main.go --dev --mock --mock-devices=3

mock: ## Start server in mock mode only (no frontend)
	cd server && go run main.go --dev --mock --mock-devices=3 --mock-camera

# ── Build ────────────────────────────────────────────────
build: build-frontend build-embed build-server ## Build single binary with embedded frontend
	@echo "Build complete! Binary: dist/edge-ai-server"

build-frontend: ## Build Next.js frontend static export
	cd frontend && pnpm build
	@echo "Frontend built: frontend/out/"

build-embed: ## Copy frontend static export into server for go:embed
	@rm -rf server/web/out
	@mkdir -p server/web/out
	cp -r frontend/out/. server/web/out/
	@echo "Frontend static files copied to server/web/out/"

build-server: ## Build Go server binary (embeds frontend)
	@mkdir -p dist
	cd server && go build \
		-ldflags="-X main.Version=$(VERSION) -X main.BuildTime=$(BUILD_TIME)" \
		-o ../dist/edge-ai-server main.go
	@echo "Server binary: dist/edge-ai-server"

build-relay: ## Build relay server binary
	@mkdir -p dist
	cd server && go build -o ../dist/relay-server ./cmd/relay-server
	@echo "Relay binary: dist/relay-server"

build-server-tray: build-frontend build-embed ## Build server with tray support (CGO required)
	@mkdir -p dist
	cd server && CGO_ENABLED=1 go build \
		-ldflags="-X main.Version=$(VERSION) -X main.BuildTime=$(BUILD_TIME)" \
		-o ../dist/edge-ai-server main.go
	@echo "Server binary (tray-enabled): dist/edge-ai-server"

# ── Release ──────────────────────────────────────────────
release-snapshot: ## Build release archives locally (no publish)
	goreleaser release --snapshot --clean

release: ## Build and publish release to Gitea
	goreleaser release --clean

# ── Testing ──────────────────────────────────────────────
test: test-server test-frontend ## Run all tests
	@echo "All tests passed!"

test-server: ## Run Go tests
	cd server && go test -v ./...

test-frontend: ## Run Vitest tests
	cd frontend && pnpm test

test-coverage: ## Run tests with coverage reports
	cd server && go test -coverprofile=coverage.out ./... && \
		go tool cover -html=coverage.out -o coverage.html
	cd frontend && pnpm test -- --coverage

# ── Linting ──────────────────────────────────────────────
lint: lint-server lint-frontend ## Lint all code

lint-server: ## Lint Go code
	cd server && go vet ./...

lint-frontend: ## Lint frontend code
	cd frontend && pnpm lint

fmt: ## Format all code
	cd server && go fmt ./...

# ── Dependencies ─────────────────────────────────────────
install: ## Install all dependencies
	cd server && go mod download
	cd frontend && pnpm install

# ── GUI Installer ────────────────────────────────────────
installer-payload: build-server-tray ## Stage payload files for GUI installer
	@echo "Staging installer payload..."
	@rm -rf installer/payload
	@mkdir -p installer/payload/data/nef/kl520
	@mkdir -p installer/payload/data/nef/kl720
	@mkdir -p installer/payload/scripts/firmware/KL520
	@mkdir -p installer/payload/scripts/firmware/KL720
	cp dist/edge-ai-server installer/payload/
	cp server/data/models.json installer/payload/data/
	cp server/data/nef/kl520/*.nef installer/payload/data/nef/kl520/
	cp server/data/nef/kl720/*.nef installer/payload/data/nef/kl720/
	cp server/scripts/kneron_bridge.py installer/payload/scripts/
	cp server/scripts/requirements.txt installer/payload/scripts/
	cp server/scripts/update_kl720_firmware.py installer/payload/scripts/
	cp scripts/kneron_detect.py installer/payload/scripts/
	cp server/scripts/firmware/KL520/*.bin installer/payload/scripts/firmware/KL520/
	cp server/scripts/firmware/KL720/*.bin installer/payload/scripts/firmware/KL720/
	@echo "Payload staged in installer/payload/"

installer: installer-payload ## Build GUI installer app
	cd installer && wails build -clean
	@echo "Installer built! Check installer/build/"

installer-dev: installer-payload ## Run GUI installer in dev mode
	cd installer && wails dev

installer-clean: ## Remove installer build artifacts
	rm -rf installer/payload
	@mkdir -p installer/payload && touch installer/payload/.gitkeep
	rm -rf installer/build
	@echo "Installer artifacts cleaned!"

# ── Deploy ───────────────────────────────────────────────
deploy-frontend: build-frontend ## Deploy frontend to AWS (CloudFront + S3)
	bash scripts/deploy-aws.sh

deploy-frontend-setup: build-frontend ## First-time AWS S3+CloudFront setup + deploy
	bash scripts/deploy-aws.sh --setup

deploy-ec2: build-frontend ## Deploy frontend to EC2 (nginx). Usage: make deploy-ec2 HOST=user@ip KEY=~/.ssh/key.pem
	bash scripts/deploy-ec2.sh $(HOST) --key $(KEY)

# ── Cleanup ──────────────────────────────────────────────
clean: installer-clean ## Remove build artifacts
	rm -rf dist/
	rm -rf frontend/.next
	rm -rf frontend/out
	rm -rf server/web/out
	@mkdir -p server/web/out && touch server/web/out/.gitkeep
	rm -f server/coverage.out server/coverage.html
	@echo "Clean complete!"

116
README.md Normal file
View File

@ -0,0 +1,116 @@
# Edge AI Platform
邊緣 AI 開發平台 — 管理 AI 模型、連接邊緣裝置(Kneron KL720/KL730)、即時攝影機推論。
單一執行檔,下載即可使用。
## Quick Start
### macOS
```bash
# 安裝(下載至 ~/.edge-ai-platform)
curl -fsSL https://gitea.innovedus.com/warrenchen/web_academy_prototype/raw/branch/main/scripts/install.sh | bash
# 啟動(Mock 模式,不需硬體)
edge-ai-server --mock --mock-devices=3
# 開啟瀏覽器
open http://127.0.0.1:3721
```
### Windows (PowerShell)
```powershell
# 安裝
irm https://gitea.innovedus.com/warrenchen/web_academy_prototype/raw/branch/main/scripts/install.ps1 | iex
# 啟動(Mock 模式)
edge-ai-server.exe --mock --mock-devices=3
# 開啟瀏覽器
Start-Process http://127.0.0.1:3721
```
### 手動下載
從 [Releases](https://gitea.innovedus.com/warrenchen/web_academy_prototype/releases) 下載對應平台的壓縮檔:
| 平台 | 檔案 |
|:-----|:-----|
| macOS Intel | `edge-ai-platform_vX.Y.Z_darwin_amd64.tar.gz` |
| macOS Apple Silicon | `edge-ai-platform_vX.Y.Z_darwin_arm64.tar.gz` |
| Windows x64 | `edge-ai-platform_vX.Y.Z_windows_amd64.zip` |
解壓後執行:
```bash
# macOS
tar xzf edge-ai-platform_*.tar.gz
cd edge-ai-platform_*/
./edge-ai-server --mock --mock-devices=3
# Windows: 解壓 zip在資料夾中開啟 PowerShell
.\edge-ai-server.exe --mock --mock-devices=3
```
然後開啟瀏覽器 http://127.0.0.1:3721
## 命令列選項
| Flag | 預設值 | 說明 |
|:-----|:------|:-----|
| `--port` | `3721` | 伺服器連接埠 |
| `--host` | `127.0.0.1` | 伺服器位址 |
| `--mock` | `false` | 啟用模擬裝置驅動 |
| `--mock-camera` | `false` | 啟用模擬攝影機 |
| `--mock-devices` | `1` | 模擬裝置數量 |
| `--log-level` | `info` | 日誌等級(debug/info/warn/error) |
| `--dev` | `false` | 開發模式(停用嵌入式前端) |
## 可選依賴
以下工具可增強功能,但**非必要**
| 工具 | 用途 | macOS 安裝 | Windows 安裝 |
|:-----|:-----|:----------|:------------|
| `ffmpeg` | 攝影機擷取、影片處理 | `brew install ffmpeg` | `winget install Gyan.FFmpeg` |
| `yt-dlp` | YouTube / 影片 URL 解析 | `brew install yt-dlp` | `winget install yt-dlp` |
| `python3` | Kneron KL720 硬體驅動 | `brew install python3` | `winget install Python.Python.3.12` |
啟動時會自動檢查並提示缺少的工具。
## 解除安裝
### macOS
```bash
rm -rf ~/.edge-ai-platform
sudo rm -f /usr/local/bin/edge-ai-server
```
### Windows (PowerShell)
```powershell
Remove-Item -Recurse -Force "$env:LOCALAPPDATA\EdgeAIPlatform"
# 手動從系統環境變數移除 PATH 中的 EdgeAIPlatform 路徑
```
## 開發
```bash
# 安裝依賴
make install
# 啟動開發伺服器(前端 :3000 + 後端 :3721)
make dev
# 編譯單一 binary
make build
# 跨平台打包(本機測試,不發佈)
make release-snapshot
# 發佈至 Gitea Release
make release
```

1
frontend Submodule

@ -0,0 +1 @@
Subproject commit 089f5644a73fd5ac2ecf12bc55c2c9a70b93b30f

630
installer/app.go Normal file
View File

@ -0,0 +1,630 @@
package main
import (
"context"
"embed"
"encoding/json"
"fmt"
"io/fs"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
wailsRuntime "github.com/wailsapp/wails/v2/pkg/runtime"
)
// SystemInfo describes the current platform and pre-existing state.
// Returned by GetSystemInfo and rendered on the wizard's welcome page,
// hence the json tags.
type SystemInfo struct {
	OS              string `json:"os"`              // runtime.GOOS ("darwin", "windows", "linux", …)
	Arch            string `json:"arch"`            // runtime.GOARCH
	DefaultDir      string `json:"defaultDir"`      // suggested install directory (platformDefaultDir)
	PythonAvailable bool   `json:"pythonAvailable"` // a Python 3 interpreter was found on PATH
	PythonVersion   string `json:"pythonVersion"`   // trimmed output of `python --version`
	BrewAvailable   bool   `json:"brewAvailable"`   // Homebrew present (probed on macOS only)
	LibusbInstalled bool   `json:"libusbInstalled"` // result of checkLibusbInstalled()
	ExistingInstall bool   `json:"existingInstall"` // server binary already present in DefaultDir
	ExistingVersion string `json:"existingVersion"` // NOTE(review): never populated in this file — confirm writer elsewhere
	FfmpegAvailable bool   `json:"ffmpegAvailable"` // optional tool: camera capture / video processing
	YtdlpAvailable  bool   `json:"ytdlpAvailable"`  // optional tool: YouTube / video URL resolution
}

// InstallConfig holds user choices from the wizard.
type InstallConfig struct {
	InstallDir       string `json:"installDir"`       // target directory for all installed files
	CreateSymlink    bool   `json:"createSymlink"`    // add CLI link via createSystemLink
	InstallPythonEnv bool   `json:"installPythonEnv"` // create venv and pip-install requirements
	InstallLibusb    bool   `json:"installLibusb"`    // run installLibusb during setup
	RelayURL         string `json:"relayURL"`         // optional relay server URL (may be empty)
	RelayToken       string `json:"relayToken"`       // optional relay auth token
	ServerPort       int    `json:"serverPort"`       // 0 means default (3721, see stepWriteConfig)
	Language         string `json:"language"`         // UI language persisted into config.json
}

// ProgressEvent is emitted via Wails Events to update the frontend.
type ProgressEvent struct {
	Step       string  `json:"step"`       // short step id or display name
	Message    string  `json:"message"`    // human-readable status line
	Percent    float64 `json:"percent"`    // 0–100 overall progress
	IsError    bool    `json:"isError"`    // step failed; install still continues (see runInstall)
	IsComplete bool    `json:"isComplete"` // terminal event for the whole operation
}

// HardwareDevice describes a detected Kneron device.
type HardwareDevice struct {
	Model   string `json:"model"`   // "KL520", "KL720" or "KL730"
	Port    string `json:"port"`    // NOTE(review): left empty by parseDetectOutput
	Serial  string `json:"serial"`  // NOTE(review): left empty by parseDetectOutput
	Product string `json:"product"` // raw detection-output line
}

// Installer is the main app struct bound to the Wails frontend.
type Installer struct {
	ctx     context.Context // Wails context, set in startup()
	payload embed.FS        // embedded payload/ tree (binary, models, scripts)
}
// NewInstaller creates a new Installer instance wrapping the embedded
// payload filesystem (populated via go:embed in installer/embed.go).
func NewInstaller(payload embed.FS) *Installer {
	return &Installer{payload: payload}
}
// startup is called by Wails when the app starts. It stores the context
// (needed for event emission and native dialogs) and widens PATH so tool
// lookups work when launched from Finder/launchd.
func (inst *Installer) startup(ctx context.Context) {
	inst.ctx = ctx
	ensureGUIPath()
}
// ensureGUIPath expands PATH for macOS/Linux GUI apps that inherit a
// minimal environment from launchd/Finder. Without this, exec.LookPath
// cannot find brew, python3, ffmpeg, etc.
//
// PATH membership is checked against whole entries (not substrings), and
// the platform list separator is used so the logic stays correct even on
// Windows (where the separator is ';', not ':').
func ensureGUIPath() {
	extraDirs := []string{
		"/usr/local/bin",
		"/opt/homebrew/bin", // Apple Silicon Homebrew
		"/opt/homebrew/sbin",
		"/usr/local/sbin",
	}
	// Also add ~/bin and ~/.local/bin
	if home, err := os.UserHomeDir(); err == nil {
		extraDirs = append(extraDirs,
			filepath.Join(home, ".local", "bin"),
			filepath.Join(home, "bin"),
		)
	}
	sep := string(os.PathListSeparator)
	current := os.Getenv("PATH")
	// Index existing PATH entries so "/usr/local/bin" is not considered
	// present merely because e.g. "/usr/local/bin2" appears as a substring.
	present := make(map[string]bool)
	for _, entry := range strings.Split(current, sep) {
		present[entry] = true
	}
	for _, d := range extraDirs {
		// Append only directories that exist and are not already on PATH.
		if _, err := os.Stat(d); err == nil && !present[d] {
			current += sep + d
			present[d] = true
		}
	}
	os.Setenv("PATH", current)
}
// emitProgress sends a progress event to the frontend on the
// "install:progress" Wails event channel.
func (inst *Installer) emitProgress(event ProgressEvent) {
	wailsRuntime.EventsEmit(inst.ctx, "install:progress", event)
}
// GetSystemInfo probes the host system and returns platform details:
// OS/arch, the default install directory, whether an installation already
// exists there, and availability of optional tooling (python3, brew,
// libusb, ffmpeg, yt-dlp). The error result is always nil today; the
// signature leaves room for future failures.
func (inst *Installer) GetSystemInfo() (*SystemInfo, error) {
	info := &SystemInfo{
		OS:   runtime.GOOS,
		Arch: runtime.GOARCH,
	}
	info.DefaultDir = platformDefaultDir()
	// Detect an existing installation via the platform-specific binary
	// name; a single stat is sufficient (the previous extra stat of the
	// bare unix name was redundant).
	binName := "edge-ai-server"
	if runtime.GOOS == "windows" {
		binName = "edge-ai-server.exe"
	}
	if _, err := os.Stat(filepath.Join(info.DefaultDir, binName)); err == nil {
		info.ExistingInstall = true
	}
	// Python interpreter (required for the Kneron hardware bridge).
	if pythonPath, err := findPython3(); err == nil {
		info.PythonAvailable = true
		out, _ := exec.Command(pythonPath, "--version").Output()
		info.PythonVersion = strings.TrimSpace(string(out))
	}
	// Homebrew is only relevant on macOS (used to install libusb).
	if runtime.GOOS == "darwin" {
		if _, err := exec.LookPath("brew"); err == nil {
			info.BrewAvailable = true
		}
	}
	// Check libusb
	info.LibusbInstalled = checkLibusbInstalled()
	// Optional tools: missing ones degrade features but never block install.
	if _, err := exec.LookPath("ffmpeg"); err == nil {
		info.FfmpegAvailable = true
	}
	if _, err := exec.LookPath("yt-dlp"); err == nil {
		info.YtdlpAvailable = true
	}
	return info, nil
}
// BrowseDirectory opens a native directory picker dialog and returns the
// selected path (empty string when the user cancels).
func (inst *Installer) BrowseDirectory() (string, error) {
	dir, err := wailsRuntime.OpenDirectoryDialog(inst.ctx, wailsRuntime.OpenDialogOptions{
		Title: "Choose Installation Directory",
	})
	return dir, err
}
// ValidatePath checks whether path is usable as the installation target.
// It returns "" when the path looks OK, otherwise a human-readable error
// string that the frontend displays verbatim.
//
// When the target directory already exists it is probed directly for
// writability; otherwise the parent is probed (MkdirAll creates the
// target at install time).
func (inst *Installer) ValidatePath(path string) string {
	if path == "" {
		return "Please select an installation directory."
	}
	probeDir := filepath.Dir(path)
	if fi, err := os.Stat(path); err == nil {
		if !fi.IsDir() {
			return fmt.Sprintf("Path exists but is not a directory: %s", path)
		}
		probeDir = path
	} else if _, err := os.Stat(probeDir); os.IsNotExist(err) {
		return fmt.Sprintf("Parent directory does not exist: %s", probeDir)
	}
	// Verify write permission by creating and removing a scratch file.
	testFile := filepath.Join(probeDir, ".edge-ai-write-test")
	if err := os.WriteFile(testFile, []byte("test"), 0644); err != nil {
		return fmt.Sprintf("Cannot write to directory: %s", err)
	}
	os.Remove(testFile)
	return ""
}
// StartInstall begins the installation in a background goroutine and
// returns immediately; progress is reported asynchronously via the
// "install:progress" event channel (see runInstall).
func (inst *Installer) StartInstall(config InstallConfig) error {
	go inst.runInstall(config)
	return nil
}
// runInstall executes the install steps in order, emitting a progress
// event before each one. A step error is reported as a warning (IsError)
// but does NOT abort the sequence — each step guards its own critical
// failures internally. Always ends with a 100% IsComplete event.
func (inst *Installer) runInstall(config InstallConfig) {
	// Each entry: display name, progress percentage at step start, handler.
	steps := []struct {
		name    string
		percent float64
		fn      func(config InstallConfig) error
	}{
		{"Creating installation directory", 5, inst.stepCreateDir},
		{"Extracting server binary", 10, inst.stepExtractBinary},
		{"Extracting models and firmware", 30, inst.stepExtractData},
		{"Extracting scripts", 48, inst.stepExtractScripts},
		{"Configuring system", 55, inst.stepConfigureSystem},
		{"Setting up USB driver", 62, inst.stepSetupLibusb},
		{"Setting up Python environment", 72, inst.stepSetupPython},
		{"Writing configuration", 85, inst.stepWriteConfig},
		{"Verifying installation", 90, inst.stepVerify},
		{"Setting up auto-start launcher", 95, inst.stepAutoRestart},
	}
	for _, step := range steps {
		inst.emitProgress(ProgressEvent{
			Step:    step.name,
			Message: step.name + "...",
			Percent: step.percent,
		})
		if err := step.fn(config); err != nil {
			inst.emitProgress(ProgressEvent{
				Step:    step.name,
				Message: fmt.Sprintf("Warning: %s — %s", step.name, err),
				Percent: step.percent,
				IsError: true,
			})
			// Non-critical steps continue; critical ones are handled inside
		}
	}
	inst.emitProgress(ProgressEvent{
		Step:       "complete",
		Message:    "Installation complete!",
		Percent:    100,
		IsComplete: true,
	})
}
// stepCreateDir ensures the installation directory exists (mkdir -p).
func (inst *Installer) stepCreateDir(config InstallConfig) error {
	return os.MkdirAll(config.InstallDir, 0755)
}
// stepExtractBinary writes the embedded server binary into the install
// directory under the platform-specific executable name, mode 0755.
func (inst *Installer) stepExtractBinary(config InstallConfig) error {
	binName := "edge-ai-server"
	if runtime.GOOS == "windows" {
		binName = "edge-ai-server.exe"
	}
	return inst.extractFile("payload/"+binName, filepath.Join(config.InstallDir, binName), 0755)
}
// stepExtractData copies the embedded payload/data tree (models, NEF
// files) into <InstallDir>/data.
func (inst *Installer) stepExtractData(config InstallConfig) error {
	// Extract everything under payload/data/
	return inst.extractDir("payload/data", filepath.Join(config.InstallDir, "data"))
}
// stepExtractScripts copies the embedded payload/scripts tree (Python
// bridge, firmware) into <InstallDir>/scripts.
func (inst *Installer) stepExtractScripts(config InstallConfig) error {
	return inst.extractDir("payload/scripts", filepath.Join(config.InstallDir, "scripts"))
}
// stepConfigureSystem applies post-extraction OS tweaks: strips the macOS
// quarantine attribute and, if the user opted in, creates the CLI link.
// (removeQuarantine and createSystemLink are platform helpers defined in
// other files of this package.)
func (inst *Installer) stepConfigureSystem(config InstallConfig) error {
	// Remove macOS quarantine attribute
	removeQuarantine(config.InstallDir)
	if config.CreateSymlink {
		return createSystemLink(config.InstallDir)
	}
	return nil
}
// stepSetupLibusb installs libusb when the user opted in; otherwise a
// no-op. (installLibusb is a platform helper defined elsewhere.)
func (inst *Installer) stepSetupLibusb(config InstallConfig) error {
	if !config.InstallLibusb {
		return nil
	}
	return installLibusb(config.InstallDir)
}
// stepSetupPython creates the Python venv and installs requirements when
// the user opted in; otherwise a no-op.
func (inst *Installer) stepSetupPython(config InstallConfig) error {
	if !config.InstallPythonEnv {
		return nil
	}
	return inst.setupPythonVenv(config.InstallDir)
}
// stepVerify confirms the extracted server binary exists in the install
// directory and is not zero-length (a truncated extraction would other-
// wise go unnoticed until first launch).
func (inst *Installer) stepVerify(config InstallConfig) error {
	name := "edge-ai-server"
	if runtime.GOOS == "windows" {
		name += ".exe"
	}
	target := filepath.Join(config.InstallDir, name)
	fi, statErr := os.Stat(target)
	switch {
	case statErr != nil:
		return fmt.Errorf("server binary not found: %w", statErr)
	case fi.Size() == 0:
		return fmt.Errorf("server binary is empty")
	}
	return nil
}
// stepAutoRestart registers the platform auto-restart/launcher service
// for the installed server. (installAutoRestart is a platform helper
// defined elsewhere.)
func (inst *Installer) stepAutoRestart(config InstallConfig) error {
	inst.emitProgress(ProgressEvent{
		Step:    "auto-restart",
		Message: "Registering auto-restart service...",
		Percent: 96,
	})
	return installAutoRestart(config.InstallDir)
}
// extractFile copies a single file from the embedded payload to disk,
// creating parent directories (0755) as needed and applying perm to the
// written file.
func (inst *Installer) extractFile(embedPath, destPath string, perm os.FileMode) error {
	data, err := inst.payload.ReadFile(embedPath)
	if err != nil {
		return fmt.Errorf("read embedded %s: %w", embedPath, err)
	}
	if err := os.MkdirAll(filepath.Dir(destPath), 0755); err != nil {
		return err
	}
	return os.WriteFile(destPath, data, perm)
}
// extractDir copies an entire directory tree from the embedded payload to
// disk. Directories are created 0755; files are written 0644, except *.py
// scripts which get 0755 so they can be executed directly.
func (inst *Installer) extractDir(embedDir, destDir string) error {
	return fs.WalkDir(inst.payload, embedDir, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		// Mirror the payload-relative path under destDir.
		// NOTE(review): the filepath.Rel error is discarded; embed.FS paths
		// always live under embedDir here, so it should not fail in practice.
		relPath, _ := filepath.Rel(embedDir, path)
		if relPath == "." {
			return nil
		}
		outPath := filepath.Join(destDir, relPath)
		if d.IsDir() {
			return os.MkdirAll(outPath, 0755)
		}
		data, err := inst.payload.ReadFile(path)
		if err != nil {
			return fmt.Errorf("read embedded %s: %w", path, err)
		}
		perm := os.FileMode(0644)
		// Make .py scripts executable
		if strings.HasSuffix(path, ".py") {
			perm = 0755
		}
		return os.WriteFile(outPath, data, perm)
	})
}
// setupPythonVenv creates a Python virtual environment under
// <installDir>/venv and installs scripts/requirements.txt into it,
// emitting progress events at 76%, 80% and 90%. Command output is
// captured and included in the error on failure.
func (inst *Installer) setupPythonVenv(installDir string) error {
	venvDir := filepath.Join(installDir, "venv")
	reqFile := filepath.Join(installDir, "scripts", "requirements.txt")
	pythonPath, err := findPython3()
	if err != nil {
		return fmt.Errorf("python3 not found on PATH: %w", err)
	}
	inst.emitProgress(ProgressEvent{
		Step:    "python",
		Message: "Creating Python virtual environment...",
		Percent: 76,
	})
	cmd := exec.Command(pythonPath, "-m", "venv", venvDir)
	if out, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("venv creation failed: %s — %w", string(out), err)
	}
	inst.emitProgress(ProgressEvent{
		Step:    "python",
		Message: "Installing Python packages (numpy, opencv, pyusb)...",
		Percent: 80,
	})
	// Use the venv's own pip so packages land inside the venv.
	pipPath := venvPipPath(venvDir)
	cmd = exec.Command(pipPath, "install", "-r", reqFile)
	if out, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("pip install failed: %s — %w", string(out), err)
	}
	inst.emitProgress(ProgressEvent{
		Step:    "python",
		Message: "Python environment ready.",
		Percent: 90,
	})
	return nil
}
// stepWriteConfig writes config.json into the platform config directory
// (platformConfigDir, defined elsewhere), recording server host/port,
// relay settings, and launcher preferences. A zero ServerPort falls back
// to the default port 3721.
func (inst *Installer) stepWriteConfig(config InstallConfig) error {
	cfgDir := platformConfigDir()
	if err := os.MkdirAll(cfgDir, 0755); err != nil {
		return err
	}
	port := config.ServerPort
	if port == 0 {
		port = 3721
	}
	appCfg := map[string]interface{}{
		"version": 1,
		"server": map[string]interface{}{
			"port": port,
			"host": "127.0.0.1",
		},
		"relay": map[string]interface{}{
			"url":   config.RelayURL,
			"token": config.RelayToken,
		},
		"launcher": map[string]interface{}{
			"autoStart": true,
			"language":  config.Language,
		},
	}
	data, err := json.MarshalIndent(appCfg, "", " ")
	if err != nil {
		return err
	}
	return os.WriteFile(filepath.Join(cfgDir, "config.json"), data, 0644)
}
// DetectHardware runs scripts/kneron_detect.py from the default install
// directory, preferring the installed venv's python and falling back to a
// system interpreter. The script output is tried as JSON first, then as
// plain text via parseDetectOutput.
func (inst *Installer) DetectHardware() ([]HardwareDevice, error) {
	installDir := platformDefaultDir()
	detectScript := filepath.Join(installDir, "scripts", "kneron_detect.py")
	venvDir := filepath.Join(installDir, "venv")
	pythonPath := venvPythonPath(venvDir)
	// Fallback to system python if venv doesn't exist
	if _, err := os.Stat(pythonPath); os.IsNotExist(err) {
		p, err := findPython3()
		if err != nil {
			return nil, fmt.Errorf("python3 not found")
		}
		pythonPath = p
	}
	if _, err := os.Stat(detectScript); os.IsNotExist(err) {
		return nil, fmt.Errorf("detection script not found: %s", detectScript)
	}
	cmd := exec.Command(pythonPath, detectScript)
	out, err := cmd.CombinedOutput()
	if err != nil {
		return nil, fmt.Errorf("detection failed: %s — %w", string(out), err)
	}
	// Parse JSON output
	var devices []HardwareDevice
	if err := json.Unmarshal(out, &devices); err != nil {
		// kneron_detect.py outputs plain text, not JSON — parse manually
		devices = parseDetectOutput(string(out))
	}
	return devices, nil
}
// parseDetectOutput extracts Kneron devices from the plain-text output of
// kneron_detect.py. Every line mentioning a known model name becomes one
// HardwareDevice; when several names appear on a line, the first match in
// KL520/KL720/KL730 order wins. The full trimmed line is kept as Product;
// Port and Serial are left empty.
func parseDetectOutput(output string) []HardwareDevice {
	knownModels := []string{"KL520", "KL720", "KL730"}
	var found []HardwareDevice
	for _, raw := range strings.Split(output, "\n") {
		trimmed := strings.TrimSpace(raw)
		for _, model := range knownModels {
			if strings.Contains(trimmed, model) {
				found = append(found, HardwareDevice{Model: model, Product: trimmed})
				break
			}
		}
	}
	return found
}
// LaunchServer starts the installed edge-ai-server (with --tray) in the
// background from the default install directory. It returns as soon as
// the process has been spawned — it does not wait for the server to be
// ready.
func (inst *Installer) LaunchServer() (string, error) {
	installDir := platformDefaultDir()
	binName := "edge-ai-server"
	if runtime.GOOS == "windows" {
		binName = "edge-ai-server.exe"
	}
	binPath := filepath.Join(installDir, binName)
	cmd := exec.Command(binPath, "--tray")
	cmd.Dir = installDir
	if err := cmd.Start(); err != nil {
		return "", fmt.Errorf("failed to start launcher: %w", err)
	}
	return "Launcher started", nil
}
// OpenBrowser opens the given URL in the system default browser.
// Exported wrapper so the Wails frontend can call openBrowser.
func (inst *Installer) OpenBrowser(url string) error {
	return openBrowser(url)
}
// GetExistingInstall returns the default install directory when the
// server binary is already present there, or "" (with nil error) when
// no installation was found.
func (inst *Installer) GetExistingInstall() (string, error) {
	dir := platformDefaultDir()
	binName := "edge-ai-server"
	if runtime.GOOS == "windows" {
		binName = "edge-ai-server.exe"
	}
	if _, err := os.Stat(filepath.Join(dir, binName)); err == nil {
		return dir, nil
	}
	return "", nil
}
// Uninstall removes all installed files asynchronously; progress is
// reported on the "uninstall:progress" event channel (see runUninstall).
func (inst *Installer) Uninstall() error {
	go inst.runUninstall()
	return nil
}
// runUninstall removes everything the installer created, reporting
// progress on the "uninstall:progress" event channel. System-wide
// dependencies (Python, libusb, Homebrew) are deliberately preserved.
func (inst *Installer) runUninstall() {
	installDir := platformDefaultDir()
	emit := func(step, msg string, pct float64) {
		wailsRuntime.EventsEmit(inst.ctx, "uninstall:progress", ProgressEvent{
			Step: step, Message: msg, Percent: pct,
		})
	}
	// 1. Stop running server and remove auto-restart service
	emit("stop", "Stopping server and removing auto-restart service...", 10)
	removeAutoRestart()
	if runtime.GOOS == "windows" {
		exec.Command("taskkill", "/F", "/IM", "edge-ai-server.exe").Run()
	} else {
		exec.Command("pkill", "-f", "edge-ai-server").Run()
	}
	// 2. Remove symlink / PATH entry
	emit("links", "Removing system links...", 20)
	removeSystemLink()
	// 3. Remove only Edge AI Platform files (not system deps like libusb, Python, Homebrew)
	emit("files", "Removing server binary...", 30)
	binName := "edge-ai-server"
	if runtime.GOOS == "windows" {
		binName = "edge-ai-server.exe"
	}
	os.Remove(filepath.Join(installDir, binName))
	emit("files", "Removing models and firmware...", 45)
	os.RemoveAll(filepath.Join(installDir, "data"))
	emit("files", "Removing scripts...", 55)
	os.RemoveAll(filepath.Join(installDir, "scripts"))
	emit("files", "Removing Python virtual environment...", 70)
	os.RemoveAll(filepath.Join(installDir, "venv"))
	// Remove any other platform-generated files (logs, config, etc.)
	emit("files", "Removing configuration and logs...", 85)
	os.Remove(filepath.Join(installDir, "config.json"))
	os.RemoveAll(filepath.Join(installDir, "logs"))
	// Remove the install directory itself only when nothing is left in it.
	entries, _ := os.ReadDir(installDir)
	if len(entries) == 0 {
		os.Remove(installDir)
	}
	// Single terminal event. (Previously the completion message was emitted
	// twice — once without IsComplete — causing a duplicate frontend update.)
	wailsRuntime.EventsEmit(inst.ctx, "uninstall:progress", ProgressEvent{
		Step:       "complete",
		Message:    "Uninstall complete. System dependencies (Python, libusb, Homebrew) were preserved.",
		Percent:    100,
		IsComplete: true,
	})
}
// findPython3 locates a Python 3 interpreter on PATH. It prefers an
// explicit "python3" binary and falls back to plain "python", accepting
// the latter only if it self-reports a 3.x version.
func findPython3() (string, error) {
	if path, err := exec.LookPath("python3"); err == nil {
		return path, nil
	}
	path, lookErr := exec.LookPath("python")
	if lookErr == nil {
		ver, _ := exec.Command(path, "--version").Output()
		if strings.Contains(string(ver), "Python 3") {
			return path, nil
		}
	}
	return "", fmt.Errorf("python3 not found")
}
// venvPipPath returns the pip executable path inside a venv.
func venvPipPath(venvDir string) string {
if runtime.GOOS == "windows" {
return filepath.Join(venvDir, "Scripts", "pip.exe")
}
return filepath.Join(venvDir, "bin", "pip")
}
// venvPythonPath returns the python executable path inside a venv.
func venvPythonPath(venvDir string) string {
if runtime.GOOS == "windows" {
return filepath.Join(venvDir, "Scripts", "python.exe")
}
return filepath.Join(venvDir, "bin", "python3")
}
// openBrowser opens a URL in the default browser using the platform
// launcher (open / cmd start / xdg-open). The launcher is started
// asynchronously; the returned error reflects only spawn failure, not
// whether the page actually loaded.
func openBrowser(url string) error {
	switch runtime.GOOS {
	case "darwin":
		return exec.Command("open", url).Start()
	case "windows":
		return exec.Command("cmd", "/c", "start", url).Start()
	default:
		return exec.Command("xdg-open", url).Start()
	}
}

9
installer/embed.go Normal file
View File

@ -0,0 +1,9 @@
package main

import "embed"

// payloadFS contains the server binary, models, firmware, and scripts.
// Populated by `make installer-payload` before `wails build`.
// The "all:" prefix also embeds dot-files such as payload/.gitkeep.
//
//go:embed all:payload
var payloadFS embed.FS

468
installer/frontend/app.js Normal file
View File

@ -0,0 +1,468 @@
// Edge AI Platform Installer — Wizard Controller v0.2
// ── i18n Dictionary ───────────────────────────────────────
// Translation tables keyed by language code ('en', 'zh-TW'); each maps a
// dotted UI key ('section.field') to its display string. Lookups go
// through t(), which falls back to English and finally to the raw key.
const i18n = {
  en: {
    'welcome.title': 'Edge AI Platform Installer',
    'welcome.subtitle': 'Set up your edge AI development environment with Kneron hardware support.',
    'path.title': 'Installation Path',
    'path.subtitle': 'Choose where to install Edge AI Platform.',
    'path.browse': 'Browse',
    'path.required': 'Installation path is required.',
    'path.valid': 'Path is valid.',
    'components.title': 'Select Components',
    'components.subtitle': 'Choose which components to install.',
    'components.server': 'Edge AI Server',
    'components.serverDesc': 'Core server binary for hardware communication (~10 MB)',
    'components.models': 'Kneron Models',
    'components.modelsDesc': 'Pre-trained NEF model files for KL520/KL720 (~50 MB)',
    'components.python': 'Python Environment',
    'components.pythonDesc': 'Python venv with Kneron PLUS SDK and dependencies (~200 MB)',
    'components.libusb': 'libusb',
    'components.libusbDesc': 'USB library required for Kneron device communication',
    'components.symlink': 'CLI Symlink',
    'components.symlinkDesc': "Add 'edge-ai' command to /usr/local/bin",
    'relay.title': 'Relay Configuration',
    'relay.subtitle': 'Configure the relay server for remote access. You can skip this and configure later.',
    'relay.url': 'Relay URL',
    'relay.token': 'Relay Token',
    'relay.port': 'Server Port',
    'relay.hint': 'Leave empty to skip relay configuration. You can set this later in the config file.',
    'progress.title': 'Installing...',
    'progress.subtitle': 'Please wait while components are being installed.',
    'progress.preparing': 'Preparing installation...',
    'hardware.title': 'Hardware Detection',
    'hardware.subtitle': 'Connect your Kneron devices and scan for hardware.',
    'hardware.scanning': 'Scanning for devices...',
    'hardware.noDevices': 'No Kneron devices found. Connect a device and try again.',
    'hardware.rescan': 'Rescan',
    'complete.title': 'Installation Complete',
    'complete.subtitle': 'Edge AI Platform has been installed successfully.',
    'complete.location': 'Install Location',
    'complete.server': 'Edge AI Server',
    'complete.models': 'Kneron Models',
    'complete.python': 'Python Environment',
    'complete.libusb': 'libusb',
    'complete.installed': 'Installed',
    'complete.skipped': 'Skipped',
    'btn.next': 'Next',
    'btn.back': 'Back',
    'btn.install': 'Install',
    'btn.launch': 'Launch Server',
    'btn.close': 'Close',
    'existing.detected': 'Existing installation detected',
    'existing.desc': 'An existing installation was found. You can uninstall it or install over it.',
    'existing.uninstall': 'Uninstall',
    'uninstall.title': 'Uninstalling...',
    'uninstall.subtitle': 'Removing installed files.',
    'uninstall.confirm': 'This will remove the Edge AI Platform and all installed files. Continue?',
    'uninstall.complete': 'Uninstall complete. System dependencies (Python, libusb) were preserved.',
    'system.platform': 'Platform',
    'system.python': 'Python',
    'system.libusb': 'libusb',
    'system.ffmpeg': 'FFmpeg',
    'status.installed': 'Installed',
    'status.notFound': 'Not found',
    'status.notInstalled': 'Not installed',
    'status.optional': 'Not installed (optional)',
  },
  // Traditional Chinese (Taiwan) — keys must mirror the `en` table exactly.
  'zh-TW': {
    'welcome.title': 'Edge AI 平台安裝程式',
    'welcome.subtitle': '設定您的邊緣 AI 開發環境,支援 Kneron 硬體。',
    'path.title': '安裝路徑',
    'path.subtitle': '選擇 Edge AI 平台的安裝位置。',
    'path.browse': '瀏覽',
    'path.required': '安裝路徑為必填。',
    'path.valid': '路徑有效。',
    'components.title': '選擇元件',
    'components.subtitle': '選擇要安裝的元件。',
    'components.server': 'Edge AI 伺服器',
    'components.serverDesc': '硬體通訊核心伺服器程式 (~10 MB)',
    'components.models': 'Kneron 模型',
    'components.modelsDesc': 'KL520/KL720 預訓練 NEF 模型檔案 (~50 MB)',
    'components.python': 'Python 環境',
    'components.pythonDesc': '包含 Kneron PLUS SDK 的 Python 虛擬環境 (~200 MB)',
    'components.libusb': 'libusb',
    'components.libusbDesc': 'Kneron 裝置通訊所需的 USB 函式庫',
    'components.symlink': 'CLI 捷徑',
    'components.symlinkDesc': "新增 'edge-ai' 指令到 /usr/local/bin",
    'relay.title': 'Relay 設定',
    'relay.subtitle': '設定 Relay 伺服器以進行遠端存取。可以跳過稍後再設定。',
    'relay.url': 'Relay URL',
    'relay.token': 'Relay Token',
    'relay.port': '伺服器連接埠',
    'relay.hint': '留空可跳過 Relay 設定,稍後可在設定檔中修改。',
    'progress.title': '安裝中...',
    'progress.subtitle': '正在安裝元件,請稍候。',
    'progress.preparing': '準備安裝中...',
    'hardware.title': '硬體偵測',
    'hardware.subtitle': '連接您的 Kneron 裝置並掃描硬體。',
    'hardware.scanning': '正在掃描裝置...',
    'hardware.noDevices': '未偵測到 Kneron 裝置。請連接裝置後再試。',
    'hardware.rescan': '重新掃描',
    'complete.title': '安裝完成',
    'complete.subtitle': 'Edge AI 平台已成功安裝。',
    'complete.location': '安裝位置',
    'complete.server': 'Edge AI 伺服器',
    'complete.models': 'Kneron 模型',
    'complete.python': 'Python 環境',
    'complete.libusb': 'libusb',
    'complete.installed': '已安裝',
    'complete.skipped': '已跳過',
    'btn.next': '下一步',
    'btn.back': '上一步',
    'btn.install': '安裝',
    'btn.launch': '啟動伺服器',
    'btn.close': '關閉',
    'existing.detected': '偵測到既有安裝',
    'existing.desc': '發現既有安裝。您可以解除安裝或覆蓋安裝。',
    'existing.uninstall': '解除安裝',
    'uninstall.title': '解除安裝中...',
    'uninstall.subtitle': '正在移除已安裝的檔案。',
    'uninstall.confirm': '這將移除 Edge AI 平台及所有已安裝的檔案。是否繼續?',
    'uninstall.complete': '解除安裝完成。系統相依套件Python、libusb已保留。',
    'system.platform': '平台',
    'system.python': 'Python',
    'system.libusb': 'libusb',
    'system.ffmpeg': 'FFmpeg',
    'status.installed': '已安裝',
    'status.notFound': '未找到',
    'status.notInstalled': '未安裝',
    'status.optional': '未安裝(選用)',
  }
};
// ── State ─────────────────────────────────────────────────
// Index of the wizard step currently shown (0-6); written by showStep().
let currentStep = 0;
// Snapshot returned by Installer.GetSystemInfo(); filled in by initWelcome().
let systemInfo = null;
// Options collected across the wizard steps and passed to Installer.StartInstall().
let installConfig = {
installDir: '', // chosen install directory (seeded from systemInfo.defaultDir)
createSymlink: true, // step 2: add the 'edge-ai' CLI symlink
installPythonEnv: true, // step 2: install the Python venv component
installLibusb: true, // step 2: install libusb
relayURL: '', // step 3: optional relay server URL (empty = skip)
relayToken: '', // step 3: optional relay token
serverPort: 3721, // step 3: server port; validated to 1024-65535 with 3721 fallback
language: 'en', // active UI language: 'en' or 'zh-TW'
};
// ── i18n Functions ────────────────────────────────────────
// Resolve a UI string: current-language table → English table → raw key.
// (Falsy entries such as '' fall through via ||, keeping missing keys visible.)
function t(key) {
  const table = i18n[installConfig.language] || i18n.en;
  return table[key] || i18n.en[key] || key;
}
// Switch the UI language and re-render every translated element.
function setLanguage(lang) {
  installConfig.language = lang;

  // Highlight the matching language button in the header.
  document.getElementById('lang-en').classList.toggle('active', lang === 'en');
  document.getElementById('lang-zh').classList.toggle('active', lang === 'zh-TW');

  // Re-translate everything tagged with data-i18n (key → i18n table lookup).
  for (const el of document.querySelectorAll('[data-i18n]')) {
    const translated = t(el.getAttribute('data-i18n'));
    if (translated) {
      el.textContent = translated;
    }
  }

  // Keep the document language attribute in sync for accessibility/fonts.
  document.documentElement.lang = lang === 'zh-TW' ? 'zh-TW' : 'en';
}
// ── Step Navigation ───────────────────────────────────────
// Reveal wizard panel #n and refresh the header step dots
// (dots before n are "completed", dot n is "active").
function showStep(n) {
  for (const panel of document.querySelectorAll('.step')) {
    panel.classList.remove('active');
  }
  document.querySelectorAll('.step-dot').forEach((dot, idx) => {
    dot.classList.remove('active', 'completed');
    if (idx < n) {
      dot.classList.add('completed');
    } else if (idx === n) {
      dot.classList.add('active');
    }
  });
  const target = document.getElementById('step-' + n);
  if (target) {
    target.classList.add('active');
  }
  currentStep = n;
}
// ── Step 0: Welcome ───────────────────────────────────────
// Populate the welcome screen with detected system information from the
// Go backend, seed the default install path, and reveal the uninstall
// card when a previous installation is present.
async function initWelcome() {
  try {
    systemInfo = await window.go.main.Installer.GetSystemInfo();

    document.getElementById('info-platform').textContent =
      systemInfo.os + ' / ' + systemInfo.arch;

    // Python: show the detected version, or a warning when absent.
    const python = document.getElementById('info-python');
    if (systemInfo.pythonAvailable) {
      python.textContent = systemInfo.pythonVersion;
      python.className = 'info-value status-ok';
    } else {
      python.textContent = t('status.notFound');
      python.className = 'info-value status-warn';
    }

    // libusb is required for Kneron device communication → warn if missing.
    const libusb = document.getElementById('info-libusb');
    if (systemInfo.libusbInstalled) {
      libusb.textContent = t('status.installed');
      libusb.className = 'info-value status-ok';
    } else {
      libusb.textContent = t('status.notInstalled');
      libusb.className = 'info-value status-warn';
    }

    // FFmpeg is optional, so its absence is not styled as a warning.
    const ffmpeg = document.getElementById('info-ffmpeg');
    if (systemInfo.ffmpegAvailable) {
      ffmpeg.textContent = t('status.installed');
      ffmpeg.className = 'info-value status-ok';
    } else {
      ffmpeg.textContent = t('status.optional');
      ffmpeg.className = 'info-value ';
    }

    // Seed the path step with the platform default directory.
    installConfig.installDir = systemInfo.defaultDir;

    if (systemInfo.existingInstall) {
      document.getElementById('existing-install').style.display = 'block';
    }
  } catch (err) {
    console.error('GetSystemInfo failed:', err);
  }
}
// ── Step 1: Path ──────────────────────────────────────────
// Open the native directory picker, store the choice, and show the
// backend's validation verdict under the path field.
document.getElementById('btn-browse').addEventListener('click', async () => {
  try {
    const chosen = await window.go.main.Installer.BrowseDirectory();
    if (!chosen) {
      return; // dialog cancelled
    }

    document.getElementById('install-path').value = chosen;
    installConfig.installDir = chosen;

    // ValidatePath returns an error message, or an empty string when OK.
    const problem = await window.go.main.Installer.ValidatePath(chosen);
    const status = document.getElementById('path-status');
    status.textContent = problem || t('path.valid');
    status.className = problem ? 'status-text error' : 'status-text';
  } catch (err) {
    console.error('BrowseDirectory failed:', err);
  }
});
// ── Step 4: Install Progress ──────────────────────────────
// Append one entry to the install log and keep it scrolled to the bottom.
// `type` picks the CSS class suffix ('error' / 'success'); default 'line'.
function addLogLine(message, type) {
  const log = document.getElementById('progress-log');
  const entry = document.createElement('div');
  entry.textContent = message;
  entry.className = 'log-' + (type || 'line');
  log.appendChild(entry);
  log.scrollTop = log.scrollHeight;
}
// Switch to the progress step, reset the progress UI, then start the
// backend installer. Progress updates arrive asynchronously via the
// 'install:progress' runtime event.
async function startInstall() {
  showStep(4);

  const byId = (id) => document.getElementById(id);
  byId('progress-title').textContent = t('progress.title');
  byId('progress-subtitle').textContent = t('progress.subtitle');
  byId('progress-log').innerHTML = '';
  byId('progress-fill').style.width = '0%';
  byId('progress-percent').textContent = '0%';
  byId('progress-message').textContent = t('progress.preparing');

  try {
    await window.go.main.Installer.StartInstall(installConfig);
  } catch (err) {
    addLogLine('Error: ' + err, 'error');
  }
}
// ── Runtime event wiring ──────────────────────────────────
// Progress events pushed from the Go backend via the Wails runtime.
// Guarded so the page still loads outside the Wails webview (no window.runtime).
if (window.runtime && window.runtime.EventsOn) {
// Install progress: update bar/percent/message, log each message, and on
// successful completion advance to the hardware-detection step (5).
window.runtime.EventsOn('install:progress', (event) => {
const fill = document.getElementById('progress-fill');
const percent = document.getElementById('progress-percent');
const message = document.getElementById('progress-message');
fill.style.width = event.percent + '%';
percent.textContent = Math.round(event.percent) + '%';
message.textContent = event.message;
addLogLine(event.message, event.isError ? 'error' : (event.isComplete ? 'success' : 'line'));
if (event.isComplete && !event.isError) {
// Small delay so the user sees the 100% state before the step switches.
setTimeout(() => {
showStep(5);
detectHardware();
}, 500);
}
});
// Uninstall progress: same UI elements, but stays on the progress step and
// rewrites the title/subtitle when finished instead of advancing.
window.runtime.EventsOn('uninstall:progress', (event) => {
const message = document.getElementById('progress-message');
const fill = document.getElementById('progress-fill');
const percent = document.getElementById('progress-percent');
fill.style.width = event.percent + '%';
percent.textContent = Math.round(event.percent) + '%';
message.textContent = event.message;
addLogLine(event.message, event.isError ? 'error' : 'line');
if (event.isComplete) {
// Strip the trailing ellipsis from "Uninstalling..." for the final title.
// NOTE(review): both en and zh-TW titles use ASCII '...', so replace() works;
// keep that invariant if new translations are added.
document.getElementById('progress-title').textContent = t('uninstall.title').replace('...', '');
document.getElementById('progress-subtitle').textContent = t('uninstall.complete');
addLogLine(t('uninstall.complete'), 'success');
}
});
}
// ── Step 5: Hardware Detection ────────────────────────────
// Scan for attached Kneron devices via the Go backend and render the result.
// Fix: device-descriptor strings (d.model / d.product / d.port) and the caught
// error text were interpolated into innerHTML unescaped, allowing markup
// injection from untrusted USB descriptor data. They are now HTML-escaped.
async function detectHardware() {
  // Minimal HTML escaper for text placed inside element content/attributes.
  const esc = (s) => String(s)
    .replace(/&/g, '&amp;')
    .replace(/</g, '&lt;')
    .replace(/>/g, '&gt;')
    .replace(/"/g, '&quot;');

  const el = document.getElementById('hardware-results');
  // Show the spinner while the backend scan runs.
  el.innerHTML =
    '<div class="device-scanning">' +
    '<div class="spinner"></div>' +
    '<p>' + t('hardware.scanning') + '</p>' +
    '</div>';
  try {
    const devices = await window.go.main.Installer.DetectHardware();
    if (!devices || devices.length === 0) {
      el.innerHTML = '<div class="no-devices"><p>' + t('hardware.noDevices') + '</p></div>';
    } else {
      // One card per device; model falls back to 'Unknown', detail to
      // product string then port name then empty.
      el.innerHTML = devices.map(d =>
        '<div class="device-card">' +
        '<div class="device-icon">&#x2B21;</div>' +
        '<div class="device-info">' +
        '<span class="device-name">Kneron ' + esc(d.model || 'Unknown') + '</span>' +
        '<span class="device-detail">' + esc(d.product || d.port || '') + '</span>' +
        '</div>' +
        '</div>'
      ).join('');
    }
  } catch (err) {
    // Detection is best-effort; a failure here must not block the wizard.
    el.innerHTML = '<div class="no-devices"><p>Detection skipped: ' + esc(err) + '</p></div>';
  }
}
// Re-run the device scan on demand.
document.getElementById('btn-rescan').addEventListener('click', () => detectHardware());
// ── Step 6: Complete ──────────────────────────────────────
// Start the installed server; surface failures to the user directly.
document.getElementById('btn-launch').addEventListener('click', () => {
  window.go.main.Installer.LaunchServer().catch((reason) => {
    alert('Failed to launch: ' + reason);
  });
});
// Prefer the Wails runtime quit; fall back to closing the browser window.
document.getElementById('btn-close').addEventListener('click', () => {
  const rt = window.runtime;
  if (rt && rt.Quit) {
    rt.Quit();
  } else {
    window.close();
  }
});
// ── Uninstall ─────────────────────────────────────────────
// Confirm with the user, then reuse the progress step (4) to show
// uninstall output streamed via the 'uninstall:progress' event.
document.getElementById('btn-uninstall').addEventListener('click', async () => {
  if (!confirm(t('uninstall.confirm'))) {
    return;
  }

  showStep(4);
  const byId = (id) => document.getElementById(id);
  byId('progress-title').textContent = t('uninstall.title');
  byId('progress-subtitle').textContent = t('uninstall.subtitle');
  byId('progress-log').innerHTML = '';
  byId('progress-fill').style.width = '0%';
  byId('progress-percent').textContent = '0%';

  try {
    await window.go.main.Installer.Uninstall();
  } catch (err) {
    addLogLine('Error: ' + err, 'error');
  }
});
// ── Navigation Wiring ─────────────────────────────────────
// One-time setup after the DOM is ready: pick the initial language, load
// system info, and connect every wizard navigation button. Steps advance
// 0 → 1 → 2 → 3 → 4 (progress) → 5 (hardware) → 6 (complete); step 4
// is entered from startInstall()/uninstall, not from a Next button.
document.addEventListener('DOMContentLoaded', () => {
// Detect initial language from browser
// (navigator.userLanguage is the legacy IE name; any zh-* locale maps to zh-TW.)
const browserLang = navigator.language || navigator.userLanguage || 'en';
const initialLang = browserLang.startsWith('zh') ? 'zh-TW' : 'en';
setLanguage(initialLang);
initWelcome();
// Language switcher
document.getElementById('lang-en').addEventListener('click', () => setLanguage('en'));
document.getElementById('lang-zh').addEventListener('click', () => setLanguage('zh-TW'));
// Step 0 -> Step 1: show the path step with the current (default) directory.
document.getElementById('btn-next-0').addEventListener('click', () => {
showStep(1);
document.getElementById('install-path').value = installConfig.installDir;
});
// Step 1 Back -> Step 0
document.getElementById('btn-back-1').addEventListener('click', () => {
showStep(0);
});
// Step 1 -> Step 2: re-validate the chosen path before advancing.
document.getElementById('btn-next-1').addEventListener('click', async () => {
const msg = await window.go.main.Installer.ValidatePath(installConfig.installDir);
if (msg) {
const statusEl = document.getElementById('path-status');
statusEl.textContent = msg;
statusEl.className = 'status-text error';
return;
}
showStep(2);
});
// Step 2 Back -> Step 1
document.getElementById('btn-back-2').addEventListener('click', () => {
showStep(1);
});
// Step 2 Install -> Step 3 (Relay Config)
// Capture the optional-component checkboxes into installConfig.
document.getElementById('btn-install').addEventListener('click', () => {
installConfig.createSymlink = document.getElementById('comp-symlink').checked;
installConfig.installPythonEnv = document.getElementById('comp-python').checked;
installConfig.installLibusb = document.getElementById('comp-libusb').checked;
showStep(3);
});
// Step 3 Back -> Step 2
document.getElementById('btn-back-3').addEventListener('click', () => {
showStep(2);
});
// Step 3 Next -> collect relay fields -> Step 4 (Progress) -> start install
document.getElementById('btn-next-3').addEventListener('click', () => {
installConfig.relayURL = document.getElementById('relay-url').value.trim();
installConfig.relayToken = document.getElementById('relay-token').value.trim();
// NaN comparisons are false, so a blank/invalid port falls back to 3721.
const portVal = parseInt(document.getElementById('server-port').value, 10);
installConfig.serverPort = (portVal >= 1024 && portVal <= 65535) ? portVal : 3721;
startInstall();
});
// Step 5 Next -> Step 6 (Complete): fill in the installation summary.
document.getElementById('btn-next-5').addEventListener('click', () => {
showStep(6);
document.getElementById('summary-path').textContent = installConfig.installDir;
const modelsEl = document.getElementById('summary-models');
modelsEl.textContent = t('complete.installed');
modelsEl.className = 'info-value status-ok';
const pyEl = document.getElementById('summary-python');
pyEl.textContent = installConfig.installPythonEnv ? t('complete.installed') : t('complete.skipped');
pyEl.className = 'info-value ' + (installConfig.installPythonEnv ? 'status-ok' : 'status-skipped');
const luEl = document.getElementById('summary-libusb');
luEl.textContent = installConfig.installLibusb ? t('complete.installed') : t('complete.skipped');
luEl.className = 'info-value ' + (installConfig.installLibusb ? 'status-ok' : 'status-skipped');
});
});

View File

@ -0,0 +1,247 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Edge AI Platform Installer</title>
<link rel="stylesheet" href="style.css">
</head>
<!-- Static wizard markup only; all behavior lives in app.js. Elements tagged
     with data-i18n have their textContent replaced by setLanguage() using the
     attribute value as a key into the i18n tables; the English text here is
     the default/fallback copy. -->
<body>
<div id="app">
<!-- Header -->
<header id="wizard-header">
<div class="header-left">
<div class="logo-text" data-i18n="welcome.title">Edge AI Platform</div>
<div class="version-text">Installer v0.2.0</div>
</div>
<div class="header-center">
<!-- Seven dots, one per wizard step; showStep() marks them active/completed. -->
<div class="step-indicators">
<span class="step-dot active" data-step="0">1</span>
<span class="step-line"></span>
<span class="step-dot" data-step="1">2</span>
<span class="step-line"></span>
<span class="step-dot" data-step="2">3</span>
<span class="step-line"></span>
<span class="step-dot" data-step="3">4</span>
<span class="step-line"></span>
<span class="step-dot" data-step="4">5</span>
<span class="step-line"></span>
<span class="step-dot" data-step="5">6</span>
<span class="step-line"></span>
<span class="step-dot" data-step="6">7</span>
</div>
</div>
<div class="header-right">
<div class="lang-switch">
<button class="lang-btn active" id="lang-en">EN</button>
<span class="lang-sep">|</span>
<button class="lang-btn" id="lang-zh">中文</button>
</div>
</div>
</header>
<main>
<!-- Step 0: Welcome -->
<section id="step-0" class="step active">
<h1 data-i18n="welcome.title">Edge AI Platform Installer</h1>
<p class="subtitle" data-i18n="welcome.subtitle">Set up your edge AI development environment with Kneron hardware support.</p>
<!-- info-* values are filled by initWelcome() from Installer.GetSystemInfo(). -->
<div id="system-info" class="info-card">
<div class="info-row"><span class="info-label" data-i18n="system.platform">Platform</span><span id="info-platform" class="info-value">-</span></div>
<div class="info-row"><span class="info-label" data-i18n="system.python">Python</span><span id="info-python" class="info-value">-</span></div>
<div class="info-row"><span class="info-label" data-i18n="system.libusb">libusb</span><span id="info-libusb" class="info-value">-</span></div>
<div class="info-row"><span class="info-label" data-i18n="system.ffmpeg">FFmpeg</span><span id="info-ffmpeg" class="info-value">-</span></div>
</div>
<!-- Hidden unless systemInfo.existingInstall is true. -->
<div id="existing-install" class="warning-card" style="display:none">
<strong data-i18n="existing.detected">Existing installation detected</strong>
<p data-i18n="existing.desc">An existing installation was found. You can uninstall it or install over it.</p>
<p id="existing-path" class="existing-path"></p>
<button id="btn-uninstall" class="btn btn-danger" data-i18n="existing.uninstall">Uninstall</button>
</div>
<div class="actions">
<button id="btn-next-0" class="btn btn-primary" data-i18n="btn.next">Next</button>
</div>
</section>
<!-- Step 1: Install Path -->
<section id="step-1" class="step">
<h1 data-i18n="path.title">Installation Path</h1>
<p class="subtitle" data-i18n="path.subtitle">Choose where to install Edge AI Platform.</p>
<div class="form-group">
<!-- Read-only: the path is chosen via the native Browse dialog only. -->
<div class="path-input-group">
<input type="text" id="install-path" class="input-field" readonly>
<button id="btn-browse" class="btn btn-secondary" data-i18n="path.browse">Browse</button>
</div>
<p id="path-status" class="status-text"></p>
</div>
<div class="actions">
<button id="btn-back-1" class="btn btn-ghost" data-i18n="btn.back">Back</button>
<button id="btn-next-1" class="btn btn-primary" data-i18n="btn.next">Next</button>
</div>
</section>
<!-- Step 2: Components -->
<section id="step-2" class="step">
<h1 data-i18n="components.title">Select Components</h1>
<p class="subtitle" data-i18n="components.subtitle">Choose which components to install.</p>
<div class="component-list">
<!-- The server binary is mandatory: checked and disabled. -->
<label class="component-item required">
<div class="component-check">
<input type="checkbox" id="comp-server" checked disabled>
<span class="checkmark"></span>
</div>
<div class="component-info">
<span class="component-name" data-i18n="components.server">Edge AI Server</span>
<span class="component-desc" data-i18n="components.serverDesc">Core server binary for hardware communication (~10 MB)</span>
</div>
</label>
<label class="component-item">
<div class="component-check">
<input type="checkbox" id="comp-models" checked>
<span class="checkmark"></span>
</div>
<div class="component-info">
<span class="component-name" data-i18n="components.models">Kneron Models</span>
<span class="component-desc" data-i18n="components.modelsDesc">Pre-trained NEF model files for KL520/KL720 (~50 MB)</span>
</div>
</label>
<label class="component-item">
<div class="component-check">
<input type="checkbox" id="comp-python" checked>
<span class="checkmark"></span>
</div>
<div class="component-info">
<span class="component-name" data-i18n="components.python">Python Environment</span>
<span class="component-desc" data-i18n="components.pythonDesc">Python venv with Kneron PLUS SDK and dependencies (~200 MB)</span>
</div>
</label>
<label class="component-item">
<div class="component-check">
<input type="checkbox" id="comp-libusb" checked>
<span class="checkmark"></span>
</div>
<div class="component-info">
<span class="component-name" data-i18n="components.libusb">libusb</span>
<span class="component-desc" data-i18n="components.libusbDesc">USB library required for Kneron device communication</span>
</div>
</label>
<label class="component-item" id="comp-symlink-row">
<div class="component-check">
<input type="checkbox" id="comp-symlink" checked>
<span class="checkmark"></span>
</div>
<div class="component-info">
<span class="component-name" data-i18n="components.symlink">CLI Symlink</span>
<span class="component-desc" data-i18n="components.symlinkDesc">Add 'edge-ai' command to /usr/local/bin</span>
</div>
</label>
</div>
<div class="actions">
<button id="btn-back-2" class="btn btn-ghost" data-i18n="btn.back">Back</button>
<button id="btn-install" class="btn btn-primary" data-i18n="btn.install">Install</button>
</div>
</section>
<!-- Step 3: Relay Configuration (all fields optional; empty = skip relay) -->
<section id="step-3" class="step">
<h1 data-i18n="relay.title">Relay Configuration</h1>
<p class="subtitle" data-i18n="relay.subtitle">Configure the relay server for remote access. You can skip this and configure later.</p>
<div class="form-group">
<label class="field-label" data-i18n="relay.url">Relay URL</label>
<input type="text" class="input-field" id="relay-url" placeholder="ws://relay.example.com/tunnel/connect">
</div>
<div class="form-group">
<label class="field-label" data-i18n="relay.token">Relay Token</label>
<input type="text" class="input-field" id="relay-token" placeholder="your-relay-token">
</div>
<div class="form-group">
<label class="field-label" data-i18n="relay.port">Server Port</label>
<input type="number" class="input-field" id="server-port" value="3721" min="1024" max="65535">
</div>
<p class="field-hint" data-i18n="relay.hint">Leave empty to skip relay configuration. You can set this later in the config file.</p>
<div class="actions">
<button id="btn-back-3" class="btn btn-ghost" data-i18n="btn.back">Back</button>
<button id="btn-next-3" class="btn btn-primary" data-i18n="btn.next">Next</button>
</div>
</section>
<!-- Step 4: Progress (shared by install and uninstall; no nav buttons —
     progress events from the backend drive what happens next) -->
<section id="step-4" class="step">
<h1 id="progress-title" data-i18n="progress.title">Installing...</h1>
<p class="subtitle" id="progress-subtitle" data-i18n="progress.subtitle">Please wait while components are being installed.</p>
<div class="progress-container">
<div class="progress-bar">
<div id="progress-fill" class="progress-fill" style="width:0%"></div>
</div>
<span id="progress-percent" class="progress-percent">0%</span>
</div>
<p id="progress-message" class="progress-message" data-i18n="progress.preparing">Preparing installation...</p>
<div id="progress-log" class="log-area"></div>
</section>
<!-- Step 5: Hardware Detection (contents replaced by detectHardware()) -->
<section id="step-5" class="step">
<h1 data-i18n="hardware.title">Hardware Detection</h1>
<p class="subtitle" data-i18n="hardware.subtitle">Connect your Kneron devices and scan for hardware.</p>
<div id="hardware-results" class="hardware-list">
<div class="device-scanning" id="device-scanning">
<div class="spinner"></div>
<p data-i18n="hardware.scanning">Scanning for devices...</p>
</div>
<div class="no-devices" id="no-devices" style="display:none;">
<p data-i18n="hardware.noDevices">No Kneron devices found. Connect a device and try again.</p>
</div>
</div>
<div class="actions">
<button id="btn-rescan" class="btn btn-secondary" data-i18n="hardware.rescan">Rescan</button>
<button id="btn-next-5" class="btn btn-primary" data-i18n="btn.next">Next</button>
</div>
</section>
<!-- Step 6: Complete (summary-* values filled by the btn-next-5 handler) -->
<section id="step-6" class="step">
<div class="complete-icon">
<svg width="64" height="64" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
<path d="M22 11.08V12a10 10 0 1 1-5.93-9.14"/>
<polyline points="22 4 12 14.01 9 11.01"/>
</svg>
</div>
<h1 data-i18n="complete.title">Installation Complete</h1>
<p class="subtitle" data-i18n="complete.subtitle">Edge AI Platform has been installed successfully.</p>
<div id="install-summary" class="info-card">
<div class="info-row"><span class="info-label" data-i18n="complete.location">Install Location</span><span id="summary-path" class="info-value">-</span></div>
<div class="info-row"><span class="info-label" data-i18n="complete.server">Edge AI Server</span><span id="summary-server" class="info-value status-ok" data-i18n="complete.installed">Installed</span></div>
<div class="info-row"><span class="info-label" data-i18n="complete.models">Kneron Models</span><span id="summary-models" class="info-value">-</span></div>
<div class="info-row"><span class="info-label" data-i18n="complete.python">Python Environment</span><span id="summary-python" class="info-value">-</span></div>
<div class="info-row"><span class="info-label" data-i18n="complete.libusb">libusb</span><span id="summary-libusb" class="info-value">-</span></div>
</div>
<div class="actions">
<button id="btn-launch" class="btn btn-primary" data-i18n="btn.launch">Launch Server</button>
<button id="btn-close" class="btn btn-ghost" data-i18n="btn.close">Close</button>
</div>
</section>
</main>
</div>
<!-- Wails-generated bindings: runtime.js provides window.runtime (events,
     Quit); App.js provides the window.go.* method bindings used by app.js.
     Load order matters: both must be present before app.js runs. -->
<script src="wailsjs/runtime/runtime.js"></script>
<script src="wailsjs/go/main/App.js"></script>
<script src="app.js"></script>
</body>
</html>

View File

@ -0,0 +1,508 @@
/* Edge AI Platform Installer — Modernized v0.2 */
* { margin: 0; padding: 0; box-sizing: border-box; }
/* Design tokens shared by every component below. */
:root {
--primary: #6366f1;
--primary-hover: #4f46e5;
--primary-light: #e0e7ff;
--bg: #fafbff;
--surface: #ffffff;
--border: #e2e8f0;
--text: #1e293b;
--text-secondary: #64748b;
--success: #10b981;
--warning: #f59e0b;
--error: #ef4444;
--danger: #dc2626;
--danger-hover: #b91c1c;
--radius: 10px;
}
/* Fixed-size app window: no page scroll, no text selection (desktop-app feel). */
body {
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", sans-serif;
background: var(--bg);
color: var(--text);
height: 100vh;
overflow: hidden;
user-select: none;
-webkit-user-select: none;
}
#app {
display: flex;
flex-direction: column;
height: 100vh;
}
/* ── Header ─────────────────────────── */
#wizard-header {
display: flex;
align-items: center;
justify-content: space-between;
padding: 14px 24px;
background: linear-gradient(135deg, #1e293b 0%, #334155 100%);
color: white;
--wails-draggable: drag; /* Wails: header acts as the window drag region */
gap: 16px;
}
.header-left { display: flex; flex-direction: column; gap: 2px; min-width: 140px; }
.logo-text { font-size: 15px; font-weight: 700; letter-spacing: -0.3px; }
.version-text { font-size: 10px; opacity: 0.5; }
.header-center { flex: 1; display: flex; justify-content: center; }
.header-right { min-width: 100px; display: flex; justify-content: flex-end; }
/* Step dots: JS (showStep) toggles .active / .completed per dot. */
.step-indicators { display: flex; align-items: center; gap: 4px; }
.step-dot {
width: 28px; height: 28px;
border-radius: 50%;
border: 2px solid rgba(255,255,255,0.25);
display: flex; align-items: center; justify-content: center;
font-size: 11px; font-weight: 700;
color: rgba(255,255,255,0.35);
transition: all 0.35s cubic-bezier(0.4, 0, 0.2, 1);
}
.step-dot.active {
border-color: var(--primary);
background: var(--primary);
color: white;
box-shadow: 0 0 12px rgba(99, 102, 241, 0.5);
transform: scale(1.1);
}
.step-dot.completed {
border-color: var(--success);
background: var(--success);
color: white;
}
.step-line {
width: 14px; height: 2px;
background: rgba(255,255,255,0.15);
border-radius: 1px;
transition: background 0.3s;
}
/* ── Language Switcher ─────────────── */
.lang-switch {
display: flex;
align-items: center;
gap: 6px;
--wails-draggable: no-drag; /* clickable controls must opt out of the header drag region */
}
.lang-sep {
color: rgba(255,255,255,0.3);
font-size: 12px;
}
.lang-btn {
background: transparent;
border: 1px solid rgba(255,255,255,0.25);
color: rgba(255,255,255,0.6);
padding: 3px 10px;
border-radius: 6px;
font-size: 11px;
font-weight: 600;
cursor: pointer;
transition: all 0.2s;
}
.lang-btn:hover {
border-color: rgba(255,255,255,0.5);
color: rgba(255,255,255,0.9);
}
.lang-btn.active {
background: rgba(255,255,255,0.15);
border-color: rgba(255,255,255,0.4);
color: white;
}
/* ── Main Content ───────────────────── */
main {
flex: 1;
position: relative;
overflow: hidden;
}
/* Wizard panels are stacked absolutely; only .active is displayed.
   NOTE(review): the opacity/transform transition here cannot animate across a
   display none↔flex toggle; the entry effect actually comes from the
   step-enter animation at the bottom of this file. */
.step {
position: absolute;
inset: 0;
padding: 32px 36px;
display: none;
flex-direction: column;
overflow-y: auto;
opacity: 0;
transform: translateX(20px);
transition: opacity 0.35s ease, transform 0.35s ease;
}
.step.active {
display: flex;
opacity: 1;
transform: translateX(0);
}
h1 {
font-size: 22px;
font-weight: 700;
margin-bottom: 6px;
color: var(--text);
}
.subtitle {
font-size: 13px;
color: var(--text-secondary);
margin-bottom: 24px;
line-height: 1.6;
}
/* ── Info Card ──────────────────────── */
.info-card {
background: var(--surface);
border: 1px solid var(--border);
border-radius: var(--radius);
padding: 14px 18px;
margin-bottom: 20px;
box-shadow: 0 1px 3px rgba(0,0,0,0.04);
}
.info-row {
display: flex;
justify-content: space-between;
align-items: center;
padding: 7px 0;
}
.info-row + .info-row { border-top: 1px solid var(--border); }
.info-label { font-size: 13px; color: var(--text-secondary); }
.info-value { font-size: 13px; font-weight: 500; }
/* Status color classes applied from JS alongside .info-value. */
.status-ok { color: var(--success); }
.status-installed { color: var(--success); }
.status-warn { color: var(--warning); }
.status-err { color: var(--error); }
.status-skipped { color: var(--text-secondary); }
/* ── Warning Card ───────────────────── */
.warning-card {
background: #fef3c7;
border: 1px solid #f59e0b;
border-radius: var(--radius);
padding: 14px 18px;
margin-bottom: 20px;
font-size: 13px;
}
.warning-card p { margin: 6px 0; color: #92400e; }
.existing-path { font-family: "SF Mono", Menlo, monospace; font-size: 12px; }
/* ── Buttons ────────────────────────── */
/* margin-top: auto pins the action row to the bottom of the flex panel. */
.actions {
margin-top: auto;
padding-top: 20px;
display: flex;
justify-content: flex-end;
gap: 10px;
}
.btn {
padding: 9px 22px;
border: none;
border-radius: var(--radius);
font-size: 13px;
font-weight: 600;
cursor: pointer;
transition: all 0.2s ease;
outline: none;
}
.btn:disabled { opacity: 0.5; cursor: not-allowed; }
.btn-primary {
background: var(--primary);
color: white;
box-shadow: 0 1px 3px rgba(99, 102, 241, 0.3);
}
.btn-primary:hover:not(:disabled) {
background: var(--primary-hover);
box-shadow: 0 2px 8px rgba(99, 102, 241, 0.4);
transform: translateY(-1px);
}
.btn-secondary {
background: var(--border);
color: var(--text);
}
.btn-secondary:hover:not(:disabled) { background: #cbd5e1; }
.btn-ghost {
background: transparent;
color: var(--text-secondary);
border: 1px solid var(--border);
}
.btn-ghost:hover:not(:disabled) {
background: var(--surface);
color: var(--text);
border-color: #cbd5e1;
}
.btn-danger { background: var(--danger); color: white; }
.btn-danger:hover:not(:disabled) { background: var(--danger-hover); }
.btn-warning { background: var(--warning); color: white; }
.btn-warning:hover:not(:disabled) { background: #d97706; }
/* ── Form Groups ───────────────────── */
.form-group {
margin-bottom: 18px;
}
.field-label {
display: block;
font-size: 13px;
font-weight: 600;
color: var(--text);
margin-bottom: 6px;
}
.field-hint {
font-size: 12px;
color: var(--text-secondary);
margin-top: 4px;
line-height: 1.5;
font-style: italic;
}
/* ── Path Input ─────────────────────── */
.path-input-group {
display: flex;
gap: 8px;
margin-bottom: 8px;
}
.input-field {
flex: 1;
padding: 10px 14px;
border: 1px solid var(--border);
border-radius: var(--radius);
font-size: 13px;
background: var(--surface);
color: var(--text);
transition: border-color 0.2s, box-shadow 0.2s;
outline: none;
}
.input-field:focus {
border-color: var(--primary);
box-shadow: 0 0 0 3px rgba(99, 102, 241, 0.12);
}
.input-field::placeholder {
color: #94a3b8;
}
/* min-height reserves space so validation text doesn't shift the layout. */
.status-text {
font-size: 12px;
color: var(--text-secondary);
margin-bottom: 12px;
min-height: 18px;
}
.status-text.error { color: var(--error); }
/* ── Component List ─────────────────── */
.component-list {
display: flex;
flex-direction: column;
gap: 6px;
margin-bottom: 20px;
}
.component-item {
display: flex;
align-items: flex-start;
gap: 12px;
padding: 12px 14px;
background: var(--surface);
border: 1px solid var(--border);
border-radius: var(--radius);
cursor: pointer;
transition: border-color 0.2s, box-shadow 0.2s;
}
.component-item:hover {
border-color: var(--primary);
box-shadow: 0 0 0 2px rgba(99, 102, 241, 0.08);
}
/* Mandatory component (server) is slightly dimmed to read as fixed. */
.component-item.required { opacity: 0.85; }
.component-check {
display: flex;
align-items: center;
padding-top: 1px;
}
.component-check input[type="checkbox"] { margin: 0; }
.component-info { display: flex; flex-direction: column; gap: 3px; }
.component-name { font-size: 13px; font-weight: 600; }
.component-desc { font-size: 11px; color: var(--text-secondary); line-height: 1.4; }
/* ── Progress ───────────────────────── */
.progress-container {
display: flex;
align-items: center;
gap: 14px;
margin-bottom: 10px;
}
.progress-bar {
flex: 1;
height: 10px;
background: var(--border);
border-radius: 5px;
overflow: hidden;
position: relative;
}
/* Animated gradient sweep while the bar's width is driven from JS. */
.progress-fill {
height: 100%;
background: linear-gradient(90deg, var(--primary), #818cf8, var(--primary));
background-size: 200% 100%;
border-radius: 5px;
transition: width 0.4s ease;
animation: progress-pulse 2s ease-in-out infinite;
}
@keyframes progress-pulse {
0%, 100% { background-position: 0% 0%; }
50% { background-position: 100% 0%; }
}
.progress-percent {
font-size: 14px;
font-weight: 700;
color: var(--primary);
min-width: 40px;
text-align: right;
}
.progress-message {
font-size: 13px;
color: var(--text-secondary);
margin-bottom: 14px;
}
/* Dark terminal-style log panel; entries appended by addLogLine(). */
.log-area {
flex: 1;
min-height: 120px;
max-height: 220px;
background: #0f172a;
color: #94a3b8;
border-radius: var(--radius);
padding: 12px 14px;
font-family: "SF Mono", "Menlo", "Cascadia Code", monospace;
font-size: 11px;
line-height: 1.7;
overflow-y: auto;
box-shadow: inset 0 1px 4px rgba(0,0,0,0.2);
}
.log-area .log-line { color: #94a3b8; }
.log-area .log-error { color: var(--error); }
.log-area .log-success { color: var(--success); }
/* ── Hardware List ──────────────────── */
.hardware-list { margin-bottom: 20px; }
.device-card {
display: flex;
align-items: center;
gap: 14px;
padding: 14px 18px;
background: var(--surface);
border: 1px solid var(--border);
border-radius: var(--radius);
margin-bottom: 8px;
box-shadow: 0 1px 3px rgba(0,0,0,0.04);
transition: border-color 0.2s;
}
.device-card:hover { border-color: var(--primary); }
.device-card .device-icon {
width: 40px; height: 40px;
background: var(--primary-light);
border-radius: 10px;
display: flex; align-items: center; justify-content: center;
font-size: 18px;
color: var(--primary);
}
.device-card .device-info { display: flex; flex-direction: column; gap: 3px; }
.device-card .device-name { font-size: 14px; font-weight: 600; }
.device-card .device-detail { font-size: 11px; color: var(--text-secondary); }
.device-scanning {
display: flex;
flex-direction: column;
align-items: center;
gap: 12px;
padding: 32px;
color: var(--text-secondary);
font-size: 13px;
}
.spinner {
width: 28px; height: 28px;
border: 3px solid var(--border);
border-top-color: var(--primary);
border-radius: 50%;
animation: spin 0.8s linear infinite;
}
@keyframes spin {
to { transform: rotate(360deg); }
}
.no-devices {
text-align: center;
padding: 28px;
color: var(--text-secondary);
font-size: 13px;
}
/* ── Complete Icon ──────────────────── */
.complete-icon {
text-align: center;
margin-bottom: 12px;
color: var(--success);
animation: checkmark-pop 0.5s ease;
}
@keyframes checkmark-pop {
0% { transform: scale(0.5); opacity: 0; }
60% { transform: scale(1.1); }
100% { transform: scale(1); opacity: 1; }
}
/* ── Summary List ──────────────────── */
.summary-list {
background: var(--surface);
border: 1px solid var(--border);
border-radius: var(--radius);
padding: 14px 18px;
margin-bottom: 20px;
}
/* ── Scrollbar ──────────────────────── */
::-webkit-scrollbar { width: 6px; }
::-webkit-scrollbar-track { background: transparent; }
::-webkit-scrollbar-thumb { background: #cbd5e1; border-radius: 3px; }
::-webkit-scrollbar-thumb:hover { background: #94a3b8; }
/* ── Step transition for newly shown steps ── */
/* NOTE(review): this second .step.active rule overlaps the transition-based
   one defined in the Main Content section above; the animation below is what
   actually produces the slide-in, since transitions don't fire across a
   display toggle. Consider consolidating the two rules. */
@keyframes step-enter {
from {
opacity: 0;
transform: translateX(24px);
}
to {
opacity: 1;
transform: translateX(0);
}
}
.step.active {
animation: step-enter 0.35s ease forwards;
}

35
installer/go.mod Normal file
View File

@ -0,0 +1,35 @@
// Module definition for the Wails-based desktop installer.
module edge-ai-installer

go 1.22.0

// Wails v2 is the only direct dependency; everything in the block below is
// transitive (pulled in by Wails) and marked indirect.
require github.com/wailsapp/wails/v2 v2.11.0

require (
	github.com/bep/debounce v1.2.1 // indirect
	github.com/go-ole/go-ole v1.3.0 // indirect
	github.com/godbus/dbus/v5 v5.1.0 // indirect
	github.com/google/uuid v1.6.0 // indirect
	github.com/gorilla/websocket v1.5.3 // indirect
	github.com/jchv/go-winloader v0.0.0-20210711035445-715c2860da7e // indirect
	github.com/labstack/echo/v4 v4.13.3 // indirect
	github.com/labstack/gommon v0.4.2 // indirect
	github.com/leaanthony/go-ansi-parser v1.6.1 // indirect
	github.com/leaanthony/gosod v1.0.4 // indirect
	github.com/leaanthony/slicer v1.6.0 // indirect
	github.com/leaanthony/u v1.1.1 // indirect
	github.com/mattn/go-colorable v0.1.13 // indirect
	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/rivo/uniseg v0.4.7 // indirect
	github.com/samber/lo v1.49.1 // indirect
	github.com/tkrajina/go-reflector v0.5.8 // indirect
	github.com/valyala/bytebufferpool v1.0.0 // indirect
	github.com/valyala/fasttemplate v1.2.2 // indirect
	github.com/wailsapp/go-webview2 v1.0.22 // indirect
	github.com/wailsapp/mimetype v1.4.1 // indirect
	golang.org/x/crypto v0.33.0 // indirect
	golang.org/x/net v0.35.0 // indirect
	golang.org/x/sys v0.30.0 // indirect
	golang.org/x/text v0.22.0 // indirect
)

81
installer/go.sum Normal file
View File

@ -0,0 +1,81 @@
github.com/bep/debounce v1.2.1 h1:v67fRdBA9UQu2NhLFXrSg0Brw7CexQekrBwDMM8bzeY=
github.com/bep/debounce v1.2.1/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/jchv/go-winloader v0.0.0-20210711035445-715c2860da7e h1:Q3+PugElBCf4PFpxhErSzU3/PY5sFL5Z6rfv4AbGAck=
github.com/jchv/go-winloader v0.0.0-20210711035445-715c2860da7e/go.mod h1:alcuEEnZsY1WQsagKhZDsoPCRoOijYqhZvPwLG0kzVs=
github.com/labstack/echo/v4 v4.13.3 h1:pwhpCPrTl5qry5HRdM5FwdXnhXSLSY+WE+YQSeCaafY=
github.com/labstack/echo/v4 v4.13.3/go.mod h1:o90YNEeQWjDozo584l7AwhJMHN0bOC4tAfg+Xox9q5g=
github.com/labstack/gommon v0.4.2 h1:F8qTUNXgG1+6WQmqoUWnz8WiEU60mXVVw0P4ht1WRA0=
github.com/labstack/gommon v0.4.2/go.mod h1:QlUFxVM+SNXhDL/Z7YhocGIBYOiwB0mXm1+1bAPHPyU=
github.com/leaanthony/debme v1.2.1 h1:9Tgwf+kjcrbMQ4WnPcEIUcQuIZYqdWftzZkBr+i/oOc=
github.com/leaanthony/debme v1.2.1/go.mod h1:3V+sCm5tYAgQymvSOfYQ5Xx2JCr+OXiD9Jkw3otUjiA=
github.com/leaanthony/go-ansi-parser v1.6.1 h1:xd8bzARK3dErqkPFtoF9F3/HgN8UQk0ed1YDKpEz01A=
github.com/leaanthony/go-ansi-parser v1.6.1/go.mod h1:+vva/2y4alzVmmIEpk9QDhA7vLC5zKDTRwfZGOp3IWU=
github.com/leaanthony/gosod v1.0.4 h1:YLAbVyd591MRffDgxUOU1NwLhT9T1/YiwjKZpkNFeaI=
github.com/leaanthony/gosod v1.0.4/go.mod h1:GKuIL0zzPj3O1SdWQOdgURSuhkF+Urizzxh26t9f1cw=
github.com/leaanthony/slicer v1.6.0 h1:1RFP5uiPJvT93TAHi+ipd3NACobkW53yUiBqZheE/Js=
github.com/leaanthony/slicer v1.6.0/go.mod h1:o/Iz29g7LN0GqH3aMjWAe90381nyZlDNquK+mtH2Fj8=
github.com/leaanthony/u v1.1.1 h1:TUFjwDGlNX+WuwVEzDqQwC2lOv0P4uhTQw7CMFdiK7M=
github.com/leaanthony/u v1.1.1/go.mod h1:9+o6hejoRljvZ3BzdYlVL0JYCwtnAsVuN9pVTQcaRfI=
github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU=
github.com/matryer/is v1.4.1 h1:55ehd8zaGABKLXQUe2awZ99BD/PTc2ls+KV/dXphgEQ=
github.com/matryer/is v1.4.1/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/samber/lo v1.49.1 h1:4BIFyVfuQSEpluc7Fua+j1NolZHiEHEpaSEKdsH0tew=
github.com/samber/lo v1.49.1/go.mod h1:dO6KHFzUKXgP8LDhU0oI8d2hekjXnGOu0DB8Jecxd6o=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tkrajina/go-reflector v0.5.8 h1:yPADHrwmUbMq4RGEyaOUpz2H90sRsETNVpjzo3DLVQQ=
github.com/tkrajina/go-reflector v0.5.8/go.mod h1:ECbqLgccecY5kPmPmXg1MrHW585yMcDkVl6IvJe64T4=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo=
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/wailsapp/go-webview2 v1.0.22 h1:YT61F5lj+GGaat5OB96Aa3b4QA+mybD0Ggq6NZijQ58=
github.com/wailsapp/go-webview2 v1.0.22/go.mod h1:qJmWAmAmaniuKGZPWwne+uor3AHMB5PFhqiK0Bbj8kc=
github.com/wailsapp/mimetype v1.4.1 h1:pQN9ycO7uo4vsUUuPeHEYoUkLVkaRntMnHJxVwYhwHs=
github.com/wailsapp/mimetype v1.4.1/go.mod h1:9aV5k31bBOv5z6u+QP8TltzvNGJPmNJD4XlAL3U+j3o=
github.com/wailsapp/wails/v2 v2.11.0 h1:seLacV8pqupq32IjS4Y7V8ucab0WZwtK6VvUVxSBtqQ=
github.com/wailsapp/wails/v2 v2.11.0/go.mod h1:jrf0ZaM6+GBc1wRmXsM8cIvzlg0karYin3erahI4+0k=
golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus=
golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
golang.org/x/net v0.0.0-20210505024714-0287a6fb4125/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

36
installer/main.go Normal file
View File

@ -0,0 +1,36 @@
package main

import (
	"embed"

	"github.com/wailsapp/wails/v2"
	"github.com/wailsapp/wails/v2/pkg/options"
	"github.com/wailsapp/wails/v2/pkg/options/assetserver"
)

// Installer UI assets, embedded from installer/frontend at build time.
//
//go:embed all:frontend
var assets embed.FS

// main launches the Wails desktop installer in a fixed-size 720x560 window
// (Min == Max pins the window so it cannot be resized).
func main() {
	// NOTE(review): payloadFS is not declared in this file — presumably a
	// //go:embed variable in a sibling file holding the install payload;
	// confirm against the rest of the package.
	installer := NewInstaller(payloadFS)
	err := wails.Run(&options.App{
		Title:     "Edge AI Platform Installer",
		Width:     720,
		Height:    560,
		MinWidth:  720,
		MinHeight: 560,
		MaxWidth:  720,
		MaxHeight: 560,
		AssetServer: &assetserver.Options{
			Assets: assets,
		},
		OnStartup: installer.startup,
		// Bind exposes the installer's exported methods to the frontend JS.
		Bind: []interface{}{
			installer,
		},
	})
	if err != nil {
		panic(err)
	}
}

View File

@ -0,0 +1,150 @@
//go:build darwin

package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)

// launchd agent label; also used as the LaunchAgents plist filename stem.
const launchdLabel = "com.innovedus.edge-ai-server"
func platformDefaultDir() string {
home, _ := os.UserHomeDir()
return filepath.Join(home, ".edge-ai-platform")
}
func platformConfigDir() string {
home, _ := os.UserHomeDir()
return filepath.Join(home, ".edge-ai-platform")
}
// createSystemLink puts the installed binary on the default PATH by
// symlinking <installDir>/edge-ai-server into /usr/local/bin. If the direct
// symlink fails (typically permissions), it retries via osascript with an
// administrator-privileges GUI prompt.
func createSystemLink(installDir string) error {
	target := filepath.Join(installDir, "edge-ai-server")
	link := "/usr/local/bin/edge-ai-server"
	// Remove existing symlink if present (error deliberately ignored:
	// the link may simply not exist yet).
	os.Remove(link)
	// Try without sudo first
	if err := os.Symlink(target, link); err != nil {
		// Need admin privileges — use osascript
		// NOTE(review): target/link are interpolated into a single-quoted
		// shell command inside AppleScript; a ' in installDir would break
		// the command — confirm install paths cannot contain quotes.
		script := fmt.Sprintf(
			`do shell script "ln -sf '%s' '%s'" with administrator privileges`,
			target, link,
		)
		cmd := exec.Command("osascript", "-e", script)
		if out, err := cmd.CombinedOutput(); err != nil {
			return fmt.Errorf("symlink failed: %s — %w", string(out), err)
		}
	}
	return nil
}
// removeSystemLink deletes the /usr/local/bin symlink created by
// createSystemLink. Best-effort: the error is ignored (link may be absent).
func removeSystemLink() {
	os.Remove("/usr/local/bin/edge-ai-server")
}
// installLibusb installs the libusb library via Homebrew. Returns an error
// when Homebrew itself is missing (with install instructions) or when the
// brew install fails. installDir is unused on macOS; the parameter exists to
// match the other platform implementations.
func installLibusb(installDir string) error {
	if _, err := exec.LookPath("brew"); err != nil {
		return fmt.Errorf("Homebrew not found. Install from https://brew.sh then retry")
	}
	// Check if already installed
	if err := exec.Command("brew", "list", "libusb").Run(); err == nil {
		return nil
	}
	cmd := exec.Command("brew", "install", "libusb")
	if out, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("brew install libusb failed: %s — %w", string(out), err)
	}
	return nil
}
func checkLibusbInstalled() bool {
if _, err := exec.LookPath("brew"); err != nil {
return false
}
return exec.Command("brew", "list", "libusb").Run() == nil
}
// removeQuarantine strips the com.apple.quarantine extended attribute from
// the installed binary so Gatekeeper does not block it after download.
// Best-effort: the xattr command's error is deliberately ignored.
func removeQuarantine(installDir string) {
	binPath := filepath.Join(installDir, "edge-ai-server")
	exec.Command("xattr", "-dr", "com.apple.quarantine", binPath).Run()
}
// installAutoRestart registers the server as a per-user launchd agent so it
// starts at login (RunAtLoad) and is relaunched after abnormal exits
// (KeepAlive.SuccessfulExit=false, throttled to at most once per 5s).
// Stdout/stderr are redirected to <installDir>/logs.
//
// Fix vs. original: os.MkdirAll errors were silently ignored, so a failed
// directory creation surfaced later as a confusing WriteFile or launchctl
// error; they are now checked and reported directly.
func installAutoRestart(installDir string) error {
	home, err := os.UserHomeDir()
	if err != nil {
		return fmt.Errorf("cannot determine home directory: %w", err)
	}
	plistDir := filepath.Join(home, "Library", "LaunchAgents")
	plistPath := filepath.Join(plistDir, launchdLabel+".plist")
	logDir := filepath.Join(installDir, "logs")
	if err := os.MkdirAll(plistDir, 0755); err != nil {
		return fmt.Errorf("create LaunchAgents dir: %w", err)
	}
	if err := os.MkdirAll(logDir, 0755); err != nil {
		return fmt.Errorf("create log dir: %w", err)
	}
	binPath := filepath.Join(installDir, "edge-ai-server")
	plist := strings.Join([]string{
		`<?xml version="1.0" encoding="UTF-8"?>`,
		`<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">`,
		`<plist version="1.0">`,
		`<dict>`,
		`  <key>Label</key>`,
		`  <string>` + launchdLabel + `</string>`,
		`  <key>ProgramArguments</key>`,
		`  <array>`,
		`    <string>` + binPath + `</string>`,
		`    <string>--tray</string>`,
		`  </array>`,
		`  <key>WorkingDirectory</key>`,
		`  <string>` + installDir + `</string>`,
		`  <key>KeepAlive</key>`,
		`  <dict>`,
		`    <key>SuccessfulExit</key>`,
		`    <false/>`,
		`  </dict>`,
		`  <key>ThrottleInterval</key>`,
		`  <integer>5</integer>`,
		`  <key>StandardOutPath</key>`,
		`  <string>` + filepath.Join(logDir, "server.log") + `</string>`,
		`  <key>StandardErrorPath</key>`,
		`  <string>` + filepath.Join(logDir, "server.err.log") + `</string>`,
		`  <key>ProcessType</key>`,
		`  <string>Interactive</string>`,
		`  <key>RunAtLoad</key>`,
		`  <true/>`,
		`</dict>`,
		`</plist>`,
	}, "\n")
	if err := os.WriteFile(plistPath, []byte(plist), 0644); err != nil {
		return fmt.Errorf("write plist: %w", err)
	}
	// Unload if already loaded, then load. Unload failure is expected when
	// the agent was never loaded, so its error is ignored.
	// NOTE(review): launchctl load/unload are legacy subcommands; consider
	// `launchctl bootstrap/bootout gui/$UID` on modern macOS.
	exec.Command("launchctl", "unload", plistPath).Run()
	if out, err := exec.Command("launchctl", "load", plistPath).CombinedOutput(); err != nil {
		return fmt.Errorf("launchctl load failed: %s — %w", string(out), err)
	}
	return nil
}
// removeAutoRestart unloads and deletes the per-user launchd agent created
// by installAutoRestart. Best-effort: unload and remove errors are ignored
// (the agent may never have been installed).
func removeAutoRestart() {
	home, err := os.UserHomeDir()
	if err != nil {
		return
	}
	plistPath := filepath.Join(home, "Library", "LaunchAgents", launchdLabel+".plist")
	exec.Command("launchctl", "unload", plistPath).Run()
	os.Remove(plistPath)
}

122
installer/platform_linux.go Normal file
View File

@ -0,0 +1,122 @@
//go:build linux

package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)

// Name of the per-user systemd unit used for auto-restart at login.
const systemdServiceName = "edge-ai-server"
func platformDefaultDir() string {
home, _ := os.UserHomeDir()
return filepath.Join(home, ".edge-ai-platform")
}
func platformConfigDir() string {
home, _ := os.UserHomeDir()
return filepath.Join(home, ".edge-ai-platform")
}
// createSystemLink symlinks the installed binary into /usr/local/bin so it
// is on the default PATH. When the direct symlink fails (typically EACCES),
// it retries through pkexec, which shows a graphical privilege prompt.
func createSystemLink(installDir string) error {
	target := filepath.Join(installDir, "edge-ai-server")
	link := "/usr/local/bin/edge-ai-server"
	// Best-effort removal of a stale link; error deliberately ignored.
	os.Remove(link)
	if err := os.Symlink(target, link); err != nil {
		// Try with pkexec for GUI sudo
		cmd := exec.Command("pkexec", "ln", "-sf", target, link)
		if out, err := cmd.CombinedOutput(); err != nil {
			return fmt.Errorf("symlink failed: %s — %w", string(out), err)
		}
	}
	return nil
}
// removeSystemLink deletes the /usr/local/bin symlink created by
// createSystemLink. Best-effort: the error is ignored (link may be absent).
func removeSystemLink() {
	os.Remove("/usr/local/bin/edge-ai-server")
}
func installLibusb(installDir string) error {
// Check if already installed
if _, err := exec.LookPath("lsusb"); err == nil {
return nil
}
cmd := exec.Command("pkexec", "apt-get", "install", "-y", "libusb-1.0-0-dev")
if out, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("apt install libusb failed: %s — %w", string(out), err)
}
return nil
}
// checkLibusbInstalled reports whether libusb-1.0-0-dev is installed
// according to dpkg. On systems without dpkg (non-Debian distros) the
// command fails and this returns false.
func checkLibusbInstalled() bool {
	return exec.Command("dpkg", "-s", "libusb-1.0-0-dev").Run() == nil
}
// removeQuarantine is a no-op on Linux; quarantine xattrs are a macOS
// (Gatekeeper) concept. Kept so all platforms expose the same function set.
func removeQuarantine(installDir string) {
	// No-op on Linux
}
// installAutoRestart writes and starts a per-user systemd unit so the server
// starts at login (WantedBy=default.target) and restarts on failure after
// 5 seconds. Logs are appended under <installDir>/logs.
func installAutoRestart(installDir string) error {
	home, err := os.UserHomeDir()
	if err != nil {
		return fmt.Errorf("cannot determine home directory: %w", err)
	}
	serviceDir := filepath.Join(home, ".config", "systemd", "user")
	servicePath := filepath.Join(serviceDir, systemdServiceName+".service")
	logDir := filepath.Join(installDir, "logs")
	// NOTE(review): MkdirAll errors are ignored here; a failure surfaces
	// later as a less clear WriteFile error.
	os.MkdirAll(serviceDir, 0755)
	os.MkdirAll(logDir, 0755)
	binPath := filepath.Join(installDir, "edge-ai-server")
	service := strings.Join([]string{
		"[Unit]",
		"Description=Edge AI Platform Server",
		"After=network.target",
		"",
		"[Service]",
		"Type=simple",
		"ExecStart=" + binPath + " --tray",
		"WorkingDirectory=" + installDir,
		"Restart=on-failure",
		"RestartSec=5",
		// NOTE(review): the "append:" output mode needs a reasonably recent
		// systemd — confirm against the oldest supported distro.
		"StandardOutput=append:" + filepath.Join(logDir, "server.log"),
		"StandardError=append:" + filepath.Join(logDir, "server.err.log"),
		"",
		"[Install]",
		"WantedBy=default.target",
	}, "\n")
	if err := os.WriteFile(servicePath, []byte(service), 0644); err != nil {
		return fmt.Errorf("write service file: %w", err)
	}
	// daemon-reload and enable are best-effort; only a start failure is
	// treated as fatal.
	exec.Command("systemctl", "--user", "daemon-reload").Run()
	exec.Command("systemctl", "--user", "enable", systemdServiceName+".service").Run()
	if out, err := exec.Command("systemctl", "--user", "start", systemdServiceName+".service").CombinedOutput(); err != nil {
		return fmt.Errorf("systemctl start failed: %s — %w", string(out), err)
	}
	return nil
}
// removeAutoRestart disables and stops the per-user systemd unit created by
// installAutoRestart, deletes the unit file, and reloads the user daemon.
// Best-effort: systemctl and file-removal errors are ignored.
func removeAutoRestart() {
	exec.Command("systemctl", "--user", "disable", "--now", systemdServiceName+".service").Run()
	home, err := os.UserHomeDir()
	if err != nil {
		return
	}
	servicePath := filepath.Join(home, ".config", "systemd", "user", systemdServiceName+".service")
	os.Remove(servicePath)
	exec.Command("systemctl", "--user", "daemon-reload").Run()
}

View File

@ -0,0 +1,132 @@
//go:build windows
package main
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
)
func platformDefaultDir() string {
return filepath.Join(os.Getenv("LOCALAPPDATA"), "EdgeAIPlatform")
}
func platformConfigDir() string {
return filepath.Join(os.Getenv("LOCALAPPDATA"), "EdgeAIPlatform")
}
// createSystemLink makes the server discoverable from the command line by
// appending installDir to the per-user PATH environment variable (Windows
// has no /usr/local/bin equivalent).
//
// Fix vs. original: the "already in PATH" check used a case-insensitive
// substring test, which false-positives whenever any longer PATH entry
// shares the prefix (e.g. "...\EdgeAIPlatformOld"), silently skipping the
// append. Compare whole semicolon-separated entries instead, mirroring the
// logic in removeSystemLink.
func createSystemLink(installDir string) error {
	// On Windows, add the install directory to user PATH
	cmd := exec.Command("powershell", "-Command",
		`[Environment]::GetEnvironmentVariable("PATH", "User")`)
	out, err := cmd.Output()
	if err != nil {
		return fmt.Errorf("failed to read PATH: %w", err)
	}
	currentPath := strings.TrimSpace(string(out))
	for _, entry := range strings.Split(currentPath, ";") {
		if strings.EqualFold(strings.TrimSpace(entry), installDir) {
			return nil // already in PATH
		}
	}
	newPath := currentPath
	if newPath != "" {
		newPath += ";"
	}
	newPath += installDir
	// NOTE(review): newPath is interpolated into a PowerShell double-quoted
	// string; a `"` inside a path would break the command — confirm inputs.
	cmd = exec.Command("powershell", "-Command",
		fmt.Sprintf(`[Environment]::SetEnvironmentVariable("PATH", "%s", "User")`, newPath))
	if out, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("failed to update PATH: %s — %w", string(out), err)
	}
	return nil
}
// removeSystemLink removes the default install directory from the per-user
// PATH: it reads PATH via PowerShell, drops entries equal to
// platformDefaultDir() (case-insensitive, whole-entry comparison), and
// writes the result back. Best-effort: read/write errors are ignored.
func removeSystemLink() {
	installDir := platformDefaultDir()
	cmd := exec.Command("powershell", "-Command",
		`[Environment]::GetEnvironmentVariable("PATH", "User")`)
	out, err := cmd.Output()
	if err != nil {
		return
	}
	currentPath := strings.TrimSpace(string(out))
	parts := strings.Split(currentPath, ";")
	var filtered []string
	for _, p := range parts {
		if !strings.EqualFold(strings.TrimSpace(p), installDir) {
			filtered = append(filtered, p)
		}
	}
	newPath := strings.Join(filtered, ";")
	exec.Command("powershell", "-Command",
		fmt.Sprintf(`[Environment]::SetEnvironmentVariable("PATH", "%s", "User")`, newPath)).Run()
}
func installLibusb(installDir string) error {
// Check known system DLL locations
dllPaths := []string{
filepath.Join(os.Getenv("SystemRoot"), "System32", "libusb-1.0.dll"),
filepath.Join(os.Getenv("SystemRoot"), "SysWOW64", "libusb-1.0.dll"),
}
for _, p := range dllPaths {
if _, err := os.Stat(p); err == nil {
return nil // already installed
}
}
return fmt.Errorf("libusb not found. Please install the WinUSB driver via Zadig: https://zadig.akeo.ie")
}
func checkLibusbInstalled() bool {
dllPaths := []string{
filepath.Join(os.Getenv("SystemRoot"), "System32", "libusb-1.0.dll"),
filepath.Join(os.Getenv("SystemRoot"), "SysWOW64", "libusb-1.0.dll"),
}
for _, p := range dllPaths {
if _, err := os.Stat(p); err == nil {
return true
}
}
return false
}
// removeQuarantine is a no-op on Windows; quarantine xattrs are a macOS
// (Gatekeeper) concept. Kept so all platforms expose the same function set.
func removeQuarantine(installDir string) {
	// No-op on Windows
}
// installAutoRestart registers a per-user Scheduled Task that launches the
// server at logon and restarts it up to 3 times (5s apart) on failure, then
// starts it immediately. PowerShell is used because schtasks cannot express
// the restart settings.
//
// Fix vs. original: binPath/installDir were embedded in PowerShell
// single-quoted strings without escaping, so a literal ' in the install path
// would terminate the string and break (or inject into) the script.
// PowerShell escapes ' inside '…' by doubling it.
func installAutoRestart(installDir string) error {
	taskName := "EdgeAIPlatformServer"
	binPath := filepath.Join(installDir, "edge-ai-server.exe")
	// Remove existing task if present (best-effort; it may not exist).
	exec.Command("schtasks", "/Delete", "/TN", taskName, "/F").Run()
	// psQuote escapes a value for use inside a PowerShell single-quoted
	// string literal.
	psQuote := func(s string) string { return strings.ReplaceAll(s, "'", "''") }
	// Create scheduled task that runs at logon and restarts on failure
	// (ExecutionTimeLimit of 0 days = no time limit).
	psScript := fmt.Sprintf(`
$Action = New-ScheduledTaskAction -Execute '%s' -Argument '--tray' -WorkingDirectory '%s'
$Trigger = New-ScheduledTaskTrigger -AtLogOn
$Settings = New-ScheduledTaskSettingsSet -RestartCount 3 -RestartInterval (New-TimeSpan -Seconds 5) -AllowStartIfOnBatteries -DontStopIfGoingOnBatteries -StartWhenAvailable -ExecutionTimeLimit (New-TimeSpan -Days 0)
Register-ScheduledTask -TaskName '%s' -Action $Action -Trigger $Trigger -Settings $Settings -Description 'Edge AI Platform Server' -Force
Start-ScheduledTask -TaskName '%s'
`, psQuote(binPath), psQuote(installDir), taskName, taskName)
	cmd := exec.Command("powershell", "-NoProfile", "-Command", psScript)
	if out, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("scheduled task creation failed: %s — %w", string(out), err)
	}
	return nil
}
// removeAutoRestart deletes the Scheduled Task created by installAutoRestart.
// Best-effort: the error is ignored (the task may not exist).
func removeAutoRestart() {
	exec.Command("schtasks", "/Delete", "/TN", "EdgeAIPlatformServer", "/F").Run()
}

20
installer/wails.json Normal file
View File

@ -0,0 +1,20 @@
{
"$schema": "https://wails.io/schemas/config.v2.json",
"name": "EdgeAI-Installer",
"outputfilename": "EdgeAI-Installer",
"frontend:install": "",
"frontend:build": "",
"frontend:dev:watcher": "",
"frontend:dev:serverUrl": "",
"assetdir": "./frontend",
"author": {
"name": "Innovedus",
"email": "support@innovedus.com"
},
"info": {
"companyName": "Innovedus",
"productName": "Edge AI Platform Installer",
"productVersion": "0.1.0",
"copyright": "Copyright 2026 Innovedus"
}
}

BIN
relay-server-linux Executable file

Binary file not shown.

408
scripts/deploy-aws.sh Executable file
View File

@ -0,0 +1,408 @@
#!/usr/bin/env bash
set -euo pipefail
# Edge AI Platform — AWS Frontend Deployment (CloudFront + S3)
#
# Usage:
# bash scripts/deploy-aws.sh # sync only (requires prior build)
# bash scripts/deploy-aws.sh --build # build frontend then deploy
# bash scripts/deploy-aws.sh --setup # first-time: create S3 + CloudFront
# bash scripts/deploy-aws.sh --help # show help
#
# Environment variables:
# AWS_BUCKET_NAME S3 bucket name (default: edge-ai-platform-frontend)
# AWS_REGION AWS region (default: ap-northeast-1)
# AWS_PROFILE AWS CLI profile (optional)
#
# Prerequisites:
# - AWS CLI v2 installed and configured (aws configure)
# - Sufficient IAM permissions: S3, CloudFront, IAM (for OAC)
# ── Paths & configuration ─────────────────────────────────
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
FRONTEND_DIR="$PROJECT_ROOT/frontend"
OUT_DIR="$FRONTEND_DIR/out"
# Deployment ids (distribution, OAC, domain) persisted between runs.
STATE_FILE="$PROJECT_ROOT/.aws-deploy-state"
# User-tunable via environment; see --help.
BUCKET_NAME="${AWS_BUCKET_NAME:-edge-ai-platform-frontend}"
REGION="${AWS_REGION:-ap-northeast-1}"
# Optional AWS CLI profile; expanded unquoted by aws_cmd so it word-splits
# into "--profile <name>" (or nothing when unset).
PROFILE_FLAG=""
if [ -n "${AWS_PROFILE:-}" ]; then
  PROFILE_FLAG="--profile $AWS_PROFILE"
fi
# ── Logging helpers (ANSI colours) ────────────────────────
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
CYAN='\033[0;36m'
NC='\033[0m'
info() { echo -e "${GREEN}[INFO]${NC} $*"; }
warn() { echo -e "${YELLOW}[WARN]${NC} $*"; }
# error() prints to stderr and terminates the script (exit 1).
error() { echo -e "${RED}[ERROR]${NC} $*" >&2; exit 1; }
step() { echo -e "\n${CYAN}=== $* ===${NC}\n"; }
# ── Helpers ──────────────────────────────────────────────
# aws_cmd — run the AWS CLI with the optional --profile flag prepended.
# $PROFILE_FLAG is intentionally unquoted so it word-splits.
aws_cmd() {
  # shellcheck disable=SC2086
  aws $PROFILE_FLAG "$@"
}

# load_state — source previously saved deployment ids (DISTRIBUTION_ID,
# OAC_ID, BUCKET_NAME, REGION, CLOUDFRONT_DOMAIN) if a state file exists.
load_state() {
  if [ -f "$STATE_FILE" ]; then
    # shellcheck disable=SC1090
    source "$STATE_FILE"
  fi
}

# save_state — persist deployment ids so later runs can reuse the created
# infrastructure without --setup. Overwrites the state file each time.
save_state() {
  cat > "$STATE_FILE" <<EOF
# Auto-generated by deploy-aws.sh — do not edit manually
DISTRIBUTION_ID="$DISTRIBUTION_ID"
OAC_ID="$OAC_ID"
BUCKET_NAME="$BUCKET_NAME"
REGION="$REGION"
CLOUDFRONT_DOMAIN="$CLOUDFRONT_DOMAIN"
EOF
  info "State saved to $STATE_FILE"
}
# show_help — print usage text. The heredoc delimiter is quoted ('HELP') so
# nothing inside is expanded.
show_help() {
  cat <<'HELP'
Edge AI Platform — AWS Frontend Deployment
Usage:
  bash scripts/deploy-aws.sh [OPTIONS]
Options:
  --setup    First-time setup: create S3 bucket + CloudFront distribution
  --build    Build the frontend before deploying
  --help     Show this help message
Environment:
  AWS_BUCKET_NAME   S3 bucket name (default: edge-ai-platform-frontend)
  AWS_REGION        AWS region (default: ap-northeast-1)
  AWS_PROFILE       AWS CLI profile (optional)
Examples:
  # First-time deployment (creates infrastructure + deploys)
  bash scripts/deploy-aws.sh --setup --build
  # Update deployment (build + sync + invalidate cache)
  bash scripts/deploy-aws.sh --build
  # Quick sync (frontend already built)
  bash scripts/deploy-aws.sh
  # Custom bucket and region
  AWS_BUCKET_NAME=my-app AWS_REGION=us-east-1 bash scripts/deploy-aws.sh --setup --build
HELP
}
# ── Check Prerequisites ──────────────────────────────────
# check_aws_cli — verify the AWS CLI is installed and credentials are valid;
# prints the caller's account id. Exits via error() on either failure.
check_aws_cli() {
  if ! command -v aws &>/dev/null; then
    error "AWS CLI not found. Install: https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html"
  fi
  if ! aws_cmd sts get-caller-identity &>/dev/null; then
    error "AWS credentials not configured. Run: aws configure"
  fi
  local identity
  identity=$(aws_cmd sts get-caller-identity --output text --query 'Account')
  info "AWS Account: $identity"
}
# ── Build Frontend ───────────────────────────────────────
# build_frontend — build the frontend with pnpm; output is expected in
# $OUT_DIR. The subshell keeps the caller's working directory untouched.
build_frontend() {
  step "Building frontend"
  if ! command -v pnpm &>/dev/null; then
    error "pnpm not found. Install: npm install -g pnpm"
  fi
  (cd "$FRONTEND_DIR" && pnpm build)
  info "Frontend built: $OUT_DIR"
}
# ── Setup S3 Bucket ──────────────────────────────────────
# setup_s3_bucket — create the S3 bucket (idempotent) and block all public
# access; content is served exclusively through CloudFront via OAC.
setup_s3_bucket() {
  step "Setting up S3 bucket: $BUCKET_NAME"
  # Create bucket if not exists
  if aws_cmd s3api head-bucket --bucket "$BUCKET_NAME" 2>/dev/null; then
    info "Bucket already exists: $BUCKET_NAME"
  else
    info "Creating bucket: $BUCKET_NAME"
    # us-east-1 rejects an explicit LocationConstraint — special-case it.
    if [ "$REGION" = "us-east-1" ]; then
      aws_cmd s3api create-bucket --bucket "$BUCKET_NAME" --region "$REGION"
    else
      aws_cmd s3api create-bucket --bucket "$BUCKET_NAME" --region "$REGION" \
        --create-bucket-configuration LocationConstraint="$REGION"
    fi
    info "Bucket created"
  fi
  # Block all public access (CloudFront OAC will access it)
  aws_cmd s3api put-public-access-block --bucket "$BUCKET_NAME" \
    --public-access-block-configuration \
    "BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true"
  info "Public access blocked (CloudFront OAC will be used)"
}
# ── Setup CloudFront ─────────────────────────────────────
# setup_cloudfront — create (or reuse) the CloudFront distribution that
# fronts the S3 bucket: look up / create an Origin Access Control, look up /
# create the distribution, grant the distribution read access via bucket
# policy, then persist all ids with save_state.
setup_cloudfront() {
  step "Setting up CloudFront distribution"
  # Create OAC (Origin Access Control)
  local oac_name="edge-ai-platform-oac"
  # Look up an existing OAC by name; empty or the literal "None" means it
  # must be created. `|| true` keeps set -e from aborting on a query failure.
  OAC_ID=$(aws_cmd cloudfront list-origin-access-controls --output text \
    --query "OriginAccessControlList.Items[?Name=='$oac_name'].Id | [0]" 2>/dev/null || true)
  if [ -z "$OAC_ID" ] || [ "$OAC_ID" = "None" ]; then
    info "Creating Origin Access Control..."
    OAC_ID=$(aws_cmd cloudfront create-origin-access-control \
      --origin-access-control-config \
      "Name=$oac_name,Description=Edge AI Platform OAC,SigningProtocol=sigv4,SigningBehavior=always,OriginAccessControlOriginType=s3" \
      --output text --query 'OriginAccessControl.Id')
    info "OAC created: $OAC_ID"
  else
    info "OAC already exists: $OAC_ID"
  fi
  # Check if distribution already exists — matched by its first origin's S3
  # domain name.
  DISTRIBUTION_ID=$(aws_cmd cloudfront list-distributions --output text \
    --query "DistributionList.Items[?Origins.Items[0].DomainName=='${BUCKET_NAME}.s3.${REGION}.amazonaws.com'].Id | [0]" 2>/dev/null || true)
  if [ -n "$DISTRIBUTION_ID" ] && [ "$DISTRIBUTION_ID" != "None" ]; then
    info "CloudFront distribution already exists: $DISTRIBUTION_ID"
    CLOUDFRONT_DOMAIN=$(aws_cmd cloudfront get-distribution --id "$DISTRIBUTION_ID" \
      --output text --query 'Distribution.DomainName')
  else
    info "Creating CloudFront distribution..."
    local caller_ref
    caller_ref="edge-ai-$(date +%s)"
    local dist_config
    # Unquoted heredoc: $caller_ref/$OAC_ID/${BUCKET_NAME}/${REGION} expand.
    # The 403→/index.html 200 error response is the SPA-routing fallback
    # (S3 returns 403 for unknown keys when listing is blocked).
    dist_config=$(cat <<DISTCONFIG
{
  "CallerReference": "$caller_ref",
  "Comment": "Edge AI Platform Frontend",
  "Enabled": true,
  "DefaultRootObject": "index.html",
  "Origins": {
    "Quantity": 1,
    "Items": [
      {
        "Id": "S3-${BUCKET_NAME}",
        "DomainName": "${BUCKET_NAME}.s3.${REGION}.amazonaws.com",
        "OriginAccessControlId": "$OAC_ID",
        "S3OriginConfig": {
          "OriginAccessIdentity": ""
        }
      }
    ]
  },
  "DefaultCacheBehavior": {
    "TargetOriginId": "S3-${BUCKET_NAME}",
    "ViewerProtocolPolicy": "redirect-to-https",
    "AllowedMethods": {
      "Quantity": 2,
      "Items": ["GET", "HEAD"],
      "CachedMethods": {
        "Quantity": 2,
        "Items": ["GET", "HEAD"]
      }
    },
    "CachePolicyId": "658327ea-f89d-4fab-a63d-7e88639e58f6",
    "Compress": true
  },
  "CustomErrorResponses": {
    "Quantity": 1,
    "Items": [
      {
        "ErrorCode": 403,
        "ResponsePagePath": "/index.html",
        "ResponseCode": "200",
        "ErrorCachingMinTTL": 0
      }
    ]
  },
  "PriceClass": "PriceClass_200",
  "HttpVersion": "http2and3"
}
DISTCONFIG
    )
    local result
    result=$(aws_cmd cloudfront create-distribution \
      --distribution-config "$dist_config" \
      --output json)
    # Parse ids from the JSON response with python3 (no jq dependency).
    DISTRIBUTION_ID=$(echo "$result" | python3 -c "import sys,json; print(json.load(sys.stdin)['Distribution']['Id'])")
    CLOUDFRONT_DOMAIN=$(echo "$result" | python3 -c "import sys,json; print(json.load(sys.stdin)['Distribution']['DomainName'])")
    info "CloudFront distribution created: $DISTRIBUTION_ID"
    info "Domain: $CLOUDFRONT_DOMAIN"
  fi
  # Set S3 bucket policy to allow CloudFront OAC — read access restricted to
  # this specific distribution via the SourceArn condition.
  local account_id
  account_id=$(aws_cmd sts get-caller-identity --output text --query 'Account')
  local bucket_policy
  bucket_policy=$(cat <<POLICY
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "AllowCloudFrontServicePrincipalReadOnly",
      "Effect": "Allow",
      "Principal": {
        "Service": "cloudfront.amazonaws.com"
      },
      "Action": "s3:GetObject",
      "Resource": "arn:aws:s3:::${BUCKET_NAME}/*",
      "Condition": {
        "StringEquals": {
          "AWS:SourceArn": "arn:aws:cloudfront::${account_id}:distribution/${DISTRIBUTION_ID}"
        }
      }
    }
  ]
}
POLICY
  )
  aws_cmd s3api put-bucket-policy --bucket "$BUCKET_NAME" --policy "$bucket_policy"
  info "Bucket policy updated for CloudFront OAC"
  save_state
}
# ── Sync Files to S3 ────────────────────────────────────
# sync_to_s3 — upload the built frontend to S3 with cache headers suited to
# each asset class.
#
# Fix vs. original: HTML was uploaded FIRST, before the hashed _next/ chunks
# it references, opening a window where fresh pages 404 on their own assets.
# Upload order is now: immutable _next/ assets → other static files →
# HTML last. `aws s3 sync --delete` respects --exclude/--include filters, so
# each pass only deletes stale objects of its own asset class.
sync_to_s3() {
  step "Syncing files to S3"
  if [ ! -d "$OUT_DIR" ]; then
    error "Frontend build output not found: $OUT_DIR\n Run with --build flag or run 'cd frontend && pnpm build' first."
  fi
  # 1. _next/ assets — immutable long cache (filenames are content-hashed)
  info "Uploading _next/ assets (immutable cache)..."
  aws_cmd s3 sync "$OUT_DIR/_next" "s3://$BUCKET_NAME/_next" \
    --cache-control "public, max-age=31536000, immutable"
  # 2. Everything except HTML and _next/ — 1 day cache
  info "Uploading other static assets..."
  aws_cmd s3 sync "$OUT_DIR" "s3://$BUCKET_NAME" \
    --exclude "*.html" \
    --exclude "_next/*" \
    --cache-control "public, max-age=86400" \
    --delete
  # 3. HTML files last — no cache, so clients always revalidate entry pages
  info "Uploading HTML files (no cache)..."
  aws_cmd s3 sync "$OUT_DIR" "s3://$BUCKET_NAME" \
    --exclude "*" \
    --include "*.html" \
    --cache-control "public, max-age=0, must-revalidate" \
    --content-type "text/html" \
    --delete
  info "Sync complete"
}
# ── Invalidate CloudFront Cache ──────────────────────────
# invalidate_cache — flush the entire CloudFront edge cache ("/*") so the new
# deployment is visible immediately. No-op when no distribution id is known.
invalidate_cache() {
  step "Invalidating CloudFront cache"
  if [ -z "${DISTRIBUTION_ID:-}" ] || [ "$DISTRIBUTION_ID" = "None" ]; then
    warn "No CloudFront distribution ID found. Skipping invalidation."
    return
  fi
  local inv_id
  inv_id=$(aws_cmd cloudfront create-invalidation \
    --distribution-id "$DISTRIBUTION_ID" \
    --paths "/*" \
    --output text --query 'Invalidation.Id')
  info "Invalidation created: $inv_id"
  info "Note: CloudFront invalidation may take 1-2 minutes to propagate."
}
# ── Main ─────────────────────────────────────────────────
DO_BUILD=false
DO_SETUP=false
# Parse command-line flags; unknown flags abort with a usage hint.
for arg in "$@"; do
  case "$arg" in
    --build) DO_BUILD=true ;;
    --setup) DO_SETUP=true ;;
    --help) show_help; exit 0 ;;
    *) error "Unknown option: $arg. Use --help for usage." ;;
  esac
done
echo ""
info "Edge AI Platform — AWS Frontend Deployment"
echo ""
info "Bucket: $BUCKET_NAME"
info "Region: $REGION"
echo ""
# Load saved state from a previous --setup run; empty defaults keep
# `set -u` happy when no state file exists yet.
DISTRIBUTION_ID=""
OAC_ID=""
CLOUDFRONT_DOMAIN=""
load_state
# Check AWS CLI
check_aws_cli
# Build if requested
if [ "$DO_BUILD" = true ]; then
  build_frontend
fi
# Setup infrastructure if requested
if [ "$DO_SETUP" = true ]; then
  setup_s3_bucket
  setup_cloudfront
fi
# Sync files
sync_to_s3
# Invalidate cache
invalidate_cache
# Done — print the CloudFront URL when known, otherwise point at --setup.
echo ""
echo -e "${GREEN}=== Deployment complete! ===${NC}"
echo ""
if [ -n "${CLOUDFRONT_DOMAIN:-}" ] && [ "$CLOUDFRONT_DOMAIN" != "None" ]; then
  info "URL: https://$CLOUDFRONT_DOMAIN"
  echo ""
  info "Next steps:"
  info " 1. Open the URL above in your browser"
  info " 2. Go to Settings and set the Backend URL to your local server"
  info " (e.g., http://<your-local-ip>:3721)"
  info " 3. Start your local edge-ai-server and connect!"
else
  info "Files uploaded to s3://$BUCKET_NAME"
  info "Run with --setup flag to create CloudFront distribution."
fi
echo ""
481
scripts/deploy-ec2.sh Executable file
View File

@ -0,0 +1,481 @@
#!/usr/bin/env bash
set -euo pipefail
# Edge AI Platform — EC2 Frontend + Relay Deployment (nginx)
#
# Builds the frontend locally, uploads to EC2 via scp, and configures nginx.
# Optionally deploys the relay-server binary for cloud-to-local tunnelling.
#
# Usage:
# bash scripts/deploy-ec2.sh user@host # deploy frontend only
# bash scripts/deploy-ec2.sh user@host --build # build + deploy frontend
# bash scripts/deploy-ec2.sh user@host --relay # deploy frontend + relay server (multi-tenant)
# bash scripts/deploy-ec2.sh user@host --key ~/.ssh/id.pem # specify SSH key
# bash scripts/deploy-ec2.sh --help
#
# First-time: installs nginx + configures SPA routing automatically.
# Subsequent runs: uploads files + reloads nginx.
# Paths are resolved relative to this script so it works from any cwd.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
FRONTEND_DIR="$PROJECT_ROOT/frontend"
OUT_DIR="$FRONTEND_DIR/out"
RELAY_BINARY="$PROJECT_ROOT/dist/relay-server"
# ANSI colors for log output.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
CYAN='\033[0;36m'
NC='\033[0m'
# Logging helpers; error() terminates the script with exit status 1.
info() { echo -e "${GREEN}[INFO]${NC} $*"; }
warn() { echo -e "${YELLOW}[WARN]${NC} $*"; }
error() { echo -e "${RED}[ERROR]${NC} $*" >&2; exit 1; }
step() { echo -e "\n${CYAN}=== $* ===${NC}\n"; }
# Print CLI usage. NOTE(review): the parser below also accepts a
# --relay-token flag that is not listed here and appears unused by the
# relay exec command (multi-tenant mode) — confirm whether it is legacy.
show_help() {
cat <<'HELP'
Edge AI Platform — EC2 Frontend + Relay Deployment
Usage:
bash scripts/deploy-ec2.sh <user@host> [OPTIONS]
Arguments:
user@host SSH destination (e.g., ec2-user@1.2.3.4, ubuntu@myhost)
Options:
--key <path> Path to SSH private key (e.g., ~/.ssh/mykey.pem)
--build Build the frontend before deploying
--relay Deploy relay-server for cloud-to-local tunnelling (multi-tenant)
--relay-port <port> Relay server listen port (default: 3800)
--port <port> nginx listening port (default: 80)
--help Show this help message
Examples:
# Frontend only
bash scripts/deploy-ec2.sh ec2-user@54.199.1.2 --key ~/.ssh/mykey.pem --build
# Frontend + relay server (multi-tenant, no token needed)
bash scripts/deploy-ec2.sh ec2-user@54.199.1.2 --key ~/.ssh/mykey.pem --build --relay
# Update relay-server only (no frontend rebuild)
bash scripts/deploy-ec2.sh ec2-user@54.199.1.2 --key ~/.ssh/mykey.pem --relay
# Connect local server to the relay:
# ./edge-ai-server --relay-url ws://ec2-host:3800/tunnel/connect # token auto-generated from hardware ID
HELP
}
# ── Parse Arguments ──────────────────────────────────────
SSH_HOST=""
SSH_KEY=""
DO_BUILD=false
DO_RELAY=false
NGINX_PORT=80
RELAY_PORT=3800
RELAY_TOKEN=""
while [ $# -gt 0 ]; do
case "$1" in
--key) SSH_KEY="$2"; shift 2 ;;
--build) DO_BUILD=true; shift ;;
--relay) DO_RELAY=true; shift ;;
# NOTE(review): --relay-token is parsed and forwarded to the remote
# env but the remote systemd unit never uses it — likely vestigial.
--relay-token) RELAY_TOKEN="$2"; shift 2 ;;
--relay-port) RELAY_PORT="$2"; shift 2 ;;
--port) NGINX_PORT="$2"; shift 2 ;;
--help) show_help; exit 0 ;;
-*) error "Unknown option: $1. Use --help for usage." ;;
*)
# First bare argument is the SSH destination; any further one is an error.
if [ -z "$SSH_HOST" ]; then
SSH_HOST="$1"
else
error "Unexpected argument: $1"
fi
shift ;;
esac
done
if [ -z "$SSH_HOST" ]; then
error "Missing SSH destination. Usage: bash scripts/deploy-ec2.sh user@host [--key key.pem] [--build] [--relay]"
fi
# Build SSH command
# SSH_OPTS is expanded unquoted on purpose so each option word-splits;
# this means SSH key paths containing spaces are not supported.
SSH_OPTS="-o StrictHostKeyChecking=accept-new -o ConnectTimeout=10"
if [ -n "$SSH_KEY" ]; then
SSH_OPTS="$SSH_OPTS -i $SSH_KEY"
fi
# Run a command on the remote host.
ssh_cmd() {
# shellcheck disable=SC2086
ssh $SSH_OPTS "$SSH_HOST" "$@"
}
# Copy files to/from the remote host with the same SSH options.
scp_cmd() {
# shellcheck disable=SC2086
scp $SSH_OPTS "$@"
}
# Count total steps (an extra step is shown when deploying the relay).
TOTAL_STEPS=4
if [ "$DO_RELAY" = true ]; then
TOTAL_STEPS=5
fi
# ── Main ─────────────────────────────────────────────────
echo ""
info "Edge AI Platform — EC2 Deployment"
echo ""
info "Host: $SSH_HOST"
info "Port: $NGINX_PORT"
if [ "$DO_RELAY" = true ]; then
info "Relay: enabled (port $RELAY_PORT)"
fi
echo ""
# 1. Build frontend if requested
if [ "$DO_BUILD" = true ]; then
step "1/$TOTAL_STEPS Building frontend"
if ! command -v pnpm &>/dev/null; then
error "pnpm not found. Install: npm install -g pnpm"
fi
(cd "$FRONTEND_DIR" && pnpm build)
info "Frontend built: $OUT_DIR"
else
step "1/$TOTAL_STEPS Checking build output"
fi
if [ ! -d "$OUT_DIR" ]; then
error "Frontend build output not found: $OUT_DIR\n Run with --build flag or run 'cd frontend && pnpm build' first."
fi
# Build relay-server if needed (only when --relay and the binary is absent)
if [ "$DO_RELAY" = true ] && [ ! -f "$RELAY_BINARY" ]; then
info "Building relay-server binary..."
(cd "$PROJECT_ROOT" && make build-relay)
fi
# 2. Test SSH connection
step "2/$TOTAL_STEPS Connecting to EC2"
if ! ssh_cmd "echo 'SSH connection OK'" 2>/dev/null; then
error "Cannot connect to $SSH_HOST. Check your host and key."
fi
info "SSH connection successful"
# Detect OS on remote
REMOTE_OS=$(ssh_cmd "cat /etc/os-release 2>/dev/null | head -1 || echo unknown")
info "Remote OS: $REMOTE_OS"
# Detect package manager
PKG_MGR=$(ssh_cmd "command -v apt-get >/dev/null 2>&1 && echo apt || (command -v yum >/dev/null 2>&1 && echo yum || (command -v dnf >/dev/null 2>&1 && echo dnf || echo unknown))")
info "Package manager: $PKG_MGR"
# 3. Install and configure nginx
step "3/$TOTAL_STEPS Setting up nginx on EC2"
DEPLOY_DIR="/var/www/edge-ai-platform"
# The quoted 'REMOTE_SETUP' delimiter sends the script below verbatim;
# config values are passed as env vars on the remote command line instead.
ssh_cmd "DEPLOY_DIR=$DEPLOY_DIR NGINX_PORT=$NGINX_PORT RELAY_PORT=$RELAY_PORT DO_RELAY=$DO_RELAY PKG_MGR=$PKG_MGR bash -s" <<'REMOTE_SETUP'
set -euo pipefail
# Install nginx if not present
if ! command -v nginx &>/dev/null; then
echo "[INFO] Installing nginx..."
if [ "$PKG_MGR" = "apt" ]; then
sudo apt-get update -qq
sudo apt-get install -y -qq nginx
elif [ "$PKG_MGR" = "yum" ]; then
sudo yum install -y nginx
elif [ "$PKG_MGR" = "dnf" ]; then
sudo dnf install -y nginx
else
echo "[ERROR] Unknown package manager. Install nginx manually."
exit 1
fi
echo "[INFO] nginx installed"
else
echo "[INFO] nginx already installed"
fi
# Create deploy directory
sudo mkdir -p "$DEPLOY_DIR"
sudo chown "$(whoami):$(id -gn)" "$DEPLOY_DIR"
# Build nginx config
if [ "$DO_RELAY" = "true" ]; then
# With relay proxy rules
sudo tee /etc/nginx/conf.d/edge-ai-platform.conf > /dev/null <<NGINX_CONF
server {
listen ${NGINX_PORT};
server_name _;
root ${DEPLOY_DIR};
index index.html;
# API requests → relay server
location /api/ {
proxy_pass http://127.0.0.1:${RELAY_PORT};
proxy_buffering off;
proxy_read_timeout 3600s;
proxy_send_timeout 3600s;
client_max_body_size 100M;
proxy_set_header Host \$host;
proxy_set_header X-Real-IP \$remote_addr;
}
# WebSocket connections → relay server
location /ws/ {
proxy_pass http://127.0.0.1:${RELAY_PORT};
proxy_http_version 1.1;
proxy_set_header Upgrade \$http_upgrade;
proxy_set_header Connection "upgrade";
proxy_read_timeout 3600s;
proxy_set_header Host \$host;
}
# Tunnel endpoint (edge-ai-server ↔ relay)
location /tunnel/ {
proxy_pass http://127.0.0.1:${RELAY_PORT};
proxy_http_version 1.1;
proxy_set_header Upgrade \$http_upgrade;
proxy_set_header Connection "upgrade";
proxy_read_timeout 86400s;
}
# Relay status
location /relay/ {
proxy_pass http://127.0.0.1:${RELAY_PORT};
}
# Next.js hashed assets: cache forever
location /_next/static/ {
expires max;
add_header Cache-Control "public, max-age=31536000, immutable";
}
# Other static assets: 1 day cache
location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg|woff|woff2|ttf|eot)\$ {
expires 1d;
add_header Cache-Control "public, max-age=86400";
}
# Next.js dynamic route fallbacks — serve the placeholder HTML for each
# dynamic segment so that client-side hydration renders the correct page.
location ~ ^/devices/[^/]+(/?)$ {
try_files \$uri /devices/_/index.html;
}
location ~ ^/models/[^/]+(/?)$ {
try_files \$uri /models/_/index.html;
}
location ~ ^/workspace/cluster/[^/]+(/?)$ {
try_files \$uri /workspace/cluster/_/index.html;
}
location ~ ^/workspace/[^/]+(/?)$ {
try_files \$uri /workspace/_/index.html;
}
# SPA fallback: all other routes serve index.html
location / {
try_files \$uri \$uri/ /index.html;
}
# Gzip compression
gzip on;
gzip_types text/plain text/css application/json application/javascript text/xml application/xml text/javascript image/svg+xml;
gzip_min_length 1000;
}
NGINX_CONF
else
# Frontend only (no relay)
sudo tee /etc/nginx/conf.d/edge-ai-platform.conf > /dev/null <<NGINX_CONF
server {
listen ${NGINX_PORT};
server_name _;
root ${DEPLOY_DIR};
index index.html;
# Next.js hashed assets: cache forever
location /_next/static/ {
expires max;
add_header Cache-Control "public, max-age=31536000, immutable";
}
# Other static assets: 1 day cache
location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg|woff|woff2|ttf|eot)\$ {
expires 1d;
add_header Cache-Control "public, max-age=86400";
}
# Next.js dynamic route fallbacks
location ~ ^/devices/[^/]+(/?)$ {
try_files \$uri /devices/_/index.html;
}
location ~ ^/models/[^/]+(/?)$ {
try_files \$uri /models/_/index.html;
}
location ~ ^/workspace/cluster/[^/]+(/?)$ {
try_files \$uri /workspace/cluster/_/index.html;
}
location ~ ^/workspace/[^/]+(/?)$ {
try_files \$uri /workspace/_/index.html;
}
# SPA fallback: all routes serve index.html
location / {
try_files \$uri \$uri/ /index.html;
}
# Gzip compression
gzip on;
gzip_types text/plain text/css application/json application/javascript text/xml application/xml text/javascript image/svg+xml;
gzip_min_length 1000;
}
NGINX_CONF
fi
# Disable default site if it exists (avoid port conflict)
if [ -f /etc/nginx/sites-enabled/default ]; then
sudo rm -f /etc/nginx/sites-enabled/default
fi
# Comment out default server block in nginx.conf if present (Amazon Linux)
if grep -q "^[[:space:]]*server {" /etc/nginx/nginx.conf 2>/dev/null; then
sudo sed -i '/^[[:space:]]*server {/,/^[[:space:]]*}/s/^/#/' /etc/nginx/nginx.conf
echo "[INFO] Commented out default server block in nginx.conf"
fi
# Amazon Linux: ensure nginx starts on boot
sudo systemctl enable nginx 2>/dev/null || true
# Test nginx config
sudo nginx -t
echo "[INFO] nginx configured"
REMOTE_SETUP
info "nginx configured on EC2"
# 4. Upload frontend files
step "4/$TOTAL_STEPS Uploading frontend files"
# Create a tarball for faster upload (one scp instead of thousands of files)
TAR_FILE=$(mktemp /tmp/edge-ai-frontend-XXXXX.tar.gz)
(cd "$OUT_DIR" && tar czf "$TAR_FILE" .)
TAR_SIZE=$(du -h "$TAR_FILE" | cut -f1)
info "Archive size: $TAR_SIZE"
info "Uploading to $SSH_HOST:$DEPLOY_DIR ..."
scp_cmd "$TAR_FILE" "$SSH_HOST:/tmp/edge-ai-frontend.tar.gz"
rm -f "$TAR_FILE"
# Extract on remote and restart nginx
ssh_cmd "DEPLOY_DIR=$DEPLOY_DIR bash -s" <<'REMOTE_DEPLOY'
set -euo pipefail
# Clear old files and extract new
rm -rf "${DEPLOY_DIR:?}"/*
tar xzf /tmp/edge-ai-frontend.tar.gz -C "$DEPLOY_DIR"
rm -f /tmp/edge-ai-frontend.tar.gz
# Restart nginx
sudo systemctl reload nginx 2>/dev/null || sudo systemctl restart nginx
echo "[INFO] Files deployed and nginx reloaded"
REMOTE_DEPLOY
info "Upload complete"
# 5. Deploy relay-server (if --relay)
if [ "$DO_RELAY" = true ]; then
step "5/$TOTAL_STEPS Deploying relay-server"
if [ ! -f "$RELAY_BINARY" ]; then
error "Relay binary not found at $RELAY_BINARY. Run 'make build-relay' first."
fi
info "Uploading relay-server binary..."
scp_cmd "$RELAY_BINARY" "$SSH_HOST:/tmp/relay-server"
# NOTE(review): RELAY_TOKEN is exported to the remote shell but the
# generated systemd unit below never references it — confirm if legacy.
ssh_cmd "RELAY_PORT=$RELAY_PORT RELAY_TOKEN=$RELAY_TOKEN bash -s" <<'REMOTE_RELAY'
set -euo pipefail
RELAY_DIR="/opt/edge-ai-relay"
RELAY_BIN="$RELAY_DIR/relay-server"
# Stop existing service if running
sudo systemctl stop edge-ai-relay 2>/dev/null || true
# Install binary
sudo mkdir -p "$RELAY_DIR"
sudo mv /tmp/relay-server "$RELAY_BIN"
sudo chmod +x "$RELAY_BIN"
# Build relay-server command with flags (multi-tenant: no --token needed)
RELAY_EXEC="$RELAY_BIN --port $RELAY_PORT"
# Create systemd service
sudo tee /etc/systemd/system/edge-ai-relay.service > /dev/null <<SERVICE
[Unit]
Description=Edge AI Relay Server
After=network.target
[Service]
Type=simple
ExecStart=$RELAY_EXEC
Restart=on-failure
RestartSec=5
StandardOutput=journal
StandardError=journal
[Install]
WantedBy=multi-user.target
SERVICE
# Enable and start
sudo systemctl daemon-reload
sudo systemctl enable edge-ai-relay
sudo systemctl start edge-ai-relay
echo "[INFO] relay-server deployed and started"
echo "[INFO] Check status: sudo systemctl status edge-ai-relay"
echo "[INFO] View logs: sudo journalctl -u edge-ai-relay -f"
REMOTE_RELAY
info "relay-server deployed and running on port $RELAY_PORT"
fi
# Get public IP from EC2 instance metadata; fall back to the SSH host string
# when the metadata service is unreachable (e.g., non-EC2 hosts).
PUBLIC_IP=$(ssh_cmd "curl -s --connect-timeout 3 http://169.254.169.254/latest/meta-data/public-ipv4 2>/dev/null || echo '$SSH_HOST'" | tail -1)
# Done — print summary and follow-up instructions.
echo ""
echo -e "${GREEN}=== Deployment complete! ===${NC}"
echo ""
if [ "$NGINX_PORT" = "80" ]; then
info "URL: http://$PUBLIC_IP"
else
info "URL: http://$PUBLIC_IP:$NGINX_PORT"
fi
echo ""
info "Next steps:"
info " 1. Ensure EC2 Security Group allows inbound port $NGINX_PORT (HTTP)"
info " 2. Open the URL above in your browser"
if [ "$DO_RELAY" = true ]; then
echo ""
info "Relay tunnel:"
info " Connect your local edge-ai-server to the cloud relay:"
info " ./edge-ai-server --relay-url ws://$PUBLIC_IP:$RELAY_PORT/tunnel/connect"
echo ""
info " Check relay status: curl http://$PUBLIC_IP/relay/status"
else
echo ""
info " 3. Go to Settings and set the Backend URL to your local edge-ai-server"
info " (e.g., http://<your-local-ip>:3721)"
fi
echo ""
# BUG FIX: DO_RELAY always holds "true" or "false" (never empty), so the
# previous ${DO_RELAY:+ --relay} expansion appended " --relay" even for
# frontend-only deployments. Build the suffix explicitly instead.
RELAY_SUFFIX=""
if [ "$DO_RELAY" = true ]; then
RELAY_SUFFIX=" --relay"
fi
info "To update later:"
info " bash scripts/deploy-ec2.sh $SSH_HOST${SSH_KEY:+ --key $SSH_KEY} --build$RELAY_SUFFIX"
echo ""

282
scripts/install.ps1 Normal file
View File

@ -0,0 +1,282 @@
# Edge AI Platform Installer for Windows
# Usage: irm https://gitea.innovedus.com/.../install.ps1 | iex
#
# Installs:
# 1. Edge AI Platform binary + data files
# 2. Python venv with pyusb (for Kneron hardware)
# 3. Optional: ffmpeg, yt-dlp (shows install hints)
#
# Uninstall:
# Remove-Item -Recurse -Force "$env:LOCALAPPDATA\EdgeAIPlatform"
# # Then remove EdgeAIPlatform from your PATH in System Environment Variables
$ErrorActionPreference = "Stop"
# User-tunable via environment variables; sensible defaults otherwise.
$Version = if ($env:EDGE_AI_VERSION) { $env:EDGE_AI_VERSION } else { "latest" }
$InstallDir = if ($env:EDGE_AI_INSTALL_DIR) { $env:EDGE_AI_INSTALL_DIR } else { "$env:LOCALAPPDATA\EdgeAIPlatform" }
$VenvDir = "$InstallDir\venv"
$GiteaServer = if ($env:GITEA_SERVER) { $env:GITEA_SERVER } else { "https://gitea.innovedus.com" }
$Repo = "warrenchen/web_academy_prototype"
# Logging helpers; Write-Err terminates the script with exit code 1.
function Write-Info($msg) { Write-Host "[INFO] $msg" -ForegroundColor Green }
function Write-Warn($msg) { Write-Host "[WARN] $msg" -ForegroundColor Yellow }
function Write-Step($msg) { Write-Host "`n=== $msg ===`n" -ForegroundColor Cyan }
function Write-Err($msg) { Write-Host "[ERROR] $msg" -ForegroundColor Red; exit 1 }
Write-Host ""
Write-Info "Edge AI Platform Installer"
Write-Host ""
# ── Step 1/5: Download and install binary ──
Write-Step "1/5 Installing Edge AI Platform"
# Resolve latest version via the Gitea releases API
if ($Version -eq "latest") {
$apiUrl = "$GiteaServer/api/v1/repos/$Repo/releases/latest"
try {
$release = Invoke-RestMethod -Uri $apiUrl -UseBasicParsing
$Version = $release.tag_name
} catch {
Write-Err "Failed to resolve latest version: $_"
}
}
Write-Info "Version: $Version"
# Download (only amd64 Windows builds are published — see .goreleaser.yaml)
$arch = "amd64"
$archiveName = "edge-ai-platform_${Version}_windows_${arch}.zip"
$downloadUrl = "$GiteaServer/$Repo/releases/download/$Version/$archiveName"
$tmpDir = Join-Path $env:TEMP "edge-ai-install-$(Get-Random)"
New-Item -ItemType Directory -Path $tmpDir -Force | Out-Null
$zipPath = Join-Path $tmpDir $archiveName
Write-Info "Downloading from: $downloadUrl"
try {
Invoke-WebRequest -Uri $downloadUrl -OutFile $zipPath -UseBasicParsing
} catch {
Write-Err "Download failed. Check version and URL: $_"
}
# Extract
Write-Info "Extracting to $InstallDir ..."
if (Test-Path $InstallDir) {
# Preserve venv if it exists (avoids re-creating the Python environment
# on every upgrade; moved aside, restored after extraction below)
$preserveVenv = $false
if (Test-Path $VenvDir) {
$venvBackup = Join-Path $env:TEMP "edge-ai-venv-backup-$(Get-Random)"
Move-Item -Path $VenvDir -Destination $venvBackup -Force
$preserveVenv = $true
}
Remove-Item -Recurse -Force $InstallDir
}
New-Item -ItemType Directory -Path $InstallDir -Force | Out-Null
Expand-Archive -Path $zipPath -DestinationPath $tmpDir -Force
# Move files from the extracted subfolder
$extractedDir = Get-ChildItem -Path $tmpDir -Directory | Where-Object { $_.Name -like "edge-ai-platform_*" } | Select-Object -First 1
if ($extractedDir) {
Copy-Item -Path "$($extractedDir.FullName)\*" -Destination $InstallDir -Recurse -Force
} else {
Copy-Item -Path "$tmpDir\*" -Destination $InstallDir -Recurse -Force
}
# Restore venv if backed up ($preserveVenv is $null/falsy on fresh installs,
# so the short-circuit keeps $venvBackup from being dereferenced)
if ($preserveVenv -and (Test-Path $venvBackup)) {
Move-Item -Path $venvBackup -Destination $VenvDir -Force
Write-Info "Restored existing Python venv"
}
# Add to PATH (user scope)
$currentPath = [Environment]::GetEnvironmentVariable("PATH", "User")
if ($currentPath -notlike "*$InstallDir*") {
[Environment]::SetEnvironmentVariable("PATH", "$currentPath;$InstallDir", "User")
Write-Info "Added $InstallDir to PATH (restart terminal to take effect)"
} else {
Write-Info "$InstallDir already in PATH"
}
# Cleanup temp
Remove-Item -Recurse -Force $tmpDir
# ── Step 2/5: Install libusb (Windows) ──
Write-Step "2/5 Setting up USB driver"
# On Windows, libusb is typically bundled or installed via Zadig.
# Probe the usual locations for libusb-1.0.dll; the first hit wins.
$candidatePaths = @(
"$env:SystemRoot\System32\libusb-1.0.dll",
"$env:SystemRoot\SysWOW64\libusb-1.0.dll",
"$InstallDir\libusb-1.0.dll"
)
$libusbHit = $candidatePaths | Where-Object { Test-Path $_ } | Select-Object -First 1
if ($libusbHit) {
Write-Info "libusb: found"
} else {
Write-Warn "libusb: NOT FOUND"
Write-Warn " Kneron USB devices require a libusb-compatible driver."
Write-Warn " Option 1: Download Zadig (https://zadig.akeo.ie/) and install WinUSB driver"
Write-Warn " Option 2: Install libusb via vcpkg or manually copy libusb-1.0.dll"
}
# ── Step 3/5: Setup Python venv + pyusb ──
Write-Step "3/5 Setting up Kneron hardware environment"
$pythonCmd = $null
# Try python3 first, then python (Windows often uses 'python' for Python 3)
if (Get-Command python3 -ErrorAction SilentlyContinue) {
$pythonCmd = "python3"
} elseif (Get-Command python -ErrorAction SilentlyContinue) {
$pyVer = & python --version 2>&1
if ($pyVer -match "Python 3") {
$pythonCmd = "python"
}
}
if (-not $pythonCmd) {
Write-Warn "Python 3 not found. Skipping Kneron hardware setup."
Write-Warn " Install: winget install Python.Python.3.12"
} else {
$pyVersion = & $pythonCmd --version 2>&1
Write-Info "Python: $pyVersion"
$venvPython = "$VenvDir\Scripts\python.exe"
$venvPip = "$VenvDir\Scripts\pip.exe"
# Check if venv already set up with pyusb (import probe; failures are
# benign and simply trigger a rebuild of the venv below)
$venvReady = $false
if ((Test-Path $venvPython) -and (Test-Path $venvPip)) {
try {
& $venvPython -c "import usb.core" 2>$null
if ($LASTEXITCODE -eq 0) { $venvReady = $true }
} catch {}
}
if ($venvReady) {
Write-Info "Python venv already set up: $VenvDir"
} else {
Write-Info "Creating Python venv: $VenvDir ..."
& $pythonCmd -m venv $VenvDir
if ($LASTEXITCODE -ne 0) {
Write-Warn "Failed to create Python venv. Skipping Kneron setup."
} else {
Write-Info "Installing pyusb ..."
& $venvPip install --quiet pyusb
if ($LASTEXITCODE -ne 0) {
Write-Warn "Failed to install pyusb."
} else {
Write-Info "Python environment ready"
}
}
}
}
# ── Step 4/5: Check environment ──
Write-Step "4/5 Checking environment"
# Check optional dependencies (informational only — never fails the install)
if (Get-Command ffmpeg -ErrorAction SilentlyContinue) {
$ffmpegVer = & ffmpeg -version 2>&1 | Select-Object -First 1
Write-Info "ffmpeg: $ffmpegVer"
} else {
Write-Warn "ffmpeg: NOT FOUND"
Write-Warn " Camera capture requires ffmpeg."
Write-Warn " Install: winget install Gyan.FFmpeg"
}
if (Get-Command yt-dlp -ErrorAction SilentlyContinue) {
$ytdlpVer = & yt-dlp --version 2>&1
Write-Info "yt-dlp: $ytdlpVer"
} else {
Write-Warn "yt-dlp: NOT FOUND (optional, for YouTube URL support)"
Write-Warn " Install: winget install yt-dlp"
}
# Detect Kneron hardware using the bundled detection script
Write-Host ""
Write-Info "Detecting Kneron hardware..."
$detectScript = "$InstallDir\scripts\kneron_detect.py"
$venvPythonPath = "$VenvDir\Scripts\python.exe"
if ((Test-Path $venvPythonPath) -and (Test-Path $detectScript)) {
try {
& $venvPythonPath $detectScript
} catch {
Write-Warn "Hardware detection failed: $_"
}
} else {
Write-Warn "Skipping hardware detection (Python venv or detection script not available)"
}
# ── Step 5/5: Setup auto-restart service ──
Write-Step "5/5 Setting up auto-restart service"
$TaskName = "EdgeAIPlatformServer"
$BinPath = Join-Path $InstallDir "edge-ai-server.exe"
$LogDir = Join-Path $InstallDir "logs"
New-Item -ItemType Directory -Path $LogDir -Force | Out-Null
# Remove existing scheduled task if present (idempotent reinstall)
try {
Unregister-ScheduledTask -TaskName $TaskName -Confirm:$false -ErrorAction SilentlyContinue
} catch {}
# Create a scheduled task that:
# 1. Runs at user logon
# 2. Restarts on failure (up to 3 times with 5-second delay)
$Action = New-ScheduledTaskAction -Execute $BinPath -WorkingDirectory $InstallDir
$Trigger = New-ScheduledTaskTrigger -AtLogOn
# Zero-length ExecutionTimeLimit disables the default 3-day run limit.
$Settings = New-ScheduledTaskSettingsSet `
-RestartCount 3 `
-RestartInterval (New-TimeSpan -Seconds 5) `
-AllowStartIfOnBatteries `
-DontStopIfGoingOnBatteries `
-StartWhenAvailable `
-ExecutionTimeLimit (New-TimeSpan -Days 0)
try {
Register-ScheduledTask -TaskName $TaskName -Action $Action -Trigger $Trigger -Settings $Settings -Description "Edge AI Platform Server - auto-starts and restarts on failure" -Force | Out-Null
Write-Info "Scheduled task installed: $TaskName"
Write-Info " Server will auto-start on logon and restart on crash (up to 3 times)."
Write-Info " Logs: $LogDir"
Write-Info ""
Write-Info " Manual controls:"
Write-Info " Start-ScheduledTask -TaskName $TaskName # start"
Write-Info " Stop-ScheduledTask -TaskName $TaskName # stop"
Write-Info " Unregister-ScheduledTask -TaskName $TaskName # remove"
# Start the server now
Start-ScheduledTask -TaskName $TaskName
Write-Info "Server started."
} catch {
Write-Warn "Failed to create scheduled task: $_"
Write-Warn "You can start the server manually: $BinPath"
}
# ── Done ──
Write-Host ""
Write-Host "=== Installation complete! ===" -ForegroundColor Green
Write-Host ""
Write-Info "Installed to: $InstallDir"
Write-Info "Server is running and will auto-restart on crash."
Write-Info ""
Write-Info "Open: http://127.0.0.1:3721"
Write-Host ""
Write-Info "Uninstall:"
Write-Info " Unregister-ScheduledTask -TaskName $TaskName -Confirm:`$false"
Write-Info " Remove-Item -Recurse -Force `"$InstallDir`""
Write-Info " # Remove EdgeAIPlatform from PATH in System Environment Variables"

344
scripts/install.sh Executable file
View File

@ -0,0 +1,344 @@
#!/usr/bin/env bash
set -euo pipefail
# Edge AI Platform Installer (macOS / Linux)
# Usage: curl -fsSL https://gitea.innovedus.com/.../install.sh | bash
#
# Installs:
# 1. Edge AI Platform binary + data files
# 2. Python venv with pyusb (for Kneron hardware)
# 3. Optional: ffmpeg, yt-dlp (prompts user)
#
# Uninstall:
# rm -rf ~/.edge-ai-platform
# sudo rm -f /usr/local/bin/edge-ai-server
# User-tunable via environment variables; sensible defaults otherwise.
VERSION="${EDGE_AI_VERSION:-latest}"
INSTALL_DIR="${EDGE_AI_INSTALL_DIR:-$HOME/.edge-ai-platform}"
VENV_DIR="$INSTALL_DIR/venv"
BIN_LINK="/usr/local/bin/edge-ai-server"
GITEA_SERVER="${GITEA_SERVER:-https://gitea.innovedus.com}"
REPO="warrenchen/web_academy_prototype"
# ANSI colors for log output.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
CYAN='\033[0;36m'
NC='\033[0m'
# Logging helpers; error() terminates the script with exit status 1.
info() { echo -e "${GREEN}[INFO]${NC} $*"; }
warn() { echo -e "${YELLOW}[WARN]${NC} $*"; }
error() { echo -e "${RED}[ERROR]${NC} $*" >&2; exit 1; }
step() { echo -e "\n${CYAN}=== $* ===${NC}\n"; }
# Map `uname` output to the release-artifact naming scheme ("os_arch",
# e.g. "darwin_arm64"); exits via error() on unsupported platforms.
detect_platform() {
  local kernel machine
  kernel="$(uname -s | tr '[:upper:]' '[:lower:]')"
  machine="$(uname -m)"
  case "$kernel" in
    darwin|linux) ;;  # already canonical
    *) error "Unsupported OS: $kernel" ;;
  esac
  case "$machine" in
    x86_64) machine="amd64" ;;
    aarch64|arm64) machine="arm64" ;;
    *) error "Unsupported architecture: $machine" ;;
  esac
  echo "${kernel}_${machine}"
}
# Resolve VERSION="latest" to a concrete release tag via the Gitea API.
# NOTE(review): the grep-based JSON parse assumes compact output
# ("tag_name":"…" with no whitespace) — confirm against the Gitea API.
resolve_version() {
if [ "$VERSION" = "latest" ]; then
local api_url="${GITEA_SERVER}/api/v1/repos/${REPO}/releases/latest"
VERSION=$(curl -fsSL "$api_url" 2>/dev/null | grep -o '"tag_name":"[^"]*"' | head -1 | cut -d'"' -f4) || true
if [ -z "$VERSION" ]; then
error "Failed to resolve latest version. Set EDGE_AI_VERSION manually."
fi
fi
info "Version: $VERSION"
}
# Download the release archive for the given "os_arch" platform string and
# unpack it into INSTALL_DIR.
# NOTE(review): the EXIT trap set here is process-wide, not function-scoped —
# it replaces any previously installed EXIT trap.
install_binary() {
local platform="$1"
local archive_name="edge-ai-platform_${VERSION}_${platform}.tar.gz"
local download_url="${GITEA_SERVER}/${REPO}/releases/download/${VERSION}/${archive_name}"
info "Downloading from: $download_url"
local tmp_dir
tmp_dir="$(mktemp -d)"
trap 'rm -rf "$tmp_dir"' EXIT
curl -fsSL "$download_url" -o "$tmp_dir/archive.tar.gz" || error "Download failed. Check version and platform."
info "Extracting to $INSTALL_DIR ..."
mkdir -p "$INSTALL_DIR"
tar -xzf "$tmp_dir/archive.tar.gz" -C "$INSTALL_DIR" --strip-components=1
chmod +x "$INSTALL_DIR/edge-ai-server"
# Remove macOS quarantine attribute
xattr -d com.apple.quarantine "$INSTALL_DIR/edge-ai-server" 2>/dev/null || true
}
# Expose the server binary on PATH; falls back to sudo when the link
# directory (normally /usr/local/bin) is not writable by the current user.
create_symlink() {
  local link_parent
  link_parent="$(dirname "$BIN_LINK")"
  if [ -w "$link_parent" ]; then
    ln -sf "$INSTALL_DIR/edge-ai-server" "$BIN_LINK"
    info "Symlinked to $BIN_LINK"
  else
    info "Creating symlink requires sudo..."
    sudo ln -sf "$INSTALL_DIR/edge-ai-server" "$BIN_LINK"
    info "Symlinked to $BIN_LINK (via sudo)"
  fi
}
# Create (or reuse) the dedicated Python venv and install pyusb into it.
# Skips gracefully when python3 is missing — Kneron support is optional.
setup_python_venv() {
if ! command -v python3 &>/dev/null; then
warn "python3 not found. Skipping Kneron hardware setup."
warn " Install: brew install python3 (macOS) / apt install python3 (Linux)"
return
fi
info "Python: $(python3 --version 2>&1)"
# Import probe: a venv that can `import usb.core` is considered ready.
if [ -d "$VENV_DIR" ] && "$VENV_DIR/bin/python3" -c "import usb.core" 2>/dev/null; then
info "Python venv already set up: $VENV_DIR"
return
fi
info "Creating Python venv: $VENV_DIR ..."
python3 -m venv "$VENV_DIR"
info "Installing pyusb ..."
"$VENV_DIR/bin/pip" install --quiet pyusb
info "Python environment ready"
}
# Install libusb, the system library pyusb needs to talk to Kneron USB
# dongles. macOS uses Homebrew; Linux uses apt-get when available.
# FIX: previously the Linux branch ran `sudo apt-get install` unconditionally,
# which fails under `set -e` on non-Debian distros (dnf/yum) and aborted the
# whole installer. Now we only invoke apt-get when it exists and otherwise
# print a manual-install hint.
setup_libusb() {
local os_type
os_type="$(uname -s)"
if [ "$os_type" = "Darwin" ]; then
if ! command -v brew &>/dev/null; then
warn "Homebrew not found. Please install libusb manually."
return
fi
if brew list libusb &>/dev/null 2>&1; then
info "libusb: already installed"
else
info "Installing libusb ..."
brew install libusb
fi
elif [ "$os_type" = "Linux" ]; then
if dpkg -s libusb-1.0-0-dev &>/dev/null 2>&1; then
info "libusb: already installed"
elif command -v apt-get &>/dev/null; then
info "Installing libusb ..."
sudo apt-get install -y libusb-1.0-0-dev
else
# Non-Debian distro: don't run apt-get blindly (set -e would abort).
warn "Automatic libusb install supports apt-get only."
warn " Install libusb-1.0 manually (e.g., sudo dnf install libusbx-devel)."
fi
fi
}
# Best-effort probe for attached Kneron USB dongles using the bundled
# detection script; never fails the install (errors are swallowed).
detect_kneron_devices() {
  local venv_py="$VENV_DIR/bin/python3"
  if [ ! -f "$venv_py" ]; then
    warn "Python venv not available, skipping device detection."
    return
  fi
  "$venv_py" "$INSTALL_DIR/scripts/kneron_detect.py" 2>/dev/null || true
}
# Install the per-user supervisor appropriate for the host OS:
# launchd on macOS, a systemd user unit on Linux; other OSes get nothing.
setup_auto_restart() {
  case "$(uname -s)" in
    Darwin) setup_launchd_service ;;
    Linux) setup_systemd_service ;;
  esac
}
# Write and load a launchd LaunchAgent so the server starts at login and
# is relaunched after a crash (KeepAlive on non-zero exit, 5s throttle).
setup_launchd_service() {
local plist_dir="$HOME/Library/LaunchAgents"
local plist_name="com.innovedus.edge-ai-server"
local plist_path="$plist_dir/$plist_name.plist"
local log_dir="$INSTALL_DIR/logs"
mkdir -p "$plist_dir"
mkdir -p "$log_dir"
cat > "$plist_path" <<PLIST
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>${plist_name}</string>
<key>ProgramArguments</key>
<array>
<string>${INSTALL_DIR}/edge-ai-server</string>
</array>
<key>WorkingDirectory</key>
<string>${INSTALL_DIR}</string>
<key>KeepAlive</key>
<dict>
<key>SuccessfulExit</key>
<false/>
</dict>
<key>ThrottleInterval</key>
<integer>5</integer>
<key>StandardOutPath</key>
<string>${log_dir}/server.log</string>
<key>StandardErrorPath</key>
<string>${log_dir}/server.err.log</string>
<key>ProcessType</key>
<string>Background</string>
<key>RunAtLoad</key>
<true/>
</dict>
</plist>
PLIST
# Load the service (unload first if already loaded)
launchctl unload "$plist_path" 2>/dev/null || true
launchctl load "$plist_path"
info "launchd service installed: $plist_name"
info " Server will auto-start on login and restart on crash."
info " Logs: $log_dir/server.log"
info ""
info " Manual controls:"
info " launchctl stop $plist_name # stop"
info " launchctl start $plist_name # start"
info " launchctl unload $plist_path # disable"
}
# Write, enable and start a systemd *user* unit (no root required) that
# restarts the server on failure with a 5-second backoff.
setup_systemd_service() {
local service_dir="$HOME/.config/systemd/user"
local service_name="edge-ai-server"
local service_path="$service_dir/$service_name.service"
local log_dir="$INSTALL_DIR/logs"
mkdir -p "$service_dir"
mkdir -p "$log_dir"
cat > "$service_path" <<SERVICE
[Unit]
Description=Edge AI Platform Server
After=network.target
[Service]
Type=simple
ExecStart=${INSTALL_DIR}/edge-ai-server
WorkingDirectory=${INSTALL_DIR}
Restart=on-failure
RestartSec=5
StandardOutput=append:${log_dir}/server.log
StandardError=append:${log_dir}/server.err.log
[Install]
WantedBy=default.target
SERVICE
# Reload and enable
systemctl --user daemon-reload
systemctl --user enable "$service_name.service"
systemctl --user start "$service_name.service"
info "systemd user service installed: $service_name"
info " Server will auto-start on login and restart on crash."
info " Logs: $log_dir/server.log"
info ""
info " Manual controls:"
info " systemctl --user stop $service_name # stop"
info " systemctl --user start $service_name # start"
info " systemctl --user disable $service_name # disable"
}
# Report the presence of optional runtime tools (ffmpeg for camera
# capture, yt-dlp for YouTube URLs); informational only, never fails.
check_optional_deps() {
  if command -v ffmpeg &>/dev/null; then
    info "ffmpeg: $(ffmpeg -version 2>&1 | head -1)"
  else
    warn "ffmpeg: NOT FOUND"
    warn " Camera capture requires ffmpeg."
    warn " Install: brew install ffmpeg (macOS) / apt install ffmpeg (Linux)"
  fi
  if command -v yt-dlp &>/dev/null; then
    info "yt-dlp: $(yt-dlp --version 2>&1)"
  else
    warn "yt-dlp: NOT FOUND (optional, for YouTube URL support)"
    warn " Install: brew install yt-dlp (macOS) / pip install yt-dlp (Linux)"
  fi
}
# Orchestrates the five installer steps in order; each step is defined above.
main() {
echo ""
info "Edge AI Platform Installer"
echo ""
local platform
platform="$(detect_platform)"
info "Platform: $platform"
# Step 1: Download and install binary
step "1/5 Installing Edge AI Platform"
resolve_version
install_binary "$platform"
create_symlink
# Step 2: Install libusb (system dependency for Kneron USB)
step "2/5 Setting up USB driver"
setup_libusb
# Step 3: Setup Python venv + pyusb (for Kneron hardware)
step "3/5 Setting up Kneron hardware environment"
setup_python_venv
# Step 4: Check dependencies and detect hardware
step "4/5 Checking environment"
check_optional_deps
echo ""
info "Detecting Kneron hardware..."
detect_kneron_devices
# Step 5: Setup auto-restart service
step "5/5 Setting up auto-restart service"
setup_auto_restart
# Done — print summary with OS-specific uninstall instructions.
echo ""
echo -e "${GREEN}=== Installation complete! ===${NC}"
echo ""
info "Installed to: $INSTALL_DIR"
info "Server is running and will auto-restart on crash."
info ""
info "Open: http://127.0.0.1:3721"
echo ""
info "Uninstall:"
local os_type
os_type="$(uname -s)"
if [ "$os_type" = "Darwin" ]; then
info " launchctl unload ~/Library/LaunchAgents/com.innovedus.edge-ai-server.plist"
elif [ "$os_type" = "Linux" ]; then
info " systemctl --user disable --now edge-ai-server"
fi
info " rm -rf $INSTALL_DIR"
info " sudo rm -f $BIN_LINK"
}
main "$@"

46
scripts/kneron_detect.py Normal file
View File

@ -0,0 +1,46 @@
"""Kneron USB device detection — shared by install scripts and kneron_bridge.py.

Prints a human-readable summary of every attached Kneron USB device.
Exit status: 1 when pyusb is missing, 0 otherwise (even with no devices).
"""
import sys

try:
    import usb.core
except ImportError:
    # JSON-shaped error line so machine callers can detect the missing dep.
    print('{"error": "pyusb not installed"}')
    sys.exit(1)

# Kneron's USB vendor ID.
KNERON_VENDOR_ID = 0x3231
# Known product IDs -> marketing names; unknown IDs are rendered in hex.
KNOWN_PRODUCTS = {
    0x0100: "KL520",
    0x0200: "KL720",
    0x0720: "KL720",
    0x0530: "KL530",
    0x0630: "KL630",
    0x0730: "KL730",
}

devices = list(usb.core.find(find_all=True, idVendor=KNERON_VENDOR_ID))
if not devices:
    print("No Kneron devices found.")
    sys.exit(0)

print(f"Found {len(devices)} Kneron device(s):\n")
for i, dev in enumerate(devices):
    product_name = KNOWN_PRODUCTS.get(dev.idProduct, f"Unknown (0x{dev.idProduct:04X})")
    # Reading string descriptors can fail (e.g. missing permissions on the
    # udev node); fall back to "N/A" rather than crashing the probe.
    serial = "N/A"
    product = "N/A"
    try:
        serial = dev.serial_number or "N/A"
    except Exception:
        pass
    try:
        product = dev.product or "N/A"
    except Exception:
        pass
    print(f" Device #{i+1}:")
    print(f" Model: Kneron {product_name}")
    print(f" Product: {product}")
    print(f" Serial: {serial}")
    # FIX: the numeric IDs were previously labelled "Vendor:"/"Product:",
    # so two different values both appeared as "Product:" in the output.
    print(f" Vendor ID: 0x{dev.idVendor:04X}")
    print(f" Product ID: 0x{dev.idProduct:04X}")
    print(f" Bus: {dev.bus}, Address: {dev.address}")
    print()

123
scripts/setup-kneron.sh Executable file
View File

@ -0,0 +1,123 @@
#!/usr/bin/env bash
set -euo pipefail
# Kneron KL520/KL720 environment setup script (macOS).
# Usage: bash scripts/setup-kneron.sh
#
# Installs libusb via Homebrew, creates a dedicated Python venv, installs
# pyusb into it, then probes the USB bus for attached Kneron devices.

RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

info() { echo -e "${GREEN}[INFO]${NC} $*"; }
warn() { echo -e "${YELLOW}[WARN]${NC} $*"; }
error() { echo -e "${RED}[ERROR]${NC} $*" >&2; exit 1; }

# Venv location; override with the EDGE_AI_VENV environment variable.
VENV_DIR="${EDGE_AI_VENV:-$HOME/.edge-ai-platform/venv}"

echo ""
info "=== Kneron 硬體環境設定 ==="
echo ""

# Step 1: Homebrew is required (used to install libusb).
if ! command -v brew &>/dev/null; then
  error "需要 Homebrew。請先安裝: https://brew.sh"
fi
info "Homebrew: OK"

# Step 2: libusb — the userspace USB library pyusb drives.
# Fix: dropped the redundant `2>&1` that followed `&>/dev/null`
# (`&>` already redirects both stdout and stderr).
if brew list libusb &>/dev/null; then
  info "libusb: 已安裝"
else
  info "安裝 libusb ..."
  brew install libusb
  info "libusb: 安裝完成"
fi

# Step 3: a Python 3 interpreter must be available.
if ! command -v python3 &>/dev/null; then
  error "需要 Python 3。請安裝: brew install python3"
fi
PYTHON_VER=$(python3 --version 2>&1)
info "Python: $PYTHON_VER"

# Step 4: create the dedicated venv (idempotent — reused if present).
if [ -d "$VENV_DIR" ]; then
  info "Python venv 已存在: $VENV_DIR"
else
  info "建立 Python venv: $VENV_DIR ..."
  mkdir -p "$(dirname "$VENV_DIR")"
  python3 -m venv "$VENV_DIR"
  info "Python venv: 建立完成"
fi

# Activate the venv so python3/pip below resolve inside it.
source "$VENV_DIR/bin/activate"

# Step 5: install pyusb into the venv if missing.
if python3 -c "import usb.core" 2>/dev/null; then
  info "pyusb: 已安裝"
else
  info "安裝 pyusb ..."
  # `python3 -m pip` guarantees the venv's pip is used, even if a stale
  # `pip` shim is earlier on PATH.
  python3 -m pip install --quiet pyusb
  info "pyusb: 安裝完成"
fi

# Step 6: probe the USB bus for Kneron devices (vendor ID 0x3231).
echo ""
info "=== 偵測 Kneron USB 裝置 ==="
echo ""
python3 << 'PYEOF'
import usb.core
import usb.util

KNERON_VENDOR_ID = 0x3231
KNOWN_PRODUCTS = {
    0x0100: "KL520",
    0x0200: "KL720",
    0x0720: "KL720",
    0x0530: "KL530",
    0x0630: "KL630",
    0x0730: "KL730",
}

devices = list(usb.core.find(find_all=True, idVendor=KNERON_VENDOR_ID))
if not devices:
    print("[WARN] 未偵測到 Kneron 裝置。")
    print(" 請確認 USB Dongle 已插入。")
else:
    print(f"[INFO] 偵測到 {len(devices)} 個 Kneron 裝置:")
    print("")
    for i, dev in enumerate(devices):
        product_name = KNOWN_PRODUCTS.get(dev.idProduct, f"Unknown (0x{dev.idProduct:04X})")
        serial = "N/A"
        product = "N/A"
        try:
            serial = dev.serial_number or "N/A"
        except Exception:
            pass
        try:
            product = dev.product or "N/A"
        except Exception:
            pass
        print(f" 裝置 #{i+1}:")
        print(f" 型號: Kneron {product_name}")
        print(f" Product: {product}")
        print(f" Serial: {serial}")
        print(f" Vendor: 0x{dev.idVendor:04X}")
        print(f" Product: 0x{dev.idProduct:04X}")
        print(f" Bus: {dev.bus}, Device: {dev.address}")
        print("")
    print("[INFO] USB 連線正常!")
PYEOF

echo ""
info "=== 設定完成 ==="
echo ""
info "Python venv 位置: $VENV_DIR"
info ""
info "下次啟動 server 時會自動使用此環境。"
info "如需手動測試: source $VENV_DIR/bin/activate"

1
server/.next/trace Normal file
View File

@ -0,0 +1 @@
[{"name":"generate-buildid","duration":266,"timestamp":494591728065,"id":4,"parentId":1,"tags":{},"startTime":1772620716188,"traceId":"5e5954c44fbe79f2"},{"name":"load-custom-routes","duration":354,"timestamp":494591728448,"id":5,"parentId":1,"tags":{},"startTime":1772620716188,"traceId":"5e5954c44fbe79f2"},{"name":"create-dist-dir","duration":421,"timestamp":494591728832,"id":6,"parentId":1,"tags":{},"startTime":1772620716188,"traceId":"5e5954c44fbe79f2"},{"name":"clean","duration":2253,"timestamp":494591729919,"id":7,"parentId":1,"tags":{},"startTime":1772620716189,"traceId":"5e5954c44fbe79f2"},{"name":"next-build","duration":1692006,"timestamp":494590042560,"id":1,"tags":{"buildMode":"default","version":"16.1.6","bundler":"turbopack"},"startTime":1772620714502,"traceId":"5e5954c44fbe79f2"}]

1
server/.next/trace-build Normal file
View File

@ -0,0 +1 @@
[{"name":"next-build","duration":1692006,"timestamp":494590042560,"id":1,"tags":{"buildMode":"default","version":"16.1.6","bundler":"turbopack"},"startTime":1772620714502,"traceId":"5e5954c44fbe79f2"}]

View File

@ -0,0 +1,44 @@
// Command relay-server runs a reverse-proxy relay that bridges browser
// clients to tunnelled edge-ai-servers over yamux-multiplexed WebSockets.
// Multiple local servers can connect simultaneously, each identified by
// a unique token derived from their hardware ID.
package main
import (
"flag"
"log"
"net/http"
"os"
"os/signal"
"syscall"
"edge-ai-platform/internal/relay"
)
func main() {
port := flag.Int("port", 3800, "Listen port")
flag.Parse()
srv := relay.NewServer()
addr := relay.FormatAddr(*port)
httpServer := &http.Server{
Addr: addr,
Handler: srv.Handler(),
}
// Graceful shutdown on SIGINT/SIGTERM
quit := make(chan os.Signal, 1)
signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
go func() {
<-quit
log.Println("[relay] shutting down...")
srv.Shutdown()
httpServer.Close()
}()
log.Printf("[relay] listening on %s (multi-tenant mode)", addr)
if err := httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed {
log.Fatalf("[relay] server error: %v", err)
}
}

340
server/data/models.json Normal file
View File

@ -0,0 +1,340 @@
[
{
"id": "yolov5-face-detection",
"name": "YOLOv5 Face Detection",
"description": "Real-time face detection model based on YOLOv5 architecture, optimized for edge deployment on Kneron KL720. Detects faces with high accuracy in various lighting conditions.",
"thumbnail": "/images/models/yolov5-face.png",
"taskType": "object_detection",
"categories": ["face", "security", "people"],
"framework": "ONNX",
"inputSize": {"width": 640, "height": 640},
"modelSize": 14200000,
"quantization": "INT8",
"accuracy": 0.92,
"latencyMs": 33,
"fps": 30,
"supportedHardware": ["KL720", "KL730"],
"labels": ["face"],
"version": "1.0.0",
"author": "Kneron",
"license": "Apache-2.0",
"createdAt": "2024-01-15T00:00:00Z",
"updatedAt": "2024-06-01T00:00:00Z"
},
{
"id": "imagenet-classification",
"name": "ImageNet Classification (ResNet18)",
"description": "ResNet18-based image classification model trained on ImageNet. Supports 1000 object categories with efficient inference on KL520 edge devices.",
"thumbnail": "/images/models/imagenet-cls.png",
"taskType": "classification",
"categories": ["general", "image-classification"],
"framework": "ONNX",
"inputSize": {"width": 224, "height": 224},
"modelSize": 12000000,
"quantization": "INT8",
"accuracy": 0.78,
"latencyMs": 15,
"fps": 60,
"supportedHardware": ["KL520", "KL720", "KL730"],
"labels": ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"],
"filePath": "data/nef/kl520/kl520_20001_resnet18_w224h224.nef",
"version": "2.1.0",
"author": "Kneron",
"license": "MIT",
"createdAt": "2024-02-10T00:00:00Z",
"updatedAt": "2024-07-15T00:00:00Z"
},
{
"id": "person-detection",
"name": "Person Detection",
"description": "Lightweight person detection model optimized for real-time surveillance and people counting. Low latency with high accuracy on person class.",
"thumbnail": "/images/models/person-det.png",
"taskType": "object_detection",
"categories": ["people", "security", "surveillance"],
"framework": "ONNX",
"inputSize": {"width": 416, "height": 416},
"modelSize": 11800000,
"quantization": "INT8",
"accuracy": 0.89,
"latencyMs": 28,
"fps": 35,
"supportedHardware": ["KL720", "KL730"],
"labels": ["person"],
"version": "1.2.0",
"author": "Kneron",
"license": "Apache-2.0",
"createdAt": "2024-03-01T00:00:00Z",
"updatedAt": "2024-08-01T00:00:00Z"
},
{
"id": "vehicle-classification",
"name": "Vehicle Classification",
"description": "Vehicle type classification model that identifies cars, trucks, buses, motorcycles, and bicycles. Ideal for traffic monitoring and smart parking.",
"thumbnail": "/images/models/vehicle-cls.png",
"taskType": "classification",
"categories": ["vehicle", "traffic", "transportation"],
"framework": "ONNX",
"inputSize": {"width": 224, "height": 224},
"modelSize": 6200000,
"quantization": "INT8",
"accuracy": 0.85,
"latencyMs": 12,
"fps": 75,
"supportedHardware": ["KL520", "KL720", "KL730"],
"labels": ["car", "truck", "bus", "motorcycle", "bicycle"],
"version": "1.0.0",
"author": "Kneron",
"license": "MIT",
"createdAt": "2024-03-20T00:00:00Z",
"updatedAt": "2024-05-10T00:00:00Z"
},
{
"id": "hand-gesture-recognition",
"name": "Hand Gesture Recognition",
"description": "Recognizes 10 common hand gestures in real-time. Suitable for touchless interfaces and gesture-based control systems.",
"thumbnail": "/images/models/hand-gesture.png",
"taskType": "classification",
"categories": ["gesture", "hand", "interaction"],
"framework": "ONNX",
"inputSize": {"width": 224, "height": 224},
"modelSize": 5800000,
"quantization": "INT8",
"accuracy": 0.88,
"latencyMs": 18,
"fps": 50,
"supportedHardware": ["KL520", "KL720"],
"labels": ["thumbs_up", "thumbs_down", "open_palm", "fist", "peace", "ok", "pointing", "wave", "grab", "pinch"],
"version": "1.1.0",
"author": "Kneron",
"license": "Apache-2.0",
"createdAt": "2024-04-05T00:00:00Z",
"updatedAt": "2024-09-01T00:00:00Z"
},
{
"id": "coco-object-detection",
"name": "COCO Object Detection",
"description": "General-purpose object detection model trained on COCO dataset. Detects 80 common object categories including people, animals, vehicles, and household items.",
"thumbnail": "/images/models/coco-det.png",
"taskType": "object_detection",
"categories": ["general", "multi-object", "coco"],
"framework": "ONNX",
"inputSize": {"width": 640, "height": 640},
"modelSize": 23500000,
"quantization": "INT8",
"accuracy": 0.82,
"latencyMs": 45,
"fps": 22,
"supportedHardware": ["KL720", "KL730"],
"labels": ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow"],
"version": "3.0.0",
"author": "Kneron",
"license": "Apache-2.0",
"createdAt": "2024-01-01T00:00:00Z",
"updatedAt": "2024-10-01T00:00:00Z"
},
{
"id": "face-mask-detection",
"name": "Face Mask Detection",
"description": "Detects whether a person is wearing a face mask, wearing it incorrectly, or not wearing one. Built for health compliance monitoring.",
"thumbnail": "/images/models/face-mask.png",
"taskType": "object_detection",
"categories": ["face", "health", "safety"],
"framework": "ONNX",
"inputSize": {"width": 320, "height": 320},
"modelSize": 9800000,
"quantization": "INT8",
"accuracy": 0.91,
"latencyMs": 22,
"fps": 45,
"supportedHardware": ["KL720", "KL730"],
"labels": ["mask_on", "mask_off", "mask_incorrect"],
"version": "1.3.0",
"author": "Kneron",
"license": "MIT",
"createdAt": "2024-02-28T00:00:00Z",
"updatedAt": "2024-07-20T00:00:00Z"
},
{
"id": "license-plate-detection",
"name": "License Plate Detection",
"description": "Detects and localizes license plates in images and video streams. Optimized for various plate formats and viewing angles.",
"thumbnail": "/images/models/license-plate.png",
"taskType": "object_detection",
"categories": ["vehicle", "traffic", "ocr"],
"framework": "ONNX",
"inputSize": {"width": 416, "height": 416},
"modelSize": 12400000,
"quantization": "INT8",
"accuracy": 0.87,
"latencyMs": 30,
"fps": 33,
"supportedHardware": ["KL720", "KL730"],
"labels": ["license_plate"],
"version": "1.0.0",
"author": "Kneron",
"license": "Apache-2.0",
"createdAt": "2024-05-15T00:00:00Z",
"updatedAt": "2024-08-30T00:00:00Z"
},
{
"id": "kl520-yolov5-detection",
"name": "YOLOv5 Detection (KL520)",
"description": "YOLOv5 object detection model compiled for Kneron KL520. No upsample variant optimized for NPU inference at 640x640 resolution.",
"thumbnail": "/images/models/yolov5-det.png",
"taskType": "object_detection",
"categories": ["general", "multi-object"],
"framework": "NEF",
"inputSize": {"width": 640, "height": 640},
"modelSize": 7200000,
"quantization": "INT8",
"accuracy": 0.80,
"latencyMs": 50,
"fps": 20,
"supportedHardware": ["KL520"],
"labels": ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light"],
"filePath": "data/nef/kl520/kl520_20005_yolov5-noupsample_w640h640.nef",
"version": "1.0.0",
"author": "Kneron",
"license": "Apache-2.0",
"createdAt": "2024-01-01T00:00:00Z",
"updatedAt": "2024-01-01T00:00:00Z"
},
{
"id": "kl520-fcos-detection",
"name": "FCOS Detection (KL520)",
"description": "FCOS (Fully Convolutional One-Stage) object detection with DarkNet53s backbone, compiled for KL520. Anchor-free detection at 512x512.",
"thumbnail": "/images/models/fcos-det.png",
"taskType": "object_detection",
"categories": ["general", "multi-object"],
"framework": "NEF",
"inputSize": {"width": 512, "height": 512},
"modelSize": 8900000,
"quantization": "INT8",
"accuracy": 0.78,
"latencyMs": 45,
"fps": 22,
"supportedHardware": ["KL520"],
"labels": ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light"],
"filePath": "data/nef/kl520/kl520_20004_fcos-drk53s_w512h512.nef",
"version": "1.0.0",
"author": "Kneron",
"license": "Apache-2.0",
"createdAt": "2024-01-01T00:00:00Z",
"updatedAt": "2024-01-01T00:00:00Z"
},
{
"id": "kl520-ssd-face-detection",
"name": "SSD Face Detection (KL520)",
"description": "SSD-based face detection with landmark localization, compiled for KL520. Lightweight model suitable for face detection and alignment tasks.",
"thumbnail": "/images/models/ssd-face.png",
"taskType": "object_detection",
"categories": ["face", "security"],
"framework": "NEF",
"inputSize": {"width": 320, "height": 240},
"modelSize": 1000000,
"quantization": "INT8",
"accuracy": 0.85,
"latencyMs": 10,
"fps": 100,
"supportedHardware": ["KL520"],
"labels": ["face"],
"filePath": "data/nef/kl520/kl520_ssd_fd_lm.nef",
"version": "1.0.0",
"author": "Kneron",
"license": "Apache-2.0",
"createdAt": "2024-01-01T00:00:00Z",
"updatedAt": "2024-01-01T00:00:00Z"
},
{
"id": "kl520-tiny-yolov3",
"name": "Tiny YOLOv3 (KL520)",
"description": "Tiny YOLOv3 object detection model compiled for KL520. Compact and fast model for general-purpose multi-object detection on edge devices.",
"thumbnail": "/images/models/tiny-yolov3.png",
"taskType": "object_detection",
"categories": ["general", "multi-object"],
"framework": "NEF",
"inputSize": {"width": 416, "height": 416},
"modelSize": 9400000,
"quantization": "INT8",
"accuracy": 0.75,
"latencyMs": 35,
"fps": 28,
"supportedHardware": ["KL520"],
"labels": ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light"],
"filePath": "data/nef/kl520/kl520_tiny_yolo_v3.nef",
"version": "1.0.0",
"author": "Kneron",
"license": "Apache-2.0",
"createdAt": "2024-01-01T00:00:00Z",
"updatedAt": "2024-01-01T00:00:00Z"
},
{
"id": "kl720-yolov5-detection",
"name": "YOLOv5 Detection (KL720)",
"description": "YOLOv5 object detection model compiled for Kneron KL720. No upsample variant optimized for KL720 NPU inference at 640x640 resolution with USB 3.0 throughput.",
"thumbnail": "/images/models/yolov5-det.png",
"taskType": "object_detection",
"categories": ["general", "multi-object"],
"framework": "NEF",
"inputSize": {"width": 640, "height": 640},
"modelSize": 10168348,
"quantization": "INT8",
"accuracy": 0.82,
"latencyMs": 30,
"fps": 33,
"supportedHardware": ["KL720"],
"labels": ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light"],
"filePath": "data/nef/kl720/kl720_20005_yolov5-noupsample_w640h640.nef",
"version": "1.0.0",
"author": "Kneron",
"license": "Apache-2.0",
"createdAt": "2024-01-01T00:00:00Z",
"updatedAt": "2024-01-01T00:00:00Z"
},
{
"id": "kl720-resnet18-classification",
"name": "ImageNet Classification ResNet18 (KL720)",
"description": "ResNet18-based image classification compiled for KL720. Supports 1000 ImageNet categories with fast inference via USB 3.0.",
"thumbnail": "/images/models/imagenet-cls.png",
"taskType": "classification",
"categories": ["general", "image-classification"],
"framework": "NEF",
"inputSize": {"width": 224, "height": 224},
"modelSize": 12826804,
"quantization": "INT8",
"accuracy": 0.78,
"latencyMs": 10,
"fps": 100,
"supportedHardware": ["KL720"],
"labels": ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"],
"filePath": "data/nef/kl720/kl720_20001_resnet18_w224h224.nef",
"version": "1.0.0",
"author": "Kneron",
"license": "MIT",
"createdAt": "2024-01-01T00:00:00Z",
"updatedAt": "2024-01-01T00:00:00Z"
},
{
"id": "kl720-fcos-detection",
"name": "FCOS Detection (KL720)",
"description": "FCOS (Fully Convolutional One-Stage) object detection with DarkNet53s backbone, compiled for KL720. Anchor-free detection at 512x512.",
"thumbnail": "/images/models/fcos-det.png",
"taskType": "object_detection",
"categories": ["general", "multi-object"],
"framework": "NEF",
"inputSize": {"width": 512, "height": 512},
"modelSize": 13004640,
"quantization": "INT8",
"accuracy": 0.80,
"latencyMs": 30,
"fps": 33,
"supportedHardware": ["KL720"],
"labels": ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light"],
"filePath": "data/nef/kl720/kl720_20004_fcos-drk53s_w512h512.nef",
"version": "1.0.0",
"author": "Kneron",
"license": "Apache-2.0",
"createdAt": "2024-01-01T00:00:00Z",
"updatedAt": "2024-01-01T00:00:00Z"
}
]

Binary file not shown.

Binary file not shown.

BIN
server/edge-ai-server Executable file

Binary file not shown.

46
server/go.mod Normal file
View File

@ -0,0 +1,46 @@
module edge-ai-platform
go 1.26.0
require (
github.com/gin-gonic/gin v1.11.0
github.com/gorilla/websocket v1.5.3
)
require (
fyne.io/systray v1.12.0 // indirect
github.com/bytedance/sonic v1.14.0 // indirect
github.com/bytedance/sonic/loader v0.3.0 // indirect
github.com/cloudwego/base64x v0.1.6 // indirect
github.com/gabriel-vasile/mimetype v1.4.8 // indirect
github.com/gin-contrib/sse v1.1.0 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.27.0 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/goccy/go-yaml v1.18.0 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/hashicorp/yamux v0.1.2 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/quic-go/qpack v0.5.1 // indirect
github.com/quic-go/quic-go v0.54.0 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.3.0 // indirect
go.uber.org/mock v0.5.0 // indirect
golang.org/x/arch v0.20.0 // indirect
golang.org/x/crypto v0.40.0 // indirect
golang.org/x/mod v0.25.0 // indirect
golang.org/x/net v0.42.0 // indirect
golang.org/x/sync v0.16.0 // indirect
golang.org/x/sys v0.35.0 // indirect
golang.org/x/text v0.27.0 // indirect
golang.org/x/tools v0.34.0 // indirect
google.golang.org/protobuf v1.36.9 // indirect
)

98
server/go.sum Normal file
View File

@ -0,0 +1,98 @@
fyne.io/systray v1.12.0 h1:CA1Kk0e2zwFlxtc02L3QFSiIbxJ/P0n582YrZHT7aTM=
fyne.io/systray v1.12.0/go.mod h1:RVwqP9nYMo7h5zViCBHri2FgjXF7H2cub7MAq4NSoLs=
github.com/bytedance/sonic v1.14.0 h1:/OfKt8HFw0kh2rj8N0F6C/qPGRESq0BbaNZgcNXXzQQ=
github.com/bytedance/sonic v1.14.0/go.mod h1:WoEbx8WTcFJfzCe0hbmyTGrfjt8PzNEBdxlNUO24NhA=
github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA=
github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M=
github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM=
github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8=
github.com/gin-contrib/sse v1.1.0 h1:n0w2GMuUpWDVp7qSpvze6fAu9iRxJY4Hmj6AmBOU05w=
github.com/gin-contrib/sse v1.1.0/go.mod h1:hxRZ5gVpWMT7Z0B0gSNYqqsSCNIJMjzvm6fqCz9vjwM=
github.com/gin-gonic/gin v1.11.0 h1:OW/6PLjyusp2PPXtyxKHU0RbX6I/l28FTdDlae5ueWk=
github.com/gin-gonic/gin v1.11.0/go.mod h1:+iq/FyxlGzII0KHiBGjuNn4UNENUlKbGlNmc+W50Dls=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.27.0 h1:w8+XrWVMhGkxOaaowyKH35gFydVHOvC0/uWoy2Fzwn4=
github.com/go-playground/validator/v10 v10.27.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw=
github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/yamux v0.1.2 h1:XtB8kyFOyHXYVFnwT5C3+Bdo8gArse7j2AQ0DA0Uey8=
github.com/hashicorp/yamux v0.1.2/go.mod h1:C+zze2n6e/7wshOZep2A70/aQU6QBRWJO/G6FT1wIns=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
github.com/quic-go/quic-go v0.54.0 h1:6s1YB9QotYI6Ospeiguknbp2Znb/jZYjZLRXn9kMQBg=
github.com/quic-go/quic-go v0.54.0/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/ugorji/go/codec v1.3.0 h1:Qd2W2sQawAfG8XSvzwhBeoGq71zXOC/Q1E9y/wUcsUA=
github.com/ugorji/go/codec v1.3.0/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4=
go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
golang.org/x/arch v0.20.0 h1:dx1zTU0MAE98U+TQ8BLl7XsJbgze2WnNKF/8tGp/Q6c=
golang.org/x/arch v0.20.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk=
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw=
google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@ -0,0 +1,790 @@
package handlers
import (
"fmt"
"io"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"edge-ai-platform/internal/api/ws"
"edge-ai-platform/internal/camera"
"edge-ai-platform/internal/device"
"edge-ai-platform/internal/driver"
"edge-ai-platform/internal/inference"
"github.com/gin-gonic/gin"
)
// CameraHandler serves the camera/media HTTP endpoints: listing cameras,
// starting/stopping the inference pipeline, the MJPEG stream, and
// image/video uploads for single-shot or frame-by-frame inference.
type CameraHandler struct {
	cameraMgr    *camera.Manager     // enumerates and opens local cameras; doubles as the camera FrameSource
	deviceMgr    *device.Manager     // resolves a deviceId to a driver session
	inferenceSvc *inference.Service  // NOTE(review): not referenced in the handlers visible here — confirm use
	wsHub        *ws.Hub             // broadcasts inference results to "inference:<deviceId>" rooms
	streamer     *camera.MJPEGStreamer     // fans annotated frames out to HTTP stream clients
	pipeline     *camera.InferencePipeline // currently running source→driver pipeline; nil when idle
	activeSource camera.FrameSource  // source backing the current pipeline (camera, image, or video)
	sourceType   camera.SourceType   // kind of activeSource (SourceCamera / SourceImage / ...)
	// Video seek state — preserved across seek operations
	videoPath      string           // original file path or resolved URL
	videoIsURL     bool             // true if source is a URL
	videoFPS       float64          // target FPS
	videoInfo      camera.VideoInfo // duration, total frames
	activeDeviceID string           // device ID for current video session
}
// NewCameraHandler wires the handler to its collaborators and starts the
// MJPEG streamer's fan-out loop in the background.
func NewCameraHandler(
	cameraMgr *camera.Manager,
	deviceMgr *device.Manager,
	inferenceSvc *inference.Service,
	wsHub *ws.Hub,
) *CameraHandler {
	h := &CameraHandler{
		cameraMgr:    cameraMgr,
		deviceMgr:    deviceMgr,
		inferenceSvc: inferenceSvc,
		wsHub:        wsHub,
		streamer:     camera.NewMJPEGStreamer(),
	}
	// The streamer runs for the lifetime of the process.
	go h.streamer.Run()
	return h
}
// ListCameras responds with every camera currently known to the manager.
func (h *CameraHandler) ListCameras(c *gin.Context) {
	c.JSON(200, gin.H{
		"success": true,
		"data":    gin.H{"cameras": h.cameraMgr.ListCameras()},
	})
}
// StartPipeline opens the local camera and connects it to the inference
// driver for the requested device. Annotated frames are published through
// the MJPEG streamer; per-frame results are broadcast to the device's
// WebSocket room ("inference:<deviceId>").
//
// Request JSON: {cameraId, deviceId, width, height}. Width/height default
// to 640x480 when omitted. Responds with the MJPEG stream URL.
func (h *CameraHandler) StartPipeline(c *gin.Context) {
	var req struct {
		CameraID string `json:"cameraId"`
		DeviceID string `json:"deviceId"`
		Width    int    `json:"width"`
		Height   int    `json:"height"`
	}
	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(400, gin.H{"success": false, "error": gin.H{"code": "BAD_REQUEST", "message": err.Error()}})
		return
	}
	// Default capture resolution.
	if req.Width == 0 {
		req.Width = 640
	}
	if req.Height == 0 {
		req.Height = 480
	}
	// Clean up any existing pipeline
	h.stopActivePipeline()
	// Open camera
	// NOTE(review): req.CameraID is bound but never used — the camera is
	// always opened at index 0. Confirm whether multi-camera selection is
	// intended.
	if err := h.cameraMgr.Open(0, req.Width, req.Height); err != nil {
		c.JSON(500, gin.H{"success": false, "error": gin.H{"code": "CAMERA_OPEN_FAILED", "message": err.Error()}})
		return
	}
	// Get device driver
	// NOTE(review): the camera stays open if this lookup fails — confirm a
	// later StartPipeline/stopActivePipeline reclaims it.
	session, err := h.deviceMgr.GetDevice(req.DeviceID)
	if err != nil {
		c.JSON(404, gin.H{"success": false, "error": gin.H{"code": "DEVICE_NOT_FOUND", "message": err.Error()}})
		return
	}
	// Create inference result channel
	resultCh := make(chan *driver.InferenceResult, 10)
	// Forward results to WebSocket, enriching with device ID
	// NOTE(review): resultCh is never closed on this path (UploadImage closes
	// its channel when its pipeline completes), so this goroutine lives until
	// the channel is closed elsewhere — confirm stopActivePipeline covers it.
	go func() {
		room := "inference:" + req.DeviceID
		for result := range resultCh {
			result.DeviceID = req.DeviceID
			h.wsHub.BroadcastToRoom(room, result)
		}
	}()
	// Start pipeline with camera as source
	h.activeSource = h.cameraMgr
	h.sourceType = camera.SourceCamera
	h.pipeline = camera.NewInferencePipeline(
		h.cameraMgr,
		camera.SourceCamera,
		session.Driver,
		h.streamer.FrameChannel(),
		resultCh,
	)
	h.pipeline.Start()
	streamURL := "/api/camera/stream"
	c.JSON(200, gin.H{
		"success": true,
		"data": gin.H{
			"streamUrl":  streamURL,
			"sourceType": "camera",
		},
	})
}
// StopPipeline tears down whatever pipeline is currently running and
// acknowledges the request.
func (h *CameraHandler) StopPipeline(c *gin.Context) {
	defer c.JSON(200, gin.H{"success": true})
	h.stopActivePipeline()
}
// StreamMJPEG hands the connection over to the MJPEG streamer, which
// serves the live frame stream directly on the response writer.
func (h *CameraHandler) StreamMJPEG(c *gin.Context) {
	w, r := c.Writer, c.Request
	h.streamer.ServeHTTP(w, r)
}
// UploadImage handles image file upload for single-shot inference.
// The uploaded JPG/PNG is saved to a temp file, wrapped in an ImageSource,
// and run through the inference pipeline for the given device. The result
// is broadcast to the device's WebSocket room, and the frame is served via
// the MJPEG stream URL returned in the response.
func (h *CameraHandler) UploadImage(c *gin.Context) {
	h.stopActivePipeline()
	deviceID := c.PostForm("deviceId")
	if deviceID == "" {
		c.JSON(400, gin.H{"success": false, "error": gin.H{"code": "BAD_REQUEST", "message": "deviceId is required"}})
		return
	}
	file, header, err := c.Request.FormFile("file")
	if err != nil {
		c.JSON(400, gin.H{"success": false, "error": gin.H{"code": "BAD_REQUEST", "message": "file is required"}})
		return
	}
	defer file.Close()
	// Only JPG/PNG are accepted (checked by extension, not content).
	ext := strings.ToLower(filepath.Ext(header.Filename))
	if ext != ".jpg" && ext != ".jpeg" && ext != ".png" {
		c.JSON(400, gin.H{"success": false, "error": gin.H{"code": "BAD_REQUEST", "message": "only JPG/PNG files are supported"}})
		return
	}
	// Save to temp file
	tmpFile, err := os.CreateTemp("", "edge-ai-image-*"+ext)
	if err != nil {
		c.JSON(500, gin.H{"success": false, "error": gin.H{"code": "STORAGE_ERROR", "message": err.Error()}})
		return
	}
	if _, err := io.Copy(tmpFile, file); err != nil {
		tmpFile.Close()
		os.Remove(tmpFile.Name())
		c.JSON(500, gin.H{"success": false, "error": gin.H{"code": "STORAGE_ERROR", "message": err.Error()}})
		return
	}
	tmpFile.Close()
	// Create ImageSource
	// NOTE(review): the temp file is removed on the error paths below but
	// not on success — presumably ImageSource still reads from it during the
	// pipeline run; confirm cleanup happens in imgSource.Close() /
	// stopActivePipeline, otherwise temp files accumulate.
	imgSource, err := camera.NewImageSource(tmpFile.Name())
	if err != nil {
		os.Remove(tmpFile.Name())
		c.JSON(500, gin.H{"success": false, "error": gin.H{"code": "IMAGE_DECODE_FAILED", "message": err.Error()}})
		return
	}
	// Get device driver
	session, err := h.deviceMgr.GetDevice(deviceID)
	if err != nil {
		imgSource.Close()
		c.JSON(404, gin.H{"success": false, "error": gin.H{"code": "DEVICE_NOT_FOUND", "message": err.Error()}})
		return
	}
	// Forward inference results to the device's WebSocket room; the
	// goroutine exits when resultCh is closed after the pipeline completes.
	resultCh := make(chan *driver.InferenceResult, 10)
	go func() {
		room := "inference:" + deviceID
		for result := range resultCh {
			result.DeviceID = deviceID
			h.wsHub.BroadcastToRoom(room, result)
		}
	}()
	h.activeSource = imgSource
	h.sourceType = camera.SourceImage
	h.pipeline = camera.NewInferencePipeline(
		imgSource,
		camera.SourceImage,
		session.Driver,
		h.streamer.FrameChannel(),
		resultCh,
	)
	h.pipeline.Start()
	// Clean up result channel after pipeline completes
	go func() {
		<-h.pipeline.Done()
		close(resultCh)
	}()
	w, ht := imgSource.Dimensions()
	streamURL := "/api/camera/stream"
	c.JSON(200, gin.H{
		"success": true,
		"data": gin.H{
			"streamUrl":  streamURL,
			"sourceType": "image",
			"width":      w,
			"height":     ht,
			"filename":   header.Filename,
		},
	})
}
// UploadVideo handles video file upload for frame-by-frame inference.
//
// Multipart form fields:
//   - deviceId: target inference device (required)
//   - file:     MP4/AVI/MOV video (required)
//
// The video is staged to a temp file, probed for duration/frame count,
// decoded at a fixed 15 FPS through a VideoSource, and run through a new
// inference pipeline. Results go to the "inference:<deviceId>" WebSocket
// room, and a "pipeline_complete" event is broadcast when playback ends.
// Handler state (videoPath/videoFPS/videoInfo/activeDeviceID) is recorded
// so SeekVideo can reopen the same file at a new position.
func (h *CameraHandler) UploadVideo(c *gin.Context) {
	h.stopActivePipeline()
	deviceID := c.PostForm("deviceId")
	if deviceID == "" {
		c.JSON(400, gin.H{"success": false, "error": gin.H{"code": "BAD_REQUEST", "message": "deviceId is required"}})
		return
	}
	file, header, err := c.Request.FormFile("file")
	if err != nil {
		c.JSON(400, gin.H{"success": false, "error": gin.H{"code": "BAD_REQUEST", "message": "file is required"}})
		return
	}
	defer file.Close()
	// Extension-based validation only; contents are left to ffmpeg.
	ext := strings.ToLower(filepath.Ext(header.Filename))
	if ext != ".mp4" && ext != ".avi" && ext != ".mov" {
		c.JSON(400, gin.H{"success": false, "error": gin.H{"code": "BAD_REQUEST", "message": "only MP4/AVI/MOV files are supported"}})
		return
	}
	// Save to temp file
	tmpFile, err := os.CreateTemp("", "edge-ai-video-*"+ext)
	if err != nil {
		c.JSON(500, gin.H{"success": false, "error": gin.H{"code": "STORAGE_ERROR", "message": err.Error()}})
		return
	}
	if _, err := io.Copy(tmpFile, file); err != nil {
		tmpFile.Close()
		os.Remove(tmpFile.Name())
		c.JSON(500, gin.H{"success": false, "error": gin.H{"code": "STORAGE_ERROR", "message": err.Error()}})
		return
	}
	tmpFile.Close()
	// Probe video info (duration, frame count) before starting pipeline
	videoInfo := camera.ProbeVideoInfo(tmpFile.Name(), 15)
	// Create VideoSource (decodes at 15 FPS to match the probe above)
	videoSource, err := camera.NewVideoSource(tmpFile.Name(), 15)
	if err != nil {
		os.Remove(tmpFile.Name())
		c.JSON(500, gin.H{"success": false, "error": gin.H{"code": "VIDEO_DECODE_FAILED", "message": err.Error()}})
		return
	}
	// A probe can fail to determine the frame count; only set it when known.
	if videoInfo.TotalFrames > 0 {
		videoSource.SetTotalFrames(videoInfo.TotalFrames)
	}
	// Get device driver
	session, err := h.deviceMgr.GetDevice(deviceID)
	if err != nil {
		videoSource.Close()
		c.JSON(404, gin.H{"success": false, "error": gin.H{"code": "DEVICE_NOT_FOUND", "message": err.Error()}})
		return
	}
	// Forward results to the device's WS room, tagged with the device ID.
	resultCh := make(chan *driver.InferenceResult, 10)
	go func() {
		room := "inference:" + deviceID
		for result := range resultCh {
			result.DeviceID = deviceID
			h.wsHub.BroadcastToRoom(room, result)
		}
	}()
	// Record state needed by SeekVideo to reopen this file later.
	h.activeSource = videoSource
	h.sourceType = camera.SourceVideo
	h.videoPath = tmpFile.Name()
	h.videoIsURL = false
	h.videoFPS = 15
	h.videoInfo = videoInfo
	h.activeDeviceID = deviceID
	h.pipeline = camera.NewInferencePipeline(
		videoSource,
		camera.SourceVideo,
		session.Driver,
		h.streamer.FrameChannel(),
		resultCh,
	)
	h.pipeline.Start()
	// Notify frontend when video playback completes
	go func() {
		<-h.pipeline.Done()
		close(resultCh)
		h.wsHub.BroadcastToRoom("inference:"+deviceID, map[string]interface{}{
			"type":       "pipeline_complete",
			"sourceType": "video",
		})
	}()
	streamURL := "/api/camera/stream"
	c.JSON(200, gin.H{
		"success": true,
		"data": gin.H{
			"streamUrl":       streamURL,
			"sourceType":      "video",
			"filename":        header.Filename,
			"totalFrames":     videoInfo.TotalFrames,
			"durationSeconds": videoInfo.DurationSec,
		},
	})
}
// ytdlpHosts lists hostnames where yt-dlp should be used to resolve the actual
// video stream URL before passing to ffmpeg. Matching is exact (lowercased
// hostname, port stripped), so unlisted subdomains fall through to ffmpeg.
var ytdlpHosts = map[string]bool{
	"youtube.com": true, "www.youtube.com": true, "youtu.be": true, "m.youtube.com": true,
	"vimeo.com": true, "www.vimeo.com": true,
	"dailymotion.com": true, "www.dailymotion.com": true,
	"twitch.tv": true, "www.twitch.tv": true,
	"bilibili.com": true, "www.bilibili.com": true,
	"tiktok.com": true, "www.tiktok.com": true,
	"facebook.com": true, "www.facebook.com": true, "fb.watch": true,
	"instagram.com": true, "www.instagram.com": true,
	"twitter.com": true, "x.com": true,
}

// urlKind classifies how an input URL should be handled by the video pipeline.
type urlKind int

const (
	urlDirect urlKind = iota // direct video file or RTSP, pass to ffmpeg directly
	urlYTDLP                 // needs yt-dlp to resolve first
	urlBad                   // invalid or unsupported
)

// classifyVideoURL determines how to handle the given URL.
// It returns the classification plus a human-readable reason when the URL
// is rejected (urlBad); the reason is empty for accepted URLs.
func classifyVideoURL(rawURL string) (urlKind, string) {
	parsed, err := url.Parse(rawURL)
	if err != nil {
		return urlBad, "Invalid URL format"
	}
	scheme := strings.ToLower(parsed.Scheme)
	host := strings.ToLower(parsed.Hostname())
	// RTSP streams — direct to ffmpeg
	if scheme == "rtsp" || scheme == "rtsps" {
		return urlDirect, ""
	}
	// url.Parse accepts schemeless input (e.g. "example.com/v.mp4"), which
	// previously produced the confusing message "Unsupported protocol: .".
	// Reject it with an explicit reason instead.
	if scheme == "" {
		return urlBad, "Missing URL scheme. Use http, https, or rtsp."
	}
	// Must be http or https
	if scheme != "http" && scheme != "https" {
		return urlBad, "Unsupported protocol: " + scheme + ". Use http, https, or rtsp."
	}
	// Known video platforms — use yt-dlp
	if ytdlpHosts[host] {
		return urlYTDLP, ""
	}
	// Everything else — pass directly to ffmpeg
	return urlDirect, ""
}
// StartFromURL handles video/stream inference from a URL (HTTP, HTTPS, RTSP).
//
// JSON body: {"url": "...", "deviceId": "..."} (both required).
// URLs on known video platforms are first resolved to a raw stream URL
// via yt-dlp; everything else is handed to ffmpeg directly. Results go
// to the "inference:<deviceId>" WS room and a "pipeline_complete" event
// is broadcast when the stream ends. Seek state is recorded so SeekVideo
// can reopen the same URL at a new position.
func (h *CameraHandler) StartFromURL(c *gin.Context) {
	var req struct {
		URL      string `json:"url"`
		DeviceID string `json:"deviceId"`
	}
	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(400, gin.H{"success": false, "error": gin.H{"code": "BAD_REQUEST", "message": err.Error()}})
		return
	}
	if req.URL == "" {
		c.JSON(400, gin.H{"success": false, "error": gin.H{"code": "BAD_REQUEST", "message": "url is required"}})
		return
	}
	if req.DeviceID == "" {
		c.JSON(400, gin.H{"success": false, "error": gin.H{"code": "BAD_REQUEST", "message": "deviceId is required"}})
		return
	}
	// Classify the URL
	kind, reason := classifyVideoURL(req.URL)
	if kind == urlBad {
		c.JSON(400, gin.H{"success": false, "error": gin.H{"code": "UNSUPPORTED_URL", "message": reason}})
		return
	}
	// For video platforms (YouTube, etc.), resolve actual stream URL via yt-dlp
	videoURL := req.URL
	if kind == urlYTDLP {
		resolved, err := camera.ResolveWithYTDLP(req.URL)
		if err != nil {
			c.JSON(500, gin.H{"success": false, "error": gin.H{"code": "URL_RESOLVE_FAILED", "message": "無法解析影片連結: " + err.Error()}})
			return
		}
		videoURL = resolved
	}
	h.stopActivePipeline()
	// Probe video info (duration, frame count) - may be slow for remote URLs
	videoInfo := camera.ProbeVideoInfo(videoURL, 15)
	// Create VideoSource from URL (ffmpeg supports HTTP/HTTPS/RTSP natively)
	videoSource, err := camera.NewVideoSourceFromURL(videoURL, 15)
	if err != nil {
		c.JSON(500, gin.H{"success": false, "error": gin.H{"code": "URL_OPEN_FAILED", "message": err.Error()}})
		return
	}
	if videoInfo.TotalFrames > 0 {
		videoSource.SetTotalFrames(videoInfo.TotalFrames)
	}
	// Get device driver
	session, err := h.deviceMgr.GetDevice(req.DeviceID)
	if err != nil {
		videoSource.Close()
		c.JSON(404, gin.H{"success": false, "error": gin.H{"code": "DEVICE_NOT_FOUND", "message": err.Error()}})
		return
	}
	resultCh := make(chan *driver.InferenceResult, 10)
	go func() {
		room := "inference:" + req.DeviceID
		for result := range resultCh {
			// Tag each result with the originating device, matching the
			// forwarders in UploadImage/UploadVideo/UploadBatchImages
			// (previously omitted here, leaving DeviceID empty).
			result.DeviceID = req.DeviceID
			h.wsHub.BroadcastToRoom(room, result)
		}
	}()
	// Record state needed by SeekVideo to reopen this URL later.
	h.activeSource = videoSource
	h.sourceType = camera.SourceVideo
	h.videoPath = videoURL
	h.videoIsURL = true
	h.videoFPS = 15
	h.videoInfo = videoInfo
	h.activeDeviceID = req.DeviceID
	h.pipeline = camera.NewInferencePipeline(
		videoSource,
		camera.SourceVideo,
		session.Driver,
		h.streamer.FrameChannel(),
		resultCh,
	)
	h.pipeline.Start()
	go func() {
		<-h.pipeline.Done()
		close(resultCh)
		h.wsHub.BroadcastToRoom("inference:"+req.DeviceID, map[string]interface{}{
			"type":       "pipeline_complete",
			"sourceType": "video",
		})
	}()
	streamURL := "/api/camera/stream"
	c.JSON(200, gin.H{
		"success": true,
		"data": gin.H{
			"streamUrl":       streamURL,
			"sourceType":      "video",
			"filename":        req.URL,
			"totalFrames":     videoInfo.TotalFrames,
			"durationSeconds": videoInfo.DurationSec,
		},
	})
}
// UploadBatchImages handles multiple image files for sequential batch inference.
//
// Multipart form fields:
//   - deviceId: target inference device (required)
//   - files:    1–50 JPG/JPEG/PNG images (required)
//
// All files are staged to temp files (any failure cleans up everything
// staged so far), wrapped in a MultiImageSource and run through a new
// pipeline. Results go to "inference:<deviceId>", and a
// "pipeline_complete" event carrying the batchId is sent when done.
func (h *CameraHandler) UploadBatchImages(c *gin.Context) {
	h.stopActivePipeline()
	deviceID := c.PostForm("deviceId")
	if deviceID == "" {
		c.JSON(400, gin.H{"success": false, "error": gin.H{"code": "BAD_REQUEST", "message": "deviceId is required"}})
		return
	}
	form, err := c.MultipartForm()
	if err != nil {
		c.JSON(400, gin.H{"success": false, "error": gin.H{"code": "BAD_REQUEST", "message": "multipart form required"}})
		return
	}
	files := form.File["files"]
	if len(files) == 0 {
		c.JSON(400, gin.H{"success": false, "error": gin.H{"code": "BAD_REQUEST", "message": "at least one file is required"}})
		return
	}
	if len(files) > 50 {
		c.JSON(400, gin.H{"success": false, "error": gin.H{"code": "BAD_REQUEST", "message": "maximum 50 images per batch"}})
		return
	}
	// Save all files to temp
	filePaths := make([]string, 0, len(files))
	filenames := make([]string, 0, len(files))
	for _, fh := range files {
		ext := strings.ToLower(filepath.Ext(fh.Filename))
		if ext != ".jpg" && ext != ".jpeg" && ext != ".png" {
			for _, fp := range filePaths {
				os.Remove(fp)
			}
			c.JSON(400, gin.H{"success": false, "error": gin.H{
				"code":    "BAD_REQUEST",
				"message": fmt.Sprintf("unsupported file: %s (only JPG/PNG)", fh.Filename),
			}})
			return
		}
		f, openErr := fh.Open()
		if openErr != nil {
			for _, fp := range filePaths {
				os.Remove(fp)
			}
			c.JSON(500, gin.H{"success": false, "error": gin.H{"code": "STORAGE_ERROR", "message": openErr.Error()}})
			return
		}
		tmpFile, tmpErr := os.CreateTemp("", "edge-ai-batch-*"+ext)
		if tmpErr != nil {
			f.Close()
			for _, fp := range filePaths {
				os.Remove(fp)
			}
			c.JSON(500, gin.H{"success": false, "error": gin.H{"code": "STORAGE_ERROR", "message": tmpErr.Error()}})
			return
		}
		// Previously this io.Copy error was silently ignored, which could
		// stage a truncated image that only fails later at decode time.
		if _, copyErr := io.Copy(tmpFile, f); copyErr != nil {
			tmpFile.Close()
			f.Close()
			os.Remove(tmpFile.Name())
			for _, fp := range filePaths {
				os.Remove(fp)
			}
			c.JSON(500, gin.H{"success": false, "error": gin.H{"code": "STORAGE_ERROR", "message": copyErr.Error()}})
			return
		}
		tmpFile.Close()
		f.Close()
		filePaths = append(filePaths, tmpFile.Name())
		filenames = append(filenames, fh.Filename)
	}
	// Create MultiImageSource
	batchSource, err := camera.NewMultiImageSource(filePaths, filenames)
	if err != nil {
		for _, fp := range filePaths {
			os.Remove(fp)
		}
		c.JSON(500, gin.H{"success": false, "error": gin.H{"code": "IMAGE_DECODE_FAILED", "message": err.Error()}})
		return
	}
	// Get device driver
	session, err := h.deviceMgr.GetDevice(deviceID)
	if err != nil {
		batchSource.Close()
		c.JSON(404, gin.H{"success": false, "error": gin.H{"code": "DEVICE_NOT_FOUND", "message": err.Error()}})
		return
	}
	// Unique ID so the frontend can correlate the completion event.
	batchID := fmt.Sprintf("batch-%d", time.Now().UnixNano())
	resultCh := make(chan *driver.InferenceResult, 10)
	go func() {
		room := "inference:" + deviceID
		for result := range resultCh {
			result.DeviceID = deviceID
			h.wsHub.BroadcastToRoom(room, result)
		}
	}()
	h.activeSource = batchSource
	h.sourceType = camera.SourceBatchImage
	h.pipeline = camera.NewInferencePipeline(
		batchSource,
		camera.SourceBatchImage,
		session.Driver,
		h.streamer.FrameChannel(),
		resultCh,
	)
	h.pipeline.Start()
	// Notify frontend when batch completes
	go func() {
		<-h.pipeline.Done()
		close(resultCh)
		h.wsHub.BroadcastToRoom("inference:"+deviceID, map[string]interface{}{
			"type":       "pipeline_complete",
			"sourceType": "batch_image",
			"batchId":    batchID,
		})
	}()
	// Build image list for response
	imageList := make([]gin.H, len(batchSource.Images()))
	for i, entry := range batchSource.Images() {
		imageList[i] = gin.H{
			"index":    i,
			"filename": entry.Filename,
			"width":    entry.Width,
			"height":   entry.Height,
		}
	}
	streamURL := "/api/camera/stream"
	c.JSON(200, gin.H{
		"success": true,
		"data": gin.H{
			"streamUrl":   streamURL,
			"sourceType":  "batch_image",
			"batchId":     batchID,
			"totalImages": len(files),
			"images":      imageList,
		},
	})
}
// GetBatchImageFrame serves a specific image from the active batch by index.
// Responds 404 when no batch is active or the index is out of range, and
// 400 for a non-numeric or negative index.
func (h *CameraHandler) GetBatchImageFrame(c *gin.Context) {
	if h.activeSource == nil || h.sourceType != camera.SourceBatchImage {
		c.JSON(404, gin.H{"success": false, "error": gin.H{"code": "NO_BATCH", "message": "no batch image source active"}})
		return
	}
	idx, convErr := strconv.Atoi(c.Param("index"))
	if convErr != nil || idx < 0 {
		c.JSON(400, gin.H{"success": false, "error": gin.H{"code": "BAD_REQUEST", "message": "invalid index"}})
		return
	}
	batch, isBatch := h.activeSource.(*camera.MultiImageSource)
	if !isBatch {
		// sourceType says batch but the source is some other type — internal inconsistency.
		c.JSON(500, gin.H{"success": false, "error": gin.H{"code": "INTERNAL_ERROR", "message": "source type mismatch"}})
		return
	}
	jpegData, err := batch.GetImageByIndex(idx)
	if err != nil {
		c.JSON(404, gin.H{"success": false, "error": gin.H{"code": "NOT_FOUND", "message": err.Error()}})
		return
	}
	c.Data(200, "image/jpeg", jpegData)
}
// stopPipelineForSeek stops the pipeline and ffmpeg process but keeps the video file.
// Used by SeekVideo, which reopens the same file/URL at a new position.
func (h *CameraHandler) stopPipelineForSeek() {
	if p := h.pipeline; p != nil {
		p.Stop()
		h.pipeline = nil
	}
	// Close the decoder without deleting the backing file; a type
	// assertion on a nil interface simply yields ok=false.
	if vs, ok := h.activeSource.(*camera.VideoSource); ok {
		vs.CloseWithoutRemove()
	}
	h.activeSource = nil
}
// stopActivePipeline stops the current pipeline and cleans up resources,
// then resets all source-tracking state on the handler.
func (h *CameraHandler) stopActivePipeline() {
	if p := h.pipeline; p != nil {
		p.Stop()
		h.pipeline = nil
	}
	switch {
	case h.sourceType == camera.SourceCamera:
		// The camera source is owned by cameraMgr, so it is released
		// through the manager rather than closed directly.
		h.cameraMgr.Close()
	case h.activeSource != nil:
		h.activeSource.Close()
	}
	h.activeSource = nil
	h.sourceType = ""
	h.videoPath = ""
	h.videoIsURL = false
	h.activeDeviceID = ""
}
// SeekVideo seeks to a specific position in the current video and restarts inference.
//
// JSON body: {"timeSeconds": <float>}. The time is clamped to
// [0, duration] when the duration is known. The current pipeline is
// stopped without deleting the video file, a new VideoSource is opened
// at the seek position (file- or URL-backed, per videoIsURL), and a new
// pipeline is started with a frame offset so frame indices in results
// stay consistent with absolute video time.
func (h *CameraHandler) SeekVideo(c *gin.Context) {
	var req struct {
		TimeSeconds float64 `json:"timeSeconds"`
	}
	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(400, gin.H{"success": false, "error": gin.H{"code": "BAD_REQUEST", "message": err.Error()}})
		return
	}
	if h.videoPath == "" || h.sourceType != camera.SourceVideo {
		c.JSON(400, gin.H{"success": false, "error": gin.H{"code": "NO_VIDEO", "message": "no video is currently playing"}})
		return
	}
	// Clamp seek time
	if req.TimeSeconds < 0 {
		req.TimeSeconds = 0
	}
	if h.videoInfo.DurationSec > 0 && req.TimeSeconds > h.videoInfo.DurationSec {
		req.TimeSeconds = h.videoInfo.DurationSec
	}
	// Stop current pipeline without deleting the video file
	h.stopPipelineForSeek()
	// Create new VideoSource with seek position
	var videoSource *camera.VideoSource
	var err error
	if h.videoIsURL {
		videoSource, err = camera.NewVideoSourceFromURLWithSeek(h.videoPath, h.videoFPS, req.TimeSeconds)
	} else {
		videoSource, err = camera.NewVideoSourceWithSeek(h.videoPath, h.videoFPS, req.TimeSeconds)
	}
	if err != nil {
		c.JSON(500, gin.H{"success": false, "error": gin.H{"code": "SEEK_FAILED", "message": err.Error()}})
		return
	}
	if h.videoInfo.TotalFrames > 0 {
		videoSource.SetTotalFrames(h.videoInfo.TotalFrames)
	}
	// Get device driver (reuses the device recorded at upload/start time)
	session, err := h.deviceMgr.GetDevice(h.activeDeviceID)
	if err != nil {
		videoSource.Close()
		c.JSON(404, gin.H{"success": false, "error": gin.H{"code": "DEVICE_NOT_FOUND", "message": err.Error()}})
		return
	}
	// Calculate frame offset from seek position (seconds × FPS)
	frameOffset := int(req.TimeSeconds * h.videoFPS)
	resultCh := make(chan *driver.InferenceResult, 10)
	go func() {
		room := "inference:" + h.activeDeviceID
		for result := range resultCh {
			result.DeviceID = h.activeDeviceID
			h.wsHub.BroadcastToRoom(room, result)
		}
	}()
	h.activeSource = videoSource
	h.pipeline = camera.NewInferencePipelineWithOffset(
		videoSource,
		camera.SourceVideo,
		session.Driver,
		h.streamer.FrameChannel(),
		resultCh,
		frameOffset,
	)
	h.pipeline.Start()
	go func() {
		<-h.pipeline.Done()
		close(resultCh)
		h.wsHub.BroadcastToRoom("inference:"+h.activeDeviceID, map[string]interface{}{
			"type":       "pipeline_complete",
			"sourceType": "video",
		})
	}()
	c.JSON(200, gin.H{
		"success": true,
		"data": gin.H{
			"seekTo":      req.TimeSeconds,
			"frameOffset": frameOffset,
		},
	})
}

View File

@ -0,0 +1,376 @@
package handlers
import (
"context"
"fmt"
"sync"
"time"
"edge-ai-platform/internal/api/ws"
"edge-ai-platform/internal/cluster"
"edge-ai-platform/internal/driver"
"edge-ai-platform/internal/flash"
"edge-ai-platform/internal/model"
"github.com/gin-gonic/gin"
)
// ClusterHandler exposes the REST endpoints for device clusters:
// CRUD, membership/weight management, cluster-wide flashing, and
// starting/stopping cluster inference pipelines.
type ClusterHandler struct {
	clusterMgr *cluster.Manager  // cluster CRUD + device membership
	flashSvc   *flash.Service    // per-device model flashing
	modelRepo  *model.Repository // model metadata lookup
	wsHub      *ws.Hub           // WebSocket broadcast of progress/results
	pipelines  map[string]*cluster.ClusterPipeline // running pipelines keyed by cluster ID
	mu         sync.Mutex        // guards pipelines
}
// NewClusterHandler wires a ClusterHandler to its collaborators and
// initializes the running-pipeline registry.
func NewClusterHandler(
	clusterMgr *cluster.Manager,
	flashSvc *flash.Service,
	modelRepo *model.Repository,
	wsHub *ws.Hub,
) *ClusterHandler {
	h := &ClusterHandler{
		clusterMgr: clusterMgr,
		flashSvc:   flashSvc,
		modelRepo:  modelRepo,
		wsHub:      wsHub,
	}
	h.pipelines = make(map[string]*cluster.ClusterPipeline)
	return h
}
// ListClusters returns every known cluster.
func (h *ClusterHandler) ListClusters(c *gin.Context) {
	payload := gin.H{"clusters": h.clusterMgr.ListClusters()}
	c.JSON(200, gin.H{"success": true, "data": payload})
}
// GetCluster returns a single cluster by ID, or 404 when unknown.
func (h *ClusterHandler) GetCluster(c *gin.Context) {
	cl, err := h.clusterMgr.GetCluster(c.Param("id"))
	if err != nil {
		c.JSON(404, gin.H{
			"success": false,
			"error":   gin.H{"code": "CLUSTER_NOT_FOUND", "message": err.Error()},
		})
		return
	}
	c.JSON(200, gin.H{"success": true, "data": cl})
}
// CreateCluster creates a cluster from a name and initial device list.
// JSON body: {"name": "...", "deviceIds": ["..."]} — both required.
func (h *ClusterHandler) CreateCluster(c *gin.Context) {
	var req struct {
		Name      string   `json:"name"`
		DeviceIDs []string `json:"deviceIds"`
	}
	// Malformed JSON and missing fields produce the same client error.
	if err := c.ShouldBindJSON(&req); err != nil || req.Name == "" || len(req.DeviceIDs) == 0 {
		c.JSON(400, gin.H{
			"success": false,
			"error":   gin.H{"code": "BAD_REQUEST", "message": "name and deviceIds are required"},
		})
		return
	}
	cl, err := h.clusterMgr.CreateCluster(req.Name, req.DeviceIDs)
	if err != nil {
		c.JSON(400, gin.H{
			"success": false,
			"error":   gin.H{"code": "CREATE_FAILED", "message": err.Error()},
		})
		return
	}
	c.JSON(200, gin.H{"success": true, "data": cl})
}
// DeleteCluster removes a cluster, first tearing down any running
// inference pipeline registered for it.
func (h *ClusterHandler) DeleteCluster(c *gin.Context) {
	id := c.Param("id")
	h.mu.Lock()
	if pipeline, running := h.pipelines[id]; running {
		pipeline.Stop()
		delete(h.pipelines, id)
	}
	h.mu.Unlock()
	if err := h.clusterMgr.DeleteCluster(id); err != nil {
		c.JSON(404, gin.H{
			"success": false,
			"error":   gin.H{"code": "CLUSTER_NOT_FOUND", "message": err.Error()},
		})
		return
	}
	c.JSON(200, gin.H{"success": true})
}
// AddDevice adds a device (with an optional weight) to a cluster.
// JSON body: {"deviceId": "...", "weight": <int>} — deviceId required.
func (h *ClusterHandler) AddDevice(c *gin.Context) {
	clusterID := c.Param("id")
	var req struct {
		DeviceID string `json:"deviceId"`
		Weight   int    `json:"weight"`
	}
	bindErr := c.ShouldBindJSON(&req)
	if bindErr != nil || req.DeviceID == "" {
		c.JSON(400, gin.H{
			"success": false,
			"error":   gin.H{"code": "BAD_REQUEST", "message": "deviceId is required"},
		})
		return
	}
	if err := h.clusterMgr.AddDevice(clusterID, req.DeviceID, req.Weight); err != nil {
		c.JSON(400, gin.H{
			"success": false,
			"error":   gin.H{"code": "ADD_DEVICE_FAILED", "message": err.Error()},
		})
		return
	}
	c.JSON(200, gin.H{"success": true})
}
// RemoveDevice removes a device from a cluster by path parameters.
func (h *ClusterHandler) RemoveDevice(c *gin.Context) {
	clusterID, deviceID := c.Param("id"), c.Param("deviceId")
	if err := h.clusterMgr.RemoveDevice(clusterID, deviceID); err != nil {
		c.JSON(400, gin.H{
			"success": false,
			"error":   gin.H{"code": "REMOVE_DEVICE_FAILED", "message": err.Error()},
		})
		return
	}
	c.JSON(200, gin.H{"success": true})
}
// UpdateWeight changes a cluster member's load-balancing weight.
// JSON body: {"weight": <int>}.
func (h *ClusterHandler) UpdateWeight(c *gin.Context) {
	clusterID, deviceID := c.Param("id"), c.Param("deviceId")
	var req struct {
		Weight int `json:"weight"`
	}
	if bindErr := c.ShouldBindJSON(&req); bindErr != nil {
		c.JSON(400, gin.H{
			"success": false,
			"error":   gin.H{"code": "BAD_REQUEST", "message": "weight is required"},
		})
		return
	}
	if err := h.clusterMgr.UpdateWeight(clusterID, deviceID, req.Weight); err != nil {
		c.JSON(400, gin.H{
			"success": false,
			"error":   gin.H{"code": "UPDATE_WEIGHT_FAILED", "message": err.Error()},
		})
		return
	}
	c.JSON(200, gin.H{"success": true})
}
// FlashCluster flashes the given model onto every non-removed device in
// the cluster. Each device gets its own flash task; progress events are
// forwarded to the "flash:cluster:<id>" WebSocket room tagged with the
// device ID. Responds 400 only when every attempted device failed to
// start flashing; per-device start errors are returned in "errors".
func (h *ClusterHandler) FlashCluster(c *gin.Context) {
	clusterID := c.Param("id")
	var req struct {
		ModelID string `json:"modelId"`
	}
	if err := c.ShouldBindJSON(&req); err != nil || req.ModelID == "" {
		c.JSON(400, gin.H{
			"success": false,
			"error":   gin.H{"code": "BAD_REQUEST", "message": "modelId is required"},
		})
		return
	}
	cl, err := h.clusterMgr.GetCluster(clusterID)
	if err != nil {
		c.JSON(404, gin.H{
			"success": false,
			"error":   gin.H{"code": "CLUSTER_NOT_FOUND", "message": err.Error()},
		})
		return
	}
	// Flash each device in the cluster. Each device gets its own
	// flash task, and progress is forwarded to the cluster flash WS room.
	room := "flash:cluster:" + clusterID
	var flashErrors []string
	attempted := 0 // devices we actually tried to flash (removed members are skipped)
	for _, member := range cl.Devices {
		if member.Status == cluster.MemberRemoved {
			continue
		}
		attempted++
		_, progressCh, err := h.flashSvc.StartFlash(member.DeviceID, req.ModelID)
		if err != nil {
			flashErrors = append(flashErrors, fmt.Sprintf("%s: %s", member.DeviceID, err.Error()))
			continue
		}
		go func(did string) {
			for progress := range progressCh {
				h.wsHub.BroadcastToRoom(room, cluster.ClusterFlashProgress{
					DeviceID: did,
					Percent:  progress.Percent,
					Stage:    progress.Stage,
					Message:  progress.Message,
					Error:    progress.Error,
				})
			}
		}(member.DeviceID)
	}
	// Fail only when every attempted device failed. The old check compared
	// against len(cl.Devices), which includes removed members and could
	// therefore never trigger once any member had been removed.
	if attempted > 0 && len(flashErrors) == attempted {
		c.JSON(400, gin.H{
			"success": false,
			"error":   gin.H{"code": "FLASH_FAILED", "message": fmt.Sprintf("all devices failed: %v", flashErrors)},
		})
		return
	}
	// NOTE(review): mutates the cluster object returned by GetCluster
	// without locking — assumes the Manager hands out a shared pointer
	// and tolerates this; confirm its concurrency contract.
	cl.ModelID = req.ModelID
	c.JSON(200, gin.H{"success": true, "data": gin.H{
		"clusterId": clusterID,
		"errors":    flashErrors,
	}})
}
// StartInference starts continuous cluster-wide inference. Connected,
// non-removed members are collected into a dispatcher, a ClusterPipeline
// is started, and results are forwarded to the
// "inference:cluster:<id>" WebSocket room.
func (h *ClusterHandler) StartInference(c *gin.Context) {
	clusterID := c.Param("id")
	cl, err := h.clusterMgr.GetCluster(clusterID)
	if err != nil {
		c.JSON(404, gin.H{
			"success": false,
			"error":   gin.H{"code": "CLUSTER_NOT_FOUND", "message": err.Error()},
		})
		return
	}
	// Fast-path rejection. The authoritative check happens again under the
	// lock at registration time below — the old code only checked here,
	// leaving a window in which two concurrent requests could both start
	// pipelines for the same cluster.
	h.mu.Lock()
	if _, exists := h.pipelines[clusterID]; exists {
		h.mu.Unlock()
		c.JSON(400, gin.H{
			"success": false,
			"error":   gin.H{"code": "ALREADY_RUNNING", "message": "cluster inference already running"},
		})
		return
	}
	h.mu.Unlock()
	// Build drivers list from cluster members (skip removed/unreachable/disconnected).
	deviceMgr := h.clusterMgr.DeviceManager()
	var members []cluster.DeviceMember
	var drivers []driver.DeviceDriver
	for _, m := range cl.Devices {
		if m.Status == cluster.MemberRemoved {
			continue
		}
		session, err := deviceMgr.GetDevice(m.DeviceID)
		if err != nil {
			continue
		}
		if !session.Driver.IsConnected() {
			continue
		}
		members = append(members, m)
		drivers = append(drivers, session.Driver)
	}
	if len(drivers) == 0 {
		c.JSON(400, gin.H{
			"success": false,
			"error":   gin.H{"code": "NO_ACTIVE_DEVICES", "message": "no connected devices in cluster"},
		})
		return
	}
	// Start inference on each device. Failures are non-fatal: the device
	// stays in the dispatcher (preserving prior behavior) and is expected
	// to surface errors per-frame instead.
	for _, drv := range drivers {
		_ = drv.StartInference()
	}
	resultCh := make(chan *cluster.ClusterResult, 20)
	dispatcher := cluster.NewDispatcher(members, drivers)
	pipeline := cluster.NewClusterPipeline(cl, dispatcher, resultCh)
	pipeline.StartContinuous()
	// Register the pipeline, re-checking under the lock in case a
	// concurrent request won the race since the fast-path check above.
	h.mu.Lock()
	if _, exists := h.pipelines[clusterID]; exists {
		h.mu.Unlock()
		pipeline.Stop()
		c.JSON(400, gin.H{
			"success": false,
			"error":   gin.H{"code": "ALREADY_RUNNING", "message": "cluster inference already running"},
		})
		return
	}
	h.pipelines[clusterID] = pipeline
	h.mu.Unlock()
	h.clusterMgr.SetStatus(clusterID, cluster.ClusterInferencing)
	// Forward cluster results to WebSocket.
	go func() {
		room := "inference:cluster:" + clusterID
		for result := range resultCh {
			h.wsHub.BroadcastToRoom(room, result)
		}
	}()
	c.JSON(200, gin.H{"success": true})
}
// StopInference stops a running cluster inference pipeline.
//
// The pipeline is unregistered immediately (so a second Stop returns
// NOT_RUNNING), then stopped asynchronously with a 10-second bound on
// how long the HTTP request waits. NOTE(review): both select branches
// report success and set the cluster Idle — a timed-out stop is treated
// as best-effort, with the stop goroutine left running in the
// background; confirm this is the intended contract.
func (h *ClusterHandler) StopInference(c *gin.Context) {
	clusterID := c.Param("id")
	h.mu.Lock()
	pipeline, exists := h.pipelines[clusterID]
	if exists {
		delete(h.pipelines, clusterID)
	}
	h.mu.Unlock()
	if !exists {
		c.JSON(400, gin.H{
			"success": false,
			"error":   gin.H{"code": "NOT_RUNNING", "message": "cluster inference not running"},
		})
		return
	}
	// Stop pipeline with timeout.
	ctx, cancel := context.WithTimeout(c.Request.Context(), 10*time.Second)
	defer cancel()
	doneCh := make(chan struct{})
	go func() {
		pipeline.Stop()
		// Stop inference on each device.
		cl, err := h.clusterMgr.GetCluster(clusterID)
		if err == nil {
			deviceMgr := h.clusterMgr.DeviceManager()
			for _, m := range cl.Devices {
				if s, err := deviceMgr.GetDevice(m.DeviceID); err == nil {
					s.Driver.StopInference()
				}
			}
		}
		close(doneCh)
	}()
	select {
	case <-doneCh:
		h.clusterMgr.SetStatus(clusterID, cluster.ClusterIdle)
		c.JSON(200, gin.H{"success": true})
	case <-ctx.Done():
		// Timeout: respond success anyway; the goroutine above finishes
		// the shutdown in the background.
		h.clusterMgr.SetStatus(clusterID, cluster.ClusterIdle)
		c.JSON(200, gin.H{"success": true})
	}
}

View File

@ -0,0 +1,181 @@
package handlers
import (
"context"
"fmt"
"time"
"edge-ai-platform/internal/api/ws"
"edge-ai-platform/internal/device"
"edge-ai-platform/internal/driver"
"edge-ai-platform/internal/flash"
"edge-ai-platform/internal/inference"
"github.com/gin-gonic/gin"
)
// DeviceHandler exposes REST endpoints for single-device operations:
// scan/list/get, connect/disconnect, flashing, and inference start/stop.
type DeviceHandler struct {
	deviceMgr    *device.Manager    // device discovery and connection sessions
	flashSvc     *flash.Service     // model flashing tasks
	inferenceSvc *inference.Service // per-device inference lifecycle
	wsHub        *ws.Hub            // WebSocket broadcast of progress/results
}
// NewDeviceHandler wires a DeviceHandler to its collaborators.
func NewDeviceHandler(
	deviceMgr *device.Manager,
	flashSvc *flash.Service,
	inferenceSvc *inference.Service,
	wsHub *ws.Hub,
) *DeviceHandler {
	handler := new(DeviceHandler)
	handler.deviceMgr = deviceMgr
	handler.flashSvc = flashSvc
	handler.inferenceSvc = inferenceSvc
	handler.wsHub = wsHub
	return handler
}
// ScanDevices triggers a fresh hardware rescan and returns the result.
func (h *DeviceHandler) ScanDevices(c *gin.Context) {
	found := h.deviceMgr.Rescan()
	c.JSON(200, gin.H{"success": true, "data": gin.H{"devices": found}})
}
// ListDevices returns the currently-known devices without rescanning.
func (h *DeviceHandler) ListDevices(c *gin.Context) {
	known := h.deviceMgr.ListDevices()
	c.JSON(200, gin.H{"success": true, "data": gin.H{"devices": known}})
}
// GetDevice returns driver info for one device, or 404 when unknown.
func (h *DeviceHandler) GetDevice(c *gin.Context) {
	session, err := h.deviceMgr.GetDevice(c.Param("id"))
	if err != nil {
		c.JSON(404, gin.H{
			"success": false,
			"error":   gin.H{"code": "DEVICE_NOT_FOUND", "message": err.Error()},
		})
		return
	}
	info := session.Driver.Info()
	c.JSON(200, gin.H{"success": true, "data": info})
}
// ConnectDevice opens a connection to the device with the given ID.
//
// Connect runs in a goroutine bounded by a 30-second deadline so a
// hanging SDK connect cannot block the HTTP request indefinitely.
// NOTE(review): on timeout the Connect goroutine keeps running in the
// background; errCh is buffered (capacity 1) so the goroutine can exit
// without leaking, but a late success/failure is silently discarded.
func (h *DeviceHandler) ConnectDevice(c *gin.Context) {
	id := c.Param("id")
	// Run connect with a 30-second timeout to avoid blocking the HTTP
	// request for over a minute when the SDK connect hangs.
	ctx, cancel := context.WithTimeout(c.Request.Context(), 30*time.Second)
	defer cancel()
	errCh := make(chan error, 1)
	go func() {
		errCh <- h.deviceMgr.Connect(id)
	}()
	select {
	case err := <-errCh:
		if err != nil {
			c.JSON(400, gin.H{
				"success": false,
				"error":   gin.H{"code": "CONNECT_FAILED", "message": err.Error()},
			})
			return
		}
		c.JSON(200, gin.H{"success": true})
	case <-ctx.Done():
		c.JSON(504, gin.H{
			"success": false,
			"error":   gin.H{"code": "CONNECT_TIMEOUT", "message": fmt.Sprintf("device connect timed out after 30s for %s", id)},
		})
	}
}
// DisconnectDevice closes the connection to the given device.
func (h *DeviceHandler) DisconnectDevice(c *gin.Context) {
	if err := h.deviceMgr.Disconnect(c.Param("id")); err != nil {
		c.JSON(400, gin.H{
			"success": false,
			"error":   gin.H{"code": "DISCONNECT_FAILED", "message": err.Error()},
		})
		return
	}
	c.JSON(200, gin.H{"success": true})
}
// FlashDevice flashes a model onto a single device. The flash task runs
// asynchronously; progress events stream to the "flash:<deviceId>"
// WebSocket room and the task ID is returned to the caller.
func (h *DeviceHandler) FlashDevice(c *gin.Context) {
	id := c.Param("id")
	var req struct {
		ModelID string `json:"modelId"`
	}
	// Reject both malformed JSON and an empty modelId — previously an
	// empty modelId slipped past binding despite the "modelId is required"
	// message. This also matches ClusterHandler.FlashCluster's validation.
	if err := c.ShouldBindJSON(&req); err != nil || req.ModelID == "" {
		c.JSON(400, gin.H{
			"success": false,
			"error":   gin.H{"code": "BAD_REQUEST", "message": "modelId is required"},
		})
		return
	}
	taskID, progressCh, err := h.flashSvc.StartFlash(id, req.ModelID)
	if err != nil {
		c.JSON(400, gin.H{
			"success": false,
			"error":   gin.H{"code": "FLASH_FAILED", "message": err.Error()},
		})
		return
	}
	// Forward progress to WebSocket
	go func() {
		room := "flash:" + id
		for progress := range progressCh {
			h.wsHub.BroadcastToRoom(room, progress)
		}
	}()
	c.JSON(200, gin.H{"success": true, "data": gin.H{"taskId": taskID}})
}
// StartInference starts inference on one device, forwarding results to
// the "inference:<deviceId>" WebSocket room tagged with the device ID.
func (h *DeviceHandler) StartInference(c *gin.Context) {
	id := c.Param("id")
	results := make(chan *driver.InferenceResult, 10)
	if err := h.inferenceSvc.Start(id, results); err != nil {
		c.JSON(400, gin.H{
			"success": false,
			"error":   gin.H{"code": "INFERENCE_ERROR", "message": err.Error()},
		})
		return
	}
	// Forward results to WebSocket, enriching with device ID
	go func() {
		room := "inference:" + id
		for r := range results {
			r.DeviceID = id
			h.wsHub.BroadcastToRoom(room, r)
		}
	}()
	c.JSON(200, gin.H{"success": true})
}
// StopInference stops inference on one device.
func (h *DeviceHandler) StopInference(c *gin.Context) {
	if err := h.inferenceSvc.Stop(c.Param("id")); err != nil {
		c.JSON(400, gin.H{
			"success": false,
			"error":   gin.H{"code": "INFERENCE_ERROR", "message": err.Error()},
		})
		return
	}
	c.JSON(200, gin.H{"success": true})
}

View File

@ -0,0 +1,47 @@
package handlers
import (
"edge-ai-platform/internal/model"
"github.com/gin-gonic/gin"
)
// ModelHandler serves read-only model-catalog endpoints (list + detail).
type ModelHandler struct {
	repo *model.Repository // backing model catalog
}
// NewModelHandler builds a ModelHandler over the given repository.
func NewModelHandler(repo *model.Repository) *ModelHandler {
	h := &ModelHandler{}
	h.repo = repo
	return h
}
// ListModels returns models matching the optional query-string filter
// (?type=, ?hardware=, ?q=) along with the total match count.
func (h *ModelHandler) ListModels(c *gin.Context) {
	filter := model.ModelFilter{
		TaskType: c.Query("type"),
		Hardware: c.Query("hardware"),
		Query:    c.Query("q"),
	}
	matches, total := h.repo.List(filter)
	data := gin.H{"models": matches, "total": total}
	c.JSON(200, gin.H{"success": true, "data": data})
}
// GetModel returns one model by ID, or 404 when it does not exist.
func (h *ModelHandler) GetModel(c *gin.Context) {
	m, err := h.repo.GetByID(c.Param("id"))
	if err != nil {
		c.JSON(404, gin.H{
			"success": false,
			"error": gin.H{
				"code":    "MODEL_NOT_FOUND",
				"message": "Model not found",
			},
		})
		return
	}
	c.JSON(200, gin.H{"success": true, "data": m})
}

View File

@ -0,0 +1,154 @@
package handlers
import (
"encoding/json"
"fmt"
"path/filepath"
"strings"
"time"
"edge-ai-platform/internal/model"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
)
// ModelUploadHandler serves endpoints for uploading and deleting custom
// .nef models: metadata goes to the repository, files to the store.
type ModelUploadHandler struct {
	repo  *model.Repository // in-memory model catalog
	store *model.ModelStore // on-disk model file + metadata storage
}
// NewModelUploadHandler builds a handler over the given repo and store.
func NewModelUploadHandler(repo *model.Repository, store *model.ModelStore) *ModelUploadHandler {
	h := new(ModelUploadHandler)
	h.repo = repo
	h.store = store
	return h
}
// UploadModel accepts a custom .nef model upload.
//
// Multipart form fields:
//   - file:      the .nef file (required)
//   - name, taskType, labels: required metadata (labels is a JSON array)
//   - description, quantization, inputWidth, inputHeight: optional,
//     defaulting to "", "INT8", 640, 640 respectively.
//
// The file is stored, metadata is written alongside it, and the model is
// registered in the repository. On metadata failure the stored file is
// removed again so no orphaned .nef is left on disk.
func (h *ModelUploadHandler) UploadModel(c *gin.Context) {
	// Get uploaded file
	file, header, err := c.Request.FormFile("file")
	if err != nil {
		c.JSON(400, gin.H{
			"success": false,
			"error":   gin.H{"code": "BAD_REQUEST", "message": "file is required"},
		})
		return
	}
	defer file.Close()
	// Validate file extension
	ext := strings.ToLower(filepath.Ext(header.Filename))
	if ext != ".nef" {
		c.JSON(400, gin.H{
			"success": false,
			"error":   gin.H{"code": "BAD_REQUEST", "message": "only .nef files are supported"},
		})
		return
	}
	// Get required fields
	name := c.PostForm("name")
	taskType := c.PostForm("taskType")
	labelsStr := c.PostForm("labels")
	if name == "" || taskType == "" || labelsStr == "" {
		c.JSON(400, gin.H{
			"success": false,
			"error":   gin.H{"code": "BAD_REQUEST", "message": "name, taskType, and labels are required"},
		})
		return
	}
	// Parse labels
	var labels []string
	if err := json.Unmarshal([]byte(labelsStr), &labels); err != nil {
		c.JSON(400, gin.H{
			"success": false,
			"error":   gin.H{"code": "BAD_REQUEST", "message": "labels must be a JSON array"},
		})
		return
	}
	// Optional fields
	description := c.PostForm("description")
	quantization := c.PostForm("quantization")
	if quantization == "" {
		quantization = "INT8"
	}
	// Dimensions default to 640x640; unparsable values keep the default
	// (the Sscanf results were previously discarded unchecked).
	inputWidth := 640
	inputHeight := 640
	if w := c.PostForm("inputWidth"); w != "" {
		if n, scanErr := fmt.Sscanf(w, "%d", &inputWidth); scanErr != nil || n != 1 {
			inputWidth = 640
		}
	}
	if hStr := c.PostForm("inputHeight"); hStr != "" {
		if n, scanErr := fmt.Sscanf(hStr, "%d", &inputHeight); scanErr != nil || n != 1 {
			inputHeight = 640
		}
	}
	// Generate ID
	id := uuid.New().String()
	now := time.Now().UTC().Format(time.RFC3339)
	// Save file
	nefPath, err := h.store.SaveModel(id, file)
	if err != nil {
		c.JSON(500, gin.H{
			"success": false,
			"error":   gin.H{"code": "STORAGE_ERROR", "message": err.Error()},
		})
		return
	}
	// Build model
	m := model.Model{
		ID:                id,
		Name:              name,
		Description:       description,
		TaskType:          taskType,
		Categories:        []string{"custom"},
		Framework:         "NEF",
		InputSize:         model.InputSize{Width: inputWidth, Height: inputHeight},
		ModelSize:         header.Size,
		Quantization:      quantization,
		SupportedHardware: []string{"KL520", "KL720"},
		Labels:            labels,
		Version:           "1.0.0",
		Author:            "Custom",
		License:           "Custom",
		CreatedAt:         now,
		UpdatedAt:         now,
		IsCustom:          true,
		FilePath:          nefPath,
	}
	// Save metadata
	if err := h.store.SaveMetadata(id, m); err != nil {
		// Don't leave an orphaned .nef on disk when its metadata can't be
		// written (previously the file was leaked).
		if delErr := h.store.DeleteModel(id); delErr != nil {
			fmt.Printf("[WARN] Failed to clean up model files for %s: %v\n", id, delErr)
		}
		c.JSON(500, gin.H{
			"success": false,
			"error":   gin.H{"code": "STORAGE_ERROR", "message": err.Error()},
		})
		return
	}
	// Add to repository
	h.repo.Add(m)
	c.JSON(200, gin.H{"success": true, "data": m})
}
// DeleteModel removes a custom model: first from the repository (which
// rejects built-in models), then its files from disk. File-deletion
// failures are logged but do not fail the request.
func (h *ModelUploadHandler) DeleteModel(c *gin.Context) {
	id := c.Param("id")
	if removeErr := h.repo.Remove(id); removeErr != nil {
		c.JSON(400, gin.H{
			"success": false,
			"error":   gin.H{"code": "DELETE_FAILED", "message": removeErr.Error()},
		})
		return
	}
	if fileErr := h.store.DeleteModel(id); fileErr != nil {
		fmt.Printf("[WARN] Failed to delete model files for %s: %v\n", id, fileErr)
	}
	c.JSON(200, gin.H{"success": true})
}

View File

@ -0,0 +1,102 @@
package handlers
import (
"net/http"
"runtime"
"time"
"edge-ai-platform/internal/deps"
"edge-ai-platform/internal/update"
"github.com/gin-gonic/gin"
)
// SystemHandler serves system-level endpoints: liveness, build info,
// runtime metrics, dependency status, update checks and restart.
type SystemHandler struct {
	startTime time.Time // process start; used to report uptime
	version string // build version injected at link time
	buildTime string // build timestamp injected at link time
	shutdownFn func() // invoked by Restart to trigger graceful shutdown
	depsCache []deps.Dependency // dependency probe results cached at startup
	giteaURL string // base URL of the Gitea instance used for update checks
}
// NewSystemHandler wires up a SystemHandler. deps.CheckAll runs once here;
// the Deps endpoint serves that cached snapshot instead of re-probing on
// every request.
func NewSystemHandler(version, buildTime, giteaURL string, shutdownFn func()) *SystemHandler {
	return &SystemHandler{
		startTime: time.Now(),
		version: version,
		buildTime: buildTime,
		shutdownFn: shutdownFn,
		depsCache: deps.CheckAll(),
		giteaURL: giteaURL,
	}
}
// HealthCheck is the liveness probe; it always answers 200 {"status":"ok"}.
func (h *SystemHandler) HealthCheck(c *gin.Context) {
	c.JSON(http.StatusOK, gin.H{"status": "ok"})
}
// Info returns basic build and runtime information (version, platform,
// uptime in seconds, Go toolchain version).
func (h *SystemHandler) Info(c *gin.Context) {
	payload := gin.H{
		"version":   h.version,
		"platform":  runtime.GOOS + "/" + runtime.GOARCH,
		"uptime":    time.Since(h.startTime).Seconds(),
		"goVersion": runtime.Version(),
	}
	c.JSON(http.StatusOK, gin.H{"success": true, "data": payload})
}
// Metrics reports build info plus a snapshot of Go runtime statistics
// (goroutine count, heap/GC numbers) for the diagnostics page.
func (h *SystemHandler) Metrics(c *gin.Context) {
	var memStats runtime.MemStats
	runtime.ReadMemStats(&memStats)

	const mb = 1024 * 1024
	data := gin.H{
		"version":        h.version,
		"buildTime":      h.buildTime,
		"platform":       runtime.GOOS + "/" + runtime.GOARCH,
		"goVersion":      runtime.Version(),
		"uptimeSeconds":  time.Since(h.startTime).Seconds(),
		"goroutines":     runtime.NumGoroutine(),
		"memHeapAllocMB": float64(memStats.HeapAlloc) / mb,
		"memSysMB":       float64(memStats.Sys) / mb,
		"memHeapObjects": memStats.HeapObjects,
		"gcCycles":       memStats.NumGC,
		"nextGcMB":       float64(memStats.NextGC) / mb,
	}
	c.JSON(http.StatusOK, gin.H{"success": true, "data": data})
}
// Deps returns the dependency check results computed once in NewSystemHandler.
// NOTE(review): the cache is never refreshed, so a dependency installed after
// the server starts will not appear until restart — confirm intended.
func (h *SystemHandler) Deps(c *gin.Context) {
	c.JSON(200, gin.H{
		"success": true,
		"data": gin.H{"deps": h.depsCache},
	})
}
// CheckUpdate asks the configured Gitea instance whether a newer release
// exists for this build and returns the comparison result.
func (h *SystemHandler) CheckUpdate(c *gin.Context) {
	// Gitea release repo: use the same owner/repo as .goreleaser.yaml
	const owner = "warrenchen"
	const repo = "web_academy_prototype"
	info := update.Check(h.version, h.giteaURL, owner, repo)
	c.JSON(http.StatusOK, gin.H{
		"success": true,
		"data": info,
	})
}
// Restart acknowledges the request, flushes the response so the client
// actually receives it, then asynchronously triggers shutdown.
func (h *SystemHandler) Restart(c *gin.Context) {
	c.JSON(200, gin.H{"success": true, "data": gin.H{"message": "restarting"}})
	// Flush so the 200 reaches the client before the server goes down.
	if f, ok := c.Writer.(http.Flusher); ok {
		f.Flush()
	}
	go func() {
		// Short delay gives the response time to hit the wire.
		time.Sleep(200 * time.Millisecond)
		// shutdownFn signals the main goroutine to perform exec after server shutdown
		h.shutdownFn()
	}()
}

View File

@ -0,0 +1,29 @@
package api
import (
"net/http"
"github.com/gin-gonic/gin"
)
// CORSMiddleware reflects the request Origin, advertises the allowed
// methods/headers, and short-circuits OPTIONS preflight with 204.
//
// Fix: the response now carries "Vary: Origin". Because the
// Access-Control-Allow-Origin value is reflected per request, any shared
// cache between browser and server could otherwise serve one origin's
// CORS headers to a different origin.
func CORSMiddleware() gin.HandlerFunc {
	return func(c *gin.Context) {
		origin := c.GetHeader("Origin")
		if origin != "" {
			// In production, frontend is same-origin so browsers don't send Origin header.
			// In dev, Next.js on :3000 needs CORS to reach Go on :3721.
			// Allow all origins since this is a local-first application.
			c.Header("Access-Control-Allow-Origin", origin)
			// The response depends on the Origin request header.
			c.Header("Vary", "Origin")
		}
		c.Header("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS")
		c.Header("Access-Control-Allow-Headers", "Content-Type, Authorization, X-Relay-Token")
		c.Header("Access-Control-Allow-Credentials", "true")
		if c.Request.Method == http.MethodOptions {
			c.AbortWithStatus(http.StatusNoContent)
			return
		}
		c.Next()
	}
}

View File

@ -0,0 +1,204 @@
package api
import (
"fmt"
"io"
"net/http"
"strings"
"time"
"edge-ai-platform/internal/api/handlers"
"edge-ai-platform/internal/api/ws"
"edge-ai-platform/internal/camera"
"edge-ai-platform/internal/cluster"
"edge-ai-platform/internal/device"
"edge-ai-platform/internal/flash"
"edge-ai-platform/internal/inference"
"edge-ai-platform/internal/model"
"edge-ai-platform/pkg/logger"
"github.com/gin-gonic/gin"
)
// NewRouter assembles the HTTP surface of the server: global middleware
// (recovery, request logging, CORS), the REST API under /api, the relay
// token endpoint, all WebSocket routes, and — when staticFS is non-nil —
// the embedded production frontend with SPA fallback routing.
func NewRouter(
	modelRepo *model.Repository,
	modelStore *model.ModelStore,
	deviceMgr *device.Manager,
	cameraMgr *camera.Manager,
	clusterMgr *cluster.Manager,
	flashSvc *flash.Service,
	inferenceSvc *inference.Service,
	wsHub *ws.Hub,
	staticFS http.FileSystem,
	logBroadcaster *logger.Broadcaster,
	systemHandler *handlers.SystemHandler,
	relayToken string,
) *gin.Engine {
	// Use gin.New() instead of gin.Default() to replace the default logger
	// with one that also pushes to the WebSocket broadcaster.
	r := gin.New()
	r.Use(gin.Recovery())
	r.Use(broadcasterLogger(logBroadcaster))
	r.Use(CORSMiddleware())
	// Each handler receives only the collaborators it needs.
	modelHandler := handlers.NewModelHandler(modelRepo)
	modelUploadHandler := handlers.NewModelUploadHandler(modelRepo, modelStore)
	deviceHandler := handlers.NewDeviceHandler(deviceMgr, flashSvc, inferenceSvc, wsHub)
	cameraHandler := handlers.NewCameraHandler(cameraMgr, deviceMgr, inferenceSvc, wsHub)
	clusterHandler := handlers.NewClusterHandler(clusterMgr, flashSvc, modelRepo, wsHub)
	api := r.Group("/api")
	{
		// System: health, build info, runtime metrics, deps, restart, updates.
		api.GET("/system/health", systemHandler.HealthCheck)
		api.GET("/system/info", systemHandler.Info)
		api.GET("/system/metrics", systemHandler.Metrics)
		api.GET("/system/deps", systemHandler.Deps)
		api.POST("/system/restart", systemHandler.Restart)
		api.GET("/system/update-check", systemHandler.CheckUpdate)
		// Model catalogue plus custom-model upload/delete.
		api.GET("/models", modelHandler.ListModels)
		api.GET("/models/:id", modelHandler.GetModel)
		api.POST("/models/upload", modelUploadHandler.UploadModel)
		api.DELETE("/models/:id", modelUploadHandler.DeleteModel)
		// Device lifecycle: scan, connect, flash, run inference.
		api.GET("/devices", deviceHandler.ListDevices)
		api.POST("/devices/scan", deviceHandler.ScanDevices)
		api.GET("/devices/:id", deviceHandler.GetDevice)
		api.POST("/devices/:id/connect", deviceHandler.ConnectDevice)
		api.POST("/devices/:id/disconnect", deviceHandler.DisconnectDevice)
		api.POST("/devices/:id/flash", deviceHandler.FlashDevice)
		api.POST("/devices/:id/inference/start", deviceHandler.StartInference)
		api.POST("/devices/:id/inference/stop", deviceHandler.StopInference)
		// Camera / media sources feeding the inference pipeline.
		api.GET("/camera/list", cameraHandler.ListCameras)
		api.POST("/camera/start", cameraHandler.StartPipeline)
		api.POST("/camera/stop", cameraHandler.StopPipeline)
		api.GET("/camera/stream", cameraHandler.StreamMJPEG)
		api.POST("/media/upload/image", cameraHandler.UploadImage)
		api.POST("/media/upload/video", cameraHandler.UploadVideo)
		api.POST("/media/upload/batch-images", cameraHandler.UploadBatchImages)
		api.GET("/media/batch-images/:index", cameraHandler.GetBatchImageFrame)
		api.POST("/media/url", cameraHandler.StartFromURL)
		api.POST("/media/seek", cameraHandler.SeekVideo)
		// Device clusters: membership, weights, flash and inference fan-out.
		api.GET("/clusters", clusterHandler.ListClusters)
		api.POST("/clusters", clusterHandler.CreateCluster)
		api.GET("/clusters/:id", clusterHandler.GetCluster)
		api.DELETE("/clusters/:id", clusterHandler.DeleteCluster)
		api.POST("/clusters/:id/devices", clusterHandler.AddDevice)
		api.DELETE("/clusters/:id/devices/:deviceId", clusterHandler.RemoveDevice)
		api.PUT("/clusters/:id/devices/:deviceId/weight", clusterHandler.UpdateWeight)
		api.POST("/clusters/:id/flash", clusterHandler.FlashCluster)
		api.POST("/clusters/:id/inference/start", clusterHandler.StartInference)
		api.POST("/clusters/:id/inference/stop", clusterHandler.StopInference)
	}
	// Relay token endpoint — browser fetches this from localhost to auto-detect token.
	// CORS is handled by CORSMiddleware. We must register OPTIONS explicitly
	// because Gin only runs middleware on routes with a matching method handler;
	// without this, preflight requests get 405 and no CORS headers.
	r.GET("/auth/token", func(c *gin.Context) {
		c.JSON(http.StatusOK, gin.H{"token": relayToken})
	})
	r.OPTIONS("/auth/token", func(c *gin.Context) {
		c.Status(http.StatusNoContent)
	})
	// WebSocket endpoints, one hub room per device/cluster topic.
	r.GET("/ws/devices/events", ws.DeviceEventsHandler(wsHub, deviceMgr))
	r.GET("/ws/devices/:id/flash-progress", ws.FlashProgressHandler(wsHub))
	r.GET("/ws/devices/:id/inference", ws.InferenceHandler(wsHub, inferenceSvc))
	r.GET("/ws/server-logs", ws.ServerLogsHandler(wsHub, logBroadcaster))
	r.GET("/ws/clusters/:id/inference", ws.ClusterInferenceHandler(wsHub))
	r.GET("/ws/clusters/:id/flash-progress", ws.ClusterFlashProgressHandler(wsHub))
	// Embedded frontend static file serving (production mode)
	if staticFS != nil {
		fileServer := http.FileServer(staticFS)
		// Serve Next.js static assets
		r.GET("/_next/*filepath", func(c *gin.Context) {
			fileServer.ServeHTTP(c.Writer, c.Request)
		})
		r.GET("/favicon.ico", func(c *gin.Context) {
			fileServer.ServeHTTP(c.Writer, c.Request)
		})
		// SPA fallback for all other routes (client-side routing)
		r.NoRoute(spaFallback(staticFS))
	}
	return r
}
// broadcasterLogger is a Gin middleware that mirrors request logs to stdout
// (like gin.Logger) and pushes each line to the WebSocket log broadcaster
// so they are visible in the frontend Settings page.
func broadcasterLogger(b *logger.Broadcaster) gin.HandlerFunc {
	return func(c *gin.Context) {
		began := time.Now()
		reqPath := c.Request.URL.Path
		query := c.Request.URL.RawQuery

		c.Next()

		if query != "" {
			reqPath = reqPath + "?" + query
		}
		status := c.Writer.Status()
		line := fmt.Sprintf("%3d | %13v | %-7s %s",
			status, time.Since(began), c.Request.Method, reqPath)

		// Mirror the original Gin behaviour on stdout.
		fmt.Printf("[GIN] %s\n", line)

		if b == nil {
			return
		}
		// Map the HTTP status to a log level for the WebSocket stream.
		level := "INFO"
		switch {
		case status >= 500:
			level = "ERROR"
		case status >= 400:
			level = "WARN"
		}
		b.Push(level, fmt.Sprintf("[GIN] %s", line))
	}
}
// spaFallback serves the requested file from the embedded FS when it exists,
// and otherwise hands back index.html so client-side routing can take over.
func spaFallback(staticFS http.FileSystem) gin.HandlerFunc {
	return func(c *gin.Context) {
		reqPath := c.Request.URL.Path

		// API and WebSocket paths never fall back to the SPA shell.
		if strings.HasPrefix(reqPath, "/api/") || strings.HasPrefix(reqPath, "/ws/") {
			c.Status(http.StatusNotFound)
			return
		}

		// Serve the exact file when it is present (e.g. /models/index.html).
		if f, openErr := staticFS.Open(reqPath); openErr == nil {
			f.Close()
			http.FileServer(staticFS).ServeHTTP(c.Writer, c.Request)
			return
		}

		// Unknown path: return the SPA entry point for client routing.
		index, err := staticFS.Open("/index.html")
		if err != nil {
			c.Status(http.StatusInternalServerError)
			return
		}
		defer index.Close()
		c.Header("Content-Type", "text/html; charset=utf-8")
		c.Status(http.StatusOK)
		io.Copy(c.Writer, index)
	}
}

View File

@ -0,0 +1,39 @@
package ws
import (
"github.com/gin-gonic/gin"
"github.com/gorilla/websocket"
)
// ClusterFlashProgressHandler streams flash-progress messages for one
// cluster over a WebSocket. Registration is synchronous so progress
// published immediately after the upgrade is not missed.
func ClusterFlashProgressHandler(hub *Hub) gin.HandlerFunc {
	return func(c *gin.Context) {
		conn, err := upgrader.Upgrade(c.Writer, c.Request, nil)
		if err != nil {
			return
		}
		defer conn.Close()

		client := &Client{Conn: conn, Send: make(chan []byte, 20)}
		sub := &Subscription{
			Client: client,
			Room:   "flash:cluster:" + c.Param("id"),
		}
		hub.RegisterSync(sub)
		defer hub.Unregister(sub)

		// Reader goroutine: drain control frames (ping/pong, close) and
		// shut the connection down when the peer disappears.
		go func() {
			defer conn.Close()
			for {
				if _, _, readErr := conn.ReadMessage(); readErr != nil {
					return
				}
			}
		}()

		// Writer loop: forward room messages until the send channel closes
		// or a write fails.
		for msg := range client.Send {
			if writeErr := conn.WriteMessage(websocket.TextMessage, msg); writeErr != nil {
				return
			}
		}
	}
}

View File

@ -0,0 +1,29 @@
package ws
import (
"github.com/gin-gonic/gin"
"github.com/gorilla/websocket"
)
// ClusterInferenceHandler streams inference results for one cluster over a
// WebSocket.
//
// Fix: the original had no read pump, so when a client closed the connection
// while the room was idle this goroutine stayed blocked on client.Send
// indefinitely (writes — the only failure path — happen only on broadcast).
// A read pump now detects the close and unregisters the subscription, which
// closes client.Send and releases the write loop. The hub's unregister path
// is idempotent, so the deferred Unregister remains safe.
func ClusterInferenceHandler(hub *Hub) gin.HandlerFunc {
	return func(c *gin.Context) {
		clusterID := c.Param("id")
		conn, err := upgrader.Upgrade(c.Writer, c.Request, nil)
		if err != nil {
			return
		}
		defer conn.Close()
		client := &Client{Conn: conn, Send: make(chan []byte, 20)}
		room := "inference:cluster:" + clusterID
		sub := &Subscription{Client: client, Room: room}
		hub.Register(sub)
		defer hub.Unregister(sub)
		// Read pump — drain control frames; on read error (client gone)
		// unregister so the write loop below terminates.
		go func() {
			for {
				if _, _, err := conn.ReadMessage(); err != nil {
					hub.Unregister(sub)
					return
				}
			}
		}()
		for msg := range client.Send {
			if err := conn.WriteMessage(websocket.TextMessage, msg); err != nil {
				return
			}
		}
	}
}

View File

@ -0,0 +1,44 @@
package ws
import (
"net/http"
"edge-ai-platform/internal/device"
"github.com/gin-gonic/gin"
"github.com/gorilla/websocket"
)
// upgrader accepts any Origin. This is a local-first app bound to localhost,
// so cross-origin WebSocket access is an accepted risk here.
var upgrader = websocket.Upgrader{
	CheckOrigin: func(r *http.Request) bool { return true },
}
// DeviceEventsHandler upgrades the connection and relays device-manager
// events to the "device-events" room.
//
// NOTE(review): each connection spawns a goroutine receiving from the shared
// deviceMgr.Events() channel. With more than one concurrent client, each
// event is delivered to only ONE of those goroutines (channel receives
// compete), and the goroutines never exit after the client disconnects, so
// they accumulate. A single fan-out goroutine owned by the hub or manager
// would avoid both — confirm and fix separately.
func DeviceEventsHandler(hub *Hub, deviceMgr *device.Manager) gin.HandlerFunc {
	return func(c *gin.Context) {
		conn, err := upgrader.Upgrade(c.Writer, c.Request, nil)
		if err != nil {
			return
		}
		defer conn.Close()
		client := &Client{Conn: conn, Send: make(chan []byte, 10)}
		room := "device-events"
		sub := &Subscription{Client: client, Room: room}
		hub.Register(sub)
		defer hub.Unregister(sub)
		// Forward device events to this WebSocket room
		go func() {
			for event := range deviceMgr.Events() {
				hub.BroadcastToRoom(room, event)
			}
		}()
		// Write pump
		for msg := range client.Send {
			if err := conn.WriteMessage(websocket.TextMessage, msg); err != nil {
				return
			}
		}
	}
}

View File

@ -0,0 +1,39 @@
package ws
import (
"github.com/gin-gonic/gin"
"github.com/gorilla/websocket"
)
// FlashProgressHandler streams flash-progress messages for one device over
// a WebSocket. Registration is synchronous so progress published right
// after the upgrade is not missed.
func FlashProgressHandler(hub *Hub) gin.HandlerFunc {
	return func(c *gin.Context) {
		conn, err := upgrader.Upgrade(c.Writer, c.Request, nil)
		if err != nil {
			return
		}
		defer conn.Close()

		client := &Client{Conn: conn, Send: make(chan []byte, 20)}
		sub := &Subscription{
			Client: client,
			Room:   "flash:" + c.Param("id"),
		}
		hub.RegisterSync(sub)
		defer hub.Unregister(sub)

		// Reader goroutine: drain control frames (ping/pong, close) and
		// shut the connection down when the peer disappears.
		go func() {
			defer conn.Close()
			for {
				if _, _, readErr := conn.ReadMessage(); readErr != nil {
					return
				}
			}
		}()

		// Writer loop: forward room messages until the send channel closes
		// or a write fails.
		for msg := range client.Send {
			if writeErr := conn.WriteMessage(websocket.TextMessage, msg); writeErr != nil {
				return
			}
		}
	}
}

View File

@ -0,0 +1,106 @@
package ws
import (
"encoding/json"
"sync"
"github.com/gorilla/websocket"
)
// Client is one WebSocket connection with its outbound message queue.
type Client struct {
	Conn *websocket.Conn
	Send chan []byte // closed by the Hub when the client is removed from a room
}
// Subscription pairs a client with the room it is joining or leaving.
type Subscription struct {
	Client *Client
	Room string
	done chan struct{} // used by RegisterSync to wait for completion
}
// RoomMessage is a payload addressed to every client in one room.
type RoomMessage struct {
	Room string
	Message []byte
}
// Hub routes messages to rooms of clients. Room-map mutations happen in
// Run's goroutine; the mutex guards the map for any readers outside it.
type Hub struct {
	rooms map[string]map[*Client]bool
	register chan *Subscription
	unregister chan *Subscription
	broadcast chan *RoomMessage
	mu sync.RWMutex
}
// NewHub constructs a Hub with buffered control channels so registration,
// unregistration and broadcasting rarely block callers. Call Run in its own
// goroutine to start processing.
func NewHub() *Hub {
	h := &Hub{rooms: make(map[string]map[*Client]bool)}
	h.register = make(chan *Subscription, 10)
	h.unregister = make(chan *Subscription, 10)
	h.broadcast = make(chan *RoomMessage, 100)
	return h
}
// Run is the hub's event loop; it owns all mutations of h.rooms and is meant
// to run in exactly one goroutine.
//
// Fix: the broadcast branch previously held only the read lock (RLock) while
// it could delete a slow client from the room map and close its Send channel
// — a map mutation under a read lock. It now takes the write lock, matching
// the register/unregister branches.
func (h *Hub) Run() {
	for {
		select {
		case sub := <-h.register:
			h.mu.Lock()
			if h.rooms[sub.Room] == nil {
				h.rooms[sub.Room] = make(map[*Client]bool)
			}
			h.rooms[sub.Room][sub.Client] = true
			h.mu.Unlock()
			// Wake RegisterSync callers once membership is visible.
			if sub.done != nil {
				close(sub.done)
			}
		case sub := <-h.unregister:
			h.mu.Lock()
			if clients, ok := h.rooms[sub.Room]; ok {
				// The existence check makes Unregister idempotent and
				// prevents a double close of the Send channel (e.g. when a
				// slow client was already dropped by the broadcast branch).
				if _, exists := clients[sub.Client]; exists {
					delete(clients, sub.Client)
					close(sub.Client.Send)
				}
			}
			h.mu.Unlock()
		case msg := <-h.broadcast:
			// Write lock: a slow client may be evicted below.
			h.mu.Lock()
			if clients, ok := h.rooms[msg.Room]; ok {
				for client := range clients {
					select {
					case client.Send <- msg.Message:
					default:
						// Slow client: drop it rather than block the hub.
						close(client.Send)
						delete(clients, client)
					}
				}
			}
			h.mu.Unlock()
		}
	}
}
// Register queues an asynchronous room join; it may return before the Hub
// has actually processed the subscription.
func (h *Hub) Register(sub *Subscription) {
	h.register <- sub
}
// RegisterSync registers a subscription and blocks until the Hub has processed it,
// ensuring the client is in the room before returning.
func (h *Hub) RegisterSync(sub *Subscription) {
	sub.done = make(chan struct{})
	h.register <- sub
	<-sub.done
}
// Unregister queues a room leave; the Hub closes the client's Send channel
// when it processes the request (no-op if the client was already removed).
func (h *Hub) Unregister(sub *Subscription) {
	h.unregister <- sub
}
// BroadcastToRoom JSON-encodes data and queues it for every client in the
// room. Marshal failures are silently dropped (best-effort delivery).
func (h *Hub) BroadcastToRoom(room string, data interface{}) {
	jsonData, err := json.Marshal(data)
	if err != nil {
		return
	}
	h.broadcast <- &RoomMessage{Room: room, Message: jsonData}
}

View File

@ -0,0 +1,31 @@
package ws
import (
"edge-ai-platform/internal/inference"
"github.com/gin-gonic/gin"
"github.com/gorilla/websocket"
)
// InferenceHandler streams per-device inference results over a WebSocket.
//
// Fix: the original had no read pump, so when a client closed the connection
// while the room was idle this goroutine stayed blocked on client.Send
// indefinitely. A read pump now detects the close and unregisters the
// subscription, which closes client.Send and ends the write loop (the hub's
// unregister path is idempotent, so the deferred call remains safe).
func InferenceHandler(hub *Hub, inferenceSvc *inference.Service) gin.HandlerFunc {
	return func(c *gin.Context) {
		deviceID := c.Param("id")
		conn, err := upgrader.Upgrade(c.Writer, c.Request, nil)
		if err != nil {
			return
		}
		defer conn.Close()
		client := &Client{Conn: conn, Send: make(chan []byte, 20)}
		room := "inference:" + deviceID
		sub := &Subscription{Client: client, Room: room}
		hub.Register(sub)
		defer hub.Unregister(sub)
		// Read pump — drain control frames; on read error (client gone)
		// unregister so the write loop below terminates.
		go func() {
			for {
				if _, _, err := conn.ReadMessage(); err != nil {
					hub.Unregister(sub)
					return
				}
			}
		}()
		for msg := range client.Send {
			if err := conn.WriteMessage(websocket.TextMessage, msg); err != nil {
				return
			}
		}
	}
}

View File

@ -0,0 +1,57 @@
package ws
import (
"encoding/json"
"edge-ai-platform/pkg/logger"
"github.com/gin-gonic/gin"
"github.com/gorilla/websocket"
)
// ServerLogsHandler streams server log lines over a WebSocket: it first
// replays the broadcaster's recent-log buffer to the new client, then
// forwards live entries delivered to the "server-logs" room.
func ServerLogsHandler(hub *Hub, broadcaster *logger.Broadcaster) gin.HandlerFunc {
	return func(c *gin.Context) {
		conn, err := upgrader.Upgrade(c.Writer, c.Request, nil)
		if err != nil {
			return
		}
		defer conn.Close()
		client := &Client{Conn: conn, Send: make(chan []byte, 100)}
		sub := &Subscription{Client: client, Room: "server-logs"}
		// Synchronous registration so live entries published during the
		// replay below are queued rather than lost.
		hub.RegisterSync(sub)
		defer hub.Unregister(sub)
		// Send buffered recent logs to the newly connected client
		if broadcaster != nil {
			for _, entry := range broadcaster.Recent() {
				data, err := json.Marshal(entry)
				if err != nil {
					continue
				}
				select {
				case client.Send <- data:
				default:
					// Replay overflowed the queue; give up on this client.
					// The deferred unregister/close perform the cleanup.
					return
				}
			}
		}
		// Read pump — drain incoming messages (close frames)
		go func() {
			defer conn.Close()
			for {
				if _, _, err := conn.ReadMessage(); err != nil {
					break
				}
			}
		}()
		// Write pump
		for msg := range client.Send {
			if err := conn.WriteMessage(websocket.TextMessage, msg); err != nil {
				return
			}
		}
	}
}

View File

@ -0,0 +1,188 @@
package camera
import (
"bufio"
"fmt"
"io"
"os/exec"
"runtime"
"sync"
)
// FFmpegCamera captures webcam frames using ffmpeg subprocess.
// Supports macOS (AVFoundation) and Windows (DirectShow).
// ffmpeg outputs a continuous MJPEG stream to stdout which is parsed
// by scanning for JPEG SOI (0xFFD8) and EOI (0xFFD9) markers.
type FFmpegCamera struct {
	cmd *exec.Cmd // running ffmpeg capture process
	stdout io.ReadCloser // ffmpeg's MJPEG byte stream
	latestFrame []byte // most recent complete JPEG frame (guarded by mu)
	mu sync.Mutex // guards latestFrame and err
	done chan struct{} // closed when readLoop exits
	err error // terminal stream error, if any (guarded by mu)
}
// NewFFmpegCamera starts an ffmpeg process to capture from the given camera.
// On macOS, cameraIndex is used (e.g. 0 for first camera).
// On Windows, cameraName from device detection is used; cameraIndex is ignored
// unless no name is provided.
func NewFFmpegCamera(cameraIndex, width, height, framerate int) (*FFmpegCamera, error) {
	return NewFFmpegCameraWithName(cameraIndex, "", width, height, framerate)
}
// NewFFmpegCameraWithName starts ffmpeg with explicit camera name (needed for Windows dshow).
// The returned camera is already parsing frames in a background goroutine;
// call Close to kill the process and stop the read loop.
func NewFFmpegCameraWithName(cameraIndex int, cameraName string, width, height, framerate int) (*FFmpegCamera, error) {
	args := buildCaptureArgs(cameraIndex, cameraName, width, height, framerate)
	cmd := exec.Command("ffmpeg", args...)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, fmt.Errorf("failed to get stdout pipe: %w", err)
	}
	// Suppress ffmpeg's stderr banner/logs (a nil Stderr is connected to
	// the null device by os/exec).
	cmd.Stderr = nil
	if err := cmd.Start(); err != nil {
		return nil, fmt.Errorf("failed to start ffmpeg: %w", err)
	}
	cam := &FFmpegCamera{
		cmd: cmd,
		stdout: stdout,
		done: make(chan struct{}),
	}
	// Parse JPEG frames off stdout concurrently.
	go cam.readLoop()
	return cam, nil
}
// buildCaptureArgs returns the ffmpeg arguments for the current OS.
//
// Capture framework per platform:
//   - windows: DirectShow (dshow), addressed by device name
//   - linux:   Video4Linux2 (v4l2), addressed by /dev/video<N>
//   - default (macOS): AVFoundation, addressed by device index
//
// Fix: Linux previously fell into the AVFoundation branch, but that input
// device only exists on macOS builds of ffmpeg, so capture always failed on
// Linux (which this project ships binaries for). Linux now uses v4l2.
func buildCaptureArgs(cameraIndex int, cameraName string, width, height, framerate int) []string {
	videoSize := fmt.Sprintf("%dx%d", width, height)
	fps := fmt.Sprintf("%d", framerate)
	// Output side is identical on every platform: MJPEG frames on stdout.
	output := []string{
		"-f", "image2pipe",
		"-vcodec", "mjpeg",
		"-q:v", "5",
		"-an",
		"-",
	}
	switch runtime.GOOS {
	case "windows":
		// DirectShow on Windows: -f dshow -i video="Camera Name"
		inputName := cameraName
		if inputName == "" {
			// Fallback: try to detect first camera
			devices := ListFFmpegDevices()
			if len(devices) > 0 {
				inputName = devices[0].Name
			} else {
				inputName = "Integrated Camera"
			}
		}
		return append([]string{
			"-f", "dshow",
			"-framerate", fps,
			"-video_size", videoSize,
			"-i", fmt.Sprintf("video=%s", inputName),
		}, output...)
	case "linux":
		// Video4Linux2 on Linux: -f v4l2 -i /dev/video<index>
		return append([]string{
			"-f", "v4l2",
			"-framerate", fps,
			"-video_size", videoSize,
			"-i", fmt.Sprintf("/dev/video%d", cameraIndex),
		}, output...)
	default:
		// AVFoundation on macOS: -f avfoundation -i "index:none"
		return append([]string{
			"-f", "avfoundation",
			"-framerate", fps,
			"-video_size", videoSize,
			"-i", fmt.Sprintf("%d:none", cameraIndex),
		}, output...)
	}
}
// readLoop continuously reads ffmpeg's stdout and extracts JPEG frames by
// scanning for the SOI (0xFF 0xD8) and EOI (0xFF 0xD9) markers. The most
// recent complete frame is published to latestFrame under mu; on any read
// error the loop records the error and closes done.
//
// Fix: while hunting for SOI, a 0xFF followed by a byte other than 0xD8 used
// to consume that second byte too. For an input run like 0xFF 0xFF 0xD8 the
// real marker was swallowed and the frame missed; a non-0xD8 0xFF is now
// pushed back with UnreadByte so it can start the next marker pair.
func (c *FFmpegCamera) readLoop() {
	defer close(c.done)
	reader := bufio.NewReaderSize(c.stdout, 1024*1024) // 1MB buffer
	buf := make([]byte, 0, 512*1024)                   // 512KB initial frame buffer
	inFrame := false
	for {
		b, err := reader.ReadByte()
		if err != nil {
			c.mu.Lock()
			c.err = fmt.Errorf("ffmpeg stream ended: %w", err)
			c.mu.Unlock()
			return
		}
		if !inFrame {
			// Look for SOI marker: 0xFF 0xD8
			if b == 0xFF {
				next, err := reader.ReadByte()
				if err != nil {
					c.mu.Lock()
					c.err = fmt.Errorf("ffmpeg stream ended: %w", err)
					c.mu.Unlock()
					return
				}
				if next == 0xD8 {
					// Start of JPEG
					buf = buf[:0]
					buf = append(buf, 0xFF, 0xD8)
					inFrame = true
				} else if next == 0xFF {
					// Re-examine this 0xFF on the next iteration so an SOI
					// preceded by consecutive 0xFF bytes is not missed.
					_ = reader.UnreadByte()
				}
			}
			continue
		}
		// Inside a frame, collect bytes
		buf = append(buf, b)
		// Look for EOI marker: 0xFF 0xD9
		if b == 0xD9 && len(buf) >= 2 && buf[len(buf)-2] == 0xFF {
			// Publish a copy so ReadFrame callers never alias our buffer.
			frame := make([]byte, len(buf))
			copy(frame, buf)
			c.mu.Lock()
			c.latestFrame = frame
			c.mu.Unlock()
			inFrame = false
		}
	}
}
// ReadFrame returns the most recently captured JPEG frame.
// It fails once the stream has ended (readLoop recorded an error) or before
// the first complete frame has been parsed.
func (c *FFmpegCamera) ReadFrame() ([]byte, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.err != nil {
		return nil, c.err
	}
	if c.latestFrame == nil {
		return nil, fmt.Errorf("no frame available yet")
	}
	// Return a copy to avoid data races
	frame := make([]byte, len(c.latestFrame))
	copy(frame, c.latestFrame)
	return frame, nil
}
// Close stops the ffmpeg process and cleans up resources.
// Killing the process makes readLoop's reads fail, after which it closes
// done; Close blocks until that happens. Both constructors always start
// readLoop, so the wait cannot hang for a camera they returned.
func (c *FFmpegCamera) Close() error {
	if c.cmd != nil && c.cmd.Process != nil {
		_ = c.cmd.Process.Kill()
		_ = c.cmd.Wait()
	}
	// Wait for readLoop to observe the stream ending and finish.
	<-c.done
	return nil
}

View File

@ -0,0 +1,134 @@
package camera
import (
"fmt"
"os/exec"
"regexp"
"runtime"
"strconv"
"strings"
)
// DetectFFmpeg reports whether an ffmpeg executable can be found on PATH.
func DetectFFmpeg() bool {
	if _, err := exec.LookPath("ffmpeg"); err != nil {
		return false
	}
	return true
}
// ListFFmpegDevices detects available video devices using ffmpeg.
// Automatically selects the correct capture framework for the current OS:
//   - macOS: AVFoundation
//   - Windows: DirectShow (dshow)
//
// Returns nil when ffmpeg is not installed.
// NOTE(review): on Linux this falls into the AVFoundation branch, which is
// macOS-only — v4l2 enumeration is not implemented here; confirm intended.
func ListFFmpegDevices() []CameraInfo {
	if !DetectFFmpeg() {
		return nil
	}
	switch runtime.GOOS {
	case "windows":
		return listDShowDevices()
	default:
		return listAVFoundationDevices()
	}
}
// --- macOS (AVFoundation) ---
// listAVFoundationDevices shells out to ffmpeg to enumerate AVFoundation
// devices. The command's exit status is deliberately ignored — the listing
// is expected in the combined output even when ffmpeg exits with an error
// (no real input is opened by -list_devices).
func listAVFoundationDevices() []CameraInfo {
	cmd := exec.Command("ffmpeg", "-f", "avfoundation", "-list_devices", "true", "-i", "")
	output, _ := cmd.CombinedOutput()
	return parseAVFoundationOutput(string(output))
}
// parseAVFoundationOutput extracts video devices from ffmpeg's AVFoundation
// device listing. Only lines between the "video devices" and "audio devices"
// section headers are considered; screen-capture pseudo-devices are skipped.
// Detected cameras are reported with a default 640x480 size.
//
// Example:
//
//	[AVFoundation indev @ 0x...] AVFoundation video devices:
//	[AVFoundation indev @ 0x...] [0] FaceTime HD Camera
//	[AVFoundation indev @ 0x...] [1] Capture screen 0
//	[AVFoundation indev @ 0x...] AVFoundation audio devices:
func parseAVFoundationOutput(output string) []CameraInfo {
	deviceRe := regexp.MustCompile(`\[AVFoundation[^\]]*\]\s*\[(\d+)\]\s*(.+)`)
	var cameras []CameraInfo
	inVideo := false
	for _, line := range strings.Split(output, "\n") {
		if strings.Contains(line, "AVFoundation video devices") {
			inVideo = true
			continue
		}
		if strings.Contains(line, "AVFoundation audio devices") {
			// Video section is over; everything below is audio.
			return cameras
		}
		if !inVideo {
			continue
		}
		m := deviceRe.FindStringSubmatch(line)
		if len(m) != 3 {
			continue
		}
		idx, convErr := strconv.Atoi(m[1])
		if convErr != nil {
			continue
		}
		name := strings.TrimSpace(m[2])
		// Skip screen-capture pseudo-devices.
		if strings.Contains(strings.ToLower(name), "capture screen") {
			continue
		}
		cameras = append(cameras, CameraInfo{
			ID:     fmt.Sprintf("cam-%d", idx),
			Name:   name,
			Index:  idx,
			Width:  640,
			Height: 480,
		})
	}
	return cameras
}
// --- Windows (DirectShow) ---
// listDShowDevices shells out to ffmpeg to enumerate DirectShow devices on
// Windows. The exit status is deliberately ignored — the listing is printed
// even though the dummy input cannot be opened.
func listDShowDevices() []CameraInfo {
	cmd := exec.Command("ffmpeg", "-f", "dshow", "-list_devices", "true", "-i", "dummy")
	output, _ := cmd.CombinedOutput()
	return parseDShowOutput(string(output))
}
// parseDShowOutput extracts video devices from ffmpeg's DirectShow device
// listing. Devices are numbered in the order they appear and reported with
// a default 640x480 size.
//
// Example:
//
//	[dshow @ 0x...] "Integrated Camera" (video)
//	[dshow @ 0x...] Alternative name "@device_pnp_..."
//	[dshow @ 0x...] "Microphone" (audio)
func parseDShowOutput(output string) []CameraInfo {
	// Match: [dshow @ 0x...] "Device Name" (video)
	videoRe := regexp.MustCompile(`\[dshow[^\]]*\]\s*"([^"]+)"\s*\(video\)`)
	var cameras []CameraInfo
	for _, line := range strings.Split(output, "\n") {
		m := videoRe.FindStringSubmatch(line)
		if len(m) != 2 {
			continue
		}
		// The running length of the slice doubles as the next device index.
		cameras = append(cameras, CameraInfo{
			ID:     fmt.Sprintf("cam-%d", len(cameras)),
			Name:   strings.TrimSpace(m[1]),
			Index:  len(cameras),
			Width:  640,
			Height: 480,
		})
	}
	return cameras
}

View File

@ -0,0 +1,8 @@
package camera
// FrameSource abstracts anything that produces JPEG frames.
// Camera manager, image files, and video files all implement this.
type FrameSource interface {
	// ReadFrame returns the current frame as JPEG bytes, or an error when
	// no frame is available (or the source is exhausted).
	ReadFrame() ([]byte, error)
	// Close releases the source's underlying resources.
	Close() error
}

View File

@ -0,0 +1,82 @@
package camera
import (
"bytes"
"fmt"
"image"
"image/jpeg"
"image/png"
"os"
"path/filepath"
"strings"
)
// ImageSource provides a single JPEG frame from an uploaded image file.
// ReadFrame() returns the frame once, then subsequent calls return an error
// to signal the pipeline that the source is exhausted.
type ImageSource struct {
	jpegData []byte // the image re-encoded as JPEG, served by ReadFrame
	width int // decoded image width in pixels
	height int // decoded image height in pixels
	filePath string // backing temp file; removed by Close
	done bool // set after the first ReadFrame call
}
// NewImageSource reads an image file (JPG or PNG) and converts it to JPEG
// bytes held in memory; other extensions are rejected.
func NewImageSource(filePath string) (*ImageSource, error) {
	f, err := os.Open(filePath)
	if err != nil {
		return nil, fmt.Errorf("failed to open image: %w", err)
	}
	defer f.Close()

	// Pick the decoder from the file extension.
	var (
		img    image.Image
		decErr error
	)
	switch ext := strings.ToLower(filepath.Ext(filePath)); ext {
	case ".jpg", ".jpeg":
		img, decErr = jpeg.Decode(f)
	case ".png":
		img, decErr = png.Decode(f)
	default:
		return nil, fmt.Errorf("unsupported image format: %s", ext)
	}
	if decErr != nil {
		return nil, fmt.Errorf("failed to decode image: %w", decErr)
	}

	// Re-encode as JPEG once; ReadFrame serves these bytes repeatedly.
	var encoded bytes.Buffer
	if err := jpeg.Encode(&encoded, img, &jpeg.Options{Quality: 90}); err != nil {
		return nil, fmt.Errorf("failed to encode JPEG: %w", err)
	}

	bounds := img.Bounds()
	return &ImageSource{
		jpegData: encoded.Bytes(),
		width:    bounds.Dx(),
		height:   bounds.Dy(),
		filePath: filePath,
	}, nil
}
// ReadFrame marks the single frame as consumed and returns it. The same
// JPEG bytes are returned on every call so MJPEG clients that connect at
// any time can still be served.
func (s *ImageSource) ReadFrame() ([]byte, error) {
	s.done = true
	return s.jpegData, nil
}
// Close deletes the backing temp file; the in-memory JPEG stays usable.
func (s *ImageSource) Close() error {
	return os.Remove(s.filePath)
}
// Dimensions returns the image width and height.
func (s *ImageSource) Dimensions() (int, int) {
	return s.width, s.height
}
// IsDone returns whether the single frame has been consumed.
func (s *ImageSource) IsDone() bool {
	return s.done
}

View File

@ -0,0 +1,107 @@
package camera
import (
"fmt"
"sync"
)
// CameraInfo describes one detected (or mock) camera device.
type CameraInfo struct {
	ID string `json:"id"`
	Name string `json:"name"`
	Index int `json:"index"`
	Width int `json:"width"`
	Height int `json:"height"`
}
// Manager owns at most one active capture source at a time — either a mock
// test-card camera or a real ffmpeg-backed camera.
type Manager struct {
	mockMode bool // when true, only the synthetic MockCamera is used
	mockCamera *MockCamera // active mock source (mock mode only)
	ffmpegCam *FFmpegCamera // active real source (guarded by mu)
	isOpen bool // whether any source is currently open
	mu sync.Mutex // guards all mutable fields above
}
// NewManager creates a camera manager; mockMode substitutes a synthetic
// test-card camera for real device capture.
func NewManager(mockMode bool) *Manager {
	return &Manager{mockMode: mockMode}
}
// ListCameras enumerates available cameras. In mock mode a single synthetic
// camera is returned; otherwise ffmpeg-based, OS-specific detection is used.
func (m *Manager) ListCameras() []CameraInfo {
	if m.mockMode {
		return []CameraInfo{
			{ID: "mock-cam-0", Name: "Mock Camera 0", Index: 0, Width: 640, Height: 480},
		}
	}
	// Try to detect real cameras via ffmpeg (auto-detects OS)
	if devices := ListFFmpegDevices(); len(devices) > 0 {
		return devices
	}
	// Nothing found — log the likely cause for the operator.
	if DetectFFmpeg() {
		fmt.Println("[WARN] No video devices detected by ffmpeg")
	} else {
		fmt.Println("[WARN] ffmpeg not found — install with: brew install ffmpeg (macOS) or winget install ffmpeg (Windows)")
	}
	return []CameraInfo{}
}
// Open acquires a capture source at the given index and resolution: a mock
// test card in mock mode, otherwise a real camera via ffmpeg at 30 fps.
//
// Fix: any previously opened camera is now released first. Re-opening used
// to overwrite m.ffmpegCam without closing it, leaking the old ffmpeg child
// process for the lifetime of the server.
func (m *Manager) Open(index, width, height int) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	// Release a camera opened earlier so its ffmpeg process is reaped.
	if m.ffmpegCam != nil {
		_ = m.ffmpegCam.Close()
		m.ffmpegCam = nil
	}
	if m.mockMode {
		m.mockCamera = NewMockCamera(width, height)
		m.isOpen = true
		return nil
	}
	// Try real camera via ffmpeg
	if !DetectFFmpeg() {
		return fmt.Errorf("ffmpeg not found — install with: brew install ffmpeg (macOS) or winget install ffmpeg (Windows)")
	}
	cam, err := NewFFmpegCamera(index, width, height, 30)
	if err != nil {
		return fmt.Errorf("failed to open camera (index=%d): %w", index, err)
	}
	m.ffmpegCam = cam
	m.isOpen = true
	fmt.Printf("[INFO] Opened real camera (index=%d) via ffmpeg\n", index)
	return nil
}
// Close releases whichever capture source is active and marks the manager
// closed. Safe to call when nothing is open.
func (m *Manager) Close() error {
	m.mu.Lock()
	defer m.mu.Unlock()
	if cam := m.ffmpegCam; cam != nil {
		// Stops the ffmpeg process and its read loop.
		_ = cam.Close()
		m.ffmpegCam = nil
	}
	m.mockCamera = nil
	m.isOpen = false
	return nil
}
// ReadFrame returns the latest JPEG frame from the active capture source,
// preferring the real camera over the mock one when both exist.
func (m *Manager) ReadFrame() ([]byte, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	switch {
	case !m.isOpen:
		return nil, fmt.Errorf("camera not open")
	case m.ffmpegCam != nil:
		return m.ffmpegCam.ReadFrame()
	case m.mockCamera != nil:
		return m.mockCamera.ReadFrame()
	default:
		return nil, fmt.Errorf("no camera available")
	}
}
// IsOpen reports whether a capture source is currently active.
func (m *Manager) IsOpen() bool {
	m.mu.Lock()
	defer m.mu.Unlock()
	return m.isOpen
}

View File

@ -0,0 +1,81 @@
package camera
import (
"fmt"
"net/http"
"sync"
)
// MJPEGStreamer fans JPEG frames out to any number of HTTP clients as an
// MJPEG (multipart/x-mixed-replace) stream.
type MJPEGStreamer struct {
	frameCh chan []byte // inbound frames from the capture pipeline
	clients map[chan []byte]bool // per-client buffered frame queues
	mu sync.Mutex // guards clients
}
// NewMJPEGStreamer builds a streamer; call Run in a goroutine to start fan-out.
func NewMJPEGStreamer() *MJPEGStreamer {
	return &MJPEGStreamer{
		frameCh: make(chan []byte, 3),
		clients: make(map[chan []byte]bool),
	}
}
// FrameChannel exposes the send-only side producers push frames into.
func (s *MJPEGStreamer) FrameChannel() chan<- []byte {
	return s.frameCh
}
// Run fans incoming frames out to every connected client. Frames are
// dropped for clients whose buffers are full rather than blocking the
// capture pipeline. Returns when the frame channel is closed.
func (s *MJPEGStreamer) Run() {
	for frame := range s.frameCh {
		s.mu.Lock()
		for clientCh := range s.clients {
			select {
			case clientCh <- frame:
			default:
				// Slow client: skip this frame instead of stalling everyone.
			}
		}
		s.mu.Unlock()
	}
}
// AddClient registers a new consumer and returns its buffered frame channel.
func (s *MJPEGStreamer) AddClient() chan []byte {
	ch := make(chan []byte, 3)
	s.mu.Lock()
	s.clients[ch] = true
	s.mu.Unlock()
	return ch
}
// RemoveClient drops a consumer; its channel is only forgotten, not closed.
func (s *MJPEGStreamer) RemoveClient(ch chan []byte) {
	s.mu.Lock()
	delete(s.clients, ch)
	s.mu.Unlock()
}
// ServeHTTP streams frames to one client as multipart/x-mixed-replace MJPEG
// until the request context is cancelled (client disconnects).
func (s *MJPEGStreamer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "multipart/x-mixed-replace; boundary=frame")
	w.Header().Set("Cache-Control", "no-cache")
	w.Header().Set("Connection", "keep-alive")

	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "Streaming not supported", 500)
		return
	}

	frames := s.AddClient()
	defer s.RemoveClient(frames)

	done := r.Context().Done()
	for {
		select {
		case <-done:
			return
		case frame := <-frames:
			// Each part carries its own headers followed by raw JPEG bytes.
			fmt.Fprintf(w, "--frame\r\n")
			fmt.Fprintf(w, "Content-Type: image/jpeg\r\n")
			fmt.Fprintf(w, "Content-Length: %d\r\n\r\n", len(frame))
			w.Write(frame)
			fmt.Fprintf(w, "\r\n")
			flusher.Flush()
		}
	}
}

View File

@ -0,0 +1,95 @@
package camera
import (
"bytes"
"fmt"
"image"
"image/color"
"image/jpeg"
"time"
)
// MockCamera synthesizes animated test-card JPEG frames without hardware.
type MockCamera struct {
	width int
	height int
	frameCount int // increments per frame; drives the scrolling gradient
}
// NewMockCamera creates a mock camera producing width x height frames.
func NewMockCamera(width, height int) *MockCamera {
	return &MockCamera{width: width, height: height}
}
// ReadFrame returns the next synthetic JPEG frame.
func (mc *MockCamera) ReadFrame() ([]byte, error) {
	mc.frameCount++
	return mc.generateTestCard()
}
// generateTestCard renders one frame: a horizontally scrolling RGB gradient,
// a dark header bar with a frame/time label, and a centre crosshair, encoded
// as JPEG at quality 75.
func (mc *MockCamera) generateTestCard() ([]byte, error) {
	img := image.NewRGBA(image.Rect(0, 0, mc.width, mc.height))
	// Scroll the gradient by one pixel per frame.
	offset := mc.frameCount % mc.width
	for y := 0; y < mc.height; y++ {
		for x := 0; x < mc.width; x++ {
			pos := (x + offset) % mc.width
			ratio := float64(pos) / float64(mc.width)
			var r, g, b uint8
			// Piecewise gradient: red→green, green→blue, blue→red.
			if ratio < 0.33 {
				r = uint8(255 * (1 - ratio/0.33))
				g = uint8(255 * ratio / 0.33)
			} else if ratio < 0.66 {
				g = uint8(255 * (1 - (ratio-0.33)/0.33))
				b = uint8(255 * (ratio - 0.33) / 0.33)
			} else {
				b = uint8(255 * (1 - (ratio-0.66)/0.34))
				r = uint8(255 * (ratio - 0.66) / 0.34)
			}
			img.SetRGBA(x, y, color.RGBA{R: r, G: g, B: b, A: 255})
		}
	}
	// Draw dark overlay bar at top for text area
	for y := 0; y < 40; y++ {
		for x := 0; x < mc.width; x++ {
			img.SetRGBA(x, y, color.RGBA{R: 0, G: 0, B: 0, A: 180})
		}
	}
	// Draw "MOCK CAMERA" text block and frame counter using simple rectangles
	drawTextBlock(img, 10, 10, fmt.Sprintf("MOCK CAMERA | Frame: %d | %s", mc.frameCount, time.Now().Format("15:04:05")))
	// Draw center crosshair
	cx, cy := mc.width/2, mc.height/2
	for i := -20; i <= 20; i++ {
		if cx+i >= 0 && cx+i < mc.width {
			img.SetRGBA(cx+i, cy, color.RGBA{R: 255, G: 255, B: 255, A: 200})
		}
		if cy+i >= 0 && cy+i < mc.height {
			img.SetRGBA(cx, cy+i, color.RGBA{R: 255, G: 255, B: 255, A: 200})
		}
	}
	var buf bytes.Buffer
	if err := jpeg.Encode(&buf, img, &jpeg.Options{Quality: 75}); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
func drawTextBlock(img *image.RGBA, x, y int, text string) {
white := color.RGBA{R: 255, G: 255, B: 255, A: 255}
// Simple pixel-based text rendering: each character is a 5x7 block
for i, ch := range text {
if ch == ' ' {
continue
}
px := x + i*6
// Draw a small white dot for each character position
for dy := 0; dy < 5; dy++ {
for dx := 0; dx < 4; dx++ {
if px+dx < img.Bounds().Max.X && y+dy < img.Bounds().Max.Y {
img.SetRGBA(px+dx, y+dy, white)
}
}
}
}
}

View File

@ -0,0 +1,146 @@
package camera
import (
"bytes"
"fmt"
"image"
"image/jpeg"
"image/png"
"os"
"path/filepath"
"strings"
"sync"
)
// BatchImageEntry holds metadata and decoded JPEG data for a single image in a batch.
type BatchImageEntry struct {
Filename string
FilePath string
JpegData []byte
Width int
Height int
}
// MultiImageSource provides sequential JPEG frames from multiple uploaded images.
// It implements FrameSource. The pipeline calls ReadFrame for the current image,
// then Advance to move to the next one. After the last image, ReadFrame returns an error.
type MultiImageSource struct {
images []BatchImageEntry
currentIdx int
mu sync.Mutex
}
// NewMultiImageSource creates a source from multiple file paths.
// Each file is decoded (JPG/PNG) and converted to JPEG in memory.
func NewMultiImageSource(filePaths []string, filenames []string) (*MultiImageSource, error) {
if len(filePaths) != len(filenames) {
return nil, fmt.Errorf("filePaths and filenames length mismatch")
}
entries := make([]BatchImageEntry, 0, len(filePaths))
for i, fp := range filePaths {
entry, err := loadBatchImageEntry(fp, filenames[i])
if err != nil {
// Clean up already-loaded temp files
for _, e := range entries {
os.Remove(e.FilePath)
}
return nil, fmt.Errorf("image %d (%s): %w", i, filenames[i], err)
}
entries = append(entries, entry)
}
return &MultiImageSource{images: entries}, nil
}
func loadBatchImageEntry(filePath, filename string) (BatchImageEntry, error) {
f, err := os.Open(filePath)
if err != nil {
return BatchImageEntry{}, fmt.Errorf("failed to open: %w", err)
}
defer f.Close()
ext := strings.ToLower(filepath.Ext(filePath))
var img image.Image
switch ext {
case ".jpg", ".jpeg":
img, err = jpeg.Decode(f)
case ".png":
img, err = png.Decode(f)
default:
return BatchImageEntry{}, fmt.Errorf("unsupported format: %s", ext)
}
if err != nil {
return BatchImageEntry{}, fmt.Errorf("failed to decode: %w", err)
}
bounds := img.Bounds()
var buf bytes.Buffer
if err := jpeg.Encode(&buf, img, &jpeg.Options{Quality: 90}); err != nil {
return BatchImageEntry{}, fmt.Errorf("failed to encode JPEG: %w", err)
}
return BatchImageEntry{
Filename: filename,
FilePath: filePath,
JpegData: buf.Bytes(),
Width: bounds.Dx(),
Height: bounds.Dy(),
}, nil
}
// ReadFrame returns the current image's JPEG data.
// It fails once every image in the batch has been consumed.
func (s *MultiImageSource) ReadFrame() ([]byte, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.currentIdx >= len(s.images) {
		return nil, fmt.Errorf("all images consumed")
	}
	return s.images[s.currentIdx].JpegData, nil
}

// Advance moves to the next image. Returns false if no more images remain.
func (s *MultiImageSource) Advance() bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.currentIdx++
	return s.currentIdx < len(s.images)
}

// CurrentIndex returns the 0-based index of the current image.
// After the batch is exhausted this equals the total image count.
func (s *MultiImageSource) CurrentIndex() int {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.currentIdx
}

// CurrentEntry returns metadata for the current image.
// Fix: the original indexed s.images unconditionally and panicked with an
// index-out-of-range when called after the batch was exhausted
// (currentIdx == len(images), e.g. after the final Advance). It now
// returns a zero-value entry in that case.
func (s *MultiImageSource) CurrentEntry() BatchImageEntry {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.currentIdx < 0 || s.currentIdx >= len(s.images) {
		return BatchImageEntry{}
	}
	return s.images[s.currentIdx]
}
// TotalImages returns the number of images in the batch.
func (s *MultiImageSource) TotalImages() int {
	return len(s.images)
}

// GetImageByIndex returns JPEG data for a specific image by index.
func (s *MultiImageSource) GetImageByIndex(index int) ([]byte, error) {
	if index >= 0 && index < len(s.images) {
		return s.images[index].JpegData, nil
	}
	return nil, fmt.Errorf("image index %d out of range [0, %d)", index, len(s.images))
}

// Images returns all batch entries.
func (s *MultiImageSource) Images() []BatchImageEntry {
	return s.images
}

// Close removes all temporary files backing the batch. Always returns nil.
func (s *MultiImageSource) Close() error {
	for i := range s.images {
		os.Remove(s.images[i].FilePath)
	}
	return nil
}

View File

@ -0,0 +1,230 @@
package camera
import (
"context"
"time"
"edge-ai-platform/internal/driver"
)
// SourceType identifies the kind of frame source used in the pipeline.
type SourceType string

const (
	SourceCamera     SourceType = "camera"      // live camera feed
	SourceImage      SourceType = "image"       // single still image
	SourceVideo      SourceType = "video"       // video file or stream URL
	SourceBatchImage SourceType = "batch_image" // multiple uploaded images
)

// InferencePipeline reads frames from a FrameSource, runs them through a
// device driver, and fans out raw frames and inference results on the
// provided channels. Channel sends are non-blocking: slow consumers
// cause data to be dropped, not the pipeline to stall.
type InferencePipeline struct {
	source      FrameSource
	sourceType  SourceType
	device      driver.DeviceDriver
	frameCh     chan<- []byte                  // MJPEG frame stream
	resultCh    chan<- *driver.InferenceResult // inference results
	cancel      context.CancelFunc             // set by Start; stops run()
	doneCh      chan struct{}                  // closed when run() exits
	frameOffset int // starting frame index (non-zero after seek)
}
// NewInferencePipeline creates a pipeline that reads frames from source,
// runs them through device, and publishes frames/results on the given
// channels. Fix: it now delegates to NewInferencePipelineWithOffset with a
// zero offset instead of duplicating the struct literal, so the two
// constructors cannot drift apart.
func NewInferencePipeline(
	source FrameSource,
	sourceType SourceType,
	device driver.DeviceDriver,
	frameCh chan<- []byte,
	resultCh chan<- *driver.InferenceResult,
) *InferencePipeline {
	return NewInferencePipelineWithOffset(source, sourceType, device, frameCh, resultCh, 0)
}

// NewInferencePipelineWithOffset creates a pipeline with a frame offset
// (used after seeking into a video, so reported frame indices stay
// absolute rather than restarting at zero).
func NewInferencePipelineWithOffset(
	source FrameSource,
	sourceType SourceType,
	device driver.DeviceDriver,
	frameCh chan<- []byte,
	resultCh chan<- *driver.InferenceResult,
	frameOffset int,
) *InferencePipeline {
	return &InferencePipeline{
		source:      source,
		sourceType:  sourceType,
		device:      device,
		frameCh:     frameCh,
		resultCh:    resultCh,
		doneCh:      make(chan struct{}),
		frameOffset: frameOffset,
	}
}
// Start launches the pipeline loop in a background goroutine.
// NOTE(review): p.cancel is stored without synchronization — assumes Start
// and Stop are invoked from the same goroutine; confirm callers.
func (p *InferencePipeline) Start() {
	ctx, cancel := context.WithCancel(context.Background())
	p.cancel = cancel
	go p.run(ctx)
}

// Stop signals the pipeline to exit. It does not wait for completion
// (use Done() for that). Calling Stop before Start is a no-op.
func (p *InferencePipeline) Stop() {
	if p.cancel != nil {
		p.cancel()
	}
}

// Done returns a channel that closes when the pipeline finishes.
// For camera mode this only closes on Stop(); for image/video it
// closes when the source is exhausted.
func (p *InferencePipeline) Done() <-chan struct{} {
	return p.doneCh
}
// run is the pipeline main loop. It reads frames from the source, fans
// them out to the MJPEG stream channel (non-blocking), runs inference, and
// publishes results (non-blocking). Behavior varies by source type:
//   - camera: infer every frame, paced to ~15 FPS
//   - image:  infer exactly once, then keep re-sending the same frame so
//     late-connecting MJPEG clients still see it
//   - video:  infer every frame; frames arrive via VideoSource.frameCh
//   - batch_image: infer each image once, advancing through the batch
//
// It exits when ctx is cancelled or the source is exhausted; doneCh is
// closed on exit.
func (p *InferencePipeline) run(ctx context.Context) {
	defer close(p.doneCh)
	targetInterval := time.Second / 15 // 15 FPS
	inferenceRan := false              // for image mode: only run inference once
	frameIndex := 0                    // video frame counter
	for {
		// Bail out promptly on cancellation.
		select {
		case <-ctx.Done():
			return
		default:
		}
		start := time.Now()
		var jpegFrame []byte
		var readErr error
		// Video mode: ReadFrame blocks on channel, need to respect ctx cancel.
		if p.sourceType == SourceVideo {
			vs := p.source.(*VideoSource)
			select {
			case <-ctx.Done():
				return
			case frame, ok := <-vs.frameCh:
				if !ok {
					return // all frames consumed
				}
				jpegFrame = frame
			}
		} else {
			jpegFrame, readErr = p.source.ReadFrame()
			if readErr != nil {
				// Read failure (or exhausted source): back off briefly and retry.
				time.Sleep(100 * time.Millisecond)
				continue
			}
		}
		// Send to MJPEG stream; drop the frame if no consumer is ready.
		select {
		case p.frameCh <- jpegFrame:
		default:
		}
		// Batch image mode: process each image sequentially, then advance.
		if p.sourceType == SourceBatchImage {
			mis := p.source.(*MultiImageSource)
			for {
				select {
				case <-ctx.Done():
					return
				default:
				}
				frame, err := mis.ReadFrame()
				if err != nil {
					return // batch exhausted
				}
				// Send current frame to MJPEG (non-blocking).
				select {
				case p.frameCh <- frame:
				default:
				}
				// Run inference on this image and attach batch metadata.
				result, inferErr := p.device.RunInference(frame)
				if inferErr == nil {
					entry := mis.CurrentEntry()
					result.ImageIndex = mis.CurrentIndex()
					result.TotalImages = mis.TotalImages()
					result.Filename = entry.Filename
					select {
					case p.resultCh <- result:
					default:
					}
				}
				// Move to next image.
				if !mis.Advance() {
					// Keep sending last frame for late-connecting MJPEG clients (~2s).
					for i := 0; i < 30; i++ {
						select {
						case <-ctx.Done():
							return
						default:
						}
						select {
						case p.frameCh <- frame:
						default:
						}
						time.Sleep(time.Second / 15)
					}
					return
				}
			}
		}
		// Image mode: only run inference once, then keep sending
		// the same frame to MJPEG so late-connecting clients can see it.
		if p.sourceType == SourceImage {
			if !inferenceRan {
				inferenceRan = true
				result, err := p.device.RunInference(jpegFrame)
				if err == nil {
					select {
					case p.resultCh <- result:
					default:
					}
				}
			}
			// Pace the re-send loop to the target FPS.
			elapsed := time.Since(start)
			if elapsed < targetInterval {
				time.Sleep(targetInterval - elapsed)
			}
			continue
		}
		// Camera / Video mode: run inference every frame; frames whose
		// inference fails are simply skipped.
		result, err := p.device.RunInference(jpegFrame)
		if err != nil {
			continue
		}
		// Video mode: attach frame progress (offset accounts for seeks).
		if p.sourceType == SourceVideo {
			result.FrameIndex = p.frameOffset + frameIndex
			frameIndex++
			vs := p.source.(*VideoSource)
			result.TotalFrames = vs.TotalFrames()
		}
		select {
		case p.resultCh <- result:
		default:
		}
		// Throttle to the target frame rate.
		elapsed := time.Since(start)
		if elapsed < targetInterval {
			time.Sleep(targetInterval - elapsed)
		}
	}
}

View File

@ -0,0 +1,277 @@
package camera
import (
"bufio"
"fmt"
"io"
"os"
"os/exec"
"strconv"
"strings"
"sync"
"sync/atomic"
)
// VideoInfo holds metadata extracted by ffprobe before the pipeline starts.
type VideoInfo struct {
	DurationSec float64 // total duration in seconds
	TotalFrames int     // estimated total frames at the target FPS
}

// ProbeVideoInfo runs ffprobe to extract the duration of a video file or
// URL and derives an estimated frame count at the given FPS (defaulting to
// 15 when fps <= 0). It returns the zero value, without an error, whenever
// the duration is indeterminate: ffprobe missing, probe failure, or live
// streams that report no duration.
func ProbeVideoInfo(input string, fps float64) VideoInfo {
	probe := exec.Command("ffprobe",
		"-v", "error",
		"-select_streams", "v:0",
		"-show_entries", "format=duration",
		"-of", "csv=p=0",
		input,
	)
	raw, err := probe.Output()
	if err != nil {
		return VideoInfo{}
	}
	text := strings.TrimSpace(string(raw))
	if text == "" || text == "N/A" {
		return VideoInfo{}
	}
	seconds, parseErr := strconv.ParseFloat(text, 64)
	if parseErr != nil {
		return VideoInfo{}
	}
	if fps <= 0 {
		fps = 15
	}
	return VideoInfo{
		DurationSec: seconds,
		TotalFrames: int(seconds * fps),
	}
}
// VideoSource reads a video file or URL frame-by-frame using ffmpeg,
// outputting JPEG frames via stdout. Reuses the same JPEG SOI/EOI marker
// parsing pattern as FFmpegCamera.
type VideoSource struct {
	cmd         *exec.Cmd
	stdout      io.ReadCloser
	frameCh     chan []byte // decoded frames queue (closed by readLoop on exit)
	mu          sync.Mutex  // guards finished and err
	done        chan struct{} // closed when readLoop exits
	finished    bool          // true once the ffmpeg stream has ended
	err         error         // terminal non-EOF stream error, if any
	filePath    string        // local file path (empty for URL sources)
	isURL       bool          // true when source is a URL, skip file cleanup
	totalFrames int64         // expected frame count from ffprobe; 0 means unknown (atomic)
	frameCount  int64         // atomic counter incremented in readLoop
}
// NewVideoSource starts an ffmpeg process that decodes a local video file
// and outputs MJPEG frames to stdout at the specified FPS.
func NewVideoSource(filePath string, fps float64) (*VideoSource, error) {
	return newVideoSource(filePath, fps, false, 0)
}

// NewVideoSourceWithSeek starts ffmpeg on a local file from a specific
// position (in seconds).
func NewVideoSourceWithSeek(filePath string, fps float64, seekSeconds float64) (*VideoSource, error) {
	return newVideoSource(filePath, fps, false, seekSeconds)
}

// NewVideoSourceFromURL starts an ffmpeg process that reads from a URL
// (HTTP, HTTPS, RTSP, etc.) and outputs MJPEG frames to stdout.
// URL sources are never deleted on Close.
func NewVideoSourceFromURL(rawURL string, fps float64) (*VideoSource, error) {
	return newVideoSource(rawURL, fps, true, 0)
}

// NewVideoSourceFromURLWithSeek starts ffmpeg from a URL at a specific position.
func NewVideoSourceFromURLWithSeek(rawURL string, fps float64, seekSeconds float64) (*VideoSource, error) {
	return newVideoSource(rawURL, fps, true, seekSeconds)
}
// ResolveWithYTDLP uses yt-dlp to resolve a platform URL (YouTube, Vimeo,
// and similar) into a direct video stream URL.
// Returns the resolved direct URL, or an error when yt-dlp is not
// installed or the extraction fails.
func ResolveWithYTDLP(rawURL string) (string, error) {
	// Equivalent to: yt-dlp -f "best[ext=mp4]/best" --get-url <URL>
	out, err := exec.Command("yt-dlp", "-f", "best[ext=mp4]/best", "--get-url", rawURL).Output()
	if err != nil {
		if exitErr, ok := err.(*exec.ExitError); ok {
			return "", fmt.Errorf("yt-dlp failed: %s", string(exitErr.Stderr))
		}
		return "", fmt.Errorf("yt-dlp not available: %w", err)
	}
	trimmed := strings.TrimSpace(string(out))
	if trimmed == "" {
		return "", fmt.Errorf("yt-dlp returned empty URL")
	}
	// yt-dlp may emit separate video and audio URLs on multiple lines;
	// keep only the first.
	return strings.SplitN(trimmed, "\n", 2)[0], nil
}
// newVideoSource builds and starts the ffmpeg process shared by all public
// constructors. It decodes input (a file path or URL), optionally seeking
// to seekSeconds first, and emits MJPEG frames on stdout at the given FPS
// (default 15 when fps <= 0). A background goroutine (readLoop) splits the
// stdout byte stream into individual JPEG frames.
func newVideoSource(input string, fps float64, isURL bool, seekSeconds float64) (*VideoSource, error) {
	if fps <= 0 {
		fps = 15
	}
	args := []string{}
	// -ss placed before -i performs a fast input-side seek.
	if seekSeconds > 0 {
		args = append(args, "-ss", fmt.Sprintf("%.3f", seekSeconds))
	}
	args = append(args,
		"-i", input,
		"-vf", fmt.Sprintf("fps=%g", fps), // resample to the target FPS
		"-f", "image2pipe", // stream frames over stdout
		"-vcodec", "mjpeg",
		"-q:v", "5", // MJPEG quality (lower is better)
		"-an", // no audio
		"-",   // write to stdout
	)
	cmd := exec.Command("ffmpeg", args...)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, fmt.Errorf("failed to get stdout pipe: %w", err)
	}
	cmd.Stderr = nil // discard ffmpeg's diagnostic output
	if err := cmd.Start(); err != nil {
		return nil, fmt.Errorf("failed to start ffmpeg: %w", err)
	}
	filePath := ""
	if !isURL {
		filePath = input // remembered so Close can delete the temp file
	}
	vs := &VideoSource{
		cmd:     cmd,
		stdout:  stdout,
		frameCh: make(chan []byte, 30), // buffer up to 30 frames
		done:    make(chan struct{}),
		filePath: filePath,
		isURL:    isURL,
	}
	go vs.readLoop()
	return vs, nil
}
// readLoop scans ffmpeg stdout byte-by-byte for JPEG SOI (FF D8) / EOI
// (FF D9) markers to split the stream into frames, which it sends on
// frameCh. On exit it closes frameCh and done, and records any non-EOF
// error under mu.
// NOTE(review): an FF D9 byte pair occurring inside entropy-coded data or
// an embedded thumbnail would truncate a frame early — assumed not to
// occur in ffmpeg's MJPEG output; confirm if corrupt frames are observed.
func (v *VideoSource) readLoop() {
	defer close(v.done)
	defer close(v.frameCh)
	reader := bufio.NewReaderSize(v.stdout, 1024*1024)
	buf := make([]byte, 0, 512*1024)
	inFrame := false
	for {
		b, err := reader.ReadByte()
		if err != nil {
			// Stream ended (process exit or kill). EOF is the normal case.
			v.mu.Lock()
			v.finished = true
			if err != io.EOF {
				v.err = fmt.Errorf("ffmpeg stream ended: %w", err)
			}
			v.mu.Unlock()
			return
		}
		if !inFrame {
			// Hunting for the SOI marker FF D8.
			if b == 0xFF {
				next, err := reader.ReadByte()
				if err != nil {
					v.mu.Lock()
					v.finished = true
					v.mu.Unlock()
					return
				}
				if next == 0xD8 {
					// Frame starts: reset the buffer and record the marker.
					buf = buf[:0]
					buf = append(buf, 0xFF, 0xD8)
					inFrame = true
				}
			}
			continue
		}
		buf = append(buf, b)
		// Frame complete at the EOI marker FF D9.
		if b == 0xD9 && len(buf) >= 2 && buf[len(buf)-2] == 0xFF {
			frame := make([]byte, len(buf))
			copy(frame, buf)
			v.frameCh <- frame // blocks if buffer full, applies backpressure
			atomic.AddInt64(&v.frameCount, 1)
			inFrame = false
		}
	}
}
// ReadFrame returns the next decoded frame, blocking until one is
// available. It fails once all frames have been consumed and ffmpeg has
// finished (frameCh closed).
func (v *VideoSource) ReadFrame() ([]byte, error) {
	if frame, ok := <-v.frameCh; ok {
		return frame, nil
	}
	return nil, fmt.Errorf("video playback complete")
}

// SetTotalFrames sets the expected total frame count (from ffprobe).
func (v *VideoSource) SetTotalFrames(n int) {
	atomic.StoreInt64(&v.totalFrames, int64(n))
}

// TotalFrames returns the expected total frame count, or 0 if unknown.
func (v *VideoSource) TotalFrames() int {
	total := atomic.LoadInt64(&v.totalFrames)
	return int(total)
}

// FrameCount returns the number of frames decoded so far.
func (v *VideoSource) FrameCount() int {
	decoded := atomic.LoadInt64(&v.frameCount)
	return int(decoded)
}
// IsFinished returns true when the video file has been fully decoded
// AND all buffered frames have been consumed.
// NOTE(review): the len(frameCh) check races with concurrent consumers,
// so the answer is best-effort rather than a strict barrier.
func (v *VideoSource) IsFinished() bool {
	v.mu.Lock()
	finished := v.finished
	v.mu.Unlock()
	return finished && len(v.frameCh) == 0
}
// stopAndDrain kills the ffmpeg process, drains any buffered frames so a
// readLoop blocked on a send can complete, and waits for readLoop to exit.
// Shared by Close and CloseWithoutRemove, whose shutdown sequences were
// previously duplicated line-for-line.
func (v *VideoSource) stopAndDrain() {
	if v.cmd != nil && v.cmd.Process != nil {
		_ = v.cmd.Process.Kill()
		_ = v.cmd.Wait()
	}
	// Drain remaining frames; the range ends when readLoop closes frameCh.
	for range v.frameCh {
	}
	<-v.done
}

// CloseWithoutRemove stops the ffmpeg process but does NOT delete the temp
// file. Used when seeking: ffmpeg is restarted from a different position
// against the same file.
func (v *VideoSource) CloseWithoutRemove() error {
	v.stopAndDrain()
	return nil
}

// Close stops ffmpeg, waits for the reader goroutine to exit, and removes
// the temporary file for local (non-URL) sources. Always returns nil.
func (v *VideoSource) Close() error {
	v.stopAndDrain()
	// Only remove temp files, never URL sources.
	if !v.isURL && v.filePath != "" {
		_ = os.Remove(v.filePath)
	}
	return nil
}

View File

@ -0,0 +1,100 @@
package cluster
import (
"fmt"
"sync"
"edge-ai-platform/internal/driver"
)
// Dispatcher implements Weighted Round-Robin frame dispatching across
// multiple devices. Each device receives frames proportional to its weight.
//
// Example: devices A(w=3), B(w=1), C(w=3)
// Sequence: A,A,A, B, C,C,C, A,A,A, B, ...
type Dispatcher struct {
	members    []DeviceMember        // cluster members, parallel to drivers
	drivers    []driver.DeviceDriver // device drivers, same order as members
	degraded   map[string]bool       // device IDs currently skipped by Next
	current    int                   // index of the member being served
	remaining  int                   // frames left in the current member's turn
	frameIndex int64                 // monotonically increasing frame counter
	mu         sync.Mutex
}

// NewDispatcher creates a dispatcher with the given members and drivers.
// The members and drivers slices must be in the same order.
// NOTE(review): the slice lengths are not validated here — a mismatch
// would surface as an index panic in Next; confirm callers always pass
// parallel slices.
func NewDispatcher(members []DeviceMember, drivers []driver.DeviceDriver) *Dispatcher {
	d := &Dispatcher{
		members:  members,
		drivers:  drivers,
		degraded: make(map[string]bool),
	}
	// Seed the first member's budget so dispatching starts with member 0.
	if len(members) > 0 {
		d.remaining = members[0].Weight
	}
	return d
}
// Next returns the next device driver to dispatch a frame to, along with a
// monotonically increasing frame index. The current device is served until
// its per-turn budget (remaining) is exhausted, then the cursor advances to
// the next member and reloads that member's weight. Degraded devices are
// skipped. Returns an error if no active devices remain.
func (d *Dispatcher) Next() (driver.DeviceDriver, int64, error) {
	d.mu.Lock()
	defer d.mu.Unlock()
	if len(d.members) == 0 {
		return nil, 0, fmt.Errorf("no devices in dispatcher")
	}
	// Try to find an active device within one full cycle; tried counts the
	// degraded members skipped so the loop cannot run forever.
	tried := 0
	for tried < len(d.members) {
		if d.remaining <= 0 {
			// Budget used up: advance and reload the next member's weight.
			d.current = (d.current + 1) % len(d.members)
			d.remaining = d.members[d.current].Weight
		}
		if !d.degraded[d.members[d.current].DeviceID] {
			drv := d.drivers[d.current]
			idx := d.frameIndex
			d.frameIndex++
			d.remaining--
			return drv, idx, nil
		}
		// Skip degraded device: zero its budget so the next iteration advances.
		d.remaining = 0
		tried++
	}
	return nil, 0, fmt.Errorf("no active devices available")
}
// MarkDegraded marks a device as degraded; Next will skip it.
func (d *Dispatcher) MarkDegraded(deviceID string) {
	d.mu.Lock()
	d.degraded[deviceID] = true
	d.mu.Unlock()
}

// MarkActive re-enables a previously degraded device.
func (d *Dispatcher) MarkActive(deviceID string) {
	d.mu.Lock()
	delete(d.degraded, deviceID)
	d.mu.Unlock()
}

// ActiveCount returns the number of non-degraded devices.
func (d *Dispatcher) ActiveCount() int {
	d.mu.Lock()
	defer d.mu.Unlock()
	active := 0
	for _, member := range d.members {
		if d.degraded[member.DeviceID] {
			continue
		}
		active++
	}
	return active
}

View File

@ -0,0 +1,233 @@
package cluster
import (
"fmt"
"strings"
"sync"
"edge-ai-platform/internal/device"
)
// Manager handles cluster lifecycle (CRUD) and holds cluster state in
// memory. State is process-local only; nothing is persisted.
type Manager struct {
	clusters  map[string]*Cluster // cluster ID -> cluster
	deviceMgr *device.Manager     // source of truth for device sessions
	nextID    int                 // monotonically increasing cluster ID suffix
	mu        sync.RWMutex
}

// NewManager creates a new cluster manager backed by the given device manager.
func NewManager(deviceMgr *device.Manager) *Manager {
	return &Manager{
		clusters:  make(map[string]*Cluster),
		deviceMgr: deviceMgr,
	}
}
// ListClusters returns a snapshot slice of all clusters.
func (m *Manager) ListClusters() []*Cluster {
	m.mu.RLock()
	defer m.mu.RUnlock()
	out := make([]*Cluster, 0, len(m.clusters))
	for _, cluster := range m.clusters {
		out = append(out, cluster)
	}
	return out
}

// GetCluster returns the cluster with the given ID, or an error if absent.
func (m *Manager) GetCluster(id string) (*Cluster, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	if cluster, ok := m.clusters[id]; ok {
		return cluster, nil
	}
	return nil, fmt.Errorf("cluster not found: %s", id)
}
// CreateCluster creates a new cluster with the given devices. Devices must
// exist and must not already belong to another cluster.
// NOTE(review): connectivity is NOT verified here — only existence via
// deviceMgr.GetDevice; confirm whether a connected-state check was intended.
func (m *Manager) CreateCluster(name string, deviceIDs []string) (*Cluster, error) {
	if len(deviceIDs) == 0 {
		return nil, fmt.Errorf("at least one device is required")
	}
	if len(deviceIDs) > MaxClusterSize {
		return nil, fmt.Errorf("cluster size exceeds maximum of %d devices", MaxClusterSize)
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	// Verify devices exist and are not in another cluster.
	members := make([]DeviceMember, 0, len(deviceIDs))
	for _, did := range deviceIDs {
		session, err := m.deviceMgr.GetDevice(did)
		if err != nil {
			return nil, fmt.Errorf("device %s not found", did)
		}
		// Check device isn't already in a cluster (removed members don't count).
		for _, existing := range m.clusters {
			for _, dm := range existing.Devices {
				if dm.DeviceID == did && dm.Status != MemberRemoved {
					return nil, fmt.Errorf("device %s is already in cluster %s", did, existing.Name)
				}
			}
		}
		info := session.Driver.Info()
		// Default weight is chosen by chip class (KL720-class > KL520-class).
		weight := defaultWeight(info.Type)
		members = append(members, DeviceMember{
			DeviceID:   did,
			Weight:     weight,
			Status:     MemberActive,
			DeviceName: info.Name,
			DeviceType: info.Type,
		})
	}
	m.nextID++
	id := fmt.Sprintf("cluster-%d", m.nextID)
	cluster := &Cluster{
		ID:      id,
		Name:    name,
		Devices: members,
		Status:  ClusterIdle,
	}
	m.clusters[id] = cluster
	return cluster, nil
}
// DeleteCluster removes a cluster by ID; it fails if the ID is unknown.
func (m *Manager) DeleteCluster(id string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	_, exists := m.clusters[id]
	if !exists {
		return fmt.Errorf("cluster not found: %s", id)
	}
	delete(m.clusters, id)
	return nil
}
// AddDevice adds a device to an existing cluster. Passing weight <= 0
// selects the chip-class default weight.
func (m *Manager) AddDevice(clusterID, deviceID string, weight int) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	c, ok := m.clusters[clusterID]
	if !ok {
		return fmt.Errorf("cluster not found: %s", clusterID)
	}
	if len(c.Devices) >= MaxClusterSize {
		return fmt.Errorf("cluster already has maximum %d devices", MaxClusterSize)
	}
	// Check not already in this cluster (removed members don't count).
	for _, dm := range c.Devices {
		if dm.DeviceID == deviceID && dm.Status != MemberRemoved {
			return fmt.Errorf("device %s is already in this cluster", deviceID)
		}
	}
	session, err := m.deviceMgr.GetDevice(deviceID)
	if err != nil {
		return fmt.Errorf("device %s not found", deviceID)
	}
	info := session.Driver.Info()
	if weight <= 0 {
		weight = defaultWeight(info.Type)
	}
	c.Devices = append(c.Devices, DeviceMember{
		DeviceID:   deviceID,
		Weight:     weight,
		Status:     MemberActive,
		DeviceName: info.Name,
		DeviceType: info.Type,
	})
	return nil
}
// RemoveDevice removes a device from a cluster.
// NOTE(review): the member is deleted from the slice outright rather than
// being marked MemberRemoved, even though other code paths filter on
// MemberRemoved — confirm whether tombstoning was intended instead.
func (m *Manager) RemoveDevice(clusterID, deviceID string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	c, ok := m.clusters[clusterID]
	if !ok {
		return fmt.Errorf("cluster not found: %s", clusterID)
	}
	found := false
	// Rebuild the member slice without the target device.
	filtered := make([]DeviceMember, 0, len(c.Devices))
	for _, dm := range c.Devices {
		if dm.DeviceID == deviceID {
			found = true
			continue
		}
		filtered = append(filtered, dm)
	}
	if !found {
		return fmt.Errorf("device %s not found in cluster", deviceID)
	}
	c.Devices = filtered
	return nil
}
// UpdateWeight changes a device's dispatch weight (minimum 1).
func (m *Manager) UpdateWeight(clusterID, deviceID string, weight int) error {
	if weight < 1 {
		return fmt.Errorf("weight must be at least 1")
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	cluster, ok := m.clusters[clusterID]
	if !ok {
		return fmt.Errorf("cluster not found: %s", clusterID)
	}
	for idx := range cluster.Devices {
		member := &cluster.Devices[idx]
		if member.DeviceID != deviceID {
			continue
		}
		member.Weight = weight
		return nil
	}
	return fmt.Errorf("device %s not found in cluster", deviceID)
}

// SetStatus updates the cluster status; unknown cluster IDs are ignored.
func (m *Manager) SetStatus(clusterID string, status ClusterStatus) {
	m.mu.Lock()
	defer m.mu.Unlock()
	cluster, ok := m.clusters[clusterID]
	if ok {
		cluster.Status = status
	}
}
// DeviceManager returns the underlying device manager.
func (m *Manager) DeviceManager() *device.Manager {
	return m.deviceMgr
}

// defaultWeight maps a device type string to its default dispatch weight:
// KL720/KL730-class chips get the heavier KL720 weight; everything else
// falls back to the KL520 weight.
func defaultWeight(deviceType string) int {
	switch dt := strings.ToLower(deviceType); {
	case strings.Contains(dt, "kl720"), strings.Contains(dt, "kl730"):
		return DefaultWeightKL720
	default:
		return DefaultWeightKL520
	}
}

View File

@ -0,0 +1,135 @@
package cluster
import (
"context"
"fmt"
"sync"
"edge-ai-platform/internal/driver"
)
// frameJob is sent from the main loop to a per-device worker goroutine.
// NOTE(review): not referenced anywhere in this file's visible code —
// possibly reserved for a future dispatch mode; confirm before removing.
type frameJob struct {
	frame      []byte
	frameIndex int64
}

// ClusterPipeline manages parallel inference across multiple devices.
// It dispatches frames using a weighted round-robin dispatcher and
// collects results into a unified output channel.
type ClusterPipeline struct {
	cluster    *Cluster
	dispatcher *Dispatcher
	resultCh   chan<- *ClusterResult // unified result stream (non-blocking sends)
	cancel     context.CancelFunc    // set by StartContinuous
	doneCh     chan struct{}         // closed when all workers have exited
}

// NewClusterPipeline creates a pipeline for the given cluster.
func NewClusterPipeline(
	cluster *Cluster,
	dispatcher *Dispatcher,
	resultCh chan<- *ClusterResult,
) *ClusterPipeline {
	return &ClusterPipeline{
		cluster:    cluster,
		dispatcher: dispatcher,
		resultCh:   resultCh,
		doneCh:     make(chan struct{}),
	}
}
// RunInference runs a single frame through the device selected by the
// weighted round-robin dispatcher and wraps the result with cluster
// metadata. This is the entry point used by the camera pipeline integration.
func (p *ClusterPipeline) RunInference(imageData []byte) (*ClusterResult, error) {
	device, frameIndex, dispatchErr := p.dispatcher.Next()
	if dispatchErr != nil {
		return nil, fmt.Errorf("dispatcher error: %w", dispatchErr)
	}
	inference, err := device.RunInference(imageData)
	if err != nil {
		return nil, err
	}
	wrapped := &ClusterResult{
		InferenceResult: inference,
		ClusterID:       p.cluster.ID,
		FrameIndex:      frameIndex,
	}
	return wrapped, nil
}
// StartContinuous starts the pipeline in continuous mode: every active
// device runs its own inference-read loop in a goroutine, and results are
// merged into the unified resultCh. A device is marked degraded (and the
// cluster flagged ClusterDegraded) after 3 consecutive read errors.
// doneCh is closed once all workers have exited.
//
// Fix: the original iterated p.dispatcher.degraded without holding the
// dispatcher lock while worker goroutines started earlier in the same loop
// could call MarkDegraded concurrently — an unsynchronized map read/write.
// We now snapshot the degraded set under the lock before spawning workers.
func (p *ClusterPipeline) StartContinuous() {
	ctx, cancel := context.WithCancel(context.Background())
	p.cancel = cancel
	// Snapshot the degraded set under the dispatcher lock (see Fix above).
	p.dispatcher.mu.Lock()
	skip := make(map[string]bool, len(p.dispatcher.degraded))
	for id, deg := range p.dispatcher.degraded {
		skip[id] = deg
	}
	p.dispatcher.mu.Unlock()
	var wg sync.WaitGroup
	for i := range p.dispatcher.members {
		member := p.dispatcher.members[i]
		drv := p.dispatcher.drivers[i]
		if skip[member.DeviceID] {
			continue
		}
		wg.Add(1)
		go func(deviceID string, d driver.DeviceDriver) {
			defer wg.Done()
			consecutiveErrors := 0
			for {
				select {
				case <-ctx.Done():
					return
				default:
				}
				result, err := d.ReadInference()
				if err != nil {
					consecutiveErrors++
					if consecutiveErrors >= 3 {
						p.dispatcher.MarkDegraded(deviceID)
						// NOTE(review): this Status write is not synchronized
						// with other readers/writers of cluster.Status —
						// confirm callers tolerate the race.
						p.cluster.Status = ClusterDegraded
					}
					continue
				}
				consecutiveErrors = 0
				result.DeviceID = deviceID
				cr := &ClusterResult{
					InferenceResult: result,
					ClusterID:       p.cluster.ID,
					FrameIndex:      -1, // continuous mode doesn't use ordered frames
				}
				select {
				case p.resultCh <- cr:
				default:
					// Drop result if channel is full.
				}
			}
		}(member.DeviceID, drv)
	}
	go func() {
		wg.Wait()
		close(p.doneCh)
	}()
}
// Stop cancels the pipeline and waits for all workers to finish.
// Fix: if StartContinuous was never called, doneCh is never closed and the
// original implementation blocked forever on <-p.doneCh; Stop now returns
// immediately when the pipeline has not been started (cancel is nil).
func (p *ClusterPipeline) Stop() {
	if p.cancel == nil {
		return // never started in continuous mode; nothing to wait for
	}
	p.cancel()
	<-p.doneCh
}

// Done returns a channel that is closed when the pipeline has stopped.
func (p *ClusterPipeline) Done() <-chan struct{} {
	return p.doneCh
}

View File

@ -0,0 +1,64 @@
package cluster
import "edge-ai-platform/internal/driver"
// Default dispatch weights per chip type: a KL720-class device receives
// three frames for every one sent to a KL520-class device.
const (
	DefaultWeightKL720 = 3
	DefaultWeightKL520 = 1
)

// MaxClusterSize is the maximum number of devices in a single cluster.
const MaxClusterSize = 8

// ClusterStatus represents the current state of a cluster.
type ClusterStatus string

const (
	ClusterIdle        ClusterStatus = "idle"        // no inference running
	ClusterInferencing ClusterStatus = "inferencing" // pipeline active
	ClusterDegraded    ClusterStatus = "degraded"    // at least one member has failed
)

// MemberStatus represents the state of a device within a cluster.
type MemberStatus string

const (
	MemberActive   MemberStatus = "active"
	MemberDegraded MemberStatus = "degraded"
	MemberRemoved  MemberStatus = "removed"
)

// DeviceMember represents a device participating in a cluster.
type DeviceMember struct {
	DeviceID   string       `json:"deviceId"`
	Weight     int          `json:"weight"` // dispatch weight (>= 1)
	Status     MemberStatus `json:"status"`
	DeviceName string       `json:"deviceName,omitempty"` // display-name snapshot
	DeviceType string       `json:"deviceType,omitempty"` // chip-type snapshot
}

// Cluster represents a group of devices performing parallel inference.
type Cluster struct {
	ID      string         `json:"id"`
	Name    string         `json:"name"`
	Devices []DeviceMember `json:"devices"`
	ModelID string         `json:"modelId,omitempty"`
	Status  ClusterStatus  `json:"status"`
}

// ClusterResult extends InferenceResult with cluster-specific metadata.
// FrameIndex is -1 in continuous mode, which has no frame ordering.
type ClusterResult struct {
	*driver.InferenceResult
	ClusterID  string `json:"clusterId"`
	FrameIndex int64  `json:"frameIndex"`
}

// ClusterFlashProgress reports per-device flash progress within a cluster.
type ClusterFlashProgress struct {
	DeviceID string `json:"deviceId"`
	Percent  int    `json:"percent"`
	Stage    string `json:"stage"`
	Message  string `json:"message"`
	Error    string `json:"error,omitempty"`
}

View File

@ -0,0 +1,41 @@
package config
import (
	"flag"
	"fmt"
	"net"
)
// Config holds all command-line configurable server settings.
type Config struct {
	Port            int    // HTTP listen port
	Host            string // HTTP listen host/interface
	MockMode        bool   // use the mock device driver instead of hardware
	MockCamera      bool   // use the synthetic test-card camera
	MockDeviceCount int    // number of mock devices to register
	LogLevel        string // debug/info/warn/error
	DevMode         bool   // disable embedded static file serving
	RelayURL        string // relay server WebSocket URL (empty = disabled)
	RelayToken      string // auth token for the relay tunnel
	TrayMode        bool   // run as system tray launcher
	GiteaURL        string // Gitea server URL for update checks (empty = disabled)
}
// Load defines and parses all command-line flags into a Config.
// It registers on the process-global flag set, so it must be called at
// most once per process (a second call would redefine the flags).
func Load() *Config {
	cfg := &Config{}
	flag.IntVar(&cfg.Port, "port", 3721, "Server port")
	flag.StringVar(&cfg.Host, "host", "127.0.0.1", "Server host")
	flag.BoolVar(&cfg.MockMode, "mock", false, "Enable mock device driver")
	flag.BoolVar(&cfg.MockCamera, "mock-camera", false, "Enable mock camera")
	flag.IntVar(&cfg.MockDeviceCount, "mock-devices", 1, "Number of mock devices")
	flag.StringVar(&cfg.LogLevel, "log-level", "info", "Log level (debug/info/warn/error)")
	flag.BoolVar(&cfg.DevMode, "dev", false, "Dev mode: disable embedded static file serving")
	flag.StringVar(&cfg.RelayURL, "relay-url", "", "Relay server WebSocket URL (e.g. ws://relay-host:3800/tunnel/connect)")
	flag.StringVar(&cfg.RelayToken, "relay-token", "", "Authentication token for relay tunnel")
	flag.BoolVar(&cfg.TrayMode, "tray", false, "Run as system tray launcher")
	flag.StringVar(&cfg.GiteaURL, "gitea-url", "", "Gitea server URL for update checks (e.g. https://gitea.example.com)")
	flag.Parse()
	return cfg
}
// Addr returns the host:port listen address for the server.
// Fix: it now uses net.JoinHostPort, which brackets IPv6 literal hosts
// (e.g. "::1" -> "[::1]:3721"); the original fmt.Sprintf("%s:%d") produced
// invalid addresses for them. IPv4/hostname output is unchanged.
func (c *Config) Addr() string {
	return net.JoinHostPort(c.Host, fmt.Sprintf("%d", c.Port))
}

View File

@ -0,0 +1,73 @@
package deps
import (
"os/exec"
"strings"
)
// Dependency describes an external CLI tool the platform may use.
type Dependency struct {
	Name        string `json:"name"`
	Available   bool   `json:"available"`
	Version     string `json:"version,omitempty"`
	Required    bool   `json:"required"`
	InstallHint string `json:"installHint,omitempty"`
}

// CheckAll probes every known external dependency and reports its status.
func CheckAll() []Dependency {
	deps := []Dependency{
		check("ffmpeg", false,
			"macOS: brew install ffmpeg | Windows: winget install Gyan.FFmpeg",
			"ffmpeg", "-version"),
		check("yt-dlp", false,
			"macOS: brew install yt-dlp | Windows: winget install yt-dlp",
			"yt-dlp", "--version"),
		check("python3", false,
			"Required only for Kneron KL720 hardware. macOS: brew install python3",
			"python3", "--version"),
	}
	return deps
}

// check probes a single CLI tool: it resolves cmd on PATH and, when found,
// runs it with args and records the first line of its stdout as the
// version string. A tool missing from PATH is reported as unavailable.
func check(name string, required bool, hint string, cmd string, args ...string) Dependency {
	dep := Dependency{
		Name:        name,
		Required:    required,
		InstallHint: hint,
	}
	binary, lookErr := exec.LookPath(cmd)
	if lookErr != nil {
		return dep
	}
	dep.Available = true
	if out, runErr := exec.Command(binary, args...).Output(); runErr == nil {
		// SplitN always yields at least one element, so indexing is safe.
		firstLine := strings.SplitN(string(out), "\n", 2)[0]
		dep.Version = strings.TrimSpace(firstLine)
	}
	return dep
}
// Logger is the minimal interface used for startup reporting.
// Implementations are expected to support printf-style formatting, since
// PrintStartupReport passes format strings together with args.
type Logger interface {
	Info(msg string, args ...interface{})
}

// PrintStartupReport logs the availability, detected version, and install
// hint of every external dependency. Note: it shells out to each tool via
// CheckAll, so it may take a moment at startup.
func PrintStartupReport(logger Logger) {
	deps := CheckAll()
	logger.Info("External dependency check:")
	for _, d := range deps {
		if d.Available {
			logger.Info(" [OK] %s: %s", d.Name, d.Version)
		} else {
			// Missing optional tools are informational; required ones are flagged.
			tag := "OPTIONAL"
			if d.Required {
				tag = "MISSING"
			}
			logger.Info(" [%s] %s: not found — %s", tag, d.Name, d.InstallHint)
		}
	}
}

View File

@ -0,0 +1,173 @@
package device
import (
"fmt"
"log"
"sync"
"edge-ai-platform/internal/driver"
"edge-ai-platform/internal/driver/kneron"
mockdriver "edge-ai-platform/internal/driver/mock"
"edge-ai-platform/pkg/logger"
)
// Manager owns the set of device sessions, handles detection of real
// Kneron hardware (or registration of mock devices), and publishes device
// lifecycle events on eventBus.
type Manager struct {
	registry       *DriverRegistry
	sessions       map[string]*DeviceSession // device ID -> session
	eventBus       chan DeviceEvent          // buffered (100) lifecycle events
	mockMode       bool                      // when true, only mock devices are registered
	scriptPath     string                    // path to the Python bridge scripts
	logBroadcaster *logger.Broadcaster       // forwards Kneron driver logs to the frontend
	mu             sync.RWMutex
}

// NewManager creates a device manager. In mock mode it immediately
// registers mockCount synthetic devices; otherwise real devices are
// detected later via Start/Rescan.
func NewManager(registry *DriverRegistry, mockMode bool, mockCount int, scriptPath string) *Manager {
	m := &Manager{
		registry:   registry,
		sessions:   make(map[string]*DeviceSession),
		eventBus:   make(chan DeviceEvent, 100),
		mockMode:   mockMode,
		scriptPath: scriptPath,
	}
	if mockMode {
		for i := 0; i < mockCount; i++ {
			id := fmt.Sprintf("mock-device-%d", i+1)
			d := mockdriver.Factory(id, i)
			m.sessions[id] = NewSession(d)
		}
	}
	return m
}
// SetLogBroadcaster attaches a log broadcaster so that Kneron driver and
// bridge logs are forwarded to the frontend, and propagates it to any
// already-registered Kneron drivers.
// Fix: the original wrote m.logBroadcaster before (and outside) the lock
// and then only held the read lock while iterating — but this method is a
// writer, racing with Start/Rescan which read logBroadcaster under the
// write lock. It now takes the write lock for the whole operation.
func (m *Manager) SetLogBroadcaster(b *logger.Broadcaster) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.logBroadcaster = b
	// Also set on any already-registered kneron drivers.
	for _, s := range m.sessions {
		if kd, ok := s.Driver.(*kneron.KneronDriver); ok {
			kd.SetLogBroadcaster(b)
		}
	}
}
// Start performs initial hardware detection. In mock mode it is a no-op
// (mock devices were registered in NewManager). Otherwise it detects real
// Kneron devices via the Python bridge and registers a driver session for
// each one found.
func (m *Manager) Start() {
	if m.mockMode {
		return
	}
	// Detect real Kneron devices (KL520, KL720, etc.) via Python bridge.
	// Detection runs before taking the lock since it shells out.
	devices := kneron.DetectDevices(m.scriptPath)
	if len(devices) == 0 {
		log.Println("No Kneron devices detected")
		return
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	for _, info := range devices {
		d := kneron.NewKneronDriver(info, m.scriptPath)
		if m.logBroadcaster != nil {
			d.SetLogBroadcaster(m.logBroadcaster)
		}
		m.sessions[info.ID] = NewSession(d)
		log.Printf("Registered Kneron device: %s (%s, type=%s)", info.Name, info.ID, info.Type)
	}
}
// Rescan re-detects connected Kneron devices. New devices are registered,
// removed devices are cleaned up, and existing devices are left untouched.
// In mock mode the current (static) session list is returned unchanged.
//
// NOTE(review): unlike Connect/Disconnect, Rescan does not publish
// DeviceEvents for added/removed devices — confirm callers use the
// returned slice rather than the event stream.
func (m *Manager) Rescan() []driver.DeviceInfo {
	if m.mockMode {
		return m.ListDevices()
	}
	detected := kneron.DetectDevices(m.scriptPath)
	// Build a set of detected device IDs.
	detectedIDs := make(map[string]driver.DeviceInfo, len(detected))
	for _, info := range detected {
		detectedIDs[info.ID] = info
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	// Remove devices that are no longer present. Disconnect is called while
	// holding the write lock, so a slow driver shutdown blocks other callers.
	for id, s := range m.sessions {
		if _, exists := detectedIDs[id]; !exists {
			log.Printf("Device removed: %s", id)
			s.Driver.Disconnect()
			delete(m.sessions, id)
		}
	}
	// Add newly detected devices (existing IDs are deliberately skipped).
	for _, info := range detected {
		if _, exists := m.sessions[info.ID]; !exists {
			d := kneron.NewKneronDriver(info, m.scriptPath)
			if m.logBroadcaster != nil {
				d.SetLogBroadcaster(m.logBroadcaster)
			}
			m.sessions[info.ID] = NewSession(d)
			log.Printf("Registered Kneron device: %s (%s, type=%s)", info.Name, info.ID, info.Type)
		}
	}
	// Return current list. Built inline rather than via ListDevices, which
	// would try to re-acquire the lock we already hold.
	devices := make([]driver.DeviceInfo, 0, len(m.sessions))
	for _, s := range m.sessions {
		devices = append(devices, s.Driver.Info())
	}
	return devices
}
// ListDevices returns a snapshot of info for every known device session.
func (m *Manager) ListDevices() []driver.DeviceInfo {
	m.mu.RLock()
	defer m.mu.RUnlock()
	out := make([]driver.DeviceInfo, 0, len(m.sessions))
	for _, sess := range m.sessions {
		out = append(out, sess.Driver.Info())
	}
	return out
}
// GetDevice looks up the session for the given device ID, or returns an
// error if no such device is registered.
func (m *Manager) GetDevice(id string) (*DeviceSession, error) {
	m.mu.RLock()
	sess, found := m.sessions[id]
	m.mu.RUnlock()
	if !found {
		return nil, fmt.Errorf("device not found: %s", id)
	}
	return sess, nil
}
// Connect opens the device with the given ID and publishes an "updated"
// event on the event bus (a blocking send into the buffered channel).
func (m *Manager) Connect(id string) error {
	sess, err := m.GetDevice(id)
	if err != nil {
		return err
	}
	if connErr := sess.Driver.Connect(); connErr != nil {
		return connErr
	}
	m.eventBus <- DeviceEvent{Event: "updated", Device: sess.Driver.Info()}
	return nil
}
// Disconnect closes the device with the given ID and publishes an
// "updated" event on the event bus.
func (m *Manager) Disconnect(id string) error {
	sess, err := m.GetDevice(id)
	if err != nil {
		return err
	}
	if discErr := sess.Driver.Disconnect(); discErr != nil {
		return discErr
	}
	m.eventBus <- DeviceEvent{Event: "updated", Device: sess.Driver.Info()}
	return nil
}
// Events exposes the device event stream as a receive-only channel.
// The channel is buffered (capacity 100) and is never closed by Manager,
// so consumers must not range over it expecting termination.
func (m *Manager) Events() <-chan DeviceEvent {
	return m.eventBus
}

View File

@ -0,0 +1,93 @@
package device
import (
"testing"
"edge-ai-platform/internal/driver"
)
// testDriver is a minimal in-memory driver.DeviceDriver stub used by the
// Manager tests. Only Connect/Disconnect mutate state; all other methods
// return zero values.
type testDriver struct {
	info      driver.DeviceInfo // returned by Info(); Status mirrors connected
	connected bool
}

func (d *testDriver) Info() driver.DeviceInfo { return d.info }
func (d *testDriver) Connect() error          { d.connected = true; d.info.Status = driver.StatusConnected; return nil }
func (d *testDriver) Disconnect() error       { d.connected = false; d.info.Status = driver.StatusDisconnected; return nil }
func (d *testDriver) IsConnected() bool       { return d.connected }
func (d *testDriver) Flash(_ string, _ chan<- driver.FlashProgress) error { return nil }
func (d *testDriver) StartInference() error   { return nil }
func (d *testDriver) StopInference() error    { return nil }
func (d *testDriver) ReadInference() (*driver.InferenceResult, error) { return nil, nil }
func (d *testDriver) RunInference(_ []byte) (*driver.InferenceResult, error) { return nil, nil }
func (d *testDriver) GetModelInfo() (*driver.ModelInfo, error) { return nil, nil }
func TestNewManager_MockMode(t *testing.T) {
registry := NewRegistry()
mgr := NewManager(registry, true, 2, "")
devices := mgr.ListDevices()
if len(devices) != 2 {
t.Errorf("NewManager mock mode: got %d devices, want 2", len(devices))
}
}
func TestManager_ListDevices(t *testing.T) {
registry := NewRegistry()
mgr := NewManager(registry, false, 0, "")
mgr.sessions["test-1"] = NewSession(&testDriver{
info: driver.DeviceInfo{ID: "test-1", Name: "Test Device", Type: "KL720", Status: driver.StatusDetected},
})
devices := mgr.ListDevices()
if len(devices) != 1 {
t.Errorf("ListDevices() = %d, want 1", len(devices))
}
}
func TestManager_GetDevice(t *testing.T) {
registry := NewRegistry()
mgr := NewManager(registry, false, 0, "")
mgr.sessions["test-1"] = NewSession(&testDriver{
info: driver.DeviceInfo{ID: "test-1"},
})
t.Run("existing device", func(t *testing.T) {
s, err := mgr.GetDevice("test-1")
if err != nil {
t.Errorf("GetDevice() error = %v", err)
}
if s == nil {
t.Error("GetDevice() returned nil session")
}
})
t.Run("non-existing device", func(t *testing.T) {
_, err := mgr.GetDevice("test-999")
if err == nil {
t.Error("GetDevice() expected error for non-existing device")
}
})
}
func TestManager_Connect(t *testing.T) {
registry := NewRegistry()
mgr := NewManager(registry, false, 0, "")
td := &testDriver{info: driver.DeviceInfo{ID: "test-1", Status: driver.StatusDetected}}
mgr.sessions["test-1"] = NewSession(td)
// Drain event bus in background
go func() {
for range mgr.Events() {
}
}()
err := mgr.Connect("test-1")
if err != nil {
t.Errorf("Connect() error = %v", err)
}
if !td.connected {
t.Error("Connect() did not connect device")
}
}

View File

@ -0,0 +1,20 @@
package device
import "edge-ai-platform/internal/driver"
// DriverFactory pairs a human-readable driver name with a constructor
// that builds a DeviceDriver for a given device ID and index.
type DriverFactory struct {
	Name   string
	Create func(id string, index int) driver.DeviceDriver
}

// DriverRegistry holds the set of registered driver factories.
// NOTE(review): registration is not synchronized — confirm all Register
// calls happen during startup before concurrent use.
type DriverRegistry struct {
	factories []DriverFactory
}

// NewRegistry returns an empty DriverRegistry.
func NewRegistry() *DriverRegistry {
	return &DriverRegistry{}
}

// Register appends a factory to the registry.
func (r *DriverRegistry) Register(factory DriverFactory) {
	r.factories = append(r.factories, factory)
}

View File

@ -0,0 +1,15 @@
package device
import (
"edge-ai-platform/internal/driver"
"sync"
)
// DeviceSession wraps a single device driver instance.
type DeviceSession struct {
	Driver driver.DeviceDriver
	mu     sync.Mutex // not used in the code shown here — presumably reserved for per-session serialization; confirm before removing
}

// NewSession creates a session wrapping the given driver.
func NewSession(d driver.DeviceDriver) *DeviceSession {
	return &DeviceSession{Driver: d}
}

View File

@ -0,0 +1,8 @@
package device
import "edge-ai-platform/internal/driver"
// DeviceEvent is published on the Manager's event bus whenever a device's
// state changes (e.g. Event == "updated" after Connect/Disconnect).
type DeviceEvent struct {
	Event  string            `json:"event"`
	Device driver.DeviceInfo `json:"device"`
}

View File

@ -0,0 +1,90 @@
package driver
import "time"
// DeviceDriver is the hardware-abstraction interface implemented by every
// device backend (real Kneron hardware and the mock driver alike).
type DeviceDriver interface {
	// Info returns a snapshot of the device's current metadata and status.
	Info() DeviceInfo
	// Connect/Disconnect open and close the device session.
	Connect() error
	Disconnect() error
	IsConnected() bool
	// Flash loads a model onto the device, reporting progress on progressCh.
	Flash(modelPath string, progressCh chan<- FlashProgress) error
	// StartInference/StopInference toggle continuous inference mode.
	StartInference() error
	StopInference() error
	// ReadInference returns the latest result; RunInference runs one pass
	// over the supplied image bytes.
	ReadInference() (*InferenceResult, error)
	RunInference(imageData []byte) (*InferenceResult, error)
	GetModelInfo() (*ModelInfo, error)
}
// DeviceInfo describes a detected device as exposed to the API/frontend.
type DeviceInfo struct {
	ID           string       `json:"id"`
	Name         string       `json:"name"`
	Type         string       `json:"type"` // driver type key, e.g. "kneron_kl720"
	Port         string       `json:"port"`
	VendorID     uint16       `json:"vendorId,omitempty"`
	ProductID    uint16       `json:"productId,omitempty"`
	Status       DeviceStatus `json:"status"`
	FirmwareVer  string       `json:"firmwareVersion,omitempty"`
	FlashedModel string       `json:"flashedModel,omitempty"` // path of the model last flashed this session
}

// DeviceStatus is the lifecycle state of a device.
type DeviceStatus string

// Device lifecycle states, roughly in the order of a normal session.
const (
	StatusDetected     DeviceStatus = "detected"
	StatusConnecting   DeviceStatus = "connecting"
	StatusConnected    DeviceStatus = "connected"
	StatusFlashing     DeviceStatus = "flashing"
	StatusInferencing  DeviceStatus = "inferencing"
	StatusError        DeviceStatus = "error"
	StatusDisconnected DeviceStatus = "disconnected"
)

// FlashProgress is a single progress update emitted during Flash.
type FlashProgress struct {
	Percent int    `json:"percent"` // 0-100
	Stage   string `json:"stage"`   // e.g. "preparing", "transferring", "done"
	Message string `json:"message,omitempty"`
	Error   string `json:"error,omitempty"`
}
// InferenceResult is one inference output, covering classification and
// detection task types plus optional batch/video progress metadata.
type InferenceResult struct {
	DeviceID        string            `json:"deviceId,omitempty"`
	ModelID         string            `json:"modelId,omitempty"`
	TaskType        string            `json:"taskType"`
	Timestamp       int64             `json:"timestamp"`
	LatencyMs       float64           `json:"latencyMs"`
	Classifications []ClassResult     `json:"classifications,omitempty"`
	Detections      []DetectionResult `json:"detections,omitempty"`
	// Batch image fields (omitted for single-image/camera/video modes)
	ImageIndex  int    `json:"imageIndex,omitempty"`
	TotalImages int    `json:"totalImages,omitempty"`
	Filename    string `json:"filename,omitempty"`
	// Video progress fields (omitted for non-video modes)
	FrameIndex  int `json:"frameIndex,omitempty"`
	TotalFrames int `json:"totalFrames,omitempty"`
}

// ClassResult is a single classification label with its confidence score.
type ClassResult struct {
	Label      string  `json:"label"`
	Confidence float64 `json:"confidence"`
}

// DetectionResult is a detected object: label, confidence and bounding box.
type DetectionResult struct {
	Label      string  `json:"label"`
	Confidence float64 `json:"confidence"`
	BBox       BBox    `json:"bbox"`
}

// BBox is a bounding box. Coordinates appear normalized to [0,1] in the
// mock driver — TODO confirm the real bridge uses the same convention.
type BBox struct {
	X      float64 `json:"x"`
	Y      float64 `json:"y"`
	Width  float64 `json:"width"`
	Height float64 `json:"height"`
}

// ModelInfo describes the model currently loaded on a device.
type ModelInfo struct {
	ID       string    `json:"id"`
	Name     string    `json:"name"`
	LoadedAt time.Time `json:"loadedAt"`
}

View File

@ -0,0 +1,192 @@
package kneron
import (
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"edge-ai-platform/internal/driver"
)
// ResolvePython finds the best Python interpreter for the given script path.
// Search order: script-local venv → parent venv → ~/.edge-ai-platform/venv →
// system python3 (returned as a bare name, resolved via PATH at exec time).
func ResolvePython(scriptPath string) string {
	base := filepath.Dir(scriptPath)
	paths := []string{
		filepath.Join(base, "venv", "bin", "python3"),
		filepath.Join(filepath.Dir(base), "venv", "bin", "python3"),
	}
	if home, err := os.UserHomeDir(); err == nil {
		paths = append(paths, filepath.Join(home, ".edge-ai-platform", "venv", "bin", "python3"))
	}
	for _, candidate := range paths {
		if _, statErr := os.Stat(candidate); statErr == nil {
			return candidate
		}
	}
	return "python3"
}
// KneronVendorID is the USB vendor ID for Kneron devices.
const KneronVendorID uint16 = 0x3231

// Known Kneron product IDs (lower-case hex strings as reported by the
// Python bridge scan result).
const (
	ProductIDKL520    = "0x0100"
	ProductIDKL720    = "0x0200"
	ProductIDKL720Alt = "0x0720"
)

// chipFromProductID returns the chip name and device type for the product_id
// reported by the Python bridge scan result. Input is case-insensitive and
// may carry surrounding whitespace. Unknown product IDs fall back to KL520.
//
// Fix: the switch previously duplicated the literal values of the declared
// ProductID* constants (which were otherwise unused), and the default-case
// comment claimed raw-product-ID behavior the code never implemented.
func chipFromProductID(productID string) (chip string, deviceType string) {
	pid := strings.ToLower(strings.TrimSpace(productID))
	switch pid {
	case ProductIDKL520:
		return "KL520", "kneron_kl520"
	case ProductIDKL720, ProductIDKL720Alt:
		return "KL720", "kneron_kl720"
	default:
		// Unknown product ID — default to KL520, the common USB Boot device.
		return "KL520", "kneron_kl520"
	}
}
// DetectDevices attempts to discover all connected Kneron devices (KL520,
// KL720, etc.) by invoking the Python bridge script with a scan command.
// If Python or the bridge script is not available, it returns an empty list.
//
// Protocol: the bridge speaks line-delimited JSON over stdin/stdout. It
// first emits {"status":"ready"}, then answers one JSON object per command.
// Both the ready signal and the scan response are bounded by a 5-second
// timeout so a wedged bridge cannot block detection forever.
//
// Fix: removed the dead `cmd.Stdin = nil` assignment — Stdin's zero value
// is already nil, and StdinPipe requires it to be unset anyway.
func DetectDevices(scriptPath string) []driver.DeviceInfo {
	// Try to run the bridge script with a scan command via a short-lived process.
	pythonBin := ResolvePython(scriptPath)
	cmd := exec.Command(pythonBin, scriptPath)
	stdinPipe, err := cmd.StdinPipe()
	if err != nil {
		return nil
	}
	stdoutPipe, err := cmd.StdoutPipe()
	if err != nil {
		stdinPipe.Close()
		return nil
	}
	if err := cmd.Start(); err != nil {
		return nil
	}
	// Always tear the child down on exit: closing stdin signals EOF to the
	// bridge, Kill covers a hung process, Wait reaps it.
	defer func() {
		stdinPipe.Close()
		cmd.Process.Kill()
		cmd.Wait()
	}()
	// Read the ready signal (decoded in a goroutine so we can time out;
	// on timeout the goroutine is abandoned and dies with the killed child).
	decoder := json.NewDecoder(stdoutPipe)
	var readyResp map[string]interface{}
	done := make(chan error, 1)
	go func() {
		done <- decoder.Decode(&readyResp)
	}()
	select {
	case err := <-done:
		if err != nil {
			return nil
		}
	case <-time.After(5 * time.Second):
		return nil
	}
	if status, ok := readyResp["status"].(string); !ok || status != "ready" {
		return nil
	}
	// Send the scan command.
	scanCmd, _ := json.Marshal(map[string]interface{}{"cmd": "scan"})
	scanCmd = append(scanCmd, '\n')
	if _, err := stdinPipe.Write(scanCmd); err != nil {
		return nil
	}
	// Read the scan response, again with a 5-second timeout.
	var scanResp map[string]interface{}
	scanDone := make(chan error, 1)
	go func() {
		scanDone <- decoder.Decode(&scanResp)
	}()
	select {
	case err := <-scanDone:
		if err != nil {
			return nil
		}
	case <-time.After(5 * time.Second):
		return nil
	}
	// Parse detected devices from the response.
	devicesRaw, ok := scanResp["devices"].([]interface{})
	if !ok || len(devicesRaw) == 0 {
		return nil
	}
	// Track per-chip counters for naming (e.g. "KL520 #1", "KL720 #1").
	chipCount := map[string]int{}
	var devices []driver.DeviceInfo
	for _, devRaw := range devicesRaw {
		dev, ok := devRaw.(map[string]interface{})
		if !ok {
			continue
		}
		port := ""
		if p, ok := dev["port"].(string); ok {
			port = p
		}
		fw := ""
		if f, ok := dev["firmware"].(string); ok {
			fw = f
		}
		productID := ""
		if p, ok := dev["product_id"].(string); ok {
			productID = p
		}
		chip, devType := chipFromProductID(productID)
		chipCount[chip]++
		idx := chipCount[chip]
		info := driver.DeviceInfo{
			ID:          fmt.Sprintf("%s-%d", strings.ToLower(chip), idx-1), // zero-based ID suffix
			Name:        fmt.Sprintf("Kneron %s #%d", chip, idx),            // one-based display name
			Type:        devType,
			Port:        port,
			VendorID:    KneronVendorID,
			Status:      driver.StatusDetected,
			FirmwareVer: fw,
		}
		devices = append(devices, info)
	}
	return devices
}
// DetectKL720Devices is a backward-compatible alias for DetectDevices.
// Despite the name it detects all Kneron chip variants, not just KL720.
// Deprecated: Use DetectDevices instead.
func DetectKL720Devices(scriptPath string) []driver.DeviceInfo {
	return DetectDevices(scriptPath)
}

View File

@ -0,0 +1,669 @@
package kneron
import (
"bufio"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
"edge-ai-platform/internal/driver"
"edge-ai-platform/pkg/logger"
)
// LogFunc is a function that writes a log line to both stderr and
// the WebSocket broadcaster. When nil, logs go only to stderr.
// NOTE(review): this type is declared but not referenced in the code
// shown here — confirm external usage before removing.
type LogFunc func(level, msg string)
// KneronDriver implements driver.DeviceDriver for Kneron NPU devices
// (KL520, KL720, etc.). It delegates hardware operations to a Python
// subprocess (kneron_bridge.py) that communicates via JSON-RPC over
// stdin/stdout.
type KneronDriver struct {
	info           driver.DeviceInfo
	connected      bool   // guarded by mu
	inferring      bool   // guarded by mu
	modelLoaded    string // path of the currently loaded model ("" = none)
	chipType       string // "KL520" or "KL720" — derived from info.Type
	mu             sync.Mutex
	scriptPath     string // path to kneron_bridge.py
	pythonCmd      *exec.Cmd      // running bridge process, nil when stopped
	stdin          io.WriteCloser // bridge stdin (JSON commands, one per line)
	stdout         *bufio.Scanner // bridge stdout (JSON responses, one per line)
	pythonReady    bool           // true once the bridge has sent its "ready" line
	logBroadcaster *logger.Broadcaster
	needsReset     bool // true on first connect after server start to clear stale models
}
// NewKneronDriver creates a new KneronDriver with the given device info and
// path to the kneron_bridge.py script. Works for any Kneron chip variant:
// any info.Type containing "kl720" (case-insensitive) is treated as KL720,
// everything else as KL520.
func NewKneronDriver(info driver.DeviceInfo, scriptPath string) *KneronDriver {
	chipName := "KL520"
	if strings.Contains(strings.ToLower(info.Type), "kl720") {
		chipName = "KL720"
	}
	drv := &KneronDriver{
		info:       info,
		scriptPath: scriptPath,
		chipType:   chipName,
		// Reset on the first Connect after server start so a model left
		// over from a previous session is cleared.
		needsReset: true,
	}
	return drv
}
// SetLogBroadcaster attaches a log broadcaster so that bridge stderr
// and driver messages are forwarded to the frontend.
// NOTE(review): the field is written without holding d.mu — confirm this
// is only called before concurrent driver use.
func (d *KneronDriver) SetLogBroadcaster(b *logger.Broadcaster) {
	d.logBroadcaster = b
}
// driverLog writes a formatted message to stderr and, when a broadcaster
// is attached, pushes it to the frontend log stream as well.
func (d *KneronDriver) driverLog(level, format string, args ...interface{}) {
	line := fmt.Sprintf(format, args...)
	fmt.Fprintf(os.Stderr, "%s\n", line)
	if b := d.logBroadcaster; b != nil {
		b.Push(level, line)
	}
}
// NewKL720Driver is a backward-compatible alias for NewKneronDriver.
// Deprecated: Use NewKneronDriver instead.
func NewKL720Driver(info driver.DeviceInfo, scriptPath string) *KneronDriver {
	return NewKneronDriver(info, scriptPath)
}

// KL720Driver is a backward-compatible type alias for KneronDriver.
// Deprecated: Use KneronDriver instead.
type KL720Driver = KneronDriver
// resolvePython finds the best Python interpreter using the package-level
// resolver (venv candidates first, then system python3).
func (d *KneronDriver) resolvePython() string {
	return ResolvePython(d.scriptPath)
}
// startPython launches the Python bridge subprocess and waits for the
// "ready" signal on stdout. On success the stdin/stdout handles are stored
// on the driver and pythonReady is set; on failure the process is torn
// down and an error is returned.
func (d *KneronDriver) startPython() error {
	pythonBin := d.resolvePython()
	scriptDir := filepath.Dir(d.scriptPath)
	cmd := exec.Command(pythonBin, d.scriptPath)
	// On macOS with Apple Silicon, Kneron SDK requires x86_64 (Rosetta 2).
	// The venv should already contain the correct architecture Python.
	// Set DYLD_LIBRARY_PATH so libkplus.dylib can be found.
	// PYTHONUNBUFFERED keeps the bridge's JSON lines flushed promptly.
	cmd.Env = append(os.Environ(),
		"PYTHONUNBUFFERED=1",
	)
	// Add library path for native kp module if lib directory exists.
	libDir := filepath.Join(scriptDir, "lib")
	if _, err := os.Stat(libDir); err == nil {
		if runtime.GOOS == "darwin" {
			cmd.Env = append(cmd.Env, "DYLD_LIBRARY_PATH="+libDir)
		} else {
			cmd.Env = append(cmd.Env, "LD_LIBRARY_PATH="+libDir)
		}
	}
	stdinPipe, err := cmd.StdinPipe()
	if err != nil {
		return fmt.Errorf("failed to create stdin pipe: %w", err)
	}
	stdoutPipe, err := cmd.StdoutPipe()
	if err != nil {
		stdinPipe.Close()
		return fmt.Errorf("failed to create stdout pipe: %w", err)
	}
	// Capture stderr from the Python bridge: forward each line to both
	// os.Stderr and the WebSocket broadcaster so it shows in the frontend.
	stderrPipe, err := cmd.StderrPipe()
	if err != nil {
		stdinPipe.Close()
		stdoutPipe.Close()
		return fmt.Errorf("failed to create stderr pipe: %w", err)
	}
	if err := cmd.Start(); err != nil {
		stdinPipe.Close()
		return fmt.Errorf("failed to start python bridge (%s): %w", pythonBin, err)
	}
	// Forward bridge stderr line-by-line to os.Stderr + broadcaster.
	// The goroutine exits when the bridge closes stderr (process exit).
	go func() {
		scanner := bufio.NewScanner(stderrPipe)
		for scanner.Scan() {
			line := scanner.Text()
			fmt.Fprintln(os.Stderr, line)
			if d.logBroadcaster != nil {
				d.logBroadcaster.Push("DEBUG", line)
			}
		}
	}()
	d.pythonCmd = cmd
	d.stdin = stdinPipe
	d.stdout = bufio.NewScanner(stdoutPipe)
	// Increase scanner buffer for large inference responses (up to 1 MiB).
	d.stdout.Buffer(make([]byte, 0, 64*1024), 1024*1024)
	// Wait for the ready signal from the Python process.
	// NOTE(review): this Scan blocks with no timeout — a wedged bridge
	// would hang the caller; confirm whether a deadline is needed here.
	if d.stdout.Scan() {
		var resp map[string]interface{}
		if err := json.Unmarshal([]byte(d.stdout.Text()), &resp); err == nil {
			if status, ok := resp["status"].(string); ok && status == "ready" {
				d.pythonReady = true
				return nil
			}
		}
	}
	// If we didn't get a ready signal, clean up and report failure.
	d.stopPython()
	return fmt.Errorf("python bridge did not send ready signal")
}
// sendCommand sends a JSON command to the Python subprocess and returns
// the parsed JSON response. A response carrying an "error" field is
// converted into a Go error.
//
// Concurrency: visible call sites hold d.mu around this call — the bridge
// protocol is strictly one request / one response on a single pipe pair,
// so unsynchronized callers would interleave responses.
func (d *KneronDriver) sendCommand(cmd map[string]interface{}) (map[string]interface{}, error) {
	if !d.pythonReady {
		return nil, fmt.Errorf("python bridge is not running")
	}
	data, err := json.Marshal(cmd)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal command: %w", err)
	}
	// Write the JSON command followed by a newline.
	if _, err := fmt.Fprintf(d.stdin, "%s\n", data); err != nil {
		return nil, fmt.Errorf("failed to write to python bridge: %w", err)
	}
	// Read the response line (blocks until the bridge answers or exits).
	if !d.stdout.Scan() {
		if err := d.stdout.Err(); err != nil {
			return nil, fmt.Errorf("failed to read from python bridge: %w", err)
		}
		return nil, fmt.Errorf("python bridge closed unexpectedly")
	}
	var resp map[string]interface{}
	if err := json.Unmarshal([]byte(d.stdout.Text()), &resp); err != nil {
		return nil, fmt.Errorf("failed to parse python response: %w", err)
	}
	// Check for error responses from the bridge.
	if errMsg, ok := resp["error"].(string); ok {
		return nil, fmt.Errorf("python bridge error: %s", errMsg)
	}
	return resp, nil
}
// stopPython kills the Python subprocess and cleans up resources.
// Order matters: stdin is closed first (EOF lets a healthy bridge exit
// cleanly), then the process is killed and reaped.
func (d *KneronDriver) stopPython() {
	d.pythonReady = false
	if d.stdin != nil {
		d.stdin.Close()
		d.stdin = nil
	}
	if d.pythonCmd != nil && d.pythonCmd.Process != nil {
		d.pythonCmd.Process.Kill()
		d.pythonCmd.Wait() // reap to avoid a zombie process
		d.pythonCmd = nil
	}
	d.stdout = nil
}
// Info returns a copy of the current device information (DeviceInfo is a
// value type, so callers cannot mutate driver state through it).
func (d *KneronDriver) Info() driver.DeviceInfo {
	d.mu.Lock()
	defer d.mu.Unlock()
	return d.info
}
// Connect starts the Python bridge subprocess and connects to the Kneron device.
// On the first connect after server start, the device is reset to clear any
// stale model from a previous session.
//
// Locking: uses explicit Unlock (not defer) because restartBridge at the
// end must be called WITHOUT d.mu held.
func (d *KneronDriver) Connect() error {
	d.mu.Lock()
	if d.connected {
		// Already connected — idempotent no-op.
		d.mu.Unlock()
		return nil
	}
	needsReset := d.needsReset
	d.info.Status = driver.StatusConnecting
	// Start the Python bridge process.
	if err := d.startPython(); err != nil {
		d.info.Status = driver.StatusError
		d.mu.Unlock()
		return fmt.Errorf("failed to start hardware bridge: %w", err)
	}
	// Send connect command to the bridge.
	resp, err := d.sendCommand(map[string]interface{}{
		"cmd":         "connect",
		"port":        d.info.Port,
		"index":       0,
		"device_type": d.info.Type,
	})
	if err != nil {
		d.stopPython()
		d.info.Status = driver.StatusError
		d.mu.Unlock()
		return fmt.Errorf("failed to connect to device: %w", err)
	}
	d.connected = true
	d.needsReset = false
	d.info.Status = driver.StatusConnected
	// Pick up the firmware version if the bridge reports one.
	if fw, ok := resp["firmware"].(string); ok {
		d.info.FirmwareVer = fw
	}
	d.mu.Unlock()
	// First connect after server start: reset device to clear stale models.
	if needsReset {
		d.driverLog("INFO", "[kneron] first connect after server start — resetting device to clear stale model...")
		if err := d.restartBridge(); err != nil {
			d.driverLog("WARN", "[kneron] reset on connect failed (non-fatal): %v", err)
			// Non-fatal: device is still connected, just might have stale model
		} else {
			d.driverLog("INFO", "[kneron] device reset complete — clean state ready")
		}
	}
	return nil
}
// Disconnect stops the Python bridge and disconnects from the device.
// Idempotent: returns nil immediately when already disconnected.
func (d *KneronDriver) Disconnect() error {
	d.mu.Lock()
	defer d.mu.Unlock()
	if !d.connected {
		return nil
	}
	// Try to send disconnect command if Python is running.
	// Errors are deliberately ignored — stopPython follows regardless.
	if d.pythonReady {
		d.sendCommand(map[string]interface{}{"cmd": "disconnect"})
	}
	d.stopPython()
	d.connected = false
	d.inferring = false
	d.info.Status = driver.StatusDisconnected
	return nil
}
// IsConnected reports whether the driver is currently connected.
func (d *KneronDriver) IsConnected() bool {
	d.mu.Lock()
	defer d.mu.Unlock()
	return d.connected
}
// restartBridge resets the Kneron device and restarts the Python bridge.
//
// The KL520 USB Boot mode only allows loading one model per firmware
// session. To load a different model we must:
//  1. Send a "reset" command via the current bridge — this calls
//     kp.core.reset_device() which forces the device back to Loader
//     (USB Boot) state, wiping firmware + model from RAM.
//  2. Kill the Python bridge process.
//  3. Wait for the device to re-enumerate on USB (~8 s).
//  4. Start a fresh Python bridge.
//  5. Send "connect" which reloads firmware from scratch.
//
// After this the device is in a clean state ready for load_model.
//
// Caller must NOT hold d.mu (the lock is acquired and released in phases
// around the fixed sleep so other callers are not blocked for 8 s).
func (d *KneronDriver) restartBridge() error {
	d.mu.Lock()
	port := d.info.Port
	d.modelLoaded = "" // any loaded model is wiped by the reset
	// Step 1: Ask the running bridge to reset the device.
	if d.pythonReady {
		d.driverLog("INFO", "[kneron] sending reset command to device...")
		d.sendCommand(map[string]interface{}{"cmd": "reset"})
		// Ignore errors — the device may have already disconnected.
	}
	// Step 2: Kill the bridge process.
	d.stopPython()
	d.mu.Unlock()
	// Step 3: Wait for USB device to re-enumerate after hardware reset.
	// The reset causes the device to drop off USB and reappear as a
	// Loader-mode device. This typically takes 5-8 seconds.
	d.driverLog("INFO", "[kneron] bridge stopped, waiting for USB re-enumerate after reset...")
	time.Sleep(8 * time.Second)
	d.mu.Lock()
	defer d.mu.Unlock()
	// Step 4: Start a fresh Python bridge.
	d.driverLog("INFO", "[kneron] starting new bridge process...")
	if err := d.startPython(); err != nil {
		return fmt.Errorf("failed to restart bridge: %w", err)
	}
	// Step 5: Reconnect — firmware will be loaded fresh.
	d.driverLog("INFO", "[kneron] bridge started, reconnecting to device (port=%s)...", port)
	_, err := d.sendCommand(map[string]interface{}{
		"cmd":         "connect",
		"port":        port,
		"index":       0,
		"device_type": d.info.Type,
	})
	if err != nil {
		d.stopPython()
		return fmt.Errorf("failed to reconnect after bridge restart: %w", err)
	}
	d.driverLog("INFO", "[kneron] device reconnected after reset + bridge restart")
	return nil
}
// Flash loads a model onto the Kneron device. Progress is reported through
// the provided channel.
//
// Behavior differs by chip:
//   - KL520 (USB Boot): only one model per session. Error 40 triggers
//     a full device reset + bridge restart + firmware reload.
//   - KL720 (flash-based): models can be freely reloaded. Error 40
//     should not occur; if it does, a simple retry is attempted first.
//
// Sends on progressCh are blocking — the caller must keep receiving.
func (d *KneronDriver) Flash(modelPath string, progressCh chan<- driver.FlashProgress) error {
	// Snapshot state under the lock; the lock is dropped around channel
	// sends and restartBridge (which must run without d.mu held).
	d.mu.Lock()
	d.info.Status = driver.StatusFlashing
	pythonReady := d.pythonReady
	currentModel := d.modelLoaded
	chip := d.chipType
	d.mu.Unlock()
	if !pythonReady {
		d.mu.Lock()
		d.info.Status = driver.StatusConnected
		d.mu.Unlock()
		return fmt.Errorf("hardware bridge is not running — cannot flash model")
	}
	// Same model already loaded — skip, report success
	if currentModel != "" && currentModel == modelPath {
		d.driverLog("INFO", "[kneron] model already loaded (%s), skipping reload", modelPath)
		progressCh <- driver.FlashProgress{
			Percent: 50,
			Stage:   "transferring",
			Message: "model already loaded on device",
		}
		d.mu.Lock()
		d.info.Status = driver.StatusConnected
		d.mu.Unlock()
		progressCh <- driver.FlashProgress{Percent: 100, Stage: "done", Message: "Flash complete (model already loaded)"}
		return nil
	}
	// Try loading the model
	progressCh <- driver.FlashProgress{
		Percent: 5,
		Stage:   "preparing",
		Message: "preparing... loading model to device",
	}
	d.mu.Lock()
	_, err := d.sendCommand(map[string]interface{}{
		"cmd":  "load_model",
		"path": modelPath,
	})
	d.mu.Unlock()
	// Handle retryable errors (error 40, broken pipe).
	if err != nil {
		errMsg := err.Error()
		d.driverLog("WARN", "[kneron] load_model failed: %s", errMsg)
		isRetryable := strings.Contains(errMsg, "Error code: 40") ||
			strings.Contains(errMsg, "SECOND_MODEL") ||
			strings.Contains(errMsg, "broken pipe") ||
			strings.Contains(errMsg, "USB_TIMEOUT")
		if isRetryable {
			if chip == "KL720" {
				// KL720: error 40 should not occur. Try a simple retry
				// without full bridge restart first.
				d.driverLog("WARN", "[kneron] KL720 unexpected retryable error, retrying without restart...")
				progressCh <- driver.FlashProgress{
					Percent: 5,
					Stage:   "preparing",
					Message: "preparing... retrying model load",
				}
				d.mu.Lock()
				_, err = d.sendCommand(map[string]interface{}{
					"cmd":  "load_model",
					"path": modelPath,
				})
				d.mu.Unlock()
				// If still failing, fall back to bridge restart as last resort.
				if err != nil {
					d.driverLog("WARN", "[kneron] KL720 retry failed: %v, falling back to bridge restart...", err)
					if restartErr := d.restartBridge(); restartErr != nil {
						d.mu.Lock()
						d.info.Status = driver.StatusConnected
						d.mu.Unlock()
						return fmt.Errorf("failed to reset device: %w", restartErr)
					}
					d.mu.Lock()
					d.info.Status = driver.StatusFlashing
					_, err = d.sendCommand(map[string]interface{}{
						"cmd":  "load_model",
						"path": modelPath,
					})
					d.mu.Unlock()
				}
			} else {
				// KL520: error 40 means a model is already loaded in this
				// USB Boot session. Must reset device + reload firmware.
				d.driverLog("WARN", "[kneron] KL520 retryable error, restarting bridge...")
				progressCh <- driver.FlashProgress{
					Percent: 5,
					Stage:   "preparing",
					Message: "preparing... resetting device for new model",
				}
				if restartErr := d.restartBridge(); restartErr != nil {
					d.driverLog("ERROR", "[kneron] restartBridge failed: %v", restartErr)
					d.mu.Lock()
					d.info.Status = driver.StatusConnected
					d.mu.Unlock()
					return fmt.Errorf("failed to reset device: %w", restartErr)
				}
				d.driverLog("INFO", "[kneron] bridge restarted, retrying load_model...")
				d.mu.Lock()
				d.info.Status = driver.StatusFlashing
				_, err = d.sendCommand(map[string]interface{}{
					"cmd":  "load_model",
					"path": modelPath,
				})
				d.mu.Unlock()
			}
		}
	}
	if err != nil {
		d.driverLog("ERROR", "[kneron] load_model ultimately failed: %v", err)
		d.mu.Lock()
		d.info.Status = driver.StatusConnected
		d.mu.Unlock()
		return fmt.Errorf("failed to load model: %w", err)
	}
	d.driverLog("INFO", "[kneron] load_model succeeded: %s", modelPath)
	// Simulate remaining flash progress stages (the Kneron SDK does not
	// provide granular progress, so we approximate it after the model
	// has been loaded successfully).
	type stage struct {
		name     string
		duration time.Duration
		startPct int
		endPct   int
	}
	stages := []stage{
		{"transferring", 2 * time.Second, 10, 80},
		{"verifying", 1 * time.Second, 80, 95},
		{"finalizing", 500 * time.Millisecond, 95, 99},
	}
	// KL720 is faster (USB 3.0, no firmware reload needed)
	if chip == "KL720" {
		stages = []stage{
			{"transferring", 1 * time.Second, 10, 80},
			{"verifying", 500 * time.Millisecond, 80, 95},
			{"finalizing", 200 * time.Millisecond, 95, 99},
		}
	}
	// Emit progress in ~5% increments across each stage.
	for _, s := range stages {
		steps := (s.endPct - s.startPct) / 5
		if steps < 1 {
			steps = 1
		}
		interval := s.duration / time.Duration(steps)
		for i := 0; i <= steps; i++ {
			pct := s.startPct + (s.endPct-s.startPct)*i/steps
			progressCh <- driver.FlashProgress{
				Percent: pct,
				Stage:   s.name,
				Message: fmt.Sprintf("%s... %d%%", s.name, pct),
			}
			time.Sleep(interval)
		}
	}
	d.mu.Lock()
	d.modelLoaded = modelPath
	d.info.FlashedModel = modelPath
	d.info.Status = driver.StatusConnected
	d.mu.Unlock()
	progressCh <- driver.FlashProgress{Percent: 100, Stage: "done", Message: "Flash complete"}
	return nil
}
// StartInference begins continuous inference mode. It only flips driver
// state; actual work happens per RunInference/ReadInference call.
func (d *KneronDriver) StartInference() error {
	d.mu.Lock()
	defer d.mu.Unlock()
	if d.connected {
		d.inferring = true
		d.info.Status = driver.StatusInferencing
		return nil
	}
	return fmt.Errorf("device not connected")
}
// StopInference stops continuous inference mode and returns the device
// to the plain connected state.
func (d *KneronDriver) StopInference() error {
	d.mu.Lock()
	d.inferring = false
	d.info.Status = driver.StatusConnected
	d.mu.Unlock()
	return nil
}
// ReadInference reads the latest inference result. This is equivalent to
// calling RunInference with nil image data (the bridge decides what a
// nil/empty input means).
func (d *KneronDriver) ReadInference() (*driver.InferenceResult, error) {
	return d.RunInference(nil)
}
// RunInference runs inference on the provided image data and returns
// the result. If imageData is nil, the bridge will run inference on
// a default/empty input.
//
// The image is base64-encoded for transport over the line-based JSON
// protocol; d.mu is held across sendCommand so bridge requests from
// concurrent callers are serialized.
func (d *KneronDriver) RunInference(imageData []byte) (*driver.InferenceResult, error) {
	d.mu.Lock()
	pythonReady := d.pythonReady
	d.mu.Unlock()
	if !pythonReady {
		return nil, fmt.Errorf("hardware bridge is not running — device may not be connected")
	}
	// Encode image data as base64 for transmission to Python.
	imageB64 := ""
	if imageData != nil {
		imageB64 = base64.StdEncoding.EncodeToString(imageData)
	}
	d.mu.Lock()
	resp, err := d.sendCommand(map[string]interface{}{
		"cmd":          "inference",
		"image_base64": imageB64,
	})
	d.mu.Unlock()
	if err != nil {
		return nil, fmt.Errorf("inference failed: %w", err)
	}
	return parseInferenceResult(resp)
}
// parseInferenceResult converts a JSON response map into an InferenceResult.
// Fields in the map that do not match the struct are silently dropped by
// the round-trip; no validation is performed here.
func parseInferenceResult(resp map[string]interface{}) (*driver.InferenceResult, error) {
	// Re-marshal to JSON and unmarshal into the struct for clean conversion.
	data, err := json.Marshal(resp)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal response: %w", err)
	}
	var result driver.InferenceResult
	if err := json.Unmarshal(data, &result); err != nil {
		return nil, fmt.Errorf("failed to parse inference result: %w", err)
	}
	return &result, nil
}
// GetModelInfo returns information about the currently loaded model, or
// an error if no model has been flashed in this session. Both ID and Name
// carry the model path; LoadedAt is the time of this call, not the flash.
func (d *KneronDriver) GetModelInfo() (*driver.ModelInfo, error) {
	d.mu.Lock()
	defer d.mu.Unlock()
	if d.modelLoaded == "" {
		return nil, fmt.Errorf("no model loaded")
	}
	info := &driver.ModelInfo{
		ID:       d.modelLoaded,
		Name:     d.modelLoaded,
		LoadedAt: time.Now(),
	}
	return info, nil
}

View File

@ -0,0 +1,183 @@
package mock
import (
"fmt"
"math/rand"
"sync"
"time"
"edge-ai-platform/internal/driver"
)
// mockLabels is the pool of object labels random mock results draw from.
var mockLabels = []string{"person", "car", "bicycle", "dog", "cat", "chair", "bottle", "phone"}

// MockDriver is an in-memory driver.DeviceDriver used when the server runs
// in mock mode: it simulates connect/flash/inference with delays and
// randomized results, touching no hardware.
type MockDriver struct {
	info        driver.DeviceInfo
	connected   bool   // guarded by mu
	inferring   bool   // guarded by mu
	modelLoaded string // path of the last "flashed" model
	mu          sync.Mutex
}
// NewMockDriver creates a MockDriver around the given device info.
func NewMockDriver(info driver.DeviceInfo) *MockDriver {
	return &MockDriver{info: info}
}

// Factory builds a simulated KL720 device for the given ID and zero-based
// index. It matches the device.DriverFactory Create signature.
func Factory(id string, index int) driver.DeviceDriver {
	info := driver.DeviceInfo{
		ID:          id,
		Name:        fmt.Sprintf("Kneron KL720 (Mock #%d)", index+1),
		Type:        "kneron_kl720",
		Port:        fmt.Sprintf("/dev/ttyMOCK%d", index),
		Status:      driver.StatusDetected,
		FirmwareVer: "2.2.0-mock",
	}
	return NewMockDriver(info)
}
// Info returns a copy of the current device information.
func (d *MockDriver) Info() driver.DeviceInfo {
	d.mu.Lock()
	defer d.mu.Unlock()
	return d.info
}
// Connect simulates opening the device, including a short handshake delay.
func (d *MockDriver) Connect() error {
	d.mu.Lock()
	time.Sleep(200 * time.Millisecond) // simulated USB handshake latency
	d.connected = true
	d.info.Status = driver.StatusConnected
	d.mu.Unlock()
	return nil
}
// Disconnect simulates closing the device and clears inference state.
func (d *MockDriver) Disconnect() error {
	d.mu.Lock()
	d.connected = false
	d.inferring = false
	d.info.Status = driver.StatusDisconnected
	d.mu.Unlock()
	return nil
}
// IsConnected reports whether the simulated device is connected.
func (d *MockDriver) IsConnected() bool {
	d.mu.Lock()
	defer d.mu.Unlock()
	return d.connected
}
// Flash simulates flashing a model: it walks through staged progress
// updates with realistic delays (~10 s total), then records the model as
// loaded. Sends on progressCh are blocking.
func (d *MockDriver) Flash(modelPath string, progressCh chan<- driver.FlashProgress) error {
	d.mu.Lock()
	d.info.Status = driver.StatusFlashing
	d.mu.Unlock()
	type phase struct {
		label    string
		took     time.Duration
		from, to int
	}
	phases := []phase{
		{"preparing", 1 * time.Second, 0, 10},
		{"transferring", 6 * time.Second, 10, 80},
		{"verifying", 2 * time.Second, 80, 95},
		{"rebooting", 1 * time.Second, 95, 99},
	}
	for _, p := range phases {
		// Emit progress in ~5% increments across this phase.
		count := (p.to - p.from) / 5
		if count < 1 {
			count = 1
		}
		tick := p.took / time.Duration(count)
		for step := 0; step <= count; step++ {
			pct := p.from + (p.to-p.from)*step/count
			progressCh <- driver.FlashProgress{
				Percent: pct,
				Stage:   p.label,
				Message: fmt.Sprintf("%s... %d%%", p.label, pct),
			}
			time.Sleep(tick)
		}
	}
	d.mu.Lock()
	d.modelLoaded = modelPath
	d.info.FlashedModel = modelPath
	d.info.Status = driver.StatusConnected
	d.mu.Unlock()
	progressCh <- driver.FlashProgress{Percent: 100, Stage: "done", Message: "Flash complete"}
	return nil
}
// StartInference marks the mock device as inferencing. Always succeeds.
func (d *MockDriver) StartInference() error {
	d.mu.Lock()
	defer d.mu.Unlock()
	d.inferring = true
	d.info.Status = driver.StatusInferencing
	return nil
}
// StopInference clears the inferencing state and restores the connected
// status. Always succeeds.
func (d *MockDriver) StopInference() error {
	d.mu.Lock()
	defer d.mu.Unlock()
	d.inferring = false
	d.info.Status = driver.StatusConnected
	return nil
}
// ReadInference returns the next (randomly generated) inference result by
// delegating to RunInference with no image data.
func (d *MockDriver) ReadInference() (*driver.InferenceResult, error) {
	return d.RunInference(nil)
}
// RunInference produces a randomly generated result after a simulated ~30ms
// device latency. imageData is ignored by the mock.
func (d *MockDriver) RunInference(imageData []byte) (*driver.InferenceResult, error) {
	time.Sleep(30 * time.Millisecond) // simulated device latency
	// 1–3 random detections with normalized (0..1) bounding boxes that stay
	// inside the frame (x bounded by 1-w, y by 1-h).
	numDetections := rand.Intn(3) + 1
	detections := make([]driver.DetectionResult, numDetections)
	for i := 0; i < numDetections; i++ {
		w := 0.1 + rand.Float64()*0.3
		h := 0.1 + rand.Float64()*0.3
		detections[i] = driver.DetectionResult{
			Label:      mockLabels[rand.Intn(len(mockLabels))],
			Confidence: 0.3 + rand.Float64()*0.7,
			BBox: driver.BBox{
				X:      rand.Float64() * (1 - w),
				Y:      rand.Float64() * (1 - h),
				Width:  w,
				Height: h,
			},
		}
	}
	// Fixed label set with random confidences; "person" is biased highest.
	classifications := []driver.ClassResult{
		{Label: "person", Confidence: 0.5 + rand.Float64()*0.5},
		{Label: "car", Confidence: rand.Float64() * 0.5},
		{Label: "dog", Confidence: rand.Float64() * 0.3},
		{Label: "cat", Confidence: rand.Float64() * 0.2},
		{Label: "bicycle", Confidence: rand.Float64() * 0.15},
	}
	// TaskType is always "detection" even though classifications are also
	// populated — NOTE(review): confirm consumers expect both fields.
	return &driver.InferenceResult{
		TaskType:        "detection",
		Timestamp:       time.Now().UnixMilli(),
		LatencyMs:       20 + rand.Float64()*30,
		Detections:      detections,
		Classifications: classifications,
	}, nil
}
// GetModelInfo returns info for the model recorded by the last Flash call,
// or an error when no model has been flashed yet.
func (d *MockDriver) GetModelInfo() (*driver.ModelInfo, error) {
	d.mu.Lock()
	defer d.mu.Unlock()
	if d.modelLoaded == "" {
		return nil, fmt.Errorf("no model loaded")
	}
	// NOTE(review): LoadedAt is the time of this call, not of the flash.
	return &driver.ModelInfo{
		ID:       d.modelLoaded,
		Name:     d.modelLoaded,
		LoadedAt: time.Now(),
	}, nil
}

View File

@ -0,0 +1,51 @@
package flash
import (
"edge-ai-platform/internal/driver"
"sync"
)
// FlashTask tracks one in-flight flash operation and its progress channel.
type FlashTask struct {
	ID         string
	DeviceID   string
	ModelID    string
	ProgressCh chan driver.FlashProgress
	Done       bool // NOTE(review): written by the flash goroutine without a lock
}

// ProgressTracker is a concurrency-safe registry of flash tasks keyed by
// task ID.
type ProgressTracker struct {
	tasks map[string]*FlashTask
	mu    sync.RWMutex
}

// NewProgressTracker returns an empty tracker.
func NewProgressTracker() *ProgressTracker {
	return &ProgressTracker{
		tasks: make(map[string]*FlashTask),
	}
}
// Create registers a new flash task with a buffered progress channel.
// A task with the same ID silently replaces any existing entry.
func (pt *ProgressTracker) Create(taskID, deviceID, modelID string) *FlashTask {
	pt.mu.Lock()
	defer pt.mu.Unlock()
	task := &FlashTask{
		ID:       taskID,
		DeviceID: deviceID,
		ModelID:  modelID,
		// Buffered so the flasher can make some progress before a
		// WebSocket consumer attaches.
		ProgressCh: make(chan driver.FlashProgress, 20),
	}
	pt.tasks[taskID] = task
	return task
}
// Get looks up a task by ID; the bool reports whether it exists.
func (pt *ProgressTracker) Get(taskID string) (*FlashTask, bool) {
	pt.mu.RLock()
	defer pt.mu.RUnlock()
	t, ok := pt.tasks[taskID]
	return t, ok
}
// Remove deletes a task from the registry; a missing ID is a no-op.
func (pt *ProgressTracker) Remove(taskID string) {
	pt.mu.Lock()
	defer pt.mu.Unlock()
	delete(pt.tasks, taskID)
}

View File

@ -0,0 +1,140 @@
package flash
import (
"fmt"
"os"
"path/filepath"
"strings"
"time"
"edge-ai-platform/internal/device"
"edge-ai-platform/internal/driver"
"edge-ai-platform/internal/model"
)
// isCompatible reports whether the device type matches any of the model's
// supported hardware entries. Matching is case-insensitive; a hardware name
// also matches when it appears as a substring of the device type
// (e.g. device type "kneron_kl720" matches hardware "KL720").
func isCompatible(modelHardware []string, deviceType string) bool {
	deviceUpper := strings.ToUpper(deviceType)
	for _, hw := range modelHardware {
		hwUpper := strings.ToUpper(hw)
		if hwUpper == deviceUpper {
			return true
		}
		if strings.Contains(deviceUpper, hwUpper) {
			return true
		}
	}
	return false
}
// resolveModelPath checks if a chip-specific NEF file exists for the given
// model. For cross-platform models whose filePath points to a KL520 NEF,
// this tries to find the equivalent KL720 NEF (and vice versa).
//
// Resolution: data/nef/kl520/kl520_20001_... → data/nef/kl720/kl720_20001_...
//
// Returns filePath unchanged when the target chip cannot be determined from
// deviceType, the path already targets that chip, or no swapped candidate
// exists on disk.
func resolveModelPath(filePath string, deviceType string) string {
	if filePath == "" {
		return filePath
	}
	dt := strings.ToLower(deviceType)
	targetChip := ""
	switch {
	case strings.Contains(dt, "kl720"):
		targetChip = "kl720"
	case strings.Contains(dt, "kl520"):
		targetChip = "kl520"
	}
	if targetChip == "" {
		return filePath
	}
	// Normalize separators first: the original "/"+chip+"/" check silently
	// missed Windows-style paths (filepath.Join produces "\" there), which
	// made already-correct paths go through the swap logic.
	slashPath := filepath.ToSlash(filePath)
	// Already points to the target chip directory — use as-is.
	if strings.Contains(slashPath, "/"+targetChip+"/") {
		return filePath
	}
	// Try to swap chip prefix in both directory and filename.
	dir := filepath.Dir(filePath)
	base := filepath.Base(filePath)
	sourceChip := ""
	if strings.Contains(filepath.ToSlash(dir), "kl520") {
		sourceChip = "kl520"
	} else if strings.Contains(filepath.ToSlash(dir), "kl720") {
		sourceChip = "kl720"
	}
	if sourceChip != "" && sourceChip != targetChip {
		// Chip names contain no path separators, so a plain string swap on
		// dir/base is separator-safe.
		newDir := strings.Replace(dir, sourceChip, targetChip, 1)
		newBase := strings.Replace(base, sourceChip, targetChip, 1)
		candidate := filepath.Join(newDir, newBase)
		// Only switch when the swapped NEF actually exists on disk.
		if _, err := os.Stat(candidate); err == nil {
			return candidate
		}
	}
	return filePath
}
// Service orchestrates flashing models onto connected devices, tracking
// progress per task.
type Service struct {
	deviceMgr *device.Manager
	modelRepo *model.Repository
	tracker   *ProgressTracker
}

// NewService wires a flash service to the device manager and model repository.
func NewService(deviceMgr *device.Manager, modelRepo *model.Repository) *Service {
	return &Service{
		deviceMgr: deviceMgr,
		modelRepo: modelRepo,
		tracker:   NewProgressTracker(),
	}
}
// StartFlash validates the device/model pair and launches an asynchronous
// flash. It returns a task ID (used by the progress WebSocket) and the
// progress channel, which is closed when the flash finishes or fails.
func (s *Service) StartFlash(deviceID, modelID string) (string, <-chan driver.FlashProgress, error) {
	session, err := s.deviceMgr.GetDevice(deviceID)
	if err != nil {
		return "", nil, fmt.Errorf("device not found: %w", err)
	}
	if !session.Driver.IsConnected() {
		return "", nil, fmt.Errorf("device not connected")
	}
	m, err := s.modelRepo.GetByID(modelID)
	if err != nil {
		return "", nil, fmt.Errorf("model not found: %w", err)
	}
	// Check hardware compatibility
	deviceInfo := session.Driver.Info()
	if !isCompatible(m.SupportedHardware, deviceInfo.Type) {
		return "", nil, fmt.Errorf("model not compatible with device type %s", deviceInfo.Type)
	}
	// Use the model's .nef file path if available, otherwise fall back to modelID.
	modelPath := m.FilePath
	if modelPath == "" {
		modelPath = modelID
	}
	// Resolve chip-specific NEF (e.g. KL520 path → KL720 equivalent).
	modelPath = resolveModelPath(modelPath, deviceInfo.Type)
	// NOTE(review): the task ID is deterministic, so re-flashing the same
	// model on the same device replaces the previous task in the tracker.
	taskID := fmt.Sprintf("flash-%s-%s", deviceID, modelID)
	task := s.tracker.Create(taskID, deviceID, modelID)
	go func() {
		defer func() {
			// NOTE(review): Done is written without synchronization —
			// confirm readers tolerate the race.
			task.Done = true
			close(task.ProgressCh)
		}()
		// Brief pause to allow the WebSocket client to connect before
		// progress messages start flowing.
		time.Sleep(500 * time.Millisecond)
		if err := session.Driver.Flash(modelPath, task.ProgressCh); err != nil {
			// Percent -1 signals failure to consumers.
			task.ProgressCh <- driver.FlashProgress{
				Percent: -1,
				Stage:   "error",
				Error:   err.Error(),
			}
		}
	}()
	return taskID, task.ProgressCh, nil
}

View File

@ -0,0 +1,94 @@
package inference
import (
"context"
"sync"
"edge-ai-platform/internal/device"
"edge-ai-platform/internal/driver"
)
// stream holds the cancellation handle and completion signal for one
// per-device inference read loop.
type stream struct {
	cancel context.CancelFunc
	done   chan struct{} // closed when the read goroutine exits
}

// Service manages continuous inference streams, one per device ID.
type Service struct {
	deviceMgr *device.Manager
	streams   map[string]*stream
	mu        sync.Mutex
}

// NewService creates an inference service bound to the device manager.
func NewService(deviceMgr *device.Manager) *Service {
	return &Service{
		deviceMgr: deviceMgr,
		streams:   make(map[string]*stream),
	}
}
// Start begins a continuous inference read loop for the device, pushing
// results onto resultCh. Results are dropped (non-blocking send) when the
// channel is full. The loop runs until Stop cancels its context, at which
// point StopInference is invoked on the driver.
func (s *Service) Start(deviceID string, resultCh chan<- *driver.InferenceResult) error {
	session, err := s.deviceMgr.GetDevice(deviceID)
	if err != nil {
		return err
	}
	if err := session.Driver.StartInference(); err != nil {
		return err
	}
	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan struct{})
	s.mu.Lock()
	s.streams[deviceID] = &stream{cancel: cancel, done: done}
	s.mu.Unlock()
	go func() {
		defer close(done)
		defer session.Driver.StopInference()
		for {
			select {
			case <-ctx.Done():
				return
			default:
				result, err := session.Driver.ReadInference()
				if err != nil {
					// NOTE(review): a persistently failing ReadInference
					// turns this into a hot spin loop — consider a backoff.
					continue
				}
				// Drop the result rather than block the read loop.
				select {
				case resultCh <- result:
				default:
				}
			}
		}
	}()
	return nil
}
// StopAll stops every running inference stream; used during graceful
// shutdown. Device IDs are snapshotted under the lock, then each stream is
// stopped via Stop (which re-acquires the lock itself).
func (s *Service) StopAll() {
	s.mu.Lock()
	active := make([]string, 0, len(s.streams))
	for deviceID := range s.streams {
		active = append(active, deviceID)
	}
	s.mu.Unlock()
	for _, deviceID := range active {
		_ = s.Stop(deviceID)
	}
}
// Stop cancels the device's inference stream and waits for its goroutine
// (including the deferred StopInference) to finish. A missing stream is
// not an error, making Stop idempotent.
func (s *Service) Stop(deviceID string) error {
	s.mu.Lock()
	st, ok := s.streams[deviceID]
	if ok {
		delete(s.streams, deviceID)
	}
	s.mu.Unlock()
	if ok {
		st.cancel()
		<-st.done // wait for goroutine to finish and StopInference to complete
	}
	return nil
}

View File

@ -0,0 +1,100 @@
package model
import (
"encoding/json"
"fmt"
"os"
"strings"
"sync"
)
// Repository is an in-memory, concurrency-safe catalogue of models, seeded
// from a JSON file at construction time.
type Repository struct {
	models []Model
	mu     sync.RWMutex
}

// NewRepository loads models from the JSON file at dataPath. Load/parse
// failures are non-fatal: a warning is printed and an empty repository is
// returned. NOTE(review): warnings go to stdout via fmt.Printf rather than
// the project logger — confirm this is intended.
func NewRepository(dataPath string) *Repository {
	r := &Repository{}
	data, err := os.ReadFile(dataPath)
	if err != nil {
		fmt.Printf("Warning: could not load models from %s: %v\n", dataPath, err)
		return r
	}
	if err := json.Unmarshal(data, &r.models); err != nil {
		fmt.Printf("Warning: could not parse models JSON: %v\n", err)
	}
	return r
}
// List returns summaries of all models matching the filter, plus the number
// of matches. Filter fields combine with AND; empty fields are ignored.
// The query matches name or description, case-insensitively.
func (r *Repository) List(filter ModelFilter) ([]ModelSummary, int) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	query := strings.ToLower(filter.Query)
	var summaries []ModelSummary
	for _, m := range r.models {
		if filter.TaskType != "" && m.TaskType != filter.TaskType {
			continue
		}
		if filter.Hardware != "" && !containsHardware(m.SupportedHardware, filter.Hardware) {
			continue
		}
		if query != "" &&
			!strings.Contains(strings.ToLower(m.Name), query) &&
			!strings.Contains(strings.ToLower(m.Description), query) {
			continue
		}
		summaries = append(summaries, m.ToSummary())
	}
	return summaries, len(summaries)
}

// containsHardware reports whether hw appears (exact match) in supported.
func containsHardware(supported []string, hw string) bool {
	for _, candidate := range supported {
		if candidate == hw {
			return true
		}
	}
	return false
}
// GetByID returns a pointer to the stored model with the given ID, or an
// error when none exists. The pointer aliases the repository's backing
// slice, so callers must not mutate it.
func (r *Repository) GetByID(id string) (*Model, error) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	for idx := range r.models {
		if m := &r.models[idx]; m.ID == id {
			return m, nil
		}
	}
	return nil, fmt.Errorf("model not found: %s", id)
}
// Count returns the number of models currently in the repository.
func (r *Repository) Count() int {
	r.mu.RLock()
	defer r.mu.RUnlock()
	return len(r.models)
}
// Add appends a model to the repository. No de-duplication is performed.
func (r *Repository) Add(m Model) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.models = append(r.models, m)
}
// Remove deletes the custom model with the given ID. Built-in (non-custom)
// models cannot be removed; an unknown ID is an error.
func (r *Repository) Remove(id string) error {
	r.mu.Lock()
	defer r.mu.Unlock()
	idx := -1
	for i := range r.models {
		if r.models[i].ID == id {
			idx = i
			break
		}
	}
	if idx < 0 {
		return fmt.Errorf("model not found: %s", id)
	}
	if !r.models[idx].IsCustom {
		return fmt.Errorf("cannot delete built-in model: %s", id)
	}
	r.models = append(r.models[:idx], r.models[idx+1:]...)
	return nil
}

View File

@ -0,0 +1,122 @@
package model
import (
"testing"
)
// newTestRepo builds an in-memory repository with two built-in models and
// one custom model for the tests below.
func newTestRepo() *Repository {
	return &Repository{
		models: []Model{
			{
				ID:                "model-1",
				Name:              "YOLOv8",
				Description:       "Object detection model",
				TaskType:          "object_detection",
				SupportedHardware: []string{"KL720", "KL730"},
			},
			{
				ID:                "model-2",
				Name:              "ResNet",
				Description:       "Classification model",
				TaskType:          "classification",
				SupportedHardware: []string{"KL720"},
			},
			{
				ID:       "custom-1",
				Name:     "My Custom Model",
				TaskType: "object_detection",
				IsCustom: true,
			},
		},
	}
}
// TestRepository_List exercises all filter combinations and checks that the
// returned count matches the slice length.
func TestRepository_List(t *testing.T) {
	repo := newTestRepo()
	tests := []struct {
		name          string
		filter        ModelFilter
		expectedCount int
	}{
		{"no filter", ModelFilter{}, 3},
		{"filter by task type", ModelFilter{TaskType: "object_detection"}, 2},
		{"filter by hardware", ModelFilter{Hardware: "KL730"}, 1},
		{"filter by query", ModelFilter{Query: "YOLO"}, 1},
		{"query case insensitive", ModelFilter{Query: "resnet"}, 1},
		{"no matches", ModelFilter{TaskType: "segmentation"}, 0},
		{"combined filters", ModelFilter{TaskType: "object_detection", Query: "YOLO"}, 1},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			results, count := repo.List(tt.filter)
			if count != tt.expectedCount {
				t.Errorf("List() count = %d, want %d", count, tt.expectedCount)
			}
			if len(results) != tt.expectedCount {
				t.Errorf("List() len(results) = %d, want %d", len(results), tt.expectedCount)
			}
		})
	}
}
// TestRepository_GetByID checks lookup of existing and missing IDs.
func TestRepository_GetByID(t *testing.T) {
	repo := newTestRepo()
	tests := []struct {
		name    string
		id      string
		wantErr bool
	}{
		{"existing model", "model-1", false},
		{"another existing", "model-2", false},
		{"non-existing", "model-999", true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			m, err := repo.GetByID(tt.id)
			if (err != nil) != tt.wantErr {
				t.Errorf("GetByID() error = %v, wantErr %v", err, tt.wantErr)
			}
			if !tt.wantErr && m.ID != tt.id {
				t.Errorf("GetByID() ID = %s, want %s", m.ID, tt.id)
			}
		})
	}
}
// TestRepository_Add verifies adding to an empty repository bumps the count.
func TestRepository_Add(t *testing.T) {
	repo := &Repository{models: []Model{}}
	m := Model{ID: "new-1", Name: "New Model"}
	repo.Add(m)
	if repo.Count() != 1 {
		t.Errorf("Count() = %d, want 1", repo.Count())
	}
}
// TestRepository_Remove covers custom deletion, built-in protection, and
// unknown-ID errors. Subtests share one repo; the removed IDs don't overlap.
func TestRepository_Remove(t *testing.T) {
	repo := newTestRepo()
	tests := []struct {
		name    string
		id      string
		wantErr bool
	}{
		{"remove custom model", "custom-1", false},
		{"cannot remove built-in", "model-1", true},
		{"not found", "model-999", true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := repo.Remove(tt.id)
			if (err != nil) != tt.wantErr {
				t.Errorf("Remove() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}

View File

@ -0,0 +1,102 @@
package model
import (
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
)
// ModelStore manages custom model files on disk, one subdirectory per
// model ID under baseDir.
type ModelStore struct {
	baseDir string
}

// NewModelStore creates the store, ensuring baseDir exists.
// NOTE(review): the MkdirAll error is discarded — a store on an unwritable
// path will only fail later at save time.
func NewModelStore(baseDir string) *ModelStore {
	_ = os.MkdirAll(baseDir, 0755)
	return &ModelStore{baseDir: baseDir}
}
// SaveModel saves a .nef file for the given model ID under
// baseDir/<id>/model.nef and returns the written path.
//
// Unlike the previous version, a failed copy no longer leaves a truncated
// model.nef on disk, and the file's Close error is checked so a failed
// flush cannot be reported as success.
func (s *ModelStore) SaveModel(id string, file io.Reader) (string, error) {
	dir := filepath.Join(s.baseDir, id)
	if err := os.MkdirAll(dir, 0755); err != nil {
		return "", fmt.Errorf("failed to create model directory: %w", err)
	}
	nefPath := filepath.Join(dir, "model.nef")
	f, err := os.Create(nefPath)
	if err != nil {
		return "", fmt.Errorf("failed to create model file: %w", err)
	}
	written, err := io.Copy(f, file)
	if err != nil {
		f.Close()
		os.Remove(nefPath) // don't leave a truncated model behind
		return "", fmt.Errorf("failed to write model file: %w", err)
	}
	if err := f.Close(); err != nil {
		os.Remove(nefPath)
		return "", fmt.Errorf("failed to finalize model file: %w", err)
	}
	fmt.Printf("[INFO] Saved model file: %s (%d bytes)\n", nefPath, written)
	return nefPath, nil
}
// SaveMetadata writes the model's metadata as pretty-printed JSON to
// baseDir/<id>/metadata.json, creating the directory if needed.
func (s *ModelStore) SaveMetadata(id string, m Model) error {
	dir := filepath.Join(s.baseDir, id)
	if err := os.MkdirAll(dir, 0755); err != nil {
		return fmt.Errorf("failed to create model directory: %w", err)
	}
	metaPath := filepath.Join(dir, "metadata.json")
	data, err := json.MarshalIndent(m, "", "  ")
	if err != nil {
		return fmt.Errorf("failed to marshal metadata: %w", err)
	}
	return os.WriteFile(metaPath, data, 0644)
}
// GetModelPath returns the .nef file path for a model. The file is not
// guaranteed to exist.
func (s *ModelStore) GetModelPath(id string) string {
	return filepath.Join(s.baseDir, id, "model.nef")
}
// DeleteModel removes a model's directory and all files. Removing a
// non-existent model is not an error (os.RemoveAll semantics).
func (s *ModelStore) DeleteModel(id string) error {
	dir := filepath.Join(s.baseDir, id)
	return os.RemoveAll(dir)
}
// LoadCustomModels scans the store directory and returns all custom models.
// Directories without a readable/parsable metadata.json are skipped
// silently; a missing base directory yields (nil, nil).
func (s *ModelStore) LoadCustomModels() ([]Model, error) {
	entries, err := os.ReadDir(s.baseDir)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, nil
		}
		return nil, err
	}
	var models []Model
	for _, entry := range entries {
		if !entry.IsDir() {
			continue
		}
		metaPath := filepath.Join(s.baseDir, entry.Name(), "metadata.json")
		data, err := os.ReadFile(metaPath)
		if err != nil {
			continue // no metadata — not a model directory
		}
		var m Model
		if err := json.Unmarshal(data, &m); err != nil {
			continue // corrupt metadata — skip rather than fail the scan
		}
		// Everything in the store is custom, regardless of stored flag.
		m.IsCustom = true
		models = append(models, m)
	}
	return models, nil
}

View File

@ -0,0 +1,65 @@
package model
// Model is the full metadata record for an AI model in the catalogue,
// serialized to/from JSON (data/models.json and per-model metadata.json).
type Model struct {
	ID                string    `json:"id"`
	Name              string    `json:"name"`
	Description       string    `json:"description"`
	Thumbnail         string    `json:"thumbnail"`
	TaskType          string    `json:"taskType"`
	Categories        []string  `json:"categories"`
	Framework         string    `json:"framework"`
	InputSize         InputSize `json:"inputSize"`
	ModelSize         int64     `json:"modelSize"`
	Quantization      string    `json:"quantization"`
	Accuracy          float64   `json:"accuracy"`
	LatencyMs         float64   `json:"latencyMs"`
	FPS               float64   `json:"fps"`
	SupportedHardware []string  `json:"supportedHardware"`
	Labels            []string  `json:"labels"`
	Version           string    `json:"version"`
	Author            string    `json:"author"`
	License           string    `json:"license"`
	CreatedAt         string    `json:"createdAt"`
	UpdatedAt         string    `json:"updatedAt"`
	IsCustom          bool      `json:"isCustom,omitempty"`          // true for user-uploaded models
	FilePath          string    `json:"filePath,omitempty"`          // on-disk .nef path, if known
}
// InputSize is the model's expected input resolution in pixels.
type InputSize struct {
	Width  int `json:"width"`
	Height int `json:"height"`
}

// ModelSummary is the trimmed-down model view returned by list endpoints.
type ModelSummary struct {
	ID                string   `json:"id"`
	Name              string   `json:"name"`
	Thumbnail         string   `json:"thumbnail"`
	TaskType          string   `json:"taskType"`
	Categories        []string `json:"categories"`
	ModelSize         int64    `json:"modelSize"`
	Accuracy          float64  `json:"accuracy"`
	FPS               float64  `json:"fps"`
	SupportedHardware []string `json:"supportedHardware"`
	IsCustom          bool     `json:"isCustom,omitempty"`
}

// ModelFilter holds optional AND-combined criteria for Repository.List;
// empty fields are ignored.
type ModelFilter struct {
	TaskType string
	Hardware string
	Query    string
}
// ToSummary projects the full model onto its list-view summary fields.
func (m *Model) ToSummary() ModelSummary {
	return ModelSummary{
		ID:                m.ID,
		Name:              m.Name,
		Thumbnail:         m.Thumbnail,
		TaskType:          m.TaskType,
		Categories:        m.Categories,
		ModelSize:         m.ModelSize,
		Accuracy:          m.Accuracy,
		FPS:               m.FPS,
		SupportedHardware: m.SupportedHardware,
		IsCustom:          m.IsCustom,
	}
}

View File

@ -0,0 +1,327 @@
// Package relay implements a reverse-proxy relay server that forwards HTTP
// and WebSocket traffic through a yamux-multiplexed WebSocket tunnel to a
// remote edge-ai-server. Multiple local servers can connect simultaneously,
// each identified by a unique token (derived from hardware ID).
package relay
import (
"bufio"
"encoding/json"
"fmt"
"io"
"log"
"net"
"net/http"
"strings"
"sync"
"edge-ai-platform/pkg/wsconn"
"github.com/gorilla/websocket"
"github.com/hashicorp/yamux"
)
// upgrader accepts WebSocket upgrades from any origin.
// NOTE(review): CheckOrigin always returning true disables origin checks —
// confirm this is acceptable for the deployment model.
var upgrader = websocket.Upgrader{
	CheckOrigin: func(r *http.Request) bool { return true },
}

// Server is the relay server that bridges browser clients to tunnelled
// edge-ai-servers. Each local server is identified by its token and gets
// its own yamux session.
type Server struct {
	sessions map[string]*yamux.Session // token → active tunnel session
	mu       sync.RWMutex
}

// NewServer creates a multi-tenant relay server.
func NewServer() *Server {
	return &Server{
		sessions: make(map[string]*yamux.Session),
	}
}
// Handler returns an http.Handler that routes tunnel connections
// (/tunnel/connect), status queries (/relay/status), and everything else
// through the proxy.
func (s *Server) Handler() http.Handler {
	mux := http.NewServeMux()
	mux.HandleFunc("/tunnel/connect", s.handleTunnel)
	mux.HandleFunc("/relay/status", s.handleStatus)
	mux.HandleFunc("/", s.handleProxy)
	return mux
}
// handleTunnel accepts a WebSocket connection from an edge-ai-server and
// sets up a yamux session keyed by the provided token. It blocks until the
// session closes, then removes it from the session map unless a newer
// tunnel for the same token has already replaced it.
func (s *Server) handleTunnel(w http.ResponseWriter, r *http.Request) {
	tok := r.URL.Query().Get("token")
	if tok == "" {
		http.Error(w, "token required", http.StatusUnauthorized)
		return
	}
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Printf("[relay] tunnel upgrade failed: %v", err)
		return
	}
	netConn := wsconn.New(conn)
	session, err := yamux.Server(netConn, yamux.DefaultConfig())
	if err != nil {
		log.Printf("[relay] yamux server creation failed: %v", err)
		conn.Close()
		return
	}
	// Replace any existing session for this token (e.g. after a reconnect).
	s.mu.Lock()
	old := s.sessions[tok]
	s.sessions[tok] = session
	s.mu.Unlock()
	if old != nil {
		old.Close()
	}
	// tokenPrefix guards against the slice-bounds panic the previous
	// tok[:8] caused for tokens shorter than 8 characters (only non-empty
	// was validated above).
	log.Printf("[relay] tunnel connected: token=%s... from=%s", tokenPrefix(tok), r.RemoteAddr)
	// Block until session closes
	<-session.CloseChan()
	log.Printf("[relay] tunnel disconnected: token=%s...", tokenPrefix(tok))
	s.mu.Lock()
	if s.sessions[tok] == session {
		delete(s.sessions, tok)
	}
	s.mu.Unlock()
}

// tokenPrefix returns at most the first 8 characters of tok, for logging.
func tokenPrefix(tok string) string {
	if len(tok) <= 8 {
		return tok
	}
	return tok[:8]
}
// handleStatus reports connected tunnels as JSON. With a ?token= query
// parameter it reports only that token's status; otherwise it reports
// whether any tunnel is up plus a total count.
func (s *Server) handleStatus(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Access-Control-Allow-Origin", "*")
	tok := r.URL.Query().Get("token")
	if tok != "" {
		s.mu.RLock()
		sess, ok := s.sessions[tok]
		connected := ok && !sess.IsClosed()
		s.mu.RUnlock()
		json.NewEncoder(w).Encode(map[string]interface{}{
			"tunnelConnected": connected,
		})
		return
	}
	// No token — report total count of live (non-closed) sessions.
	s.mu.RLock()
	count := 0
	for _, sess := range s.sessions {
		if !sess.IsClosed() {
			count++
		}
	}
	s.mu.RUnlock()
	json.NewEncoder(w).Encode(map[string]interface{}{
		"tunnelConnected": count > 0,
		"tunnelCount":     count,
	})
}
// getToken extracts the relay token from the request. It checks the
// X-Relay-Token header first, then the "token" query parameter.
func getToken(r *http.Request) string {
if tok := r.Header.Get("X-Relay-Token"); tok != "" {
return tok
}
return r.URL.Query().Get("token")
}
// handleProxy forwards an HTTP request through the yamux tunnel to the
// edge-ai-server identified by the token. Supports standard requests,
// flushed streaming responses (e.g. MJPEG), and WebSocket upgrades.
// Responds with 401 (no token) or 502 (tunnel down / tunnel I/O error).
func (s *Server) handleProxy(w http.ResponseWriter, r *http.Request) {
	// CORS preflight — answered locally, never forwarded.
	if r.Method == "OPTIONS" {
		w.Header().Set("Access-Control-Allow-Origin", "*")
		w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS")
		w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Authorization, X-Relay-Token")
		w.WriteHeader(http.StatusNoContent)
		return
	}
	tok := getToken(r)
	if tok == "" {
		http.Error(w, `{"success":false,"error":{"code":"NO_TOKEN","message":"X-Relay-Token header or token query param required"}}`, http.StatusUnauthorized)
		return
	}
	s.mu.RLock()
	session := s.sessions[tok]
	s.mu.RUnlock()
	if session == nil || session.IsClosed() {
		http.Error(w, `{"success":false,"error":{"code":"TUNNEL_DISCONNECTED","message":"Edge server is not connected"}}`, http.StatusBadGateway)
		return
	}
	// Open a dedicated yamux stream for this request.
	stream, err := session.Open()
	if err != nil {
		log.Printf("[relay] failed to open yamux stream: %v", err)
		http.Error(w, `{"success":false,"error":{"code":"TUNNEL_ERROR","message":"Failed to open tunnel stream"}}`, http.StatusBadGateway)
		return
	}
	defer stream.Close()
	// Strip the X-Relay-Token header before forwarding to local server
	r.Header.Del("X-Relay-Token")
	// WebSocket upgrades take the hijack path instead.
	if isWebSocketUpgrade(r) {
		s.proxyWebSocket(w, r, stream)
		return
	}
	// Serialize the HTTP request onto the tunnel stream.
	if err := r.Write(stream); err != nil {
		log.Printf("[relay] failed to write request to tunnel: %v", err)
		http.Error(w, "tunnel write error", http.StatusBadGateway)
		return
	}
	// Read the HTTP response coming back through the tunnel.
	resp, err := http.ReadResponse(bufio.NewReader(stream), r)
	if err != nil {
		log.Printf("[relay] failed to read response from tunnel: %v", err)
		http.Error(w, "tunnel read error", http.StatusBadGateway)
		return
	}
	defer resp.Body.Close()
	// Copy response headers
	for key, vals := range resp.Header {
		for _, v := range vals {
			w.Header().Add(key, v)
		}
	}
	w.WriteHeader(resp.StatusCode)
	// Stream body — flush after every chunk so long-lived streaming
	// responses (MJPEG etc.) reach the browser immediately.
	if flusher, ok := w.(http.Flusher); ok {
		buf := make([]byte, 32*1024)
		for {
			n, err := resp.Body.Read(buf)
			if n > 0 {
				// NOTE(review): the Write error is ignored — a disconnected
				// client keeps the copy loop reading until the body ends.
				w.Write(buf[:n])
				flusher.Flush()
			}
			if err != nil {
				break
			}
		}
	} else {
		io.Copy(w, resp.Body)
	}
}
// proxyWebSocket handles WebSocket upgrade requests by forwarding the
// upgrade through the yamux stream, hijacking the client connection, and
// then bidirectionally copying raw bytes until either side closes.
func (s *Server) proxyWebSocket(w http.ResponseWriter, r *http.Request, stream net.Conn) {
	// Write the HTTP upgrade request to tunnel
	if err := r.Write(stream); err != nil {
		log.Printf("[relay] ws: failed to write upgrade request: %v", err)
		http.Error(w, "tunnel write error", http.StatusBadGateway)
		return
	}
	// Read the response (should be 101 Switching Protocols)
	resp, err := http.ReadResponse(bufio.NewReader(stream), r)
	if err != nil {
		log.Printf("[relay] ws: failed to read upgrade response: %v", err)
		http.Error(w, "tunnel read error", http.StatusBadGateway)
		return
	}
	if resp.StatusCode != http.StatusSwitchingProtocols {
		// Not upgraded — relay the error response to the client verbatim.
		for key, vals := range resp.Header {
			for _, v := range vals {
				w.Header().Add(key, v)
			}
		}
		w.WriteHeader(resp.StatusCode)
		io.Copy(w, resp.Body)
		resp.Body.Close()
		return
	}
	// Hijack the client connection to take over the raw TCP stream.
	hijacker, ok := w.(http.Hijacker)
	if !ok {
		http.Error(w, "hijacking not supported", http.StatusInternalServerError)
		return
	}
	clientConn, clientBuf, err := hijacker.Hijack()
	if err != nil {
		log.Printf("[relay] ws: hijack failed: %v", err)
		return
	}
	defer clientConn.Close()
	// Write the 101 response to the browser client
	resp.Write(clientBuf)
	clientBuf.Flush()
	// Bidirectional copy; each direction closes the opposite side on EOF so
	// the other goroutine's Copy unblocks.
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		io.Copy(stream, clientConn)
		stream.Close()
	}()
	go func() {
		defer wg.Done()
		io.Copy(clientConn, stream)
		clientConn.Close()
	}()
	wg.Wait()
}
func isWebSocketUpgrade(r *http.Request) bool {
return strings.EqualFold(r.Header.Get("Upgrade"), "websocket")
}
// TunnelConnected reports whether at least one tunnel session is active
// (registered and not closed).
func (s *Server) TunnelConnected() bool {
	s.mu.RLock()
	defer s.mu.RUnlock()
	for _, sess := range s.sessions {
		if !sess.IsClosed() {
			return true
		}
	}
	return false
}
// Shutdown closes all active tunnel sessions and clears the session map.
// handleTunnel goroutines blocked on CloseChan unblock as a result.
func (s *Server) Shutdown() {
	s.mu.Lock()
	defer s.mu.Unlock()
	for tok, sess := range s.sessions {
		sess.Close()
		delete(s.sessions, tok)
	}
}
// FormatAddr formats a port number as a listen address binding all
// interfaces, e.g. 8080 → ":8080".
func FormatAddr(port int) string {
	return fmt.Sprintf(":%d", port)
}

View File

@ -0,0 +1,242 @@
// Package tunnel implements a client that connects to a relay server and
// forwards incoming requests to the local edge-ai-server.
package tunnel
import (
	"bufio"
	"io"
	"log"
	"math"
	"net"
	"net/http"
	"net/url"
	"strings"
	"sync"
	"time"

	"edge-ai-platform/pkg/wsconn"

	"github.com/gorilla/websocket"
	"github.com/hashicorp/yamux"
)
// Client maintains a persistent tunnel connection to a relay server,
// reconnecting automatically until Stop is called.
type Client struct {
	relayURL  string // ws(s)://host:port/tunnel/connect
	token     string
	localAddr string // local server address, e.g. "127.0.0.1:3721"
	stopCh    chan struct{} // closed by Stop to end the run loop
	stoppedCh chan struct{} // closed by run when it has fully exited
}

// NewClient creates a tunnel client that connects to the given relay URL
// and forwards traffic to localAddr.
func NewClient(relayURL, token, localAddr string) *Client {
	return &Client{
		relayURL:  relayURL,
		token:     token,
		localAddr: localAddr,
		stopCh:    make(chan struct{}),
		stoppedCh: make(chan struct{}),
	}
}
// Start begins the tunnel connection loop in a background goroutine.
// It automatically reconnects on failure with exponential backoff.
func (c *Client) Start() {
	go c.run()
}

// Stop closes the tunnel connection, stops reconnecting, and waits for the
// run loop to exit. NOTE(review): calling Stop twice panics (double close),
// and Stop without a prior Start blocks forever — confirm callers only
// invoke it once after Start.
func (c *Client) Stop() {
	close(c.stopCh)
	<-c.stoppedCh
}
// run is the reconnect loop: it dials the relay, blocks while a session is
// alive, and retries failures with exponential backoff. A session that
// connected successfully and later dropped resets the backoff and
// reconnects immediately.
func (c *Client) run() {
	defer close(c.stoppedCh)
	attempt := 0
	for {
		select {
		case <-c.stopCh:
			return
		default:
		}
		err := c.connect()
		if err != nil {
			attempt++
			delay := backoff(attempt)
			log.Printf("[tunnel] connection failed (attempt %d): %v — retrying in %v", attempt, err, delay)
			select {
			case <-c.stopCh:
				return
			case <-time.After(delay):
			}
			continue
		}
		// Connected successfully — reset attempt counter
		attempt = 0
	}
}
// connect establishes a single tunnel session and blocks until it closes.
// A nil return means the session was established and later ended; an error
// means the dial or yamux setup failed (the caller backs off).
func (c *Client) connect() error {
	// Build the WebSocket URL, attaching the token as a query parameter.
	u, err := url.Parse(c.relayURL)
	if err != nil {
		return err
	}
	q := u.Query()
	if c.token != "" {
		q.Set("token", c.token)
	}
	u.RawQuery = q.Encode()
	log.Printf("[tunnel] connecting to %s", u.Host)
	dialer := websocket.DefaultDialer
	conn, _, err := dialer.Dial(u.String(), nil)
	if err != nil {
		return err
	}
	netConn := wsconn.New(conn)
	session, err := yamux.Client(netConn, yamux.DefaultConfig())
	if err != nil {
		conn.Close()
		return err
	}
	log.Printf("[tunnel] connected to relay at %s", u.Host)
	// Accept incoming streams until session closes or stop is requested.
	// NOTE(review): this watcher goroutine lives until stopCh closes, so
	// one accumulates per successful connect across reconnects — they all
	// exit together when Stop is called.
	var wg sync.WaitGroup
	go func() {
		<-c.stopCh
		session.Close()
	}()
	for {
		stream, err := session.Accept()
		if err != nil {
			if session.IsClosed() {
				break
			}
			log.Printf("[tunnel] accept error: %v", err)
			break
		}
		wg.Add(1)
		go func(s net.Conn) {
			defer wg.Done()
			c.handleStream(s)
		}(stream)
	}
	// Wait for in-flight streams to drain before reporting disconnect.
	wg.Wait()
	log.Printf("[tunnel] disconnected from relay")
	return nil
}
// handleStream reads one forwarded HTTP request from the yamux stream,
// proxies it to the local server, and writes the response (or a synthetic
// 502 on local failure) back through the stream. WebSocket upgrades are
// delegated to handleWebSocket.
func (c *Client) handleStream(stream net.Conn) {
	defer stream.Close()
	// Read the forwarded HTTP request
	req, err := http.ReadRequest(bufio.NewReader(stream))
	if err != nil {
		log.Printf("[tunnel] failed to read request: %v", err)
		return
	}
	// Re-target the request at the local server.
	req.URL.Scheme = "http"
	req.URL.Host = c.localAddr
	req.RequestURI = "" // must clear for http.Client
	// WebSocket upgrades need a raw TCP bridge, not a RoundTrip.
	if isWebSocketUpgrade(req) {
		c.handleWebSocket(stream, req)
		return
	}
	// Forward to local server
	resp, err := http.DefaultTransport.RoundTrip(req)
	if err != nil {
		log.Printf("[tunnel] local request failed: %v", err)
		// Write a bare 502 back through the tunnel so the relay can
		// answer the browser instead of timing out.
		errResp := &http.Response{
			StatusCode: http.StatusBadGateway,
			ProtoMajor: 1,
			ProtoMinor: 1,
			Header:     make(http.Header),
			Body:       http.NoBody,
		}
		errResp.Write(stream)
		return
	}
	defer resp.Body.Close()
	// Write response back through the tunnel
	resp.Write(stream)
}
// handleWebSocket forwards a WebSocket upgrade request to the local server
// over a raw TCP connection and then bidirectionally copies bytes between
// the tunnel stream and the local socket until either side closes.
func (c *Client) handleWebSocket(stream net.Conn, req *http.Request) {
	// Connect to local server via raw TCP
	localConn, err := net.DialTimeout("tcp", c.localAddr, 10*time.Second)
	if err != nil {
		log.Printf("[tunnel] ws: failed to connect to local: %v", err)
		return
	}
	defer localConn.Close()
	// Replay the original HTTP upgrade request to the local server.
	req.RequestURI = req.URL.RequestURI() // restore for raw write
	// NOTE(review): the Write error is discarded — a failed write is only
	// surfaced indirectly when the copy loops terminate.
	req.Write(localConn)
	// Bidirectional copy; each direction closes the opposite side on EOF
	// so the other goroutine's Copy unblocks.
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		io.Copy(localConn, stream)
		localConn.Close()
	}()
	go func() {
		defer wg.Done()
		io.Copy(stream, localConn)
		stream.Close()
	}()
	wg.Wait()
}
func isWebSocketUpgrade(r *http.Request) bool {
for _, v := range r.Header["Upgrade"] {
if v == "websocket" || v == "Websocket" || v == "WebSocket" {
return true
}
}
return false
}
// backoff returns an exponential backoff duration capped at 30 seconds.
func backoff(attempt int) time.Duration {
d := time.Duration(math.Min(float64(time.Second)*math.Pow(2, float64(attempt-1)), 30)) * time.Second
if d < time.Second {
d = time.Second
}
if d > 30*time.Second {
d = 30 * time.Second
}
return d
}

View File

@ -0,0 +1,83 @@
package update
import (
"encoding/json"
"fmt"
"net/http"
"strings"
"time"
)
// UpdateInfo holds the result of a version check.
type UpdateInfo struct {
	CurrentVersion  string `json:"currentVersion"`
	LatestVersion   string `json:"latestVersion"`
	UpdateAvailable bool   `json:"updateAvailable"`
	ReleaseURL      string `json:"releaseUrl"`
	ReleaseNotes    string `json:"releaseNotes"` // truncated to 500 chars by Check
	PublishedAt     string `json:"publishedAt"`
}

// giteaRelease represents a subset of the Gitea release API response.
type giteaRelease struct {
	TagName     string `json:"tag_name"`
	HTMLURL     string `json:"html_url"`
	Body        string `json:"body"`
	PublishedAt string `json:"published_at"`
}
// Check queries the Gitea API for the latest release and compares it with
// the current version. If giteaURL is empty, it returns a result indicating
// no update info is available. Network or API errors are treated as
// non-fatal: the function returns an UpdateInfo with UpdateAvailable=false.
func Check(currentVersion, giteaURL, owner, repo string) *UpdateInfo {
	info := &UpdateInfo{
		CurrentVersion: currentVersion,
	}
	if giteaURL == "" || owner == "" || repo == "" {
		return info
	}
	// Build API URL: GET /api/v1/repos/{owner}/{repo}/releases/latest
	apiURL := fmt.Sprintf("%s/api/v1/repos/%s/%s/releases/latest",
		strings.TrimRight(giteaURL, "/"), owner, repo)
	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Get(apiURL)
	if err != nil {
		return info
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return info
	}
	var release giteaRelease
	if err := json.NewDecoder(resp.Body).Decode(&release); err != nil {
		return info
	}
	info.LatestVersion = release.TagName
	info.ReleaseURL = release.HTMLURL
	info.ReleaseNotes = truncateNotes(release.Body, 500)
	info.PublishedAt = release.PublishedAt
	// Compare versions: strip leading "v" for comparison. Dev builds never
	// report an update.
	// NOTE(review): this is plain string inequality, not semver ordering —
	// a tag older than the current version also reports UpdateAvailable.
	current := strings.TrimPrefix(currentVersion, "v")
	latest := strings.TrimPrefix(release.TagName, "v")
	if latest != "" && latest != current && currentVersion != "dev" {
		info.UpdateAvailable = true
	}
	return info
}
// truncateNotes limits release notes to at most maxLen characters,
// appending "..." when truncated. Truncation is rune-aware so multi-byte
// UTF-8 release notes are never cut in the middle of a character (the
// previous byte-based slice could produce invalid UTF-8). A negative
// maxLen is treated as 0.
func truncateNotes(s string, maxLen int) string {
	if maxLen < 0 {
		maxLen = 0
	}
	runes := []rune(s)
	if len(runes) <= maxLen {
		return s
	}
	return string(runes[:maxLen]) + "..."
}

298
server/main.go Normal file
View File

@ -0,0 +1,298 @@
package main
import (
"context"
"errors"
"fmt"
"log"
"net"
"net/http"
"os"
"os/exec"
"os/signal"
"path/filepath"
"runtime"
"strings"
"syscall"
"time"
"edge-ai-platform/internal/api"
"edge-ai-platform/internal/api/handlers"
"edge-ai-platform/internal/api/ws"
"edge-ai-platform/internal/camera"
"edge-ai-platform/internal/cluster"
"edge-ai-platform/internal/config"
"edge-ai-platform/internal/deps"
"edge-ai-platform/internal/device"
"edge-ai-platform/internal/flash"
"edge-ai-platform/internal/inference"
"edge-ai-platform/internal/model"
"edge-ai-platform/internal/tunnel"
"edge-ai-platform/pkg/hwid"
pkglogger "edge-ai-platform/pkg/logger"
"edge-ai-platform/tray"
"edge-ai-platform/web"
)
// Version and BuildTime are injected at release time via
// -ldflags "-X main.Version=… -X main.BuildTime=…" (see .goreleaser.yaml);
// they keep these defaults under plain "go run"/"go build".
var (
	Version   = "dev"
	BuildTime = "unknown"
)
// baseDir returns the base directory for resolving data/ and scripts/ paths.
// In dev mode (go run), the working directory is used. In production the
// directory containing the compiled binary is used, so the server works no
// matter where it is launched from; if the executable path cannot be
// determined we fall back to the working directory.
func baseDir(devMode bool) string {
	if devMode {
		return "."
	}
	exe, err := os.Executable()
	if err != nil {
		return "."
	}
	return filepath.Dir(exe)
}
// main boots the Edge AI Platform server: it loads configuration, wires up
// logging, the model/device/camera/cluster managers and services, builds the
// HTTP router (optionally serving the embedded frontend and a relay tunnel),
// then blocks on ListenAndServe until an OS signal or a restart request
// shuts it down.
func main() {
	cfg := config.Load()
	// Tray mode: launch system tray launcher instead of server.
	if cfg.TrayMode {
		trayCfg := tray.LoadConfig()
		tray.Run(trayCfg)
		return
	}
	logger := pkglogger.New(cfg.LogLevel)
	logger.Info("Starting Edge AI Platform Server %s (built: %s)", Version, BuildTime)
	logger.Info("Mock mode: %v, Mock camera: %v, Dev mode: %v", cfg.MockMode, cfg.MockCamera, cfg.DevMode)
	// Check external dependencies
	deps.PrintStartupReport(logger)
	// Initialize model repository (built-in models from JSON)
	baseDir := baseDir(cfg.DevMode)
	modelRepo := model.NewRepository(filepath.Join(baseDir, "data", "models.json"))
	logger.Info("Loaded %d built-in models", modelRepo.Count())
	// Initialize model store (custom uploaded models)
	modelStore := model.NewModelStore(filepath.Join(baseDir, "data", "custom-models"))
	customModels, err := modelStore.LoadCustomModels()
	if err != nil {
		// Non-fatal: the server still runs with built-in models only.
		logger.Warn("Failed to load custom models: %v", err)
	}
	for _, m := range customModels {
		modelRepo.Add(m)
	}
	if len(customModels) > 0 {
		logger.Info("Loaded %d custom models", len(customModels))
	}
	// Initialize WebSocket hub (before device manager so log broadcaster is ready)
	wsHub := ws.NewHub()
	go wsHub.Run()
	// Initialize log broadcaster for real-time log streaming; every log
	// entry is fanned out to the "server-logs" WebSocket room.
	logBroadcaster := pkglogger.NewBroadcaster(500, func(entry pkglogger.LogEntry) {
		wsHub.BroadcastToRoom("server-logs", entry)
	})
	logger.SetBroadcaster(logBroadcaster)
	// Initialize device manager
	registry := device.NewRegistry()
	deviceMgr := device.NewManager(registry, cfg.MockMode, cfg.MockDeviceCount, filepath.Join(baseDir, "scripts", "kneron_bridge.py"))
	deviceMgr.SetLogBroadcaster(logBroadcaster)
	deviceMgr.Start()
	// Initialize camera manager
	cameraMgr := camera.NewManager(cfg.MockCamera)
	// Initialize cluster manager
	clusterMgr := cluster.NewManager(deviceMgr)
	// Initialize services
	flashSvc := flash.NewService(deviceMgr, modelRepo)
	inferenceSvc := inference.NewService(deviceMgr)
	// Determine static file system for embedded frontend
	var staticFS http.FileSystem
	if !cfg.DevMode {
		staticFS = web.StaticFS()
		logger.Info("Serving embedded frontend static files")
	} else {
		logger.Info("Dev mode: frontend static serving disabled (use Next.js dev server on :3000)")
	}
	// Build HTTP server (needed for graceful shutdown and restart)
	var httpServer *http.Server
	var tunnelClient *tunnel.Client
	restartRequested := make(chan struct{}, 1)
	// shutdownFn stops the tunnel, inference and camera subsystems, then
	// gives the HTTP server up to 10 seconds to drain in-flight requests.
	shutdownFn := func() {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		if tunnelClient != nil {
			tunnelClient.Stop()
		}
		inferenceSvc.StopAll()
		cameraMgr.Close()
		if httpServer != nil {
			httpServer.Shutdown(ctx)
		}
	}
	restartFn := func() {
		// Signal the main goroutine to perform exec after server shutdown
		// (non-blocking send: a second restart request is a no-op).
		select {
		case restartRequested <- struct{}{}:
		default:
		}
		shutdownFn()
	}
	// Auto-generate relay token from hardware ID if not explicitly set
	relayToken := cfg.RelayToken
	if cfg.RelayURL != "" && relayToken == "" {
		relayToken = hwid.Generate()
		logger.Info("Auto-generated relay token from hardware ID: %s...", relayToken[:8])
	}
	// Create system handler with injected version and restart function
	systemHandler := handlers.NewSystemHandler(Version, BuildTime, cfg.GiteaURL, restartFn)
	if cfg.GiteaURL != "" {
		logger.Info("Update check enabled: %s", cfg.GiteaURL)
	}
	// Create router
	r := api.NewRouter(modelRepo, modelStore, deviceMgr, cameraMgr, clusterMgr, flashSvc, inferenceSvc, wsHub, staticFS, logBroadcaster, systemHandler, relayToken)
	// Configure HTTP server
	addr := cfg.Addr()
	httpServer = &http.Server{
		Addr:    addr,
		Handler: r,
	}
	// Handle OS signals for graceful shutdown
	quit := make(chan os.Signal, 1)
	signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		sig := <-quit
		logger.Info("Received signal %v, shutting down gracefully...", sig)
		shutdownFn()
		os.Exit(0)
	}()
	// Start tunnel client if relay URL is configured
	if cfg.RelayURL != "" {
		tunnelClient = tunnel.NewClient(cfg.RelayURL, relayToken, cfg.Addr())
		tunnelClient.Start()
		logger.Info("Tunnel client started, connecting to relay: %s", cfg.RelayURL)
		// Open browser with token-embedded URL so the user is automatically
		// authenticated with the relay.
		if relayHTTP := relayWebURL(cfg.RelayURL, relayToken); relayHTTP != "" {
			logger.Info("Opening browser: %s", relayHTTP)
			openBrowser(relayHTTP)
		}
	}
	// Kill existing process on the port if occupied
	killExistingProcess(addr, logger)
	// Start server (blocks until Shutdown or a listen error)
	logger.Info("Server listening on %s", addr)
	if err := httpServer.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
		log.Fatalf("Failed to start server: %v", err)
	}
	// If restart was requested, exec the same binary to replace this process
	select {
	case <-restartRequested:
		logger.Info("Performing self-restart via exec...")
		exe, err := os.Executable()
		if err != nil {
			log.Fatalf("Failed to get executable path: %v", err)
		}
		exe, err = filepath.EvalSymlinks(exe)
		if err != nil {
			log.Fatalf("Failed to resolve executable symlinks: %v", err)
		}
		// syscall.Exec replaces the current process image and only returns
		// on failure. NOTE(review): Exec is Unix-only — confirm the restart
		// path on Windows builds.
		_ = syscall.Exec(exe, os.Args, os.Environ())
		log.Fatalf("syscall.Exec failed")
	default:
		// Normal shutdown, just exit
	}
}
// killExistingProcess checks if the port is already in use and kills the
// occupying process so the server can start cleanly.
//
// NOTE(review): this force-kills whatever currently owns the port — the
// assumption is that it is a stale instance of this server; verify that
// assumption holds in shared environments before relying on it.
func killExistingProcess(addr string, logger *pkglogger.Logger) {
	// Extract port from addr (e.g. "127.0.0.1:3721" → "3721")
	_, port, err := net.SplitHostPort(addr)
	if err != nil {
		return
	}
	// Quick check: try to listen — if it works, port is free
	ln, err := net.Listen("tcp", addr)
	if err == nil {
		ln.Close()
		return
	}
	// Port is occupied, find and kill the process
	logger.Info("Port %s is in use, killing existing process...", port)
	var cmd *exec.Cmd
	if runtime.GOOS == "windows" {
		// netstat -ano | findstr :PORT → taskkill each owning PID
		cmd = exec.Command("cmd", "/C", fmt.Sprintf("for /f \"tokens=5\" %%a in ('netstat -ano ^| findstr :%s') do taskkill /F /PID %%a", port))
	} else {
		// lsof -ti:PORT | xargs kill -9
		cmd = exec.Command("sh", "-c", fmt.Sprintf("lsof -ti:%s | xargs kill -9 2>/dev/null", port))
	}
	output, err := cmd.CombinedOutput()
	if err != nil {
		// Best-effort: log and carry on; ListenAndServe will surface the
		// conflict if the port is still busy.
		logger.Warn("Failed to kill process on port %s: %v (%s)", port, err, strings.TrimSpace(string(output)))
		return
	}
	// Wait briefly for port to be released
	time.Sleep(500 * time.Millisecond)
	logger.Info("Previous process killed, port %s is now free", port)
}
// relayWebURL converts a relay WebSocket URL (ws://host/tunnel/connect)
// into the relay's HTTP landing page with the auth token embedded as a
// query parameter, e.g. ws://host:port/tunnel/connect → http://host:port/?token=xxx.
// The wss/ws scheme is mapped to https/http and everything from "/tunnel"
// onward is dropped.
func relayWebURL(wsURL, token string) string {
	httpURL := strings.Replace(
		strings.Replace(wsURL, "wss://", "https://", 1),
		"ws://", "http://", 1)
	if cut := strings.Index(httpURL, "/tunnel"); cut >= 0 {
		httpURL = httpURL[:cut]
	}
	return fmt.Sprintf("%s/?token=%s", httpURL, token)
}
// openBrowser opens a URL in the platform's default browser. Unsupported
// platforms are silently ignored, as is any launch failure — opening a
// browser is strictly best-effort.
func openBrowser(url string) {
	launchers := map[string][]string{
		"darwin":  {"open", url},
		"linux":   {"xdg-open", url},
		"windows": {"cmd", "/c", "start", url},
	}
	args, ok := launchers[runtime.GOOS]
	if !ok {
		return
	}
	_ = exec.Command(args[0], args[1:]...).Start()
}

39
server/pkg/hwid/hwid.go Normal file
View File

@ -0,0 +1,39 @@
// Package hwid generates a stable hardware identifier from the machine's
// first non-loopback network interface MAC address.
package hwid
import (
"crypto/sha256"
"fmt"
"net"
)
// Generate returns a 16-character hex string derived from the SHA-256 hash
// of the first non-loopback network interface's MAC address. If no suitable
// interface is found (or interfaces cannot be enumerated at all), it falls
// back to hashing the literal string "unknown" so the server can still start.
func Generate() string {
	ifaces, err := net.Interfaces()
	if err != nil {
		return hashString("unknown")
	}
	for _, ifc := range ifaces {
		// Skip loopback and interfaces without a hardware address.
		if ifc.Flags&net.FlagLoopback != 0 {
			continue
		}
		if mac := ifc.HardwareAddr.String(); mac != "" {
			return hashString(mac)
		}
	}
	return hashString("unknown")
}
func hashString(s string) string {
h := sha256.Sum256([]byte(s))
return fmt.Sprintf("%x", h)[:16]
}

View File

@ -0,0 +1,73 @@
package logger
import (
"sync"
"time"
)
// LogEntry represents a single structured log entry for WebSocket streaming.
type LogEntry struct {
	Timestamp string `json:"timestamp"` // formatted "2006-01-02 15:04:05" (local time, set by Push)
	Level     string `json:"level"`     // e.g. "INFO", "WARN", "ERROR", "DEBUG"
	Message   string `json:"message"`   // fully formatted log text
}
// BroadcastFunc is called whenever a new log entry is produced.
type BroadcastFunc func(entry LogEntry)
// Broadcaster captures log output, maintains a ring buffer of recent entries,
// and notifies subscribers (via BroadcastFunc) in real time.
type Broadcaster struct {
	mu        sync.RWMutex  // guards buffer, pos and full
	buffer    []LogEntry    // fixed-capacity ring buffer of recent entries
	bufSize   int           // capacity of buffer
	pos       int           // next write index, wraps modulo bufSize
	full      bool          // true once the ring has wrapped at least once
	broadcast BroadcastFunc // optional real-time subscriber callback (may be nil)
}
// NewBroadcaster creates a broadcaster with the given ring buffer capacity
// and an optional real-time callback (fn may be nil).
//
// A non-positive bufferSize is clamped to 1: Push computes pos modulo
// bufSize, so a zero capacity would panic with a division by zero on the
// first log entry.
func NewBroadcaster(bufferSize int, fn BroadcastFunc) *Broadcaster {
	if bufferSize < 1 {
		bufferSize = 1
	}
	return &Broadcaster{
		buffer:    make([]LogEntry, bufferSize),
		bufSize:   bufferSize,
		broadcast: fn,
	}
}
// Push adds a log entry to the ring buffer and broadcasts it. The entry is
// timestamped with the current local time. The broadcast callback runs
// outside the lock so a slow subscriber cannot block other writers (which
// also means subscribers may observe entries slightly out of buffer order
// under heavy concurrency).
func (b *Broadcaster) Push(level, message string) {
	entry := LogEntry{
		Timestamp: time.Now().Format("2006-01-02 15:04:05"),
		Level:     level,
		Message:   message,
	}
	b.mu.Lock()
	b.buffer[b.pos] = entry
	b.pos = (b.pos + 1) % b.bufSize
	// Wrapping back to index 0 means the ring is now full, so Recent must
	// stitch the two halves together from pos onward.
	if b.pos == 0 {
		b.full = true
	}
	b.mu.Unlock()
	if b.broadcast != nil {
		b.broadcast(entry)
	}
}
// Recent returns a copy of all buffered log entries in chronological order.
// Before the ring wraps, that is simply buffer[0:pos]; after wrapping, the
// oldest entries start at pos, so the two halves are concatenated
// [pos..end) + [0..pos). The returned slice is freshly allocated and safe
// for the caller to retain.
func (b *Broadcaster) Recent() []LogEntry {
	b.mu.RLock()
	defer b.mu.RUnlock()
	if !b.full {
		return append([]LogEntry(nil), b.buffer[:b.pos]...)
	}
	out := make([]LogEntry, 0, b.bufSize)
	out = append(out, b.buffer[b.pos:]...)
	return append(out, b.buffer[:b.pos]...)
}

View File

@ -0,0 +1,66 @@
package logger
import (
"fmt"
"log"
"os"
)
// Logger is a simple leveled logger writing INFO/WARN/DEBUG to stdout and
// ERROR to stderr, optionally mirroring every line to a Broadcaster for
// real-time WebSocket streaming.
type Logger struct {
	info        *log.Logger
	warn        *log.Logger
	err         *log.Logger
	debug       *log.Logger
	level       string       // "debug" enables Debug output; any other value suppresses it
	broadcaster *Broadcaster // optional; nil until SetBroadcaster is called
}
// New constructs a Logger for the given level. INFO/WARN/DEBUG lines go to
// stdout, ERROR lines to stderr; every line is stamped with date, time and
// short source location.
func New(level string) *Logger {
	const flags = log.Ldate | log.Ltime | log.Lshortfile
	return &Logger{
		info:  log.New(os.Stdout, "[INFO] ", flags),
		warn:  log.New(os.Stdout, "[WARN] ", flags),
		err:   log.New(os.Stderr, "[ERROR] ", flags),
		debug: log.New(os.Stdout, "[DEBUG] ", flags),
		level: level,
	}
}
// SetBroadcaster attaches a log broadcaster for real-time log streaming.
// NOTE(review): this is a plain field write with no synchronization —
// attach during startup, before concurrent logging begins.
func (l *Logger) SetBroadcaster(b *Broadcaster) {
	l.broadcaster = b
}
// GetBroadcaster returns the attached broadcaster (may be nil).
func (l *Logger) GetBroadcaster() *Broadcaster {
	return l.broadcaster
}
// Info logs a Printf-formatted message at INFO level and mirrors it to the
// broadcaster when one is attached. (The four level methods repeat the same
// pattern rather than sharing a helper; NOTE(review): Lshortfile reports the
// call site of Printf, so any refactor through a helper would change the
// reported file:line.)
func (l *Logger) Info(msg string, args ...interface{}) {
	l.info.Printf(msg, args...)
	if l.broadcaster != nil {
		l.broadcaster.Push("INFO", fmt.Sprintf(msg, args...))
	}
}
// Warn logs a Printf-formatted message at WARN level (stdout) and mirrors it.
func (l *Logger) Warn(msg string, args ...interface{}) {
	l.warn.Printf(msg, args...)
	if l.broadcaster != nil {
		l.broadcaster.Push("WARN", fmt.Sprintf(msg, args...))
	}
}
// Error logs a Printf-formatted message at ERROR level (stderr) and mirrors it.
func (l *Logger) Error(msg string, args ...interface{}) {
	l.err.Printf(msg, args...)
	if l.broadcaster != nil {
		l.broadcaster.Push("ERROR", fmt.Sprintf(msg, args...))
	}
}
// Debug logs a Printf-formatted message only when the configured level is
// exactly "debug"; otherwise it is a no-op (including the broadcast).
func (l *Logger) Debug(msg string, args ...interface{}) {
	if l.level == "debug" {
		l.debug.Printf(msg, args...)
		if l.broadcaster != nil {
			l.broadcaster.Push("DEBUG", fmt.Sprintf(msg, args...))
		}
	}
}

View File

@ -0,0 +1,89 @@
// Package wsconn wraps a gorilla/websocket.Conn into a net.Conn so that
// stream multiplexers like hashicorp/yamux can run on top of a WebSocket.
package wsconn
import (
"io"
"net"
"sync"
"time"
"github.com/gorilla/websocket"
)
// Conn adapts a *websocket.Conn to the net.Conn interface.
// All messages are sent/received as Binary frames.
type Conn struct {
	ws     *websocket.Conn
	reader io.Reader  // reader for the in-progress incoming message; nil between messages
	rmu    sync.Mutex // serializes Read (gorilla allows one concurrent reader)
	wmu    sync.Mutex // serializes Write (gorilla allows one concurrent writer)
}
// New wraps a WebSocket connection as a net.Conn. The returned Conn takes
// over reading and writing; the caller should not use ws directly afterwards.
func New(ws *websocket.Conn) *Conn {
	return &Conn{ws: ws}
}
// Read implements net.Conn.Read by presenting the WebSocket message stream
// as a continuous byte stream: it drains the current message and, on EOF,
// transparently advances to the next one via NextReader. Message boundaries
// are therefore invisible to the caller (as a multiplexer like yamux
// requires).
func (c *Conn) Read(p []byte) (int, error) {
	c.rmu.Lock()
	defer c.rmu.Unlock()
	for {
		if c.reader != nil {
			n, err := c.reader.Read(p)
			if err == io.EOF {
				// Current message exhausted. Hand back any final bytes
				// immediately; otherwise loop to fetch the next message.
				c.reader = nil
				if n > 0 {
					return n, nil
				}
				continue
			}
			// Non-EOF error (or clean read): pass straight through.
			return n, err
		}
		_, reader, err := c.ws.NextReader()
		if err != nil {
			// Connection-level failure (e.g. peer closed the socket).
			return 0, err
		}
		c.reader = reader
	}
}
// Write sends p as a single binary WebSocket frame. Writers are serialized
// with a mutex because gorilla/websocket supports only one concurrent
// writer. On success the full length of p is reported written.
func (c *Conn) Write(p []byte) (int, error) {
	c.wmu.Lock()
	defer c.wmu.Unlock()
	if err := c.ws.WriteMessage(websocket.BinaryMessage, p); err != nil {
		return 0, err
	}
	return len(p), nil
}
// Close closes the underlying WebSocket connection.
func (c *Conn) Close() error {
	return c.ws.Close()
}
// LocalAddr returns the local address of the underlying socket.
func (c *Conn) LocalAddr() net.Addr {
	return c.ws.LocalAddr()
}
// RemoteAddr returns the remote address of the underlying socket.
func (c *Conn) RemoteAddr() net.Addr {
	return c.ws.RemoteAddr()
}
// SetDeadline sets both the read and write deadlines, per the net.Conn
// contract; if setting the read deadline fails the write deadline is not
// attempted.
func (c *Conn) SetDeadline(t time.Time) error {
	if err := c.ws.SetReadDeadline(t); err != nil {
		return err
	}
	return c.ws.SetWriteDeadline(t)
}
// SetReadDeadline sets the read deadline on the underlying WebSocket.
func (c *Conn) SetReadDeadline(t time.Time) error {
	return c.ws.SetReadDeadline(t)
}
// SetWriteDeadline sets the write deadline on the underlying WebSocket.
func (c *Conn) SetWriteDeadline(t time.Time) error {
	return c.ws.SetWriteDeadline(t)
}

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,977 @@
#!/usr/bin/env python3
"""Kneron Bridge - JSON-RPC over stdin/stdout
This script acts as a bridge between the Go backend and the Kneron PLUS
Python SDK. It reads JSON commands from stdin and writes JSON responses
to stdout.
Supports:
- KL520 (USB Boot mode - firmware must be loaded each session)
- KL720 (flash-based - firmware pre-installed, models freely reloadable)
"""
import sys
import json
import base64
import time
import os
import io
import numpy as np
try:
import kp
HAS_KP = True
except ImportError:
HAS_KP = False
try:
import cv2
HAS_CV2 = True
except ImportError:
HAS_CV2 = False
# ── Global state ──────────────────────────────────────────────────────
# Mutable module-level state shared by the command handlers; rebound on
# connect / model load.
_device_group = None  # kp device-group handle once connected
_model_id = None  # numeric ID of the currently loaded model
_model_nef = None  # path of the currently loaded .nef file
_model_input_size = 224  # updated on model load
_model_type = "tiny_yolov3"  # updated on model load based on model_id / nef name
_firmware_loaded = False  # True once firmware has been uploaded this session
_device_chip = "KL520"  # updated on connect from product_id / device_type
# COCO 80-class labels
COCO_CLASSES = [
    "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck",
    "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench",
    "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra",
    "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
    "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove",
    "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup",
    "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange",
    "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
    "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse",
    "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink",
    "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier",
    "toothbrush"
]
# Anchor boxes per model type (each list entry = one output head),
# expressed in pixels of the model's input resolution.
ANCHORS_TINY_YOLOV3 = [
    [(81, 82), (135, 169), (344, 319)],  # 7×7 head (large objects)
    [(10, 14), (23, 27), (37, 58)],  # 14×14 head (small objects)
]
# YOLOv5s anchors (Kneron model 20005, no-upsample variant for KL520)
ANCHORS_YOLOV5S = [
    [(116, 90), (156, 198), (373, 326)],  # P5/32 (large)
    [(30, 61), (62, 45), (59, 119)],  # P4/16 (medium)
    [(10, 13), (16, 30), (33, 23)],  # P3/8 (small)
]
# Detection post-processing thresholds (shared by all parsers below).
CONF_THRESHOLD = 0.25
NMS_IOU_THRESHOLD = 0.45
# Known Kneron model IDs → (model_type, input_size)
KNOWN_MODELS = {
    # Tiny YOLO v3 (default KL520 model)
    0: ("tiny_yolov3", 224),
    # ResNet18 classification (model 20001)
    20001: ("resnet18", 224),
    # FCOS DarkNet53s detection (model 20004)
    20004: ("fcos", 512),
    # YOLOv5s no-upsample (model 20005)
    20005: ("yolov5s", 640),
}
def _log(msg):
"""Write log messages to stderr (stdout is reserved for JSON-RPC)."""
print(f"[kneron_bridge] {msg}", file=sys.stderr, flush=True)
def _resolve_firmware_paths(chip="KL520"):
"""Resolve firmware paths relative to this script's directory."""
base = os.path.dirname(os.path.abspath(__file__))
fw_dir = os.path.join(base, "firmware", chip)
scpu = os.path.join(fw_dir, "fw_scpu.bin")
ncpu = os.path.join(fw_dir, "fw_ncpu.bin")
if os.path.exists(scpu) and os.path.exists(ncpu):
return scpu, ncpu
# Fallback: check KNERON_FW_DIR env var
fw_dir = os.environ.get("KNERON_FW_DIR", "")
if fw_dir:
scpu = os.path.join(fw_dir, "fw_scpu.bin")
ncpu = os.path.join(fw_dir, "fw_ncpu.bin")
if os.path.exists(scpu) and os.path.exists(ncpu):
return scpu, ncpu
return None, None
def _detect_model_type(model_id, nef_path):
    """Detect model type and input size from model ID or .nef filename.

    Side effects only: rebinds the module globals ``_model_type`` and
    ``_model_input_size``; returns None. Lookup order: exact match in
    KNOWN_MODELS by ID, then substring heuristics on the .nef filename,
    finally a tiny-YOLOv3/224 default.
    """
    global _model_type, _model_input_size
    # Check known model IDs
    if model_id in KNOWN_MODELS:
        _model_type, _model_input_size = KNOWN_MODELS[model_id]
        _log(f"Model type detected by ID {model_id}: {_model_type} ({_model_input_size}x{_model_input_size})")
        return
    # Fallback: try to infer from filename
    basename = os.path.basename(nef_path).lower() if nef_path else ""
    if "yolov5" in basename:
        _model_type = "yolov5s"
        # Try to parse input size from filename like w640h640
        _model_input_size = _parse_size_from_name(basename, default=640)
    elif "fcos" in basename:
        _model_type = "fcos"
        _model_input_size = _parse_size_from_name(basename, default=512)
    elif "ssd" in basename:
        _model_type = "ssd"
        _model_input_size = _parse_size_from_name(basename, default=320)
    elif "resnet" in basename or "classification" in basename:
        _model_type = "resnet18"
        _model_input_size = _parse_size_from_name(basename, default=224)
    elif "tiny_yolo" in basename or "tinyyolo" in basename:
        _model_type = "tiny_yolov3"
        _model_input_size = _parse_size_from_name(basename, default=224)
    else:
        # Default: assume YOLO-like detection
        _model_type = "tiny_yolov3"
        _model_input_size = 224
    _log(f"Model type detected by filename '{basename}': {_model_type} ({_model_input_size}x{_model_input_size})")
def _parse_size_from_name(name, default=224):
"""Extract input size from filename like 'w640h640' or 'w512h512'."""
import re
m = re.search(r'w(\d+)h(\d+)', name)
if m:
return int(m.group(1))
return default
# ── Post-processing ──────────────────────────────────────────────────
def _sigmoid(x):
return 1.0 / (1.0 + np.exp(-np.clip(x, -500, 500)))
def _nms(detections, iou_threshold=NMS_IOU_THRESHOLD):
    """Greedy per-class Non-Maximum Suppression.

    Sorts *detections* in place by descending confidence, then keeps each
    detection unless an already-kept detection of the same class overlaps
    it with IoU above *iou_threshold*. Returns the kept detections.
    """
    def _iou(a, b):
        ab, bb = a["bbox"], b["bbox"]
        ix = max(0, min(ab["x"] + ab["width"], bb["x"] + bb["width"]) - max(ab["x"], bb["x"]))
        iy = max(0, min(ab["y"] + ab["height"], bb["y"] + bb["height"]) - max(ab["y"], bb["y"]))
        inter = ix * iy
        union = ab["width"] * ab["height"] + bb["width"] * bb["height"] - inter
        return inter / (union + 1e-6)

    detections.sort(key=lambda d: d["confidence"], reverse=True)
    keep = []
    for det in detections:
        suppressed = any(
            det["class_id"] == kept["class_id"] and _iou(det, kept) > iou_threshold
            for kept in keep
        )
        if not suppressed:
            keep.append(det)
    return keep
def _get_preproc_info(result):
"""Extract letterbox padding info from the inference result.
Kneron SDK applies letterbox resize (aspect-ratio-preserving + zero padding)
before inference. The hw_pre_proc_info tells us how to reverse it.
Returns (pad_left, pad_top, resize_w, resize_h, model_w, model_h) or None.
"""
try:
info = result.header.hw_pre_proc_info_list[0]
return {
"pad_left": info.pad_left if hasattr(info, 'pad_left') else 0,
"pad_top": info.pad_top if hasattr(info, 'pad_top') else 0,
"resized_w": info.resized_img_width if hasattr(info, 'resized_img_width') else 0,
"resized_h": info.resized_img_height if hasattr(info, 'resized_img_height') else 0,
"model_w": info.model_input_width if hasattr(info, 'model_input_width') else 0,
"model_h": info.model_input_height if hasattr(info, 'model_input_height') else 0,
"img_w": info.img_width if hasattr(info, 'img_width') else 0,
"img_h": info.img_height if hasattr(info, 'img_height') else 0,
}
except Exception:
return None
def _correct_bbox_for_letterbox(x, y, w, h, preproc, model_size):
"""Remove letterbox padding offset from normalized bbox coordinates.
Input (x, y, w, h) is in model-input-space normalized to 0-1.
Output is re-normalized to the original image aspect ratio (still 0-1).
For KP_PADDING_CORNER (default): image is at top-left, padding at bottom/right.
"""
if preproc is None:
return x, y, w, h
model_w = preproc["model_w"] or model_size
model_h = preproc["model_h"] or model_size
pad_left = preproc["pad_left"]
pad_top = preproc["pad_top"]
resized_w = preproc["resized_w"] or model_w
resized_h = preproc["resized_h"] or model_h
# If no padding was applied, skip correction
if pad_left == 0 and pad_top == 0 and resized_w == model_w and resized_h == model_h:
return x, y, w, h
# Convert from normalized (0-1 of model input) to pixel coords in model space
px = x * model_w
py = y * model_h
pw = w * model_w
ph = h * model_h
# Subtract padding offset
px -= pad_left
py -= pad_top
# Re-normalize to the resized (un-padded) image dimensions
nx = px / resized_w
ny = py / resized_h
nw = pw / resized_w
nh = ph / resized_h
# Clip to 0-1
nx = max(0.0, min(1.0, nx))
ny = max(0.0, min(1.0, ny))
nw = min(1.0 - nx, nw)
nh = min(1.0 - ny, nh)
return nx, ny, nw, nh
def _parse_yolo_output(result, anchors, input_size, num_classes=80):
    """Parse YOLO (v3/v5) raw output into detection results.

    Works for both Tiny YOLOv3 and YOLOv5 — the tensor layout is the same:
    (num_anchors * (5 + num_classes), grid_h, grid_w)
    The key differences are:
    - anchor values
    - input_size used for anchor normalization
    - number of output heads
    Bounding boxes are corrected for letterbox padding so coordinates
    are relative to the original image (normalized 0-1). Returns a list of
    {"label", "confidence", "bbox"} dicts after per-class NMS.
    """
    detections = []
    entry_size = 5 + num_classes  # 85 for COCO 80 classes
    # Get letterbox padding info
    preproc = _get_preproc_info(result)
    if preproc:
        _log(f"Preproc info: pad=({preproc['pad_left']},{preproc['pad_top']}), "
             f"resized=({preproc['resized_w']}x{preproc['resized_h']}), "
             f"model=({preproc['model_w']}x{preproc['model_h']}), "
             f"img=({preproc['img_w']}x{preproc['img_h']})")
    for head_idx in range(result.header.num_output_node):
        output = kp.inference.generic_inference_retrieve_float_node(
            node_idx=head_idx,
            generic_raw_result=result,
            channels_ordering=kp.ChannelOrdering.KP_CHANNEL_ORDERING_CHW
        )
        arr = output.ndarray[0]  # (C, H, W)
        channels, grid_h, grid_w = arr.shape
        # Determine number of anchors for this head
        num_anchors = channels // entry_size
        if num_anchors < 1:
            _log(f"Head {head_idx}: unexpected shape {arr.shape}, skipping")
            continue
        # Use the correct anchor set for this head
        if head_idx < len(anchors):
            head_anchors = anchors[head_idx]
        else:
            _log(f"Head {head_idx}: no anchors defined, skipping")
            continue
        for a_idx in range(min(num_anchors, len(head_anchors))):
            off = a_idx * entry_size
            for cy in range(grid_h):
                for cx in range(grid_w):
                    # Objectness gate first — skips the class-score work
                    # for empty cells.
                    obj_conf = _sigmoid(arr[off + 4, cy, cx])
                    if obj_conf < CONF_THRESHOLD:
                        continue
                    cls_scores = _sigmoid(arr[off + 5:off + entry_size, cy, cx])
                    cls_id = int(np.argmax(cls_scores))
                    cls_conf = float(cls_scores[cls_id])
                    conf = float(obj_conf * cls_conf)
                    if conf < CONF_THRESHOLD:
                        continue
                    # Decode center (bx, by) and size (bw, bh), normalized
                    # 0-1 in model-input space; exp() input is clamped to 10
                    # to avoid overflow on noisy logits.
                    bx = (_sigmoid(arr[off, cy, cx]) + cx) / grid_w
                    by = (_sigmoid(arr[off + 1, cy, cx]) + cy) / grid_h
                    aw, ah = head_anchors[a_idx]
                    bw = (np.exp(min(float(arr[off + 2, cy, cx]), 10)) * aw) / input_size
                    bh = (np.exp(min(float(arr[off + 3, cy, cx]), 10)) * ah) / input_size
                    # Convert center x,y,w,h to corner x,y,w,h (normalized to model input)
                    x = max(0.0, bx - bw / 2)
                    y = max(0.0, by - bh / 2)
                    w = min(1.0, bx + bw / 2) - x
                    h = min(1.0, by + bh / 2) - y
                    # Correct for letterbox padding
                    x, y, w, h = _correct_bbox_for_letterbox(x, y, w, h, preproc, input_size)
                    label = COCO_CLASSES[cls_id] if cls_id < len(COCO_CLASSES) else f"class_{cls_id}"
                    detections.append({
                        "label": label,
                        "class_id": cls_id,
                        "confidence": conf,
                        "bbox": {"x": x, "y": y, "width": w, "height": h},
                    })
    detections = _nms(detections)
    # Remove internal class_id before returning
    for d in detections:
        del d["class_id"]
    return detections
def _parse_ssd_output(result, input_size=320, num_classes=2):
    """Parse SSD face detection output.

    SSD typically outputs two tensors:
    - locations: (num_boxes, 4) bounding box coordinates
    - confidences: (num_boxes, num_classes) class scores
    For the KL520 SSD face detection model (kl520_ssd_fd_lm.nef),
    the output contains face detections with landmarks.
    Returns a list of {"label": "face", "confidence", "bbox"} dicts after
    NMS; any parse failure is logged and yields whatever was collected
    (usually []).
    """
    detections = []
    preproc = _get_preproc_info(result)
    try:
        # Retrieve all output nodes
        num_outputs = result.header.num_output_node
        outputs = []
        for i in range(num_outputs):
            output = kp.inference.generic_inference_retrieve_float_node(
                node_idx=i,
                generic_raw_result=result,
                channels_ordering=kp.ChannelOrdering.KP_CHANNEL_ORDERING_CHW
            )
            outputs.append(output.ndarray[0])
        if num_outputs < 2:
            _log(f"SSD: expected >=2 output nodes, got {num_outputs}")
            return detections
        # Heuristic: the larger tensor is locations, smaller is confidences
        # Or: first output = locations, second = confidences
        # NOTE(review): only the positional assumption is implemented —
        # outputs[0]=locations, outputs[1]=confidences; confirm against the
        # deployed .nef's node ordering.
        locations = outputs[0]
        confidences = outputs[1]
        # Flatten if needed
        if locations.ndim > 2:
            locations = locations.reshape(-1, 4)
        if confidences.ndim > 2:
            confidences = confidences.reshape(-1, confidences.shape[-1])
        num_boxes = min(locations.shape[0], confidences.shape[0])
        for i in range(num_boxes):
            # SSD confidence: class 0 = background, class 1 = face
            if confidences.shape[-1] > 1:
                conf = float(confidences[i, 1])  # face class
            else:
                conf = float(_sigmoid(confidences[i, 0]))
            if conf < CONF_THRESHOLD:
                continue
            # SSD outputs are typically [x_min, y_min, x_max, y_max] normalized
            x_min = float(np.clip(locations[i, 0], 0.0, 1.0))
            y_min = float(np.clip(locations[i, 1], 0.0, 1.0))
            x_max = float(np.clip(locations[i, 2], 0.0, 1.0))
            y_max = float(np.clip(locations[i, 3], 0.0, 1.0))
            w = x_max - x_min
            h = y_max - y_min
            if w <= 0 or h <= 0:
                continue
            # Correct for letterbox padding
            x_min, y_min, w, h = _correct_bbox_for_letterbox(
                x_min, y_min, w, h, preproc, input_size)
            detections.append({
                "label": "face",
                "class_id": 0,
                "confidence": conf,
                "bbox": {"x": x_min, "y": y_min, "width": w, "height": h},
            })
        detections = _nms(detections)
        # Strip the internal class_id key used only for per-class NMS.
        for d in detections:
            del d["class_id"]
    except Exception as e:
        _log(f"SSD parse error: {e}")
    return detections
def _parse_fcos_output(result, input_size=512, num_classes=80):
    """Parse FCOS (Fully Convolutional One-Stage) detection output.

    FCOS outputs per feature level:
    - classification: (num_classes, H, W)
    - centerness: (1, H, W)
    - regression: (4, H, W) distances from each pixel to box edges (l, t, r, b)
    The outputs come in groups of 3 per feature level. Returns detection
    dicts after NMS; parse failures are logged and yield whatever was
    collected so far (usually []).
    """
    detections = []
    preproc = _get_preproc_info(result)
    try:
        num_outputs = result.header.num_output_node
        outputs = []
        for i in range(num_outputs):
            output = kp.inference.generic_inference_retrieve_float_node(
                node_idx=i,
                generic_raw_result=result,
                channels_ordering=kp.ChannelOrdering.KP_CHANNEL_ORDERING_CHW
            )
            outputs.append(output.ndarray[0])
        # FCOS typically has 5 feature levels × 3 outputs = 15 output nodes
        # Or fewer for simplified models. Group by 3: (cls, centerness, reg)
        # If we can't determine the grouping, try a simpler approach.
        # NOTE(review): the (cls, cnt, reg) order within each triple is
        # assumed — confirm against the deployed model's node layout.
        strides = [8, 16, 32, 64, 128]
        num_levels = num_outputs // 3
        for level in range(num_levels):
            cls_out = outputs[level * 3]  # (num_classes, H, W)
            cnt_out = outputs[level * 3 + 1]  # (1, H, W)
            reg_out = outputs[level * 3 + 2]  # (4, H, W)
            stride = strides[level] if level < len(strides) else (8 * (2 ** level))
            h, w = cls_out.shape[1], cls_out.shape[2]
            for cy in range(h):
                for cx in range(w):
                    cls_scores = _sigmoid(cls_out[:, cy, cx])
                    cls_id = int(np.argmax(cls_scores))
                    cls_conf = float(cls_scores[cls_id])
                    centerness = float(_sigmoid(cnt_out[0, cy, cx]))
                    # Final score is the class score down-weighted by centerness.
                    conf = cls_conf * centerness
                    if conf < CONF_THRESHOLD:
                        continue
                    # Regression: distances from pixel center to box edges;
                    # exp() input clamped to 10 to avoid overflow.
                    px = (cx + 0.5) * stride
                    py = (cy + 0.5) * stride
                    l = float(np.exp(min(reg_out[0, cy, cx], 10))) * stride
                    t = float(np.exp(min(reg_out[1, cy, cx], 10))) * stride
                    r = float(np.exp(min(reg_out[2, cy, cx], 10))) * stride
                    b = float(np.exp(min(reg_out[3, cy, cx], 10))) * stride
                    x_min = max(0.0, (px - l) / input_size)
                    y_min = max(0.0, (py - t) / input_size)
                    x_max = min(1.0, (px + r) / input_size)
                    y_max = min(1.0, (py + b) / input_size)
                    bw = x_max - x_min
                    bh = y_max - y_min
                    if bw <= 0 or bh <= 0:
                        continue
                    # Correct for letterbox padding
                    x_min, y_min, bw, bh = _correct_bbox_for_letterbox(
                        x_min, y_min, bw, bh, preproc, input_size)
                    label = COCO_CLASSES[cls_id] if cls_id < len(COCO_CLASSES) else f"class_{cls_id}"
                    detections.append({
                        "label": label,
                        "class_id": cls_id,
                        "confidence": conf,
                        "bbox": {"x": x_min, "y": y_min, "width": bw, "height": bh},
                    })
        detections = _nms(detections)
        # Strip the internal class_id key used only for per-class NMS.
        for d in detections:
            del d["class_id"]
    except Exception as e:
        _log(f"FCOS parse error: {e}")
    return detections
def _parse_classification_output(result, num_classes=1000):
    """Parse classification model output (e.g., ResNet18 ImageNet).

    Returns the top-5 predictions as [{"label", "confidence"}, ...] with
    softmax-normalized confidences; failures are logged and yield [].
    NOTE(review): labels come from the 80-entry COCO_CLASSES list even
    though the docstring mentions ImageNet (1000 classes); indices >= 80
    fall back to "class_<idx>". Confirm whether ImageNet labels were
    intended here.
    """
    try:
        output = kp.inference.generic_inference_retrieve_float_node(
            node_idx=0,
            generic_raw_result=result,
            channels_ordering=kp.ChannelOrdering.KP_CHANNEL_ORDERING_CHW
        )
        scores = output.ndarray.flatten()
        # Apply softmax (shifted by the max logit for numerical stability)
        exp_scores = np.exp(scores - np.max(scores))
        probs = exp_scores / exp_scores.sum()
        # Top-5
        top_indices = np.argsort(probs)[::-1][:5]
        classifications = []
        for idx in top_indices:
            label = COCO_CLASSES[idx] if idx < len(COCO_CLASSES) else f"class_{idx}"
            classifications.append({
                "label": label,
                "confidence": float(probs[idx]),
            })
        return classifications
    except Exception as e:
        _log(f"Classification parse error: {e}")
        return []
# ── Command handlers ─────────────────────────────────────────────────
def handle_scan():
    """Scan for connected Kneron devices.

    Returns {"devices": [...]} with one dict per USB descriptor; when the
    kp SDK is unavailable or scanning raises, returns an empty device list
    plus an "error_detail" string (never raises).
    """
    if not HAS_KP:
        return {"devices": [], "error_detail": "kp module not available"}
    try:
        descs = kp.core.scan_devices()
        devices = []
        for i in range(descs.device_descriptor_number):
            dev = descs.device_descriptor_list[i]
            devices.append({
                "port": str(dev.usb_port_id),
                "firmware": str(dev.firmware),
                "kn_number": f"0x{dev.kn_number:08X}",
                "product_id": f"0x{dev.product_id:04X}",
                "connectable": dev.is_connectable,
            })
        return {"devices": devices}
    except Exception as e:
        return {"devices": [], "error_detail": str(e)}
def handle_connect(params):
    """Connect to a Kneron device and load firmware if needed.

    KL520: USB Boot mode firmware MUST be uploaded every session.
    KL720 (KDP2, pid=0x0720): Flash-based firmware pre-installed.
    KL720 (KDP legacy, pid=0x0200): Old firmware needs connect_without_check
    + firmware load to RAM before normal operation.

    Args:
        params: Command dict. Optional "port" selects a device by USB
            port id; optional "device_type" hints the chip family
            (matched case-insensitively against "kl520"/"kl720").

    Returns:
        dict: On success {"status": "connected", "firmware", "kn_number",
        "chip"} (plus "kdp_legacy": True on the legacy-KDP path); on
        failure {"error": <reason>}.

    Side effects:
        Sets the module globals _device_group, _firmware_loaded and
        _device_chip; may upload firmware and trigger a device reboot.
    """
    global _device_group, _firmware_loaded, _device_chip
    if not HAS_KP:
        return {"error": "kp module not available"}
    try:
        port = params.get("port", "")
        device_type = params.get("device_type", "")
        # Scan to find device
        descs = kp.core.scan_devices()
        if descs.device_descriptor_number == 0:
            return {"error": "no Kneron device found"}
        # Find device by port or use first one
        target_dev = None
        for i in range(descs.device_descriptor_number):
            dev = descs.device_descriptor_list[i]
            if port and str(dev.usb_port_id) == port:
                target_dev = dev
                break
        if target_dev is None:
            target_dev = descs.device_descriptor_list[0]
        if not target_dev.is_connectable:
            return {"error": "device is not connectable"}
        # Determine chip type from device_type param or product_id.
        # The explicit device_type hint wins over the USB product id.
        pid = target_dev.product_id
        if "kl720" in device_type.lower():
            _device_chip = "KL720"
        elif "kl520" in device_type.lower():
            _device_chip = "KL520"
        elif pid in (0x0200, 0x0720):
            _device_chip = "KL720"
        else:
            _device_chip = "KL520"
        fw_str = str(target_dev.firmware)
        # pid 0x0200 identifies the old KDP firmware generation on KL720.
        is_kdp_legacy = (_device_chip == "KL720" and pid == 0x0200)
        _log(f"Chip type: {_device_chip} (product_id=0x{pid:04X}, device_type={device_type}, fw={fw_str})")
        # ── KL720 KDP Legacy (pid=0x0200): old firmware, incompatible with SDK ──
        if is_kdp_legacy:
            _log(f"KL720 has legacy KDP firmware (pid=0x0200). Using connect_devices_without_check...")
            _device_group = kp.core.connect_devices_without_check(
                usb_port_ids=[target_dev.usb_port_id]
            )
            # Generous timeout: the firmware upload below is a large USB transfer.
            kp.core.set_timeout(device_group=_device_group, milliseconds=60000)
            # Load KDP2 firmware to RAM so the device can operate with this SDK
            scpu_path, ncpu_path = _resolve_firmware_paths("KL720")
            if scpu_path and ncpu_path:
                _log(f"KL720: Loading KDP2 firmware to RAM: {scpu_path}")
                kp.core.load_firmware_from_file(
                    _device_group, scpu_path, ncpu_path
                )
                _firmware_loaded = True
                _log("KL720: Firmware loaded to RAM, waiting for reboot...")
                # Fixed wait for the device to reboot and re-enumerate on USB.
                time.sleep(5)
                # Reconnect — device should now be running KDP2 in RAM
                descs = kp.core.scan_devices()
                reconnected = False
                for i in range(descs.device_descriptor_number):
                    dev = descs.device_descriptor_list[i]
                    # Accept either pid: reboot may present as KDP2 (0x0720)
                    # or still report the legacy id (0x0200).
                    if dev.product_id in (0x0200, 0x0720):
                        target_dev = dev
                        reconnected = True
                        break
                if not reconnected:
                    return {"error": "KL720 not found after firmware load. Unplug and re-plug."}
                # Try normal connect first, fallback to without_check
                try:
                    _device_group = kp.core.connect_devices(
                        usb_port_ids=[target_dev.usb_port_id]
                    )
                except Exception as conn_err:
                    _log(f"KL720: Normal reconnect failed ({conn_err}), using without_check...")
                    _device_group = kp.core.connect_devices_without_check(
                        usb_port_ids=[target_dev.usb_port_id]
                    )
                kp.core.set_timeout(device_group=_device_group, milliseconds=10000)
                fw_str = str(target_dev.firmware)
                _log(f"KL720: Reconnected after firmware load, pid=0x{target_dev.product_id:04X}, fw={fw_str}")
            else:
                _log("WARNING: KL720 firmware files not found. Cannot operate with KDP legacy device.")
                _device_group = None
                return {"error": "KL720 has legacy KDP firmware but KDP2 firmware files not found. "
                        "Run update_kl720_firmware.py to flash KDP2 permanently."}
            return {
                "status": "connected",
                "firmware": fw_str,
                "kn_number": f"0x{target_dev.kn_number:08X}",
                "chip": _device_chip,
                "kdp_legacy": True,
            }
        # ── Normal connection (KL520 or KL720 KDP2) ──
        # KL720 KDP2: connect_devices() often fails with Error 28. Using it
        # before connect_devices_without_check() corrupts SDK internal state
        # and causes SIGSEGV. Go directly to connect_devices_without_check()
        # for KL720 to avoid the crash.
        if _device_chip == "KL720":
            _log(f"KL720: Using connect_devices_without_check(usb_port_id={target_dev.usb_port_id})...")
            _device_group = kp.core.connect_devices_without_check(
                usb_port_ids=[target_dev.usb_port_id]
            )
            _log(f"connect_devices_without_check succeeded")
        else:
            _log(f"Calling kp.core.connect_devices(usb_port_id={target_dev.usb_port_id})...")
            _device_group = kp.core.connect_devices(
                usb_port_ids=[target_dev.usb_port_id]
            )
            _log(f"connect_devices succeeded")
        # KL720 needs longer timeout for large NEF transfers (12MB+ over USB)
        _timeout_ms = 60000 if _device_chip == "KL720" else 10000
        _log(f"Calling set_timeout(milliseconds={_timeout_ms})...")
        kp.core.set_timeout(device_group=_device_group, milliseconds=_timeout_ms)
        _log(f"set_timeout succeeded")
        # Firmware handling — chip-dependent
        if "Loader" in fw_str:
            # Device is in USB Boot (Loader) mode and needs firmware
            if _device_chip == "KL720":
                _log(f"WARNING: {_device_chip} is in Loader mode (unusual). Attempting firmware load...")
            scpu_path, ncpu_path = _resolve_firmware_paths(_device_chip)
            if scpu_path and ncpu_path:
                _log(f"{_device_chip}: Loading firmware: {scpu_path}")
                kp.core.load_firmware_from_file(
                    _device_group, scpu_path, ncpu_path
                )
                _firmware_loaded = True
                _log("Firmware loaded, waiting for reboot...")
                # Fixed wait for reboot + USB re-enumeration.
                time.sleep(5)
                # Reconnect after firmware load
                descs = kp.core.scan_devices()
                target_dev = descs.device_descriptor_list[0]
                _device_group = kp.core.connect_devices(
                    usb_port_ids=[target_dev.usb_port_id]
                )
                kp.core.set_timeout(
                    device_group=_device_group, milliseconds=_timeout_ms
                )
                fw_str = str(target_dev.firmware)
                _log(f"Reconnected after firmware load, firmware: {fw_str}")
            else:
                _log(f"WARNING: {_device_chip} firmware files not found, skipping firmware load")
        else:
            # Not in Loader mode — firmware already present
            _log(f"{_device_chip}: firmware already present (normal). fw={fw_str}")
        return {
            "status": "connected",
            "firmware": fw_str,
            "kn_number": f"0x{target_dev.kn_number:08X}",
            "chip": _device_chip,
        }
    except Exception as e:
        # Any SDK failure leaves us with no usable session.
        _device_group = None
        return {"error": str(e)}
def handle_disconnect(params):
    """Tear down the device session and restore module state to defaults.

    Args:
        params: Command dict (unused; accepted for handler uniformity).

    Returns:
        dict: Always {"status": "disconnected"}.
    """
    global _device_group, _model_id, _model_nef, _firmware_loaded
    global _model_type, _model_input_size, _device_chip
    # Drop the device handle and any loaded-model bookkeeping, then
    # restore the startup defaults for model type, input size and chip.
    _device_group, _model_id, _model_nef = None, None, None
    _model_type, _model_input_size = "tiny_yolov3", 224
    _firmware_loaded, _device_chip = False, "KL520"
    return {"status": "disconnected"}
def handle_reset(params):
    """Reset the device back to USB Boot (Loader) state.

    This forces the device to drop its firmware and any loaded models.
    After reset the device will re-enumerate on USB, so the caller
    must wait and issue a fresh 'connect' command.

    Args:
        params: Command dict (unused; accepted for handler uniformity).

    Returns:
        dict: {"status": "reset"} once state is cleared, or
        {"error": "device not connected"} if there is no session.
    """
    global _device_group, _model_id, _model_nef, _firmware_loaded
    global _model_type, _model_input_size, _device_chip
    if _device_group is None:
        return {"error": "device not connected"}
    try:
        _log("Resetting device (kp.core.reset_device KP_RESET_REBOOT)...")
        kp.core.reset_device(
            device_group=_device_group,
            reset_mode=kp.ResetMode.KP_RESET_REBOOT,
        )
        _log("Device reset command sent successfully")
    except Exception as e:
        # Even if it throws, the device usually does reset.
        _log(f"reset_device raised: {e}")
    # Clear all state — the device is gone until it re-enumerates.
    _device_group, _model_id, _model_nef = None, None, None
    _model_type, _model_input_size = "tiny_yolov3", 224
    _firmware_loaded, _device_chip = False, "KL520"
    return {"status": "reset"}
def handle_load_model(params):
    """Load a model file onto the device.

    KL520 USB Boot mode limitation: only one model can be loaded per
    USB session. If error 40 occurs, the error is returned to the Go
    driver which handles it by restarting the entire Python bridge.

    Args:
        params: Command dict; "path" must name an existing NEF file.

    Returns:
        dict: Model metadata ("status", "model_id", "model_type",
        "input_size", "model_path", "target_chip") on success, or
        {"error": <reason>} on failure.
    """
    global _model_id, _model_nef
    if _device_group is None:
        return {"error": "device not connected"}
    nef_path = params.get("path", "")
    if not nef_path or not os.path.exists(nef_path):
        return {"error": f"model file not found: {nef_path}"}
    # Stage 1: push the NEF onto the device.
    try:
        _model_nef = kp.core.load_model_from_file(
            device_group=_device_group,
            file_path=nef_path
        )
    except Exception as e:
        return {"error": str(e)}
    # Stage 2: read back metadata and classify the model.
    try:
        first_model = _model_nef.models[0]
        _model_id = first_model.id
        # Infer model family and expected input resolution.
        _detect_model_type(_model_id, nef_path)
        _log(f"Model loaded: id={_model_id}, type={_model_type}, "
             f"input={_model_input_size}, target={_model_nef.target_chip}")
        return {
            "status": "loaded",
            "model_id": _model_id,
            "model_type": _model_type,
            "input_size": _model_input_size,
            "model_path": nef_path,
            "target_chip": str(_model_nef.target_chip),
        }
    except Exception as e:
        return {"error": str(e)}
def handle_inference(params):
    """Run inference on the provided image data.

    Args:
        params: Command dict; "image_base64" holds the input image
            (a compressed image such as JPEG/PNG when OpenCV is
            available; otherwise assumed to be raw RGB565 bytes).

    Returns:
        dict: {"taskType", "timestamp", "latencyMs", "detections",
        "classifications"} on success, or {"error": <reason>}.
        "latencyMs" covers decode + transfer + device inference + parse.
    """
    if _device_group is None:
        return {"error": "device not connected"}
    if _model_id is None:
        return {"error": "no model loaded"}
    image_b64 = params.get("image_base64", "")
    try:
        # Latency clock starts before decode so the reported figure is
        # end-to-end, not just on-device time.
        t0 = time.time()
        if image_b64:
            # Decode base64 image
            img_bytes = base64.b64decode(image_b64)
            if HAS_CV2:
                # Decode image with OpenCV
                img_array = np.frombuffer(img_bytes, dtype=np.uint8)
                img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
                if img is None:
                    return {"error": "failed to decode image"}
                # Convert BGR to BGR565
                img_bgr565 = cv2.cvtColor(src=img, code=cv2.COLOR_BGR2BGR565)
            else:
                # Fallback: try to use raw bytes (assume RGB565 format)
                img_bgr565 = np.frombuffer(img_bytes, dtype=np.uint8)
        else:
            return {"error": "no image data provided"}
        # Create inference config
        # NOTE(review): buffer is produced with COLOR_BGR2BGR565 but sent
        # as KP_IMAGE_FORMAT_RGB565 — presumably the SDK's expected byte
        # layout; confirm against Kneron PLUS examples.
        inf_config = kp.GenericImageInferenceDescriptor(
            model_id=_model_id,
            inference_number=0,
            input_node_image_list=[
                kp.GenericInputNodeImage(
                    image=img_bgr565,
                    image_format=kp.ImageFormat.KP_IMAGE_FORMAT_RGB565,
                )
            ]
        )
        # Send and receive
        kp.inference.generic_image_inference_send(_device_group, inf_config)
        result = kp.inference.generic_image_inference_receive(_device_group)
        elapsed_ms = (time.time() - t0) * 1000
        # Parse output based on model type
        detections = []
        classifications = []
        task_type = "detection"
        if _model_type == "resnet18":
            # Classification model: top-5 labels instead of boxes.
            task_type = "classification"
            classifications = _parse_classification_output(result)
        elif _model_type == "ssd":
            detections = _parse_ssd_output(result, input_size=_model_input_size)
        elif _model_type == "fcos":
            detections = _parse_fcos_output(result, input_size=_model_input_size)
        elif _model_type == "yolov5s":
            detections = _parse_yolo_output(
                result,
                anchors=ANCHORS_YOLOV5S,
                input_size=_model_input_size,
            )
        else:
            # Default: Tiny YOLOv3
            detections = _parse_yolo_output(
                result,
                anchors=ANCHORS_TINY_YOLOV3,
                input_size=_model_input_size,
            )
        return {
            "taskType": task_type,
            "timestamp": int(time.time() * 1000),
            "latencyMs": round(elapsed_ms, 1),
            "detections": detections,
            "classifications": classifications,
        }
    except Exception as e:
        return {"error": str(e)}
# ── Main loop ────────────────────────────────────────────────────────
def main():
    """Main loop: read JSON commands from stdin, write responses to stdout."""
    # Signal readiness
    print(json.dumps({"status": "ready"}), flush=True)
    _log(f"Bridge started (kp={'yes' if HAS_KP else 'no'}, cv2={'yes' if HAS_CV2 else 'no'})")
    # Dispatch table: command name -> handler taking the parsed command.
    handlers = {
        "scan": lambda _cmd: handle_scan(),
        "connect": handle_connect,
        "disconnect": handle_disconnect,
        "reset": handle_reset,
        "load_model": handle_load_model,
        "inference": handle_inference,
    }
    for raw_line in sys.stdin:
        raw_line = raw_line.strip()
        if not raw_line:
            continue
        try:
            cmd = json.loads(raw_line)
            action = cmd.get("cmd", "")
            handler = handlers.get(action)
            if handler is None:
                result = {"error": f"unknown command: {action}"}
            else:
                result = handler(cmd)
            print(json.dumps(result), flush=True)
        except Exception as e:
            # Malformed JSON or a handler crash: report, keep serving.
            print(json.dumps({"error": str(e)}), flush=True)
if __name__ == "__main__":
main()

Some files were not shown because too many files have changed in this diff Show More