diff --git a/.vscode/settings.json b/.vscode/settings.json index 2c63c085..0967ef42 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,2 +1 @@ -{ -} +{} diff --git a/README.md b/README.md index fdddcdc8..d6c79bc1 100644 --- a/README.md +++ b/README.md @@ -1,22 +1,22 @@ # WaveSpeed -Open-source, cross-platform application for running 100+ AI models — image generation, video generation, face swap, digital human, motion control, and more. Includes a visual workflow editor for building AI pipelines and 12 free creative tools. Available for **Windows**, **macOS**, **Linux**, and **Android**. +Open-source, cross-platform application for running 600+ AI models — image generation, video generation, face swap, digital human, motion control, and more. Features a visual workflow editor for building AI pipelines, Featured Models with smart variant switching, and 12 free creative tools. Available for **Windows**, **macOS**, **Linux**, and **Android**. [![GitHub Release](https://img.shields.io/github/v/release/WaveSpeedAI/wavespeed-desktop?style=flat-square&label=Latest)](https://github.com/WaveSpeedAI/wavespeed-desktop/releases/latest) [![License](https://img.shields.io/github/license/WaveSpeedAI/wavespeed-desktop?style=flat-square)](LICENSE) [![Stars](https://img.shields.io/github/stars/WaveSpeedAI/wavespeed-desktop?style=flat-square)](https://github.com/WaveSpeedAI/wavespeed-desktop/stargazers) -[![Windows](https://img.shields.io/badge/Windows-0078D6?style=for-the-badge&logo=windows&logoColor=white)](https://github.com/WaveSpeedAI/wavespeed-desktop/releases/latest/download/WaveSpeed-Desktop-win-x64.exe) 
+[![Windows](https://img.shields.io/badge/Windows-0078D6?style=for-the-badge&logo=data:image/svg%2bxml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9IjAgMCAyNCAyNCIgZmlsbD0id2hpdGUiPjxwYXRoIGQ9Ik0wIDMuNWw5LjktMS40djkuNUgwem0xMS4xLTEuNUwyNCAwdjExLjVIMTEuMXpNMCAxMi42aDkuOXY5LjVMMCAyMC43em0xMS4xLS4xSDI0VjI0bC0xMi45LTEuOHoiLz48L3N2Zz4=&logoColor=white)](https://github.com/WaveSpeedAI/wavespeed-desktop/releases/latest/download/WaveSpeed-Desktop-win-x64.exe) [![macOS Intel](https://img.shields.io/badge/macOS_Intel-000000?style=for-the-badge&logo=apple&logoColor=white)](https://github.com/WaveSpeedAI/wavespeed-desktop/releases/latest/download/WaveSpeed-Desktop-mac-x64.dmg) [![macOS Apple Silicon](https://img.shields.io/badge/macOS_Silicon-000000?style=for-the-badge&logo=apple&logoColor=white)](https://github.com/WaveSpeedAI/wavespeed-desktop/releases/latest/download/WaveSpeed-Desktop-mac-arm64.dmg) [![Linux](https://img.shields.io/badge/Linux-FCC624?style=for-the-badge&logo=linux&logoColor=black)](https://github.com/WaveSpeedAI/wavespeed-desktop/releases/latest/download/WaveSpeed-Desktop-linux-x86_64.AppImage) [![Android](https://img.shields.io/badge/Android-3DDC84?style=for-the-badge&logo=android&logoColor=white)](https://github.com/WaveSpeedAI/wavespeed-desktop/releases/latest/download/WaveSpeed-Mobile.apk) -![Playground Screenshot](https://github.com/user-attachments/assets/7bd45689-5b24-40ab-9495-2296533e3b5a) +![Playground Screenshot](https://github.com/user-attachments/assets/054a45d8-9bbc-4f1b-8cc1-b6fa4b3b2aac) ## Android App -The Android app shares the same React codebase as the desktop version, giving you access to the AI Playground, Featured Models, Creative Studio, and all 100+ models from your phone. +The Android app shares the same React codebase as the desktop version, giving you access to the AI Playground, Featured Models, Creative Studio, and all 600+ models from your phone. 
- Full AI Playground with multi-tab support and all input types including camera capture - Featured Models with smart variant switching @@ -49,60 +49,33 @@ The Android app shares the same React codebase as the desktop version, giving yo | **Media Trimmer** | Trim video and audio by selecting start and end times | | **Media Merger** | Merge multiple video or audio files into one | -![WaveSpeed Creative Studio](https://github.com/user-attachments/assets/67359fa7-8ff4-4001-a982-eb4802e5b841) +![WaveSpeed Creative Studio](https://github.com/user-attachments/assets/dea6a526-ec08-408a-810d-7f88cc28797a) ## Visual Workflow Editor Node-based pipeline builder for designing and executing complex AI workflows. Chain any combination of AI models, free tools, and media processing steps into automated pipelines. -![WaveSpeed Visual Workflow Editor](https://github.com/user-attachments/assets/e1243d57-8d7b-4d42-bed3-94bf8adfa6f5) +![WaveSpeed Visual Workflow Editor](https://github.com/user-attachments/assets/31f6889a-aeff-41a9-ab15-c16f7c828712) ## Features -- **Model Browser**: Browse and search available AI models with fuzzy search, sortable by popularity, name, price, or type -- **Favorites**: Star your favorite models for quick access with a dedicated filter -- **Multi-Tab Playground**: Run predictions with multiple models simultaneously in separate tabs -- **Abort Execution**: Cancel running predictions with a smooth abort button (0.5s safety delay) -- **Batch Processing**: Run the same prediction multiple times (2-16) with auto-randomized seeds for variations -- **Dynamic Forms**: Auto-generated forms from model schemas with validation -- **Mask Drawing**: Interactive canvas-based mask editor for models that accept mask inputs, with brush, eraser, and bucket fill tools -- **Templates**: Save and reuse playground configurations as templates for quick access -- **LoRA Support**: Full support for LoRAs including high-noise and low-noise LoRAs for Wan 2.2 models -- **Visual 
Workflow Editor**: Node-based editor for building and executing AI/processing pipelines - - **Node Types**: Media upload, text input, AI task (any WaveSpeedAI model), 12 free tool nodes, file export, preview display, and annotation notes - - **Canvas Interaction**: Drag & drop nodes, connect handles, zoom/pan, context menus, copy/paste, duplicate, and keyboard shortcuts (Ctrl+Z/Y, Ctrl+C/V, Ctrl+S, Delete) - - **Execution Control**: Run all, run selected node, continue from any node, retry failed nodes, cancel individual or all, and batch runs (1-99x with auto-randomized seeds) - - **Execution Monitor**: Real-time progress panel with per-node status, progress bars, cost tracking, and I/O data inspection - - **Multi-Tab**: Chrome-style tabs with session persistence, tab renaming (double-click), unsaved changes indicator, and auto-restore on restart - - **Results Management**: Per-node execution history, fullscreen preview (images, videos, 3D models, audio), arrow key navigation, download, and clear results - - **Cost Estimation & Budget**: Real-time cost estimate per run, daily budget tracking, per-execution limits, and cost breakdown per node - - **Import/Export**: Save and load workflows as JSON with SQLite-backed persistence - - **Undo/Redo**: Snapshot-based (up to 50 states) with debounced text input support -- **Free Tools**: Free AI-powered image and video tools (no API key required) - - **Image Enhancer**: Upscale images 2x-4x with ESRGAN models (slim, medium, thick quality options) - - **Video Enhancer**: Frame-by-frame video upscaling with real-time progress and ETA - - **Face Enhancer**: Enhance and restore face quality using YOLO v8 for detection and GFPGAN v1.4 for enhancement (WebGPU accelerated) - - **Face Swapper**: Swap faces between images using InsightFace models (SCRFD detection, ArcFace embedding, Inswapper) with optional GFPGAN enhancement - - **Background Remover**: Remove image backgrounds instantly using AI, displaying foreground, background, 
and mask outputs simultaneously with individual download buttons - - **Image Eraser**: Remove unwanted objects from images using LaMa inpainting model with smart crop and blend (WebGPU accelerated) - - **Segment Anything**: Interactive object segmentation with point prompts using SlimSAM model - - **Video Converter**: Convert videos between formats (MP4, WebM, AVI, MOV, MKV) with codec and quality options - - **Audio Converter**: Convert audio between formats (MP3, WAV, AAC, FLAC, OGG) with bitrate control - - **Image Converter**: Batch convert images between formats (JPG, PNG, WebP, GIF, BMP) with quality settings - - **Media Trimmer**: Trim video/audio files by selecting start and end times - - **Media Merger**: Merge multiple video/audio files into one -- **Z-Image (Local)**: Run local image generation via stable-diffusion.cpp with model/aux downloads, progress, and logs -- **Multi-Phase Progress**: Compact progress bars with phase indicators, real-time status, and ETA for all Free Tools -- **History**: View your recent predictions (last 24 hours) with detailed view, download, and copy prediction ID -- **My Assets**: Save, browse, and manage generated outputs (images, videos, audio) with tags, favorites, and search -- **Auto-Save**: Automatically save generated outputs to your local assets folder (enabled by default) with error reporting -- **File Upload**: Support for image, video, and audio file inputs with drag & drop -- **Media Capture**: Built-in camera capture, video recording with audio waveform, and audio recording -- **View Documentation**: Quick access to model webpage and documentation from the titlebar (context-aware links when a model is selected) -- **Account Balance**: View your current WaveSpeed account balance in Settings with one-click refresh -- **Theme Support**: Auto (system), dark, and light theme options -- **Multi-Language**: Support for 18 languages including English, Chinese, Japanese, Korean, and more -- **Auto Updates**: Automatic 
update checking with stable and nightly channels +- **AI Playground**: Multi-tab playground with dynamic forms, batch processing (2-16x), mask drawing, LoRA support, abort control, and auto-randomized seeds +- **Featured Models**: Curated model families with smart variant switching — auto-selects the best variant based on inputs and toggles (Seedream 4.5, Seedance 1.5 Pro, Wan Spicy, InfiniteTalk, Kling 2.6, Nano Banana Pro, etc.) +- **Model Browser**: Fuzzy search, sort by popularity/name/price/type, favorites filter +- **Visual Workflow Editor**: Node-based pipeline builder with 20+ node types + - Triggers (directory scan, HTTP API), AI tasks, 12 free tool nodes, processing (concat, select), group/subgraph, I/O nodes + - Run all / run node / continue / retry / cancel / batch runs (1-99x), real-time execution monitor with cost tracking + - Group/subgraph containers with exposed I/O, breadcrumb navigation, and workflow import + - HTTP API mode: expose workflows as REST endpoints via built-in HTTP server — works as a skill server for [OpenClaw](https://github.com/anthropics/openclaw) and other AI agents + - Directory batch processing: auto-execute per media file in a folder + - Prompt optimizer, guided tour, result caching, circuit breaker, cycle detection + - Cost estimation & daily budget, import/export (JSON + SQLite), multi-tab, undo/redo, customizable output naming +- **Free Tools**: 12 AI-powered creative tools (no API key) — see [Creative Studio](#creative-studio) above +- **Z-Image**: Local image generation via stable-diffusion.cpp with model downloads, progress, and logs +- **Templates**: Playground + workflow templates with presets, i18n search, import/export, and usage tracking +- **History & Assets**: Recent predictions (24h), saved outputs with tags/favorites/search, auto-save to local folder +- **Media Input**: File upload (drag & drop), camera capture, video/audio recording +- **18 languages**, dark/light/auto theme, auto updates (stable + nightly), 
cross-platform (Windows, macOS, Linux, Android) - **Cross-Platform**: Available for Windows, macOS, Linux, and Android ## Installation @@ -111,7 +84,7 @@ Node-based pipeline builder for designing and executing complex AI workflows. Ch #### Desktop -[![Windows](https://img.shields.io/badge/Windows-0078D6?style=for-the-badge&logo=windows&logoColor=white)](https://github.com/WaveSpeedAI/wavespeed-desktop/releases/latest/download/WaveSpeed-Desktop-win-x64.exe) +[![Windows](https://img.shields.io/badge/Windows-0078D6?style=for-the-badge&logo=data:image/svg%2bxml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9IjAgMCAyNCAyNCIgZmlsbD0id2hpdGUiPjxwYXRoIGQ9Ik0wIDMuNWw5LjktMS40djkuNUgwem0xMS4xLTEuNUwyNCAwdjExLjVIMTEuMXpNMCAxMi42aDkuOXY5LjVMMCAyMC43em0xMS4xLS4xSDI0VjI0bC0xMi45LTEuOHoiLz48L3N2Zz4=&logoColor=white)](https://github.com/WaveSpeedAI/wavespeed-desktop/releases/latest/download/WaveSpeed-Desktop-win-x64.exe) [![macOS Intel](https://img.shields.io/badge/macOS_Intel-000000?style=for-the-badge&logo=apple&logoColor=white)](https://github.com/WaveSpeedAI/wavespeed-desktop/releases/latest/download/WaveSpeed-Desktop-mac-x64.dmg) [![macOS Apple Silicon](https://img.shields.io/badge/macOS_Silicon-000000?style=for-the-badge&logo=apple&logoColor=white)](https://github.com/WaveSpeedAI/wavespeed-desktop/releases/latest/download/WaveSpeed-Desktop-mac-arm64.dmg) [![Linux](https://img.shields.io/badge/Linux-FCC624?style=for-the-badge&logo=linux&logoColor=black)](https://github.com/WaveSpeedAI/wavespeed-desktop/releases/latest/download/WaveSpeed-Desktop-linux-x86_64.AppImage) @@ -193,12 +166,14 @@ npm run dev | Script | Description | | ---------------------- | ---------------------------------------- | | `npm run dev` | Start development server with hot reload | -| `npx vite` | Start web-only dev server (no Electron) | +| `npm run dev:web` | Start web-only dev server (no Electron) | | `npm run build` | Build the application | +| `npm run build:web` | Build 
web-only version (no Electron) | | `npm run build:win` | Build for Windows | | `npm run build:mac` | Build for macOS | | `npm run build:linux` | Build for Linux | | `npm run build:all` | Build for all platforms | +| `npm run dist` | Build and package for distribution | | `npm run format` | Format code with Prettier | | `npm run format:check` | Check code formatting | @@ -229,41 +204,48 @@ See [mobile/README.md](mobile/README.md) for detailed mobile development guide. ``` wavespeed-desktop/ -├── electron/ # Electron main process -│ ├── main.ts # Main process entry -│ ├── preload.ts # Preload script (IPC bridge) -│ └── workflow/ # Workflow backend -│ ├── db/ # SQLite database (workflow, node, edge, execution repos) -│ ├── ipc/ # IPC handlers (workflow, execution, history, cost, storage) -│ ├── nodes/ # Node type definitions & handlers (AI task, free tools, I/O) -│ ├── engine/ # Execution engine (DAG runner, scheduler) -│ └── utils/ # File storage, cost estimation +├── data/templates/ # Preset workflow templates (AI generation, image/video/audio processing) +├── electron/ # Electron main process +│ ├── main.ts # Main process entry +│ ├── preload.ts # Preload script (IPC bridge) +│ ├── lib/ # Local generation (sdGenerator for stable-diffusion.cpp) +│ └── workflow/ # Workflow backend +│ ├── db/ # SQLite database (workflow, node, edge, execution, budget, template repos) +│ ├── engine/ # Execution engine (DAG runner, scheduler, cache, circuit breaker) +│ ├── ipc/ # IPC handlers (workflow, execution, history, cost, storage, http-server) +│ ├── nodes/ # Node handlers (AI task, free tools, I/O, triggers, processing, control) +│ ├── services/ # HTTP server, model list, retry, service locator, template loader +│ └── utils/ # File storage, hashing, save-to-assets ├── src/ -│ ├── api/ # API client -│ ├── components/ # React components -│ │ ├── layout/ # Layout components -│ │ ├── playground/ # Playground components -│ │ ├── shared/ # Shared components -│ │ └── ui/ # shadcn/ui 
components -│ ├── hooks/ # Custom React hooks -│ ├── i18n/ # Internationalization (18 languages) -│ ├── lib/ # Utility functions -│ ├── pages/ # Page components -│ ├── stores/ # Zustand stores -│ ├── types/ # TypeScript types -│ ├── workers/ # Web Workers (upscaler, background remover, image eraser, ffmpeg) -│ └── workflow/ # Workflow frontend -│ ├── components/ # Canvas, node palette, config panel, results panel, run monitor -│ ├── stores/ # Workflow, execution, UI stores (Zustand) -│ ├── hooks/ # Workflow-specific hooks -│ ├── ipc/ # Type-safe IPC client -│ └── types/ # Workflow type definitions -├── mobile/ # Mobile app (Android) -│ ├── src/ # Mobile-specific overrides -│ ├── android/ # Android native project +│ ├── api/ # API client +│ ├── components/ # React components +│ │ ├── ffmpeg/ # FFmpeg components +│ │ ├── layout/ # Layout components +│ │ ├── playground/ # Playground components +│ │ ├── shared/ # Shared components +│ │ ├── templates/ # Template components +│ │ └── ui/ # shadcn/ui components +│ ├── hooks/ # Custom React hooks +│ ├── i18n/ # Internationalization (18 languages) +│ ├── lib/ # Utilities (fuzzy search, schema-to-form, smart form config, etc.) 
+│ ├── pages/ # Page components +│ ├── stores/ # Zustand stores +│ ├── types/ # TypeScript types +│ ├── workers/ # Web Workers (upscaler, face enhancer/swapper, background remover, image eraser, segmentation, ffmpeg) +│ └── workflow/ # Workflow frontend +│ ├── browser/ # Browser-only workflow API (web mode without Electron) +│ ├── components/ # Canvas, node palette, config panel, results panel, run monitor, prompt optimizer +│ ├── hooks/ # Workflow-specific hooks (undo/redo, group adoption, free tool listener) +│ ├── ipc/ # Type-safe IPC client +│ ├── lib/ # Cycle detection, free tool runner, model converter, topological sort +│ ├── stores/ # Workflow, execution, UI stores (Zustand) +│ └── types/ # Workflow type definitions +├── mobile/ # Mobile app (Android) +│ ├── src/ # Mobile-specific overrides +│ ├── android/ # Android native project │ └── capacitor.config.ts -├── .github/workflows/ # GitHub Actions (desktop + mobile) -└── build/ # Build resources +├── .github/workflows/ # GitHub Actions (desktop + mobile) +└── build/ # Build resources ``` ## Tech Stack @@ -277,6 +259,10 @@ wavespeed-desktop/ - **HTTP Client**: Axios - **Workflow Canvas**: React Flow - **Workflow Database**: sql.js (SQLite in-process) +- **AI/ML (Free Tools)**: @huggingface/transformers, onnxruntime-web, @tensorflow/tfjs, upscaler (ESRGAN) +- **Media Processing**: @ffmpeg/core, mp4-muxer, webm-muxer +- **3D Preview**: @google/model-viewer +- **Local Generation**: stable-diffusion.cpp (via sdGenerator) ### Mobile @@ -307,6 +293,20 @@ The application uses the WaveSpeedAI API v3: | `/api/v3/media/upload/binary` | POST | Upload files | | `/api/v3/balance` | GET | Get account balance | +The built-in workflow HTTP server also exposes: + +| Endpoint | Method | Description | +| ---------------------------- | ------ | ------------------------------------ | +| `/api/health` | GET | Health check | +| `/api/workflows/{id}/run` | POST | Trigger a workflow execution via API | +| `/api/workflows/{id}/schema` 
| GET | Get workflow input/output schema | +| `/schema` | GET | Get active workflow schema | +| `POST /` (any path) | POST | Run the active workflow | + +Add an HTTP Trigger node to your workflow to define the API input schema (each field becomes an output port), and optionally add an HTTP Response node to customize the response. Start the server from the workflow canvas — it listens on a configurable port (default `3100`) with CORS enabled. + +This turns any workflow into a callable REST endpoint, making it easy to integrate with [OpenClaw](https://github.com/anthropics/openclaw) or other AI agent frameworks as a skill server. For example, an OpenClaw agent can call `GET /api/workflows/{id}/schema` to discover the workflow's input/output contract, then `POST /api/workflows/{id}/run` with the required fields to execute the pipeline and receive results. + ## Contributing 1. Fork the repository diff --git a/electron/main.ts b/electron/main.ts index f8fb7603..6540946f 100644 --- a/electron/main.ts +++ b/electron/main.ts @@ -671,6 +671,49 @@ ipcMain.handle("select-directory", async () => { return { success: true, path: result.filePaths[0] }; }); +// Directory Import node — pick a directory for media scanning +ipcMain.handle("pick-directory", async () => { + const focusedWindow = BrowserWindow.getFocusedWindow(); + if (!focusedWindow) return { success: false, error: "No focused window" }; + + const result = await dialog.showOpenDialog(focusedWindow, { + properties: ["openDirectory"], + title: "Select Media Directory", + }); + + if (result.canceled || !result.filePaths[0]) { + return { success: false, canceled: true }; + } + + return { success: true, path: result.filePaths[0] }; +}); + +// Directory Import node — scan a directory for media files +ipcMain.handle( + "scan-directory", + async (_, dirPath: string, allowedExts: string[]) => { + const { readdirSync } = require("fs"); + const { join, extname } = require("path"); + + const extSet = new Set(allowedExts.map((e: 
string) => e.toLowerCase())); + const results: string[] = []; + try { + const entries = readdirSync(dirPath, { withFileTypes: true }); + for (const entry of entries) { + if (entry.isFile()) { + const ext = extname(entry.name).toLowerCase(); + if (extSet.has(ext)) { + results.push(join(dirPath, entry.name)); + } + } + } + } catch { + // Skip unreadable + } + return results; + }, +); + ipcMain.handle( "save-asset", async (_, url: string, _type: string, fileName: string, subDir: string) => { diff --git a/electron/preload.ts b/electron/preload.ts index b3013a08..7a4a24c5 100644 --- a/electron/preload.ts +++ b/electron/preload.ts @@ -139,6 +139,10 @@ const electronAPI = { ipcRenderer.invoke("get-zimage-output-path"), selectDirectory: (): Promise => ipcRenderer.invoke("select-directory"), + pickDirectory: (): Promise => + ipcRenderer.invoke("pick-directory"), + scanDirectory: (dirPath: string, allowedExts: string[]): Promise => + ipcRenderer.invoke("scan-directory", dirPath, allowedExts), saveAsset: ( url: string, type: string, diff --git a/electron/workflow/db/edge.repo.ts b/electron/workflow/db/edge.repo.ts index 3363ea5b..36332975 100644 --- a/electron/workflow/db/edge.repo.ts +++ b/electron/workflow/db/edge.repo.ts @@ -12,11 +12,12 @@ function rowToEdge(row: unknown[]): WorkflowEdge { sourceOutputKey: row[3] as string, targetNodeId: row[4] as string, targetInputKey: row[5] as string, + isInternal: row[6] === 1, }; } const EDGE_COLS = - "id, workflow_id, source_node_id, source_output_key, target_node_id, target_input_key"; + "id, workflow_id, source_node_id, source_output_key, target_node_id, target_input_key, is_internal"; export function getEdgesByWorkflowId(workflowId: string): WorkflowEdge[] { const db = getDatabase(); @@ -43,3 +44,12 @@ export function deleteEdge(edgeId: string): void { db.run("DELETE FROM edges WHERE id = ?", [edgeId]); persistDatabase(); } +export function getInternalEdges(workflowId: string): WorkflowEdge[] { + const db = getDatabase(); + 
const result = db.exec( + `SELECT ${EDGE_COLS} FROM edges WHERE workflow_id = ? AND is_internal = 1`, + [workflowId], + ); + if (!result.length) return []; + return result[0].values.map(rowToEdge); +} diff --git a/electron/workflow/db/node.repo.ts b/electron/workflow/db/node.repo.ts index e46d8a9f..5d27567f 100644 --- a/electron/workflow/db/node.repo.ts +++ b/electron/workflow/db/node.repo.ts @@ -4,21 +4,39 @@ import { getDatabase, persistDatabase } from "./connection"; import type { WorkflowNode } from "../../../src/workflow/types/workflow"; -export function getNodesByWorkflowId(workflowId: string): WorkflowNode[] { - const db = getDatabase(); - const result = db.exec( - "SELECT id, workflow_id, node_type, position_x, position_y, params, current_output_id FROM nodes WHERE workflow_id = ?", - [workflowId], - ); - if (!result.length) return []; - return result[0].values.map((row) => ({ +const NODE_COLS = + "id, workflow_id, node_type, position_x, position_y, params, current_output_id, parent_node_id"; + +function rowToNode(row: unknown[]): WorkflowNode { + return { id: row[0] as string, workflowId: row[1] as string, nodeType: row[2] as string, position: { x: row[3] as number, y: row[4] as number }, params: JSON.parse(row[5] as string), currentOutputId: row[6] as string | null, - })); + parentNodeId: (row[7] as string | null) ?? 
null, + }; +} + +export function getNodesByWorkflowId(workflowId: string): WorkflowNode[] { + const db = getDatabase(); + const result = db.exec( + `SELECT ${NODE_COLS} FROM nodes WHERE workflow_id = ?`, + [workflowId], + ); + if (!result.length) return []; + return result[0].values.map(rowToNode); +} + +export function getChildNodes(parentNodeId: string): WorkflowNode[] { + const db = getDatabase(); + const result = db.exec( + `SELECT ${NODE_COLS} FROM nodes WHERE parent_node_id = ?`, + [parentNodeId], + ); + if (!result.length) return []; + return result[0].values.map(rowToNode); } export function updateNodeParams( diff --git a/electron/workflow/db/schema.ts b/electron/workflow/db/schema.ts index 13561ca6..06038446 100644 --- a/electron/workflow/db/schema.ts +++ b/electron/workflow/db/schema.ts @@ -86,6 +86,38 @@ const migrations: NamedMigration[] = [ } }, }, + { + id: "004_add_iterator_support", + apply: (db: SqlJsDatabase) => { + console.log("[Schema] Applying migration: 004_add_iterator_support"); + // Add parent_node_id to nodes + const nodeCols = db.exec("PRAGMA table_info(nodes)"); + const hasParentNodeId = nodeCols[0]?.values?.some( + (row) => row[1] === "parent_node_id", + ); + if (!hasParentNodeId) { + db.run( + "ALTER TABLE nodes ADD COLUMN parent_node_id TEXT REFERENCES nodes(id) ON DELETE SET NULL", + ); + db.run( + "CREATE INDEX IF NOT EXISTS idx_nodes_parent ON nodes(parent_node_id)", + ); + } + // Add is_internal to edges + const edgeCols = db.exec("PRAGMA table_info(edges)"); + const hasIsInternal = edgeCols[0]?.values?.some( + (row) => row[1] === "is_internal", + ); + if (!hasIsInternal) { + db.run( + "ALTER TABLE edges ADD COLUMN is_internal INTEGER NOT NULL DEFAULT 0 CHECK (is_internal IN (0, 1))", + ); + db.run( + "CREATE INDEX IF NOT EXISTS idx_edges_internal ON edges(is_internal)", + ); + } + }, + }, ]; export function initializeSchema(db: SqlJsDatabase): void { @@ -112,6 +144,7 @@ export function initializeSchema(db: SqlJsDatabase): void { 
position_y REAL NOT NULL, params TEXT NOT NULL DEFAULT '{}', current_output_id TEXT, + parent_node_id TEXT REFERENCES nodes(id) ON DELETE SET NULL, FOREIGN KEY (current_output_id) REFERENCES node_executions(id) ON DELETE SET NULL )`); @@ -138,6 +171,7 @@ export function initializeSchema(db: SqlJsDatabase): void { source_output_key TEXT NOT NULL, target_node_id TEXT NOT NULL REFERENCES nodes(id) ON DELETE CASCADE, target_input_key TEXT NOT NULL, + is_internal INTEGER NOT NULL DEFAULT 0 CHECK (is_internal IN (0, 1)), UNIQUE(source_node_id, source_output_key, target_node_id, target_input_key) )`); @@ -202,6 +236,10 @@ export function initializeSchema(db: SqlJsDatabase): void { db.run( "CREATE INDEX IF NOT EXISTS idx_wf_edges_target ON edges(target_node_id)", ); + db.run( + "CREATE INDEX IF NOT EXISTS idx_nodes_parent ON nodes(parent_node_id)", + ); + db.run("CREATE INDEX IF NOT EXISTS idx_edges_internal ON edges(is_internal)"); db.run( "CREATE INDEX IF NOT EXISTS idx_wf_daily_spend_date ON daily_spend(date)", ); diff --git a/electron/workflow/db/workflow.repo.ts b/electron/workflow/db/workflow.repo.ts index 4b38a625..1be09a0b 100644 --- a/electron/workflow/db/workflow.repo.ts +++ b/electron/workflow/db/workflow.repo.ts @@ -173,7 +173,7 @@ export function updateWorkflow( for (const node of uniqueNodes) { db.run( - `INSERT INTO nodes (id, workflow_id, node_type, position_x, position_y, params, current_output_id) VALUES (?, ?, ?, ?, ?, ?, NULL)`, + `INSERT INTO nodes (id, workflow_id, node_type, position_x, position_y, params, current_output_id, parent_node_id) VALUES (?, ?, ?, ?, ?, ?, NULL, ?)`, [ node.id, id, @@ -181,12 +181,13 @@ export function updateWorkflow( node.position.x, node.position.y, JSON.stringify(node.params), + node.parentNodeId ?? 
null, ], ); } for (const edge of uniqueEdges) { db.run( - `INSERT INTO edges (id, workflow_id, source_node_id, source_output_key, target_node_id, target_input_key) VALUES (?, ?, ?, ?, ?, ?)`, + `INSERT INTO edges (id, workflow_id, source_node_id, source_output_key, target_node_id, target_input_key, is_internal) VALUES (?, ?, ?, ?, ?, ?, ?)`, [ edge.id, id, @@ -194,6 +195,7 @@ export function updateWorkflow( edge.sourceOutputKey, edge.targetNodeId, edge.targetInputKey, + edge.isInternal ? 1 : 0, ], ); } @@ -284,6 +286,9 @@ export function duplicateWorkflow(sourceId: string): Workflow { id: nodeIdMap.get(n.id)!, workflowId: newWf.id, currentOutputId: null, + parentNodeId: n.parentNodeId + ? (nodeIdMap.get(n.parentNodeId) ?? null) + : null, }), ); diff --git a/electron/workflow/engine/dag-utils.ts b/electron/workflow/engine/dag-utils.ts index 52957af1..70d68dad 100644 --- a/electron/workflow/engine/dag-utils.ts +++ b/electron/workflow/engine/dag-utils.ts @@ -1,7 +1,7 @@ /** * DAG validation — cycle detection using DFS. */ -interface SimpleEdge { +export interface SimpleEdge { sourceNodeId: string; targetNodeId: string; } @@ -38,3 +38,89 @@ export function wouldCreateCycle( ): boolean { return hasCycle(nodeIds, [...edges, newEdge]); } + +/** + * Check whether adding `newEdge` would create a cycle within a sub-workflow + * defined by `subNodeIds` and `internalEdges`. Only the sub-node scope is + * considered — edges outside the sub-workflow are ignored. + */ +export function wouldCreateCycleInSubWorkflow( + subNodeIds: string[], + internalEdges: SimpleEdge[], + newEdge: SimpleEdge, +): boolean { + return hasCycle(subNodeIds, [...internalEdges, newEdge]); +} + +/** + * Return edges that cross the iterator boundary (one endpoint inside, one + * outside), remapped so the inside endpoint is replaced with `iteratorNodeId`. + * This lets the outer DAG treat the iterator as a single node. 
+ */ +export function getExternalEdges( + allEdges: SimpleEdge[], + iteratorNodeId: string, + childNodeIds: string[], +): SimpleEdge[] { + const childSet = new Set(childNodeIds); + const result: SimpleEdge[] = []; + + for (const edge of allEdges) { + const srcInside = childSet.has(edge.sourceNodeId); + const tgtInside = childSet.has(edge.targetNodeId); + + if (srcInside && !tgtInside) { + // Edge going from inside the iterator to outside — remap source + result.push({ + sourceNodeId: iteratorNodeId, + targetNodeId: edge.targetNodeId, + }); + } else if (!srcInside && tgtInside) { + // Edge going from outside into the iterator — remap target + result.push({ + sourceNodeId: edge.sourceNodeId, + targetNodeId: iteratorNodeId, + }); + } + // Both inside → internal edge, skip + // Both outside → not related to this iterator, skip + } + + return result; +} + +/** + * Build the node list and edge list for outer-DAG validation. Iterator child + * nodes are removed and their boundary-crossing edges are remapped onto the + * iterator node ID. Fully internal edges are dropped. + * + * `iterators` is an array of `{ iteratorNodeId, childNodeIds }` — one entry + * per iterator node in the workflow. 
+ */ +export function buildOuterDAGView( + allNodeIds: string[], + allEdges: SimpleEdge[], + iterators: { iteratorNodeId: string; childNodeIds: string[] }[], +): { nodeIds: string[]; edges: SimpleEdge[] } { + // Collect all child node IDs across every iterator + const allChildIds = new Set(); + for (const it of iterators) { + for (const cid of it.childNodeIds) allChildIds.add(cid); + } + + // Outer node list: exclude child nodes (iterators themselves stay) + const nodeIds = allNodeIds.filter((id) => !allChildIds.has(id)); + + // Start with edges that don't touch any child node at all + const edges: SimpleEdge[] = allEdges.filter( + (e) => !allChildIds.has(e.sourceNodeId) && !allChildIds.has(e.targetNodeId), + ); + + // Add remapped external edges for each iterator + for (const it of iterators) { + const ext = getExternalEdges(allEdges, it.iteratorNodeId, it.childNodeIds); + edges.push(...ext); + } + + return { nodeIds, edges }; +} diff --git a/electron/workflow/engine/executor.ts b/electron/workflow/engine/executor.ts index 2c917cea..16af463c 100644 --- a/electron/workflow/engine/executor.ts +++ b/electron/workflow/engine/executor.ts @@ -19,6 +19,7 @@ import { getFileStorageInstance } from "../utils/file-storage"; import { saveWorkflowResultToAssets } from "../utils/save-to-assets"; import { getWorkflowById } from "../db/workflow.repo"; import type { NodeExecutionContext, NodeExecutionResult } from "../nodes/base"; +import { isTriggerHandler } from "../nodes/trigger/base"; import type { NodeStatus } from "../../../src/workflow/types/execution"; import type { WorkflowNode, @@ -61,20 +62,117 @@ export class ExecutionEngine { private callbacks: ExecutionCallbacks, ) {} - /** Run all nodes in topological order. */ - async runAll(workflowId: string): Promise { - const nodes = getNodesByWorkflowId(workflowId); - const edges = getEdgesByWorkflowId(workflowId); + /** Run all nodes in topological order. Detects trigger nodes for batch execution. 
+ * Returns collected HTTP response data if an HTTP Response node exists. */ + async runAll( + workflowId: string, + triggerValue?: Record, + ): Promise | void> { + const allNodes = getNodesByWorkflowId(workflowId); + const allEdges = getEdgesByWorkflowId(workflowId); + + // Exclude child nodes (executed internally by their parent Group handler) + const nodes = allNodes.filter((n) => !n.parentNodeId); + // Exclude internal edges (edges between sub-nodes inside a Group) + const edges = allEdges.filter((e) => !e.isInternal); + + // If triggerValue is provided (from HTTP server), inject it into the HTTP Trigger node + const httpTriggerNode = nodes.find((n) => n.nodeType === "trigger/http"); + if (triggerValue && httpTriggerNode) { + httpTriggerNode.params = { + ...httpTriggerNode.params, + __triggerValue: triggerValue, + }; + } + + // Detect batch trigger node (e.g. directory trigger) + const triggerNode = nodes.find( + (n) => n.nodeType.startsWith("trigger/") && n.nodeType !== "trigger/http", + ); + const triggerHandler = triggerNode + ? this.registry.getHandler(triggerNode.nodeType) + : undefined; + + if ( + triggerNode && + triggerHandler && + isTriggerHandler(triggerHandler) && + triggerHandler.triggerMode === "batch" && + triggerHandler.getItems + ) { + // Batch execution: get all items, run workflow once per item + const items = await triggerHandler.getItems(triggerNode.params); + console.log( + `[Executor] Batch trigger: ${items.length} items from ${triggerNode.nodeType}`, + ); + + for (let i = 0; i < items.length; i++) { + const item = items[i]; + this.callbacks.onProgress( + workflowId, + triggerNode.id, + ((i + 1) / items.length) * 100, + `Processing ${item.label ?? 
`item ${i + 1}`} (${i + 1}/${items.length})`, + ); + + const originalParams = { ...triggerNode.params }; + triggerNode.params = { + ...triggerNode.params, + __triggerValue: item.value, + }; + + try { + await this.runWorkflowOnce(workflowId, nodes, edges); + } finally { + triggerNode.params = originalParams; + } + } + return; + } + + // Single execution + const failures = await this.runWorkflowOnce(workflowId, nodes, edges); + + // Collect HTTP Response node result if present + const httpResponse = this.collectHttpResponse(nodes); + if (httpResponse) return httpResponse; + + // If there were failures and no HTTP Response was collected, return error info + if (failures.length > 0) { + const firstReal = failures.find((f) => !f.error.startsWith("Skipped")); + const errMsg = firstReal?.error ?? failures[0].error; + return { + statusCode: 500, + body: { + error_msg: errMsg, + }, + }; + } + + return undefined; + } + + /** + * Execute the workflow graph once in topological order. + * Extracted from the old runAll so it can be called in a loop for batch triggers. + * Returns an array of { nodeId, nodeType, error } for any failed nodes. + */ + private async runWorkflowOnce( + workflowId: string, + nodes: WorkflowNode[], + edges: WorkflowEdge[], + ): Promise> { const nodeIds = nodes.map((n) => n.id); const simpleEdges = edges.map((e) => ({ sourceNodeId: e.sourceNodeId, targetNodeId: e.targetNodeId, })); - // Cost estimate (for UI only) is done via cost:estimate IPC; we don't block runs on budget since actual API cost varies by inputs. 
const levels = topologicalLevels(nodeIds, simpleEdges); const nodeMap = new Map(nodes.map((n) => [n.id, n])); const failedNodes = new Set(); + const failures: Array<{ nodeId: string; nodeType: string; error: string }> = + []; // Build upstream dependency map for quick lookup const upstreamMap = new Map(); @@ -85,17 +183,20 @@ export class ExecutionEngine { } for (const level of levels) { - // Stop the entire workflow if any node has failed if (failedNodes.size > 0) break; const batch = level.slice(0, MAX_PARALLEL_EXECUTIONS); await Promise.all( batch.map(async (nodeId) => { - // If another node in this batch failed, skip remaining if (failedNodes.size > 0) return; - // Skip if any upstream node failed const upstreams = upstreamMap.get(nodeId) ?? []; if (upstreams.some((uid) => failedNodes.has(uid))) { failedNodes.add(nodeId); + const node = nodeMap.get(nodeId); + failures.push({ + nodeId, + nodeType: node?.nodeType ?? "unknown", + error: "Skipped: upstream node failed", + }); this.callbacks.onNodeStatus( workflowId, nodeId, @@ -111,23 +212,84 @@ export class ExecutionEngine { edges, true, ); - if (!success) failedNodes.add(nodeId); + if (!success) { + failedNodes.add(nodeId); + const node = nodeMap.get(nodeId); + // Retrieve error message from the latest execution + const errMsg = node?.currentOutputId + ? (() => { + const exec = getExecutionById(node.currentOutputId); + if (!exec?.resultMetadata) return "Execution failed"; + const meta = + typeof exec.resultMetadata === "string" + ? JSON.parse(exec.resultMetadata) + : exec.resultMetadata; + return (meta.error as string) ?? "Execution failed"; + })() + : "Execution failed"; + failures.push({ + nodeId, + nodeType: node?.nodeType ?? "unknown", + error: errMsg, + }); + } }), ); } + + return failures; + } + + /** + * After workflow execution, find the HTTP Response node and extract its result. + * Returns the response body or undefined if no HTTP Response node exists. 
+ */ + private collectHttpResponse( + nodes: WorkflowNode[], + ): Record | undefined { + const responseNode = nodes.find( + (n) => n.nodeType === "output/http-response", + ); + console.log( + `[Executor] collectHttpResponse: responseNode=${responseNode?.id}, currentOutputId=${responseNode?.currentOutputId}`, + ); + if (!responseNode?.currentOutputId) return undefined; + + const execution = getExecutionById(responseNode.currentOutputId); + console.log( + `[Executor] collectHttpResponse: execution status=${execution?.status}, hasMeta=${!!execution?.resultMetadata}`, + ); + if (!execution?.resultMetadata) return undefined; + + const meta = + typeof execution.resultMetadata === "string" + ? JSON.parse(execution.resultMetadata) + : execution.resultMetadata; + + console.log( + `[Executor] collectHttpResponse: meta keys=${Object.keys(meta).join(",")}`, + ); + + const body = meta.__httpResponseBody ?? meta; + return { statusCode: 200, body }; } /** Run a single node, resolving upstream inputs. Always skips cache (user explicitly re-runs). */ async runNode(workflowId: string, nodeId: string): Promise { - const nodes = getNodesByWorkflowId(workflowId); - const edges = getEdgesByWorkflowId(workflowId); + const allNodes = getNodesByWorkflowId(workflowId); + const allEdges = getEdgesByWorkflowId(workflowId); - if (nodes.length === 0) { + if (allNodes.length === 0) { throw new Error( `No nodes found in workflow ${workflowId}. Please ensure the workflow is saved before running nodes.`, ); } + // Include all nodes in the map (needed for resolveInputs to find upstream sources) + // but filter out internal edges so they don't interfere with outer resolution + const nodes = allNodes; + const edges = allEdges.filter((e) => !e.isInternal); + const nodeMap = new Map(nodes.map((n) => [n.id, n])); const node = nodeMap.get(nodeId); @@ -146,15 +308,21 @@ export class ExecutionEngine { /** Continue from a node — execute it and all downstream nodes. 
*/ async continueFrom(workflowId: string, nodeId: string): Promise { - const nodes = getNodesByWorkflowId(workflowId); - const edges = getEdgesByWorkflowId(workflowId); + const allNodes = getNodesByWorkflowId(workflowId); + const allEdges = getEdgesByWorkflowId(workflowId); + + // Exclude child nodes and internal edges from outer workflow execution + const nodes = allNodes.filter((n) => !n.parentNodeId); + const edges = allEdges.filter((e) => !e.isInternal); + const nodeIds = nodes.map((n) => n.id); const simpleEdges = edges.map((e) => ({ sourceNodeId: e.sourceNodeId, targetNodeId: e.targetNodeId, })); const downstream = downstreamNodes(nodeId, nodeIds, simpleEdges); - const nodeMap = new Map(nodes.map((n) => [n.id, n])); + // Use all nodes in the map so resolveInputs can find upstream sources + const nodeMap = new Map(allNodes.map((n) => [n.id, n])); const levels = topologicalLevels(nodeIds, simpleEdges); let stopped = false; @@ -175,9 +343,10 @@ export class ExecutionEngine { throw new Error(`Circuit breaker tripped for node ${nodeId}`); } - const nodes = getNodesByWorkflowId(workflowId); - const edges = getEdgesByWorkflowId(workflowId); - const nodeMap = new Map(nodes.map((n) => [n.id, n])); + const allNodes = getNodesByWorkflowId(workflowId); + const allEdges = getEdgesByWorkflowId(workflowId); + const edges = allEdges.filter((e) => !e.isInternal); + const nodeMap = new Map(allNodes.map((n) => [n.id, n])); const node = nodeMap.get(nodeId); if (!node) throw new Error(`Node ${nodeId} not found`); @@ -210,8 +379,13 @@ export class ExecutionEngine { /** Mark all downstream nodes as needing re-execution. 
*/ markDownstreamStale(workflowId: string, nodeId: string): string[] { - const nodes = getNodesByWorkflowId(workflowId); - const edges = getEdgesByWorkflowId(workflowId); + const allNodes = getNodesByWorkflowId(workflowId); + const allEdges = getEdgesByWorkflowId(workflowId); + + // Exclude child nodes and internal edges from outer workflow graph + const nodes = allNodes.filter((n) => !n.parentNodeId); + const edges = allEdges.filter((e) => !e.isInternal); + const nodeIds = nodes.map((n) => n.id); const simpleEdges = edges.map((e) => ({ sourceNodeId: e.sourceNodeId, @@ -494,6 +668,17 @@ export class ExecutionEngine { ? JSON.parse(execution.resultMetadata) : execution.resultMetadata; outputValue = meta[edge.sourceOutputKey]; + // Debug: log when resolving from a group/iterator node + if (sourceNode.nodeType === "control/iterator") { + console.log( + `[Executor] resolveInputs from group: sourceOutputKey="${edge.sourceOutputKey}", meta keys=`, + Object.keys(meta), + "value=", + outputValue !== undefined + ? String(outputValue).slice(0, 100) + : "undefined", + ); + } // Fallback: if not found by handle key, try 'resultUrl' (common pattern) if (outputValue === undefined) outputValue = meta.resultUrl; } diff --git a/electron/workflow/index.ts b/electron/workflow/index.ts index 996ba31c..899e95a4 100644 --- a/electron/workflow/index.ts +++ b/electron/workflow/index.ts @@ -27,6 +27,8 @@ import { registerUploadIpc } from "./ipc/upload.ipc"; import { registerSettingsIpc } from "./ipc/settings.ipc"; import { registerFreeToolIpc } from "./ipc/free-tool.ipc"; import { registerTemplateIpc } from "./ipc/template.ipc"; +import { registerHttpServerIpc } from "./ipc/http-server.ipc"; +import { setHttpServerEngine } from "./services/http-server"; import { migrateTemplatesFromLocalStorage } from "./services/template-migration"; import { initializeDefaultTemplates } from "./services/template-init"; @@ -79,6 +81,7 @@ export async function initWorkflowModule(): Promise { // 5. 
Wire up singletons setExecutionEngine(engine); + setHttpServerEngine(engine); setCostDeps(costService, nodeRegistry); setMarkDownstreamStale((workflowId, nodeId) => engine.markDownstreamStale(workflowId, nodeId), @@ -94,6 +97,7 @@ export async function initWorkflowModule(): Promise { registerSettingsIpc(); registerFreeToolIpc(); registerTemplateIpc(); + registerHttpServerIpc(); // 7. Migrate templates from localStorage (if needed) try { diff --git a/electron/workflow/ipc/http-server.ipc.ts b/electron/workflow/ipc/http-server.ipc.ts new file mode 100644 index 00000000..d9b003e8 --- /dev/null +++ b/electron/workflow/ipc/http-server.ipc.ts @@ -0,0 +1,26 @@ +/** + * IPC handlers for the global HTTP server — start, stop, status. + */ +import { ipcMain } from "electron"; +import { + startHttpServer, + stopHttpServer, + getHttpServerStatus, +} from "../services/http-server"; + +export function registerHttpServerIpc(): void { + ipcMain.handle( + "http-server:start", + async (_event, args: { port?: number; workflowId?: string }) => { + return startHttpServer(args?.port ?? 3100, args?.workflowId); + }, + ); + + ipcMain.handle("http-server:stop", async () => { + return stopHttpServer(); + }); + + ipcMain.handle("http-server:status", async () => { + return getHttpServerStatus(); + }); +} diff --git a/electron/workflow/nodes/control/subgraph.ts b/electron/workflow/nodes/control/subgraph.ts new file mode 100644 index 00000000..c3d9f20e --- /dev/null +++ b/electron/workflow/nodes/control/subgraph.ts @@ -0,0 +1,267 @@ +/** + * Iterator node — now simplified to a Group container. + * + * Executes its internal sub-workflow exactly ONCE (no iteration). + * Batch/repeat logic is handled by Trigger nodes and Run Count at the engine level. 
+ * + * This node is purely an organizational/encapsulation tool: + * - Contains child nodes (sub-workflow) + * - Routes external inputs to child nodes via exposedInputs + * - Collects child node outputs via exposedOutputs + * + * The type remains "control/iterator" for backward compatibility with + * existing workflows and frontend components. The label shown in the UI + * is "Group" (via i18n). + */ +import { + BaseNodeHandler, + type NodeExecutionContext, + type NodeExecutionResult, +} from "../base"; +import type { NodeTypeDefinition } from "../../../../src/workflow/types/node-defs"; +import type { ExposedParam } from "../../../../src/workflow/types/workflow"; +import type { NodeRegistry } from "../registry"; +import { getChildNodes } from "../../db/node.repo"; +import { getInternalEdges } from "../../db/edge.repo"; +import { topologicalLevels } from "../../engine/scheduler"; + +export const subgraphDef: NodeTypeDefinition = { + type: "control/iterator", + category: "control", + label: "Group", + inputs: [], + outputs: [], + params: [ + // Legacy params kept for backward compat — ignored at runtime + { + key: "iterationCount", + label: "Iteration Count", + type: "number", + default: 1, + validation: { min: 1 }, + }, + { + key: "iterationMode", + label: "Iteration Mode", + type: "string", + default: "fixed", + }, + { + key: "exposedInputs", + label: "Exposed Inputs", + type: "string", + default: "[]", + }, + { + key: "exposedOutputs", + label: "Exposed Outputs", + type: "string", + default: "[]", + }, + ], +}; + +export class SubgraphNodeHandler extends BaseNodeHandler { + constructor(private registry: NodeRegistry) { + super(subgraphDef); + } + + async execute(ctx: NodeExecutionContext): Promise { + const start = Date.now(); + + const exposedInputs = this.parseExposedParams(ctx.params.exposedInputs); + const exposedOutputs = this.parseExposedParams(ctx.params.exposedOutputs); + + // Load child nodes and internal edges + const childNodes = 
getChildNodes(ctx.nodeId); + const internalEdges = getInternalEdges(ctx.workflowId); + + const childNodeIds = childNodes.map((n) => n.id); + const childNodeIdSet = new Set(childNodeIds); + const relevantEdges = internalEdges.filter( + (e) => + childNodeIdSet.has(e.sourceNodeId) && + childNodeIdSet.has(e.targetNodeId), + ); + + if (childNodes.length === 0) { + return { + status: "success", + outputs: {}, + durationMs: Date.now() - start, + cost: 0, + }; + } + + // Topologically sort child nodes + const simpleEdges = relevantEdges.map((e) => ({ + sourceNodeId: e.sourceNodeId, + targetNodeId: e.targetNodeId, + })); + const levels = topologicalLevels(childNodeIds, simpleEdges); + const childNodeMap = new Map(childNodes.map((n) => [n.id, n])); + + // Route external inputs to child nodes + const inputRouting = new Map>(); + for (const ep of exposedInputs) { + const externalValue = ctx.inputs[ep.namespacedKey]; + if (externalValue !== undefined) { + if (!inputRouting.has(ep.subNodeId)) { + inputRouting.set(ep.subNodeId, new Map()); + } + inputRouting.get(ep.subNodeId)!.set(ep.paramKey, externalValue); + } + } + + // Execute child nodes level by level — single pass, no iteration + const subNodeOutputs = new Map>(); + let totalCost = 0; + + for (const level of levels) { + for (const subNodeId of level) { + const subNode = childNodeMap.get(subNodeId); + if (!subNode) continue; + + const handler = this.registry.getHandler(subNode.nodeType); + if (!handler) { + return { + status: "error", + outputs: {}, + durationMs: Date.now() - start, + cost: totalCost, + error: `No handler for sub-node type: ${subNode.nodeType} (node: ${subNodeId})`, + }; + } + + // Build params: base + external inputs + const subParams: Record = { ...subNode.params }; + const externalInputs = inputRouting.get(subNodeId); + if (externalInputs) { + for (const [paramKey, value] of externalInputs) { + subParams[paramKey] = value; + } + } + + // Resolve internal edge inputs + const subInputs = 
this.resolveSubNodeInputs( + subNodeId, + relevantEdges, + subNodeOutputs, + ); + + const subCtx: NodeExecutionContext = { + nodeId: subNodeId, + nodeType: subNode.nodeType, + params: subParams, + inputs: subInputs, + workflowId: ctx.workflowId, + abortSignal: ctx.abortSignal, + onProgress: (_progress, message) => { + ctx.onProgress(_progress, message); + }, + }; + + try { + const result = await handler.execute(subCtx); + totalCost += result.cost; + + if (result.status === "error") { + return { + status: "error", + outputs: {}, + durationMs: Date.now() - start, + cost: totalCost, + error: `Sub-node ${subNodeId} failed: ${result.error || "Unknown error"}`, + }; + } + + console.log( + `[Iterator] Sub-node ${subNodeId} (${subNode.nodeType}) outputs:`, + JSON.stringify(result.outputs).slice(0, 300), + ); + subNodeOutputs.set(subNodeId, result.outputs); + } catch (error) { + return { + status: "error", + outputs: {}, + durationMs: Date.now() - start, + cost: totalCost, + error: `Sub-node ${subNodeId} threw: ${error instanceof Error ? error.message : String(error)}`, + }; + } + } + } + + // Collect exposed outputs — single values, no array aggregation + const outputs: Record = {}; + for (const ep of exposedOutputs) { + const nodeOutputs = subNodeOutputs.get(ep.subNodeId); + console.log( + `[Iterator] Collecting exposedOutput: subNodeId=${ep.subNodeId}, paramKey=${ep.paramKey}, nk=${ep.namespacedKey}, nodeOutputs=`, + nodeOutputs ? 
JSON.stringify(nodeOutputs).slice(0, 200) : "null", + ); + if (nodeOutputs) { + outputs[`output-${ep.namespacedKey}`] = nodeOutputs[ep.paramKey]; + } + } + console.log( + `[Iterator] Final outputs:`, + JSON.stringify(outputs).slice(0, 500), + ); + + return { + status: "success", + outputs, + resultMetadata: { ...outputs }, + durationMs: Date.now() - start, + cost: totalCost, + }; + } + + private resolveSubNodeInputs( + subNodeId: string, + internalEdges: { + sourceNodeId: string; + targetNodeId: string; + sourceOutputKey: string; + targetInputKey: string; + }[], + subNodeOutputs: Map>, + ): Record { + const inputs: Record = {}; + const incomingEdges = internalEdges.filter( + (e) => e.targetNodeId === subNodeId, + ); + + for (const edge of incomingEdges) { + const sourceOutputs = subNodeOutputs.get(edge.sourceNodeId); + if (!sourceOutputs) continue; + + const value = sourceOutputs[edge.sourceOutputKey]; + if (value === undefined) continue; + + const targetKey = edge.targetInputKey; + if (targetKey.startsWith("param-")) { + inputs[targetKey.slice(6)] = value; + } else if (targetKey.startsWith("input-")) { + inputs[targetKey.slice(6)] = value; + } else { + inputs[targetKey] = value; + } + } + + return inputs; + } + + private parseExposedParams(value: unknown): ExposedParam[] { + if (typeof value === "string") { + try { + return JSON.parse(value) as ExposedParam[]; + } catch { + return []; + } + } + if (Array.isArray(value)) return value as ExposedParam[]; + return []; + } +} diff --git a/electron/workflow/nodes/output/http-response.ts b/electron/workflow/nodes/output/http-response.ts new file mode 100644 index 00000000..23122965 --- /dev/null +++ b/electron/workflow/nodes/output/http-response.ts @@ -0,0 +1,119 @@ +/** + * HTTP Response — declares what the workflow returns to the HTTP caller. + * + * Pairs with HTTP Trigger. The user configures "response fields" — each + * field becomes an input port. Connect upstream outputs to these ports. 
+ * After workflow execution, the HTTP server reads this node's collected + * inputs and sends them as the JSON response body. + * + * Example: + * responseFields = [{ "key": "result", "label": "Result", "type": "url" }] + * + * → input port "result" receives the final image URL from upstream + * → HTTP response: { "result": "https://cdn.../output.png" } + */ +import { + BaseNodeHandler, + type NodeExecutionContext, + type NodeExecutionResult, +} from "../base"; +import type { + NodeTypeDefinition, + PortDefinition, + PortDataType, +} from "../../../../src/workflow/types/node-defs"; + +export interface ResponseFieldConfig { + key: string; + label: string; + type: PortDataType; +} + +export function parseResponseFields(raw: unknown): ResponseFieldConfig[] { + if (typeof raw === "string") { + try { + return JSON.parse(raw) as ResponseFieldConfig[]; + } catch { + return []; + } + } + if (Array.isArray(raw)) return raw as ResponseFieldConfig[]; + return []; +} + +export function buildHttpResponseInputDefs( + fields: ResponseFieldConfig[], +): PortDefinition[] { + return fields.map((f) => ({ + key: f.key, + label: f.label || f.key, + dataType: f.type || "any", + required: true, + })); +} + +export const httpResponseDef: NodeTypeDefinition = { + type: "output/http-response", + category: "output", + label: "HTTP Response", + inputs: [], // Dynamic — built from responseFields + outputs: [], + params: [ + { + key: "responseFields", + label: "Response Fields", + type: "textarea", + dataType: "text", + connectable: false, + default: '[{"key":"image","label":"Image","type":"text"}]', + description: + "Define API response fields. Each field becomes an input port.", + }, + ], +}; + +export class HttpResponseHandler extends BaseNodeHandler { + constructor() { + super(httpResponseDef); + } + + /** + * Collect all input values and package them as the response body. + * The HTTP server reads resultMetadata.__httpResponseBody after execution. 
+ */ + async execute(ctx: NodeExecutionContext): Promise { + const start = Date.now(); + const fields = parseResponseFields(ctx.params.responseFields); + + const responseBody: Record = {}; + for (const field of fields) { + const value = ctx.inputs[field.key]; + if (value !== undefined && value !== null) { + responseBody[field.key] = value; + } + } + + // Fallback: if no fields matched, include all inputs directly + if ( + Object.keys(responseBody).length === 0 && + Object.keys(ctx.inputs).length > 0 + ) { + for (const [k, v] of Object.entries(ctx.inputs)) { + if (v !== undefined && v !== null) { + responseBody[k] = v; + } + } + } + + return { + status: "success", + outputs: responseBody, + resultMetadata: { + ...responseBody, + __httpResponseBody: responseBody, + }, + durationMs: Date.now() - start, + cost: 0, + }; + } +} diff --git a/electron/workflow/nodes/register-all.ts b/electron/workflow/nodes/register-all.ts index 228c1974..0f2c4368 100644 --- a/electron/workflow/nodes/register-all.ts +++ b/electron/workflow/nodes/register-all.ts @@ -7,16 +7,42 @@ import { previewDisplayDef, PreviewDisplayHandler } from "./output/preview"; import { registerFreeToolNodes } from "./free-tool/register"; import { concatDef, ConcatHandler } from "./processing/concat"; import { selectDef, SelectHandler } from "./processing/select"; +import { subgraphDef, SubgraphNodeHandler } from "./control/subgraph"; +// Trigger nodes +import { + directoryTriggerDef, + DirectoryTriggerHandler, +} from "./trigger/directory"; +import { httpTriggerDef, HttpTriggerHandler } from "./trigger/http"; +import { httpResponseDef, HttpResponseHandler } from "./output/http-response"; export function registerAllNodes(): void { + // Trigger nodes + nodeRegistry.register(directoryTriggerDef, new DirectoryTriggerHandler()); + nodeRegistry.register(httpTriggerDef, new HttpTriggerHandler()); + + // Input nodes nodeRegistry.register(mediaUploadDef, new MediaUploadHandler()); nodeRegistry.register(textInputDef, new 
TextInputHandler()); + + // AI task nodeRegistry.register(aiTaskDef, new AITaskHandler()); + + // Output nodeRegistry.register(fileExportDef, new FileExportHandler()); nodeRegistry.register(previewDisplayDef, new PreviewDisplayHandler()); + nodeRegistry.register(httpResponseDef, new HttpResponseHandler()); + + // Free tools registerFreeToolNodes(); + + // Processing nodeRegistry.register(concatDef, new ConcatHandler()); nodeRegistry.register(selectDef, new SelectHandler()); + + // Control (Iterator simplified to Group — no iteration, just sub-workflow container) + nodeRegistry.register(subgraphDef, new SubgraphNodeHandler(nodeRegistry)); + console.log( `[Registry] Registered ${nodeRegistry.getAll().length} node types`, ); diff --git a/electron/workflow/nodes/trigger/base.ts b/electron/workflow/nodes/trigger/base.ts new file mode 100644 index 00000000..f4d3386a --- /dev/null +++ b/electron/workflow/nodes/trigger/base.ts @@ -0,0 +1,43 @@ +/** + * Trigger node base — defines the interface for trigger nodes that drive workflow execution. + * + * Trigger nodes are special input nodes that determine: + * - What data enters the workflow + * - How many times the workflow executes (single vs batch) + * + * A workflow has at most one trigger node. If the trigger is a batch type, + * the engine calls getItems() and executes the full workflow once per item. + */ +import type { NodeHandler } from "../base"; + +export type TriggerMode = "single" | "batch"; + +export interface BatchItem { + /** Unique ID for dedup and progress tracking */ + id: string; + /** The value passed to downstream nodes for this item */ + value: unknown; + /** Display label for UI (e.g. filename, message ID) */ + label?: string; +} + +export interface TriggerHandler extends NodeHandler { + /** Whether this trigger produces a single value or a batch of items */ + readonly triggerMode: TriggerMode; + + /** + * For batch triggers: return all items to iterate over. 
+ * The engine will execute the workflow once per item. + * For single triggers: this is not called. + */ + getItems?(params: Record): Promise; +} + +/** + * Type guard to check if a NodeHandler is a TriggerHandler. + */ +export function isTriggerHandler( + handler: NodeHandler, +): handler is TriggerHandler { + return "triggerMode" in handler; +} diff --git a/electron/workflow/nodes/trigger/directory.ts b/electron/workflow/nodes/trigger/directory.ts new file mode 100644 index 00000000..44697437 --- /dev/null +++ b/electron/workflow/nodes/trigger/directory.ts @@ -0,0 +1,160 @@ +/** + * Directory Trigger — scans a local directory for media files. + * Batch trigger: the engine executes the workflow once per file. + * + * Unlike the old directory-import node which output an array, + * this trigger produces one item per file. Each workflow execution + * receives a single local-asset:// URL. + */ +import { + BaseNodeHandler, + type NodeExecutionContext, + type NodeExecutionResult, +} from "../base"; +import type { NodeTypeDefinition } from "../../../../src/workflow/types/node-defs"; +import type { TriggerHandler, TriggerMode, BatchItem } from "./base"; +import { readdirSync, existsSync } from "fs"; +import { join, extname, basename } from "path"; + +const MEDIA_EXTENSIONS: Record = { + image: [ + ".jpg", + ".jpeg", + ".png", + ".webp", + ".gif", + ".bmp", + ".tiff", + ".tif", + ".svg", + ".avif", + ], + video: [".mp4", ".webm", ".mov", ".avi", ".mkv", ".flv", ".wmv", ".m4v"], + audio: [".mp3", ".wav", ".flac", ".m4a", ".ogg", ".aac", ".wma"], + all: [], +}; +MEDIA_EXTENSIONS.all = [ + ...MEDIA_EXTENSIONS.image, + ...MEDIA_EXTENSIONS.video, + ...MEDIA_EXTENSIONS.audio, +]; + +export const directoryTriggerDef: NodeTypeDefinition = { + type: "trigger/directory", + category: "trigger", + label: "Directory Trigger", + inputs: [], + outputs: [{ key: "output", label: "File", dataType: "url", required: true }], + params: [ + { + key: "directoryPath", + label: "Directory", + 
type: "string", + dataType: "text", + connectable: false, + default: "", + }, + { + key: "mediaType", + label: "File Type", + type: "select", + dataType: "text", + connectable: false, + default: "image", + options: [ + { label: "Images", value: "image" }, + { label: "Videos", value: "video" }, + { label: "Audio", value: "audio" }, + { label: "All Media", value: "all" }, + ], + }, + ], +}; + +export class DirectoryTriggerHandler + extends BaseNodeHandler + implements TriggerHandler +{ + readonly triggerMode: TriggerMode = "batch"; + + constructor() { + super(directoryTriggerDef); + } + + /** + * Return all files in the directory as batch items. + * Each item is a single local-asset:// URL. + */ + async getItems(params: Record): Promise { + const dirPath = String(params.directoryPath ?? "").trim(); + const mediaType = String(params.mediaType ?? "image"); + + if (!dirPath || !existsSync(dirPath)) return []; + + const allowedExts = new Set( + MEDIA_EXTENSIONS[mediaType] ?? MEDIA_EXTENSIONS.all, + ); + const files = scanDirectory(dirPath, allowedExts); + files.sort(); + + return files.map((filePath) => ({ + id: filePath, + value: `local-asset://${encodeURIComponent(filePath)}`, + label: basename(filePath), + })); + } + + /** + * Execute for a single file. When called by the engine in batch mode, + * ctx.params.__triggerValue contains the current item's value. + */ + async execute(ctx: NodeExecutionContext): Promise { + const start = Date.now(); + + // In batch mode, the engine injects the current item value + const triggerValue = ctx.params.__triggerValue as string | undefined; + const url = triggerValue ?? ""; + + if (!url) { + return { + status: "error", + outputs: {}, + durationMs: Date.now() - start, + cost: 0, + error: "No file provided.", + }; + } + + return { + status: "success", + outputs: { output: url }, + resultPath: url, + resultMetadata: { + output: url, + resultUrl: url, + resultUrls: [url], + mediaType: ctx.params.mediaType ?? 
"image", + }, + durationMs: Date.now() - start, + cost: 0, + }; + } +} + +function scanDirectory(dir: string, allowedExts: Set): string[] { + const results: string[] = []; + try { + const entries = readdirSync(dir, { withFileTypes: true }); + for (const entry of entries) { + if (entry.isFile()) { + const ext = extname(entry.name).toLowerCase(); + if (allowedExts.has(ext)) { + results.push(join(dir, entry.name)); + } + } + } + } catch { + // Skip unreadable directories + } + return results; +} diff --git a/electron/workflow/nodes/trigger/http.ts b/electron/workflow/nodes/trigger/http.ts new file mode 100644 index 00000000..83ff79ee --- /dev/null +++ b/electron/workflow/nodes/trigger/http.ts @@ -0,0 +1,153 @@ +/** + * HTTP Trigger — declares the API input schema for a workflow. + * + * This node does NOT start a server itself. Instead, a global HTTP server + * service routes incoming requests to workflows that contain an HTTP Trigger. + * + * The user configures "output fields" — each field becomes an output port + * on the canvas that can be connected to downstream nodes. When a request + * arrives, the server extracts the matching JSON body fields and injects + * them as this node's outputs. + * + * Example: + * outputFields = [ + * { "key": "image", "label": "Image", "type": "url" }, + * { "key": "prompt", "label": "Prompt", "type": "text" } + * ] + * + * POST /api/workflows/{id}/run + * { "image": "https://...", "prompt": "a cat" } + * + * → output port "image" = "https://..." 
+ * → output port "prompt" = "a cat" + */ +import { + BaseNodeHandler, + type NodeExecutionContext, + type NodeExecutionResult, +} from "../base"; +import type { + NodeTypeDefinition, + PortDefinition, + PortDataType, +} from "../../../../src/workflow/types/node-defs"; +import type { TriggerHandler, TriggerMode } from "./base"; + +export interface OutputFieldConfig { + key: string; + label: string; + type: PortDataType; +} + +export function parseOutputFields(raw: unknown): OutputFieldConfig[] { + if (typeof raw === "string") { + try { + return JSON.parse(raw) as OutputFieldConfig[]; + } catch { + return []; + } + } + if (Array.isArray(raw)) return raw as OutputFieldConfig[]; + return []; +} + +export function buildHttpOutputDefs( + fields: OutputFieldConfig[], +): PortDefinition[] { + return fields.map((f) => ({ + key: f.key, + label: f.label || f.key, + dataType: f.type || "any", + required: true, + })); +} + +export const httpTriggerDef: NodeTypeDefinition = { + type: "trigger/http", + category: "trigger", + label: "HTTP Trigger", + inputs: [], + outputs: [], // Dynamic — built from outputFields + params: [ + { + key: "port", + label: "Port", + type: "number", + dataType: "text", + connectable: false, + default: 3100, + description: "HTTP server port number.", + }, + { + key: "outputFields", + label: "Output Fields", + type: "textarea", + dataType: "text", + connectable: false, + default: + '[{"key":"image","label":"Image","type":"url"},{"key":"prompt","label":"Prompt","type":"text"}]', + description: + "Define API input fields. Each field becomes an output port.", + }, + ], +}; + +export class HttpTriggerHandler + extends BaseNodeHandler + implements TriggerHandler +{ + readonly triggerMode: TriggerMode = "single"; + + constructor() { + super(httpTriggerDef); + } + + /** + * Extract values from the injected request body (__triggerValue) + * and output them on the corresponding ports. 
+ */ + async execute(ctx: NodeExecutionContext): Promise<NodeExecutionResult> { + const start = Date.now(); + const fields = parseOutputFields(ctx.params.outputFields); + const body = (ctx.params.__triggerValue ?? {}) as Record<string, unknown>; + + if (fields.length === 0) { + return { + status: "error", + outputs: {}, + durationMs: Date.now() - start, + cost: 0, + error: "No output fields configured.", + }; + } + + const outputs: Record<string, string> = {}; + for (const field of fields) { + const value = body[field.key]; + if (value !== undefined && value !== null && value !== "") { + outputs[field.key] = + typeof value === "string" ? value : JSON.stringify(value); + } + } + + if (Object.keys(outputs).length === 0) { + return { + status: "error", + outputs: {}, + durationMs: Date.now() - start, + cost: 0, + error: `No matching fields in request body. Expected: ${fields.map((f) => f.key).join(", ")}`, + }; + } + + const resultMetadata: Record<string, unknown> = { ...outputs }; + + return { + status: "success", + outputs, + resultMetadata, + durationMs: Date.now() - start, + cost: 0, + }; + } +} diff --git a/electron/workflow/services/http-server.ts b/electron/workflow/services/http-server.ts new file mode 100644 index 00000000..b039438f --- /dev/null +++ b/electron/workflow/services/http-server.ts @@ -0,0 +1,373 @@ +/** + * Global HTTP server — exposes workflows as REST API endpoints. + * + * Uses Node's built-in http module (no external dependencies). + * + * Endpoints: + * POST /api/workflows/:id/run — execute a workflow with JSON body as input + * GET /api/workflows/:id/schema — get the workflow's input/output schema + * GET /api/health — health check + * + * The server only works with workflows that have an HTTP Trigger node. + * The trigger's outputFields define the expected request body fields. + * The HTTP Response node (if present) defines what gets returned. 
+ */ +import * as http from "http"; +import { ExecutionEngine } from "../engine/executor"; +import { getNodesByWorkflowId } from "../db/node.repo"; +import { getEdgesByWorkflowId } from "../db/edge.repo"; +import { getWorkflowById } from "../db/workflow.repo"; +import { getExecutionById } from "../db/execution.repo"; +import { parseOutputFields } from "../nodes/trigger/http"; +import { parseResponseFields } from "../nodes/output/http-response"; + +export interface HttpServerStatus { + running: boolean; + port: number | null; + url: string | null; +} + +let server: http.Server | null = null; +let currentPort: number | null = null; +let engine: ExecutionEngine | null = null; +/** The workflow ID that initiated the server (for the simple POST / route). */ +let activeWorkflowId: string | null = null; + +export function setHttpServerEngine(e: ExecutionEngine): void { + engine = e; +} + +export function getHttpServerStatus(): HttpServerStatus { + return { + running: server !== null && server.listening, + port: currentPort, + url: currentPort ? 
`http://localhost:${currentPort}` : null, + }; +} + +export async function startHttpServer( + port = 3100, + workflowId?: string, +): Promise<HttpServerStatus> { + if (server?.listening) { + // Already running — update active workflow and return current status + if (workflowId) activeWorkflowId = workflowId; + return getHttpServerStatus(); + } + + if (workflowId) activeWorkflowId = workflowId; + + return new Promise((resolve, reject) => { + const srv = http.createServer(handleRequest); + + srv.on("error", (err: NodeJS.ErrnoException) => { + if (err.code === "EADDRINUSE") { + reject(new Error(`Port ${port} is already in use`)); + } else { + reject(err); + } + }); + + srv.listen(port, () => { + server = srv; + currentPort = port; + console.log( + `[HTTP Server] Listening on http://localhost:${port} for workflow ${activeWorkflowId}`, + ); + resolve(getHttpServerStatus()); + }); + }); +} + +export function stopHttpServer(): HttpServerStatus { + if (server) { + server.close(); + server = null; + currentPort = null; + activeWorkflowId = null; + console.log("[HTTP Server] Stopped"); + } + return getHttpServerStatus(); +} + +// ── Request handler ────────────────────────────────────────────────── + +function handleRequest( + req: http.IncomingMessage, + res: http.ServerResponse, +): void { + const url = new URL(req.url ?? "/", `http://localhost`); + const path = url.pathname; + const method = req.method?.toUpperCase() ?? 
"GET"; + + console.log( + `[HTTP Server] ${method} ${path} (activeWorkflowId=${activeWorkflowId})`, + ); + + // CORS headers + res.setHeader("Access-Control-Allow-Origin", "*"); + res.setHeader("Access-Control-Allow-Methods", "GET, POST, OPTIONS"); + res.setHeader("Access-Control-Allow-Headers", "Content-Type"); + + if (method === "OPTIONS") { + res.writeHead(204); + res.end(); + return; + } + + // GET /api/health + if (method === "GET" && path === "/api/health") { + sendJson(res, 200, { status: "ok" }); + return; + } + + // GET /schema — schema for the active workflow + if (method === "GET" && path === "/schema") { + if (!activeWorkflowId) { + sendJson(res, 400, { error: "No active workflow." }); + return; + } + handleGetSchema(activeWorkflowId, res); + return; + } + + // Route: /api/workflows/:id/run or /api/workflows/:id/schema + const runMatch = path.match(/^\/api\/workflows\/([^/]+)\/run$/); + const schemaMatch = path.match(/^\/api\/workflows\/([^/]+)\/schema$/); + + if (runMatch && method === "POST") { + const workflowId = decodeURIComponent(runMatch[1]); + readBody(req) + .then((body) => { + handleRunWorkflow(workflowId, body, res); + }) + .catch((err) => { + sendJson(res, 400, { error: `Invalid request body: ${err.message}` }); + }); + return; + } + + if (schemaMatch && method === "GET") { + const workflowId = decodeURIComponent(schemaMatch[1]); + handleGetSchema(workflowId, res); + return; + } + + // POST to any path — use the active workflow (simple default route) + if (method === "POST") { + if (!activeWorkflowId) { + sendJson(res, 400, { + error: + "No active workflow. 
Start the server from a workflow with an HTTP Trigger.", + }); + return; + } + readBody(req) + .then((body) => { + handleRunWorkflow(activeWorkflowId!, body, res); + }) + .catch((err) => { + sendJson(res, 400, { error: `Invalid request body: ${err.message}` }); + }); + return; + } + + sendJson(res, 404, { error: "Not found" }); +} + +// ── Route handlers + +async function handleRunWorkflow( + workflowId: string, + body: Record<string, unknown>, + res: http.ServerResponse, +): Promise<void> { + if (!engine) { + sendJson(res, 500, { error: "Execution engine not initialized" }); + return; + } + + // Verify workflow exists + const workflow = getWorkflowById(workflowId); + if (!workflow) { + sendJson(res, 404, { error: `Workflow not found: ${workflowId}` }); + return; + } + + // Verify workflow has an HTTP Trigger + const nodes = getNodesByWorkflowId(workflowId); + const httpTrigger = nodes.find((n) => n.nodeType === "trigger/http"); + if (!httpTrigger) { + sendJson(res, 400, { + error: + "This workflow does not have an HTTP Trigger node. Only workflows with HTTP Trigger can be called via API.", + }); + return; + } + + try { + console.log( + `[HTTP Server] Running workflow ${workflowId} with body:`, + JSON.stringify(body).slice(0, 200), + ); + const result = await engine.runAll(workflowId, body); + console.log( + `[HTTP Server] runAll result:`, + JSON.stringify(result)?.slice(0, 500), + ); + + if (result && typeof result === "object") { + const statusCode = (result.statusCode as number) ?? 200; + const responseBody = result.body ?? result; + const bodyObj = responseBody as Record<string, unknown>; + + // If the engine returned an error (e.g. node failure without HTTP Response) + if (statusCode >= 400) { + sendJson(res, statusCode, { + error_msg: String( + bodyObj.error ?? bodyObj.message ?? 
"Workflow execution failed", + ), + }); + return; + } + + // Clean response — remove internal keys + const cleanBody = { ...bodyObj }; + delete cleanBody.statusCode; + if (Object.keys(cleanBody).length === 0) { + const fallback = collectLastNodeOutputs(workflowId); + sendJson(res, 200, fallback); + } else { + sendJson(res, 200, cleanBody); + } + } else { + // No HTTP Response node — collect outputs from terminal nodes + const fallback = collectLastNodeOutputs(workflowId); + sendJson(res, 200, fallback); + } + } catch (err) { + const message = err instanceof Error ? err.message : String(err); + console.error(`[HTTP Server] Workflow execution failed:`, message); + sendJson(res, 500, { error_msg: message }); + } +} + +function handleGetSchema(workflowId: string, res: http.ServerResponse): void { + const workflow = getWorkflowById(workflowId); + if (!workflow) { + sendJson(res, 404, { error: `Workflow not found: ${workflowId}` }); + return; + } + + const nodes = getNodesByWorkflowId(workflowId); + const httpTrigger = nodes.find((n) => n.nodeType === "trigger/http"); + const httpResponse = nodes.find((n) => n.nodeType === "output/http-response"); + + if (!httpTrigger) { + sendJson(res, 400, { + error: "This workflow does not have an HTTP Trigger node.", + }); + return; + } + + const inputFields = parseOutputFields(httpTrigger.params.outputFields); + const outputFields = httpResponse + ? 
parseResponseFields(httpResponse.params.responseFields) + : []; + + sendJson(res, 200, { + workflowId, + name: workflow.name, + inputs: inputFields.map((f) => ({ + key: f.key, + label: f.label, + type: f.type, + })), + outputs: outputFields.map((f) => ({ + key: f.key, + label: f.label, + type: f.type, + })), + }); +} + +function readBody(req: http.IncomingMessage): Promise<Record<string, unknown>> { + return new Promise((resolve, reject) => { + const chunks: Buffer[] = []; + req.on("data", (chunk: Buffer) => chunks.push(chunk)); + req.on("end", () => { + try { + const raw = Buffer.concat(chunks).toString("utf-8"); + resolve(raw ? JSON.parse(raw) : {}); + } catch (err) { + reject(err); + } + }); + req.on("error", reject); + }); +} + +function sendJson( + res: http.ServerResponse, + status: number, + data: Record<string, unknown>, +): void { + res.writeHead(status, { "Content-Type": "application/json" }); + res.end(JSON.stringify(data)); +} + +/** + * Fallback: when no HTTP Response node exists, collect outputs from terminal nodes + * (nodes with no outgoing edges, excluding trigger nodes). + */ +function collectLastNodeOutputs(workflowId: string): Record<string, unknown> { + const nodes = getNodesByWorkflowId(workflowId); + const edges = getEdgesByWorkflowId(workflowId); + + // Find terminal nodes: nodes that are NOT a source of any edge + const sourceIds = new Set(edges.map((e) => e.sourceNodeId)); + const terminalNodes = nodes.filter( + (n) => !sourceIds.has(n.id) && !n.nodeType.startsWith("trigger/"), + ); + + const outputs: Record<string, unknown> = {}; + for (const node of terminalNodes) { + if (!node.currentOutputId) continue; + const exec = getExecutionById(node.currentOutputId); + if (!exec || exec.status !== "success") continue; + + const meta = exec.resultMetadata as Record<string, unknown> | null; + if (!meta) continue; + + // Use resultUrls or resultPath as the output value + const resultUrls = meta.resultUrls as string[] | undefined; + const resultUrl = (meta.resultUrl as string) ?? 
exec.resultPath; + const value = + resultUrls && resultUrls.length > 0 + ? resultUrls.length === 1 + ? resultUrls[0] + : resultUrls + : (resultUrl ?? meta.output); + + if (value !== undefined && value !== null) { + // Use node label or type as key + const label = + ((node.params?.__meta as Record<string, unknown>)?.label as string) ?? + node.nodeType.split("/").pop() ?? + node.id; + outputs[label] = value; + } + } + + if (Object.keys(outputs).length === 0) { + return { status: "completed" }; + } + + // If only one terminal node, flatten the output + const keys = Object.keys(outputs); + if (keys.length === 1) { + return { status: "completed", output: outputs[keys[0]] }; + } + + return { status: "completed", outputs }; +} diff --git a/package-lock.json b/package-lock.json index 80bc39f6..0715b949 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,14 +1,15 @@ { "name": "wavespeed-desktop", - "version": "2.0.15", + "version": "2.1.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "wavespeed-desktop", - "version": "2.0.15", + "version": "2.1.0", "hasInstallScript": true, "dependencies": { + "7zip-bin": "^5.2.0", "adm-zip": "^0.5.16", "electron-log": "^5.4.3", "electron-updater": "^6.6.2", @@ -4833,7 +4834,6 @@ "version": "5.2.0", "resolved": "https://registry.npmjs.org/7zip-bin/-/7zip-bin-5.2.0.tgz", "integrity": "sha512-ukTPVhqG4jNzMro2qA9HSCSSVJN3aN7tlb+hfqYCt3ER0yWroeA2VR38MNrOHLQ/cVj+DaIMad0kFCtWWowh/A==", - "dev": true, "license": "MIT" }, "node_modules/abbrev": { diff --git a/package.json b/package.json index 8e06148f..13f0538b 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "wavespeed-desktop", - "version": "2.0.23", + "version": "2.1.0", "description": "WaveSpeedAI Desktop Application - A playground for AI models", "main": "./out/main/index.js", "author": { @@ -32,6 +32,7 @@ "format:check": "prettier --check \"src/**/*.{ts,tsx,css}\" \"electron/**/*.ts\"" }, "dependencies": { + "7zip-bin": "^5.2.0", "adm-zip": "^0.5.16", 
"electron-log": "^5.4.3", "electron-updater": "^6.6.2", diff --git a/src/components/layout/Sidebar.tsx b/src/components/layout/Sidebar.tsx index c2772ac4..d83bf775 100644 --- a/src/components/layout/Sidebar.tsx +++ b/src/components/layout/Sidebar.tsx @@ -116,13 +116,17 @@ export const Sidebar = memo(function Sidebar({ }; const handleFocus = () => { if (!blurredRef.current) return; - // Keep suppressed — will be re-enabled by mousemove + // Keep suppressed — will be re-enabled by mousemove after a short grace period + // The delay prevents tooltips from flashing when the OS synthesizes a + // mousemove event immediately upon window focus (common in Electron). const onMove = () => { blurredRef.current = false; setTooltipReady(true); window.removeEventListener("mousemove", onMove); }; - window.addEventListener("mousemove", onMove, { once: true }); + setTimeout(() => { + window.addEventListener("mousemove", onMove, { once: true }); + }, 150); }; window.addEventListener("blur", handleBlur); window.addEventListener("focus", handleFocus); diff --git a/src/components/playground/ExplorePanel.tsx b/src/components/playground/ExplorePanel.tsx index 26b84d70..f3de8bae 100644 --- a/src/components/playground/ExplorePanel.tsx +++ b/src/components/playground/ExplorePanel.tsx @@ -31,8 +31,8 @@ import { X, ArrowDownNarrowWide, ArrowUpNarrowWide, + ChevronRight, ChevronDown, - ChevronUp, RefreshCw, } from "lucide-react"; import { cn } from "@/lib/utils"; @@ -261,14 +261,21 @@ export function ExplorePanel({ }: ExplorePanelProps) { const { t } = useTranslation(); const navigate = useNavigate(); - const { models, toggleFavorite, isFavorite, fetchModels } = useModelsStore(); + const { + models, + toggleFavorite, + isFavorite, + fetchModels, + selectedType: typeFilter, + setSelectedType: setTypeFilter, + typeFiltersOpen, + setTypeFiltersOpen, + } = useModelsStore(); const { createTab } = usePlaygroundStore(); - const [typeFilter, setTypeFilter] = useState(null); const [showFavoritesOnly, 
setShowFavoritesOnly] = useState(false); const [isRefreshing, setIsRefreshing] = useState(false); const [sortKey, setSortKey] = useState("popularity"); const [sortAsc, setSortAsc] = useState(false); - const [typeFiltersOpen, setTypeFiltersOpen] = useState(false); // type filter row collapsed by default // Local search state with debounce const [searchInput, setSearchInput] = useState(""); @@ -507,23 +514,23 @@ export function ExplorePanel({ className="flex-1 overflow-y-auto overflow-x-hidden" >
-

- {showFavoritesOnly - ? t("playground.explore.favorites", "Favorites") - : search - ? t("playground.explore.searchResults", "{{count}} results", { - count: filteredModels.length, - }) - : t("playground.explore.allModels", "All Models")} -

-
+
+

+ {showFavoritesOnly + ? t("playground.explore.favorites", "Favorites") + : search + ? t("playground.explore.searchResults", "{{count}} results", { + count: filteredModels.length, + }) + : t("playground.explore.allModels", "All Models")} +

{!typeFiltersOpen ? ( - {allTypes.map((type) => ( - - ))} - -
- + )}
+ {typeFiltersOpen && ( +
+ + {allTypes.map((type) => ( + + ))} +
+ )} {filteredModels.length === 0 ? (
diff --git a/src/components/playground/FeaturedModelsPanel.tsx b/src/components/playground/FeaturedModelsPanel.tsx index ec5d7605..8e1a2754 100644 --- a/src/components/playground/FeaturedModelsPanel.tsx +++ b/src/components/playground/FeaturedModelsPanel.tsx @@ -3,7 +3,16 @@ import { ScrollArea } from "@/components/ui/scroll-area"; import { Badge } from "@/components/ui/badge"; import type { Model } from "@/types/model"; -const FEATURED_MODEL_FAMILIES = [ +const FEATURED_MODEL_FAMILIES: Array<{ + name: string; + provider: string; + description: string; + poster: string; + primaryVariant: string; + tags: string[]; + ratio: "poster" | "square"; + isNew?: boolean; +}> = [ // ── Top row: poster cards (3:4) ── { name: "Nano Banana 2", diff --git a/src/components/ui/tooltip.tsx b/src/components/ui/tooltip.tsx index 742dd249..1912fef1 100644 --- a/src/components/ui/tooltip.tsx +++ b/src/components/ui/tooltip.tsx @@ -18,7 +18,7 @@ const TooltipContent = React.forwardRef< ref={ref} sideOffset={sideOffset} className={cn( - "z-50 overflow-hidden rounded-md bg-primary px-3 py-1.5 text-xs text-primary-foreground animate-in fade-in-0 zoom-in-95 data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=closed]:zoom-out-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2", + "z-[9999] overflow-hidden rounded-md bg-primary px-3 py-1.5 text-xs text-primary-foreground animate-in fade-in-0 zoom-in-95 data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=closed]:zoom-out-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2", className, )} {...props} diff --git a/src/i18n/locales/ar.json b/src/i18n/locales/ar.json index f041a6cc..a7f0721b 100644 --- a/src/i18n/locales/ar.json +++ b/src/i18n/locales/ar.json @@ -1137,6 +1137,13 @@ 
"modelIdLabel": "معرف النموذج", "selectNode": "اختر عقدة للتكوين", "noExecutions": "لا توجد عمليات تنفيذ بعد", + "selectAsOutput": "استخدام كمخرج", + "selectedAsOutput": "تم التحديد", + "importWorkflow": "استيراد سير العمل", + "importWorkflowAsSubgraph": "استيراد سير العمل كرسم فرعي", + "noWorkflowsToImport": "لا توجد سير عمل أخرى متاحة", + "currentWorkflow": "الحالي", + "importContainsTrigger": "لا يمكن الاستيراد: سير العمل يحتوي على عقد مشغّل. لا يُسمح بالمشغّلات داخل المجموعات.", "budgetExceeded": "تجاوز الميزانية", "dailySpend": "الإنفاق اليومي", "perExecutionLimit": "حد التنفيذ لكل عملية", @@ -1195,6 +1202,14 @@ "control": "التحكم" }, "nodeDefs": { + "trigger/directory": { + "label": "مشغّل المجلد", + "hint": "مسح مجلد محلي — يعمل سير العمل مرة لكل ملف" + }, + "trigger/http": { + "label": "مشغّل HTTP", + "hint": "نشر سير العمل كـ HTTP API — تحديد حقول الإدخال التي يوفرها المتصلون" + }, "ai-task/run": { "label": "WaveSpeed API", "hint": "تشغيل أي نموذج ذكاء اصطناعي — صور وفيديو وصوت والمزيد", @@ -1281,6 +1296,10 @@ } } }, + "output/http-response": { + "label": "استجابة HTTP", + "hint": "حدد ما يعيده سير العمل لمستدعي HTTP" + }, "free-tool/image-enhancer": { "label": "تحسين الصورة", "hint": "تكبير وتحسين الصور (2×–4×) مجاناً" @@ -1349,6 +1368,10 @@ "processing/select": { "label": "اختيار", "hint": "اختيار عنصر واحد من مصفوفة حسب الفهرس" + }, + "control/iterator": { + "label": "مجموعة", + "hint": "تجميع العقد في سير عمل فرعي للتنظيم" } }, "modelSelector": { @@ -1463,6 +1486,18 @@ "title": "عقدة Concat — دمج مخرجات متعددة", "desc": "هل تحتاج لتمرير عدة صور إلى مدخل \"images\"؟ استخدم عقدة Concat:\n\n1. صِل مخرج image من كل عقدة سابقة بمدخلات Concat: value1، value2، value3، إلخ.\n2. صِل مخرج output من Concat (الآن مصفوفة) بمعامل images في العقدة التالية.\n\nمثال:\n[رفع A] → image → Concat → output (مصفوفة) → [مهمة AI].images\n[رفع B] → image ↗\n\nيعمل مع أي نوع مخرجات — صور، فيديو، نص — كلما احتجت لدمج عدة قيم فردية في مدخل مصفوفة واحد." 
}, + "directoryTrigger": { + "title": "مشغّل المجلد — معالجة دفعية للملفات المحلية", + "desc": "مسح مجلد محلي تلقائياً وتشغيل سير العمل مرة لكل ملف:\n\n• اختر مجلداً ونوع الملفات (صور، فيديو، صوت، أو الكل)\n• يجد المشغّل جميع الملفات المطابقة ويمررها واحداً تلو الآخر إلى خط الأنابيب\n• مثالي للمعالجة الدفعية — مثل تحسين كل صورة في مجلد أو تحويل جميع الفيديوهات\n\nكل تنفيذ يستقبل ملفاً واحداً، لذا تعمل العقد اللاحقة تماماً كما مع الرفع اليدوي." + }, + "httpTrigger": { + "title": "مشغّل HTTP — تشغيل سير العمل عبر API", + "desc": "حوّل أي سير عمل إلى نقطة نهاية HTTP API:\n\n• حدد حقول الإخراج (مثل image، prompt) — كل حقل يصبح منفذ إخراج على اللوحة\n• عند وصول طلب POST، يتم استخراج حقول JSON body وتمريرها للعقد اللاحقة\n• اقرنه بعقدة HTTP Response لإرجاع النتائج للمتصل\n\nهذا يتيح لك دمج سير العمل في تطبيقات خارجية أو سكربتات عبر إرسال طلب HTTP." + }, + "group": { + "title": "عقدة المجموعة — تنظيم سير العمل الفرعي", + "desc": "اجمع عدة عقد في حاوية واحدة قابلة للطي:\n\n• اسحب العقد إلى مجموعة لتغليف سير عمل فرعي\n• اكشف مدخلات/مخرجات محددة على سطح المجموعة ليتصل بها العقد الخارجية\n• انقر \"تعديل الرسم الفرعي\" للدخول وتعديل العقد الداخلية\n• استورد سير عمل موجود إلى مجموعة لإعادة استخدامه\n\nالمجموعات تحافظ على نظافة سير العمل المعقد — فكر بها كدوال يمكن ربطها معاً." 
+ }, "canvas": { "title": "التفاعل مع اللوحة", "desc": "اللوحة هي مساحة عملك:\n• اسحب العقد لوضعها\n• اسحب من منافذ الإخراج إلى منافذ الإدخال لإنشاء اتصالات\n• انقر على عقدة لتحديدها — تتوسع المعلمات داخل العقدة للتعديل\n• انقر بزر الفأرة الأيمن لفتح القائمة السياقية (نسخ، لصق، حذف)\n• مرّر للتكبير، واسحب الخلفية لتحريك العرض\n• الاختصارات: Ctrl+Z للتراجع، Ctrl+C/V للنسخ واللصق، Delete للحذف" diff --git a/src/i18n/locales/de.json b/src/i18n/locales/de.json index 55537591..8f9f330a 100644 --- a/src/i18n/locales/de.json +++ b/src/i18n/locales/de.json @@ -1138,6 +1138,13 @@ "modelIdLabel": "Modell-ID", "selectNode": "Wählen Sie einen Knoten zur Konfiguration", "noExecutions": "Noch keine Ausführungen", + "selectAsOutput": "Als Ausgabe verwenden", + "selectedAsOutput": "Ausgewählt", + "importWorkflow": "Workflow importieren", + "importWorkflowAsSubgraph": "Workflow als Untergraph importieren", + "noWorkflowsToImport": "Keine anderen Workflows verfügbar", + "currentWorkflow": "aktuell", + "importContainsTrigger": "Import nicht möglich: Workflow enthält Trigger-Knoten. 
Trigger sind in Gruppen nicht erlaubt.", "budgetExceeded": "Budget überschritten", "dailySpend": "Tägliche Ausgaben", "perExecutionLimit": "Limite pro Ausführung", @@ -1196,6 +1203,14 @@ "control": "Steuerung" }, "nodeDefs": { + "trigger/directory": { + "label": "Verzeichnis-Trigger", + "hint": "Lokalen Ordner scannen — Workflow wird einmal pro Datei ausgeführt" + }, + "trigger/http": { + "label": "HTTP-Trigger", + "hint": "Workflow als HTTP-API bereitstellen — Eingabefelder definieren" + }, "ai-task/run": { "label": "WaveSpeed API", "hint": "Beliebiges KI-Modell ausführen — Bild, Video, Audio und mehr", @@ -1282,6 +1297,10 @@ } } }, + "output/http-response": { + "label": "HTTP-Antwort", + "hint": "Definieren Sie, was dieser Workflow an HTTP-Aufrufer zurückgibt" + }, "free-tool/image-enhancer": { "label": "Bildverbesserung", "hint": "Bilder kostenlos hochskalieren und schärfen (2×–4×)" @@ -1350,6 +1369,10 @@ "processing/select": { "label": "Auswählen", "hint": "Ein Element aus einem Array per Index auswählen" + }, + "control/iterator": { + "label": "Gruppe", + "hint": "Knoten in einem Sub-Workflow gruppieren" } }, "modelSelector": { @@ -1434,6 +1457,18 @@ "title": "Concat-Knoten — Mehrere Ausgaben zusammenführen", "desc": "Müssen Sie mehrere Bilder in einen \"images\"-Eingang übergeben? Verwenden Sie den Concat-Knoten:\n\n1. Verbinden Sie die image-Ausgabe jedes vorgelagerten Knotens mit Concat's value1, value2, value3 usw.\n2. Verbinden Sie Concat's output (jetzt ein Array) mit dem images-Parameter des nachgelagerten Knotens.\n\nBeispiel:\n[Upload A] → image → Concat → output (Array) → [AI-Aufgabe].images\n[Upload B] → image ↗\n\nDies funktioniert für jeden Ausgabetyp — Bilder, Videos, Text — wann immer Sie mehrere Einzelwerte zu einem Array-Eingang kombinieren müssen." 
}, + "directoryTrigger": { + "title": "Verzeichnis-Trigger — Lokale Dateien stapelweise verarbeiten", + "desc": "Automatisch einen lokalen Ordner scannen und den Workflow einmal pro Datei ausführen:\n\n• Wählen Sie ein Verzeichnis und einen Dateityp (Bilder, Videos, Audio oder Alle)\n• Der Trigger findet alle passenden Dateien und leitet sie einzeln in die Pipeline\n• Ideal für Stapelverarbeitung — z.B. jedes Foto in einem Ordner verbessern, alle Videos konvertieren usw.\n\nJede Ausführung erhält eine einzelne Datei, nachgelagerte Knoten funktionieren genau wie bei manuellem Upload." + }, + "httpTrigger": { + "title": "HTTP-Trigger — Workflows per API ausführen", + "desc": "Verwandeln Sie jeden Workflow in einen HTTP-API-Endpunkt:\n\n• Definieren Sie Ausgabefelder (z.B. image, prompt) — jedes wird zu einem Ausgangsport auf der Leinwand\n• Bei eingehenden POST-Anfragen werden die JSON-Body-Felder extrahiert und an nachgelagerte Knoten weitergeleitet\n• Kombinieren Sie mit dem HTTP-Response-Knoten, um Ergebnisse zurückzugeben\n\nSo können Sie Workflows in externe Apps, Skripte oder Automatisierungstools integrieren." + }, + "group": { + "title": "Gruppen-Knoten — Sub-Workflows organisieren", + "desc": "Fassen Sie mehrere Knoten in einem zusammenklappbaren Container zusammen:\n\n• Ziehen Sie Knoten in eine Gruppe, um einen Sub-Workflow zu kapseln\n• Legen Sie ausgewählte Ein-/Ausgänge auf der Gruppenoberfläche frei, damit externe Knoten sich verbinden können\n• Klicken Sie auf \"Subgraph bearbeiten\", um die internen Knoten zu bearbeiten\n• Importieren Sie einen bestehenden Workflow in eine Gruppe zur Wiederverwendung\n\nGruppen halten komplexe Workflows übersichtlich und modular — denken Sie an sie wie Funktionen, die Sie miteinander verbinden können." 
+ }, "canvas": { "title": "Leinwand-Interaktionen", "desc": "Die Leinwand ist Ihr Arbeitsbereich:\n• Ziehen Sie Knoten, um sie zu positionieren\n• Ziehen Sie von Ausgangsports zu Eingangsports, um Verbindungen zu erstellen\n• Klicken Sie auf einen Knoten, um ihn auszuwählen — Parameter werden im Knoten zur Bearbeitung erweitert\n• Rechtsklick für Kontextmenü (Kopieren, Einfügen, Löschen)\n• Scrollen zum Zoomen, Hintergrund ziehen zum Schwenken\n• Tastenkürzel: Ctrl+Z Rückgängig, Ctrl+C/V Kopieren/Einfügen, Delete zum Entfernen" diff --git a/src/i18n/locales/en.json b/src/i18n/locales/en.json index e561240f..2e00e303 100644 --- a/src/i18n/locales/en.json +++ b/src/i18n/locales/en.json @@ -1137,7 +1137,7 @@ "runTarget": "Run target", "runTargetAll": "Run All Nodes", "runTargetSelected": "Run Selected Node", - "runCount": "Run count", + "runCount": "Run Count", "stop": "Stop", "running": "Running...", "cancelAll": "Cancel All", @@ -1154,6 +1154,29 @@ "zoomOut": "Zoom out", "autoLayout": "Auto Layout", "freeToolModelHint": "First run will auto-download the AI model, please wait", + "triggerHint": "This trigger will repeatedly run the downstream workflow each time it fires", + "dropToAddToGroup": "Release to add to Group", + "dropToRemoveFromGroup": "Release to remove from Group", + "editSubgraph": "Edit Subgraph", + "mainWorkflow": "Main Workflow", + "exitSubgraph": "Exit subgraph (ESC)", + "editingSubgraph": "Editing subgraph", + "childNodesCount": "{{count}} child node(s)", + "groupInput": "Group Input", + "groupOutput": "Group Output", + "noExposedPorts": "No exposed ports", + "doubleClickToRename": "Double-click to rename", + "clickToSetAlias": "Click to set display name", + "setAlias": "Set display name...", + "aliasPlaceholder": "Display name on main graph...", + "mappedAs": "Mapped as:", + "group": "Group", + "importWorkflow": "Import Workflow", + "importWorkflowAsSubgraph": "Import Workflow as Subgraph", + "noWorkflowsToImport": "No other workflows available", 
+ "currentWorkflow": "current", + "importContainsTrigger": "Cannot import: workflow contains trigger nodes. Triggers are not allowed inside groups.", + "nodeCountLabel": "{{count}} nodes", "more": "More", "previousImage": "Previous image", "nextImage": "Next image", @@ -1171,6 +1194,9 @@ "open": "Open", "paste": "Paste", "addNode": "Add Node", + "addDownstreamNode": "Add Downstream Node", + "triggerLimitTitle": "Only one trigger allowed", + "triggerLimitDesc": "A workflow can only have one trigger node. Remove the existing trigger first.", "addNote": "Add Note", "note": "Note", "deleteConnection": "Delete Connection", @@ -1185,6 +1211,8 @@ "modelIdLabel": "Model ID", "selectNode": "Select a node to configure", "noExecutions": "No executions yet", + "selectAsOutput": "Use as output", + "selectedAsOutput": "Selected", "budgetExceeded": "Budget exceeded", "dailySpend": "Daily Spend", "perExecutionLimit": "Per-Execution Limit", @@ -1220,6 +1248,11 @@ "hideRun": "Hide this run", "output": "Output", "outputLowercase": "output", + "httpTriggerFields": "API Input Fields", + "httpResponseFields": "API Response Fields", + "addField": "Add", + "noFieldsHint": "No fields defined. 
Click Add to create one.", + "statusCode": "Status Code", "expandNode": "Expand", "collapseNode": "Collapse", "collapseAll": "Collapse All", @@ -1235,6 +1268,7 @@ "noNodesAvailable": "No nodes available", "nodeCategory": { "recent": "Recent", + "trigger": "Trigger", "input": "Input", "ai-task": "AI Model", "free-tool": "Free Tools", @@ -1243,6 +1277,14 @@ "control": "Control" }, "nodeDefs": { + "trigger/directory": { + "label": "Directory Trigger", + "hint": "Scan a local folder — workflow runs once per file" + }, + "trigger/http": { + "label": "HTTP Trigger", + "hint": "Expose this workflow as an HTTP API — define input fields that callers provide" + }, "ai-task/run": { "label": "WaveSpeed API", "hint": "Run any AI model on the platform — image, video, audio, and more", @@ -1329,6 +1371,10 @@ } } }, + "output/http-response": { + "label": "HTTP Response", + "hint": "Define what this workflow returns to HTTP callers" + }, "free-tool/image-enhancer": { "label": "Image Enhancer", "hint": "Upscale and sharpen images (2×–4×) for free" @@ -1397,6 +1443,14 @@ "processing/select": { "label": "Select", "hint": "Pick one item from an array by index" + }, + "control/iterator": { + "label": "Group", + "hint": "Group nodes into a sub-workflow for organization" + }, + "control/group": { + "label": "Group", + "hint": "Group nodes into a sub-workflow for organization" } }, "modelSelector": { @@ -1481,6 +1535,18 @@ "title": "Concat Node — Merge Multiple Outputs", "desc": "Need to pass multiple images into an \"images\" input? Use the Concat node:\n\n1. Connect each upstream node's image output to Concat's value1, value2, value3, etc.\n2. Connect Concat's output (now an array) to the downstream node's images parameter.\n\nExample:\n[Upload A] → image → Concat → output (array) → [AI Task].images\n[Upload B] → image ↗\n\nThis works for any type of output — images, videos, text — whenever you need to combine multiple single values into one array input." 
}, + "directoryTrigger": { + "title": "Directory Trigger — Batch Process Local Files", + "desc": "Automatically scan a local folder and run the workflow once per file:\n\n• Pick a directory and file type (Images, Videos, Audio, or All)\n• The trigger finds all matching files and feeds them one-by-one into the downstream pipeline\n• Great for batch processing — e.g. enhance every photo in a folder, convert all videos, etc.\n\nEach execution receives a single file, so downstream nodes work exactly the same as with a manual upload." + }, + "httpTrigger": { + "title": "HTTP Trigger — Run Workflows via API", + "desc": "Turn any workflow into an HTTP API endpoint:\n\n• Define output fields (e.g. image, prompt) — each becomes an output port on the canvas\n• When a POST request arrives, the JSON body fields are extracted and passed to downstream nodes\n• Pair with the HTTP Response node to return results back to the caller\n\nThis lets you integrate workflows into external apps, scripts, or automation tools by simply sending an HTTP request." + }, + "group": { + "title": "Group Node — Organize Sub-Workflows", + "desc": "Group multiple nodes into a single collapsible container:\n\n• Drag nodes into a Group to encapsulate a sub-workflow\n• Expose selected inputs/outputs on the Group's surface so external nodes can connect to them\n• Click \"Edit Subgraph\" to enter the Group and edit its internal nodes\n• Import an existing workflow into a Group to reuse it as a building block\n\nGroups keep complex workflows clean and modular — think of them as functions you can wire together." 
+ }, "canvas": { "title": "Canvas Interactions", "desc": "The canvas is your workspace:\n• Drag nodes to position them\n• Drag from output ports to input ports to create connections\n• Click a node to select it — parameters expand inside the node for editing\n• Right-click for context menu (copy, paste, delete)\n• Scroll to zoom, drag background to pan\n• Shortcuts: Ctrl+Z undo, Ctrl+C/V copy-paste, Delete to remove" diff --git a/src/i18n/locales/es.json b/src/i18n/locales/es.json index a4a6c634..02937085 100644 --- a/src/i18n/locales/es.json +++ b/src/i18n/locales/es.json @@ -1137,6 +1137,13 @@ "modelIdLabel": "ID del modelo", "selectNode": "Seleccione un nodo para configurar", "noExecutions": "Aún no hay ejecuciones", + "selectAsOutput": "Usar como salida", + "selectedAsOutput": "Seleccionado", + "importWorkflow": "Importar flujo de trabajo", + "importWorkflowAsSubgraph": "Importar flujo de trabajo como subgrafo", + "noWorkflowsToImport": "No hay otros flujos de trabajo disponibles", + "currentWorkflow": "actual", + "importContainsTrigger": "No se puede importar: el flujo de trabajo contiene nodos de activación. 
Los activadores no están permitidos dentro de grupos.", "budgetExceeded": "Presupuesto superado", "dailySpend": "Gasto diario", "perExecutionLimit": "Límite por ejecución", @@ -1195,6 +1202,14 @@ "control": "Control" }, "nodeDefs": { + "trigger/directory": { + "label": "Disparador de directorio", + "hint": "Escanear carpeta local — el flujo se ejecuta una vez por archivo" + }, + "trigger/http": { + "label": "Disparador HTTP", + "hint": "Exponer este flujo como HTTP API — definir campos de entrada" + }, "ai-task/run": { "label": "WaveSpeed API", "hint": "Ejecuta cualquier modelo de IA — imagen, vídeo, audio y más", @@ -1281,6 +1296,10 @@ } } }, + "output/http-response": { + "label": "Respuesta HTTP", + "hint": "Define lo que este flujo devuelve a los llamadores HTTP" + }, "free-tool/image-enhancer": { "label": "Mejorador de imagen", "hint": "Ampliar y mejorar imágenes (2×–4×) gratis" @@ -1349,6 +1368,10 @@ "processing/select": { "label": "Seleccionar", "hint": "Elegir un elemento de un array por índice" + }, + "control/iterator": { + "label": "Grupo", + "hint": "Agrupar nodos en un sub-flujo para organización" } }, "modelSelector": { @@ -1463,6 +1486,18 @@ "title": "Nodo Concat — Combinar múltiples salidas", "desc": "¿Necesitas pasar múltiples imágenes a una entrada \"images\"? Usa el nodo Concat:\n\n1. Conecta la salida image de cada nodo anterior a value1, value2, value3, etc. de Concat\n2. Conecta la salida output de Concat (ahora un array) al parámetro images del nodo siguiente\n\nEjemplo:\n[Upload A] → image → Concat → output (array) → [Tarea AI].images\n[Upload B] → image ↗\n\nFunciona para cualquier tipo de salida — imágenes, videos, texto — siempre que necesites combinar múltiples valores individuales en una entrada de array." 
}, + "directoryTrigger": { + "title": "Disparador de directorio — Procesamiento por lotes de archivos locales", + "desc": "Escanea automáticamente una carpeta local y ejecuta el flujo una vez por archivo:\n\n• Elige un directorio y tipo de archivo (Imágenes, Videos, Audio o Todos)\n• El disparador encuentra todos los archivos coincidentes y los envía uno a uno al pipeline\n• Ideal para procesamiento por lotes — p.ej. mejorar cada foto de una carpeta, convertir todos los videos, etc.\n\nCada ejecución recibe un solo archivo, los nodos posteriores funcionan igual que con una subida manual." + }, + "httpTrigger": { + "title": "Disparador HTTP — Ejecutar flujos vía API", + "desc": "Convierte cualquier flujo en un endpoint HTTP API:\n\n• Define campos de salida (p.ej. image, prompt) — cada uno se convierte en un puerto de salida en el lienzo\n• Cuando llega una solicitud POST, los campos del JSON body se extraen y pasan a los nodos posteriores\n• Combínalo con el nodo HTTP Response para devolver resultados al llamante\n\nEsto te permite integrar flujos en apps externas, scripts o herramientas de automatización enviando una solicitud HTTP." + }, + "group": { + "title": "Nodo Grupo — Organizar sub-flujos", + "desc": "Agrupa múltiples nodos en un contenedor plegable:\n\n• Arrastra nodos a un Grupo para encapsular un sub-flujo\n• Expón entradas/salidas seleccionadas en la superficie del Grupo para que nodos externos se conecten\n• Haz clic en \"Editar subgrafo\" para entrar y editar los nodos internos\n• Importa un flujo existente a un Grupo para reutilizarlo como bloque\n\nLos Grupos mantienen los flujos complejos limpios y modulares — piensa en ellos como funciones que puedes conectar entre sí." 
+ }, "canvas": { "title": "Interacciones del lienzo", "desc": "El lienzo es tu espacio de trabajo:\n• Arrastra nodos para colocarlos\n• Arrastra desde puertos de salida a puertos de entrada para crear conexiones\n• Haz clic en un nodo para seleccionarlo — los parámetros se expanden dentro del nodo para editar\n• Clic derecho para menú contextual (copiar, pegar, eliminar)\n• Desplaza para hacer zoom y arrastra el fondo para mover la vista\n• Atajos: Ctrl+Z deshacer, Ctrl+C/V copiar-pegar, Delete para eliminar" diff --git a/src/i18n/locales/fr.json b/src/i18n/locales/fr.json index d3d5cc92..8d792aaf 100644 --- a/src/i18n/locales/fr.json +++ b/src/i18n/locales/fr.json @@ -1138,6 +1138,13 @@ "modelIdLabel": "ID du modèle", "selectNode": "Sélectionnez un nœud à configurer", "noExecutions": "Pas encore d'exécutions", + "selectAsOutput": "Utiliser comme sortie", + "selectedAsOutput": "Sélectionné", + "importWorkflow": "Importer le workflow", + "importWorkflowAsSubgraph": "Importer le workflow en sous-graphe", + "noWorkflowsToImport": "Aucun autre workflow disponible", + "currentWorkflow": "actuel", + "importContainsTrigger": "Impossible d'importer : le workflow contient des nœuds déclencheurs. 
Les déclencheurs ne sont pas autorisés dans les groupes.", "budgetExceeded": "Budget dépassé", "dailySpend": "Dépenses quotidiennes", "perExecutionLimit": "Limite par exécution", @@ -1196,6 +1203,14 @@ "control": "Contrôle" }, "nodeDefs": { + "trigger/directory": { + "label": "Déclencheur de répertoire", + "hint": "Scanner un dossier local — le workflow s'exécute une fois par fichier" + }, + "trigger/http": { + "label": "Déclencheur HTTP", + "hint": "Exposer ce workflow comme API HTTP — définir les champs d'entrée" + }, "ai-task/run": { "label": "WaveSpeed API", "hint": "Exécuter n'importe quel modèle IA — image, vidéo, audio et plus", @@ -1282,6 +1297,10 @@ } } }, + "output/http-response": { + "label": "Réponse HTTP", + "hint": "Définissez ce que ce workflow renvoie aux appelants HTTP" + }, "free-tool/image-enhancer": { "label": "Amélioration d'image", "hint": "Agrandir et affiner les images (2×–4×) gratuitement" @@ -1350,6 +1369,10 @@ "processing/select": { "label": "Sélectionner", "hint": "Choisir un élément d'un tableau par index" + }, + "control/iterator": { + "label": "Groupe", + "hint": "Regrouper des nœuds dans un sous-workflow pour l'organisation" } }, "modelSelector": { @@ -1434,6 +1457,18 @@ "title": "Nœud Concat — Fusionner plusieurs sorties", "desc": "Besoin de passer plusieurs images dans une entrée \"images\" ? Utilisez le nœud Concat :\n\n1. Connectez la sortie image de chaque nœud en amont aux entrées value1, value2, value3, etc. de Concat\n2. Connectez la sortie output de Concat (maintenant un tableau) au paramètre images du nœud en aval\n\nExemple :\n[Upload A] → image → Concat → output (tableau) → [Tâche AI].images\n[Upload B] → image ↗\n\nCela fonctionne pour tout type de sortie — images, vidéos, texte — chaque fois que vous devez combiner plusieurs valeurs en une seule entrée tableau." 
}, + "directoryTrigger": { + "title": "Déclencheur de répertoire — Traitement par lots de fichiers locaux", + "desc": "Scannez automatiquement un dossier local et exécutez le workflow une fois par fichier :\n\n• Choisissez un répertoire et un type de fichier (Images, Vidéos, Audio ou Tous)\n• Le déclencheur trouve tous les fichiers correspondants et les envoie un par un dans le pipeline\n• Idéal pour le traitement par lots — ex. améliorer chaque photo d'un dossier, convertir toutes les vidéos, etc.\n\nChaque exécution reçoit un seul fichier, les nœuds en aval fonctionnent exactement comme avec un upload manuel." + }, + "httpTrigger": { + "title": "Déclencheur HTTP — Exécuter des workflows via API", + "desc": "Transformez n'importe quel workflow en endpoint HTTP API :\n\n• Définissez des champs de sortie (ex. image, prompt) — chacun devient un port de sortie sur le canevas\n• Lorsqu'une requête POST arrive, les champs du JSON body sont extraits et transmis aux nœuds en aval\n• Associez-le au nœud HTTP Response pour renvoyer les résultats à l'appelant\n\nCela vous permet d'intégrer des workflows dans des apps externes, scripts ou outils d'automatisation en envoyant une requête HTTP." + }, + "group": { + "title": "Nœud Groupe — Organiser les sous-workflows", + "desc": "Regroupez plusieurs nœuds dans un conteneur repliable :\n\n• Glissez des nœuds dans un Groupe pour encapsuler un sous-workflow\n• Exposez des entrées/sorties sélectionnées sur la surface du Groupe pour que les nœuds externes puissent s'y connecter\n• Cliquez sur « Éditer le sous-graphe » pour entrer et modifier les nœuds internes\n• Importez un workflow existant dans un Groupe pour le réutiliser comme bloc\n\nLes Groupes gardent les workflows complexes propres et modulaires — pensez-y comme des fonctions que vous pouvez connecter entre elles." 
+ }, "canvas": { "title": "Interactions du canevas", "desc": "Le canevas est votre espace de travail :\n• Glissez les nœuds pour les positionner\n• Glissez des ports de sortie vers les ports d'entrée pour créer des connexions\n• Cliquez sur un nœud pour le sélectionner — les paramètres s'étendent dans le nœud pour l'édition\n• Clic droit pour le menu contextuel (copier, coller, supprimer)\n• Défilez pour zoomer, glissez l'arrière-plan pour naviguer\n• Raccourcis : Ctrl+Z annuler, Ctrl+C/V copier-coller, Delete pour supprimer" diff --git a/src/i18n/locales/hi.json b/src/i18n/locales/hi.json index 51907590..aa273bc9 100644 --- a/src/i18n/locales/hi.json +++ b/src/i18n/locales/hi.json @@ -1137,6 +1137,13 @@ "modelIdLabel": "मॉडल ID", "selectNode": "कॉन्फ़िगर करने के लिए नोड चुनें", "noExecutions": "अभी तक कोई निष्पादन नहीं", + "selectAsOutput": "आउटपुट के रूप में उपयोग करें", + "selectedAsOutput": "चयनित", + "importWorkflow": "वर्कफ़्लो आयात करें", + "importWorkflowAsSubgraph": "वर्कफ़्लो को सबग्राफ़ के रूप में आयात करें", + "noWorkflowsToImport": "कोई अन्य वर्कफ़्लो उपलब्ध नहीं", + "currentWorkflow": "वर्तमान", + "importContainsTrigger": "आयात नहीं किया जा सकता: वर्कफ़्लो में ट्रिगर नोड हैं। समूहों के अंदर ट्रिगर की अनुमति नहीं है।", "budgetExceeded": "बजट पार", "dailySpend": "दैनिक खर्च", "perExecutionLimit": "प्रति निष्पादन सीमा", @@ -1195,6 +1202,14 @@ "control": "नियंत्रण" }, "nodeDefs": { + "trigger/directory": { + "label": "डायरेक्टरी ट्रिगर", + "hint": "स्थानीय फ़ोल्डर स्कैन करें — प्रत्येक फ़ाइल के लिए वर्कफ़्लो एक बार चलता है" + }, + "trigger/http": { + "label": "HTTP ट्रिगर", + "hint": "इस वर्कफ़्लो को HTTP API के रूप में प्रकाशित करें — इनपुट फ़ील्ड परिभाषित करें" + }, "ai-task/run": { "label": "WaveSpeed API", "hint": "कोई भी AI मॉडल चलाएं — छवि, वीडियो, ऑडियो और अधिक", @@ -1281,6 +1296,10 @@ } } }, + "output/http-response": { + "label": "HTTP प्रतिक्रिया", + "hint": "परिभाषित करें कि यह वर्कफ़्लो HTTP कॉलर्स को क्या लौटाता है" + }, 
"free-tool/image-enhancer": { "label": "छवि एन्हांसर", "hint": "छवियों को मुफ्त में अपस्केल और शार्प करें (2×–4×)" @@ -1349,6 +1368,10 @@ "processing/select": { "label": "चुनें", "hint": "इंडेक्स द्वारा ऐरे से एक आइटम चुनें" + }, + "control/iterator": { + "label": "ग्रुप", + "hint": "नोड्स को सब-वर्कफ़्लो में समूहित करें" } }, "modelSelector": { @@ -1463,6 +1486,18 @@ "title": "Concat नोड — कई आउटपुट मर्ज करें", "desc": "क्या आपको कई इमेज को \"images\" इनपुट में पास करना है? Concat नोड का उपयोग करें:\n\n1. प्रत्येक अपस्ट्रीम नोड के image आउटपुट को Concat के value1, value2, value3 आदि से कनेक्ट करें\n2. Concat के output (अब एक ऐरे) को डाउनस्ट्रीम नोड के images पैरामीटर से कनेक्ट करें\n\nउदाहरण:\n[अपलोड A] → image → Concat → output (ऐरे) → [AI टास्क].images\n[अपलोड B] → image ↗\n\nयह किसी भी प्रकार के आउटपुट के लिए काम करता है — इमेज, वीडियो, टेक्स्ट — जब भी आपको कई सिंगल वैल्यू को एक ऐरे इनपुट में जोड़ना हो।" }, + "directoryTrigger": { + "title": "डायरेक्टरी ट्रिगर — स्थानीय फ़ाइलों का बैच प्रोसेसिंग", + "desc": "स्वचालित रूप से एक स्थानीय फ़ोल्डर स्कैन करें और प्रत्येक फ़ाइल के लिए वर्कफ़्लो एक बार चलाएं:\n\n• एक डायरेक्टरी और फ़ाइल प्रकार चुनें (इमेज, वीडियो, ऑडियो, या सभी)\n• ट्रिगर सभी मिलान फ़ाइलें ढूंढता है और उन्हें एक-एक करके पाइपलाइन में भेजता है\n• बैच प्रोसेसिंग के लिए बढ़िया — जैसे फ़ोल्डर की हर फ़ोटो को बेहतर बनाना, सभी वीडियो कन्वर्ट करना आदि\n\nप्रत्येक एक्ज़ीक्यूशन एक फ़ाइल प्राप्त करता है, डाउनस्ट्रीम नोड्स मैनुअल अपलोड की तरह ही काम करते हैं।" + }, + "httpTrigger": { + "title": "HTTP ट्रिगर — API के ज़रिए वर्कफ़्लो चलाएं", + "desc": "किसी भी वर्कफ़्लो को HTTP API एंडपॉइंट में बदलें:\n\n• आउटपुट फ़ील्ड परिभाषित करें (जैसे image, prompt) — प्रत्येक कैनवास पर एक आउटपुट पोर्ट बन जाता है\n• जब POST रिक्वेस्ट आती है, JSON body फ़ील्ड निकाले जाते हैं और डाउनस्ट्रीम नोड्स को भेजे जाते हैं\n• HTTP Response नोड के साथ जोड़कर कॉलर को रिज़ल्ट लौटाएं\n\nइससे आप HTTP रिक्वेस्ट भेजकर वर्कफ़्लो को बाहरी ऐप्स, स्क्रिप्ट या ऑटोमेशन टूल्स में इंटीग्रेट कर सकते हैं।" 
+ }, + "group": { + "title": "ग्रुप नोड — सब-वर्कफ़्लो व्यवस्थित करें", + "desc": "कई नोड्स को एक फ़ोल्ड करने योग्य कंटेनर में समूहित करें:\n\n• नोड्स को ग्रुप में ड्रैग करके सब-वर्कफ़्लो एनकैप्सुलेट करें\n• ग्रुप की सतह पर चुनिंदा इनपुट/आउटपुट एक्सपोज़ करें ताकि बाहरी नोड्स कनेक्ट हो सकें\n• \"सबग्राफ़ एडिट करें\" पर क्लिक करके अंदर जाएं और आंतरिक नोड्स एडिट करें\n• किसी मौजूदा वर्कफ़्लो को ग्रुप में इम्पोर्ट करके बिल्डिंग ब्लॉक के रूप में पुन: उपयोग करें\n\nग्रुप जटिल वर्कफ़्लो को साफ़ और मॉड्यूलर रखते हैं — इन्हें ऐसे फ़ंक्शन समझें जिन्हें आप आपस में जोड़ सकते हैं।" + }, "canvas": { "title": "कैनवास इंटरैक्शन", "desc": "कैनवास आपका कार्यक्षेत्र है:\n• नोड्स को ड्रैग करके स्थान दें\n• कनेक्शन बनाने के लिए आउटपुट पोर्ट से इनपुट पोर्ट तक ड्रैग करें\n• नोड पर क्लिक करके चुनें — पैरामीटर नोड के अंदर विस्तारित होकर एडिट के लिए दिखते हैं\n• कॉन्टेक्स्ट मेनू (कॉपी, पेस्ट, डिलीट) के लिए राइट-क्लिक करें\n• ज़ूम के लिए स्क्रॉल करें, पैन करने के लिए बैकग्राउंड ड्रैग करें\n• शॉर्टकट: Ctrl+Z undo, Ctrl+C/V copy-paste, Delete हटाने के लिए" diff --git a/src/i18n/locales/id.json b/src/i18n/locales/id.json index fac00b5e..3360535c 100644 --- a/src/i18n/locales/id.json +++ b/src/i18n/locales/id.json @@ -1137,6 +1137,13 @@ "modelIdLabel": "ID model", "selectNode": "Pilih node untuk dikonfigurasi", "noExecutions": "Belum ada eksekusi", + "selectAsOutput": "Gunakan sebagai output", + "selectedAsOutput": "Terpilih", + "importWorkflow": "Impor Alur Kerja", + "importWorkflowAsSubgraph": "Impor Alur Kerja sebagai Subgraf", + "noWorkflowsToImport": "Tidak ada alur kerja lain yang tersedia", + "currentWorkflow": "saat ini", + "importContainsTrigger": "Tidak dapat mengimpor: alur kerja berisi node pemicu. 
Pemicu tidak diizinkan di dalam grup.", "budgetExceeded": "Anggaran terlampaui", "dailySpend": "Pengeluaran harian", "perExecutionLimit": "Batas per eksekusi", @@ -1195,6 +1202,14 @@ "control": "Kontrol" }, "nodeDefs": { + "trigger/directory": { + "label": "Pemicu Direktori", + "hint": "Pindai folder lokal — workflow berjalan sekali per file" + }, + "trigger/http": { + "label": "Pemicu HTTP", + "hint": "Ekspos workflow ini sebagai HTTP API — tentukan field input" + }, "ai-task/run": { "label": "WaveSpeed API", "hint": "Jalankan model AI apa pun — gambar, video, audio, dan lainnya", @@ -1281,6 +1296,10 @@ } } }, + "output/http-response": { + "label": "Respons HTTP", + "hint": "Tentukan apa yang dikembalikan workflow ini kepada pemanggil HTTP" + }, "free-tool/image-enhancer": { "label": "Peningkat gambar", "hint": "Perbesar dan pertajam gambar (2×–4×) secara gratis" @@ -1349,6 +1368,10 @@ "processing/select": { "label": "Pilih", "hint": "Pilih satu item dari array berdasarkan indeks" + }, + "control/iterator": { + "label": "Grup", + "hint": "Kelompokkan node ke dalam sub-workflow untuk organisasi" } }, "modelSelector": { @@ -1463,6 +1486,18 @@ "title": "Node Concat — Gabungkan Beberapa Output", "desc": "Perlu memasukkan beberapa gambar ke input \"images\"? Gunakan node Concat:\n\n1. Hubungkan output image dari setiap node hulu ke value1, value2, value3, dll. milik Concat\n2. Hubungkan output Concat (sekarang berupa array) ke parameter images node hilir\n\nContoh:\n[Upload A] → image → Concat → output (array) → [AI Task].images\n[Upload B] → image ↗\n\nIni berlaku untuk semua jenis output — gambar, video, teks — kapan pun Anda perlu menggabungkan beberapa nilai tunggal menjadi satu input array." 
}, + "directoryTrigger": { + "title": "Pemicu Direktori — Pemrosesan Batch File Lokal", + "desc": "Pindai folder lokal secara otomatis dan jalankan workflow sekali per file:\n\n• Pilih direktori dan jenis file (Gambar, Video, Audio, atau Semua)\n• Pemicu menemukan semua file yang cocok dan mengirimnya satu per satu ke pipeline\n• Cocok untuk pemrosesan batch — misal meningkatkan setiap foto di folder, mengonversi semua video, dll.\n\nSetiap eksekusi menerima satu file, node hilir bekerja persis seperti upload manual." + }, + "httpTrigger": { + "title": "Pemicu HTTP — Jalankan Workflow via API", + "desc": "Ubah workflow apa pun menjadi endpoint HTTP API:\n\n• Tentukan field output (misal image, prompt) — masing-masing menjadi port output di kanvas\n• Saat permintaan POST masuk, field JSON body diekstrak dan diteruskan ke node hilir\n• Pasangkan dengan node HTTP Response untuk mengembalikan hasil ke pemanggil\n\nIni memungkinkan Anda mengintegrasikan workflow ke aplikasi eksternal, skrip, atau alat otomasi dengan mengirim permintaan HTTP." + }, + "group": { + "title": "Node Grup — Atur Sub-Workflow", + "desc": "Kelompokkan beberapa node ke dalam kontainer yang dapat dilipat:\n\n• Seret node ke dalam Grup untuk mengenkapsulasi sub-workflow\n• Ekspos input/output terpilih di permukaan Grup agar node eksternal dapat terhubung\n• Klik \"Edit Subgraph\" untuk masuk dan mengedit node internal\n• Impor workflow yang ada ke dalam Grup untuk digunakan kembali sebagai blok bangunan\n\nGrup menjaga workflow kompleks tetap rapi dan modular — anggap saja seperti fungsi yang bisa Anda hubungkan satu sama lain." 
+ }, "canvas": { "title": "Interaksi Kanvas", "desc": "Kanvas adalah ruang kerja Anda:\n• Seret node untuk memposisikannya\n• Seret dari port output ke port input untuk membuat koneksi\n• Klik node untuk memilihnya — parameter meluas di dalam node untuk diedit\n• Klik kanan untuk menu konteks (salin, tempel, hapus)\n• Gulir untuk zoom, seret latar untuk pan\n• Pintasan: Ctrl+Z undo, Ctrl+C/V copy-paste, Delete untuk menghapus" diff --git a/src/i18n/locales/it.json b/src/i18n/locales/it.json index b77c30f7..32cc4636 100644 --- a/src/i18n/locales/it.json +++ b/src/i18n/locales/it.json @@ -1137,6 +1137,13 @@ "modelIdLabel": "ID modello", "selectNode": "Seleziona un nodo da configurare", "noExecutions": "Nessuna esecuzione ancora", + "selectAsOutput": "Usa come output", + "selectedAsOutput": "Selezionato", + "importWorkflow": "Importa workflow", + "importWorkflowAsSubgraph": "Importa workflow come sottografo", + "noWorkflowsToImport": "Nessun altro workflow disponibile", + "currentWorkflow": "attuale", + "importContainsTrigger": "Impossibile importare: il workflow contiene nodi trigger. 
I trigger non sono consentiti all'interno dei gruppi.", "budgetExceeded": "Budget superato", "dailySpend": "Spesa giornaliera", "perExecutionLimit": "Limite per esecuzione", @@ -1195,6 +1202,14 @@ "control": "Controllo" }, "nodeDefs": { + "trigger/directory": { + "label": "Trigger Directory", + "hint": "Scansiona una cartella locale — il workflow viene eseguito una volta per file" + }, + "trigger/http": { + "label": "Trigger HTTP", + "hint": "Esponi questo workflow come API HTTP — definisci i campi di input" + }, "ai-task/run": { "label": "WaveSpeed API", "hint": "Esegui qualsiasi modello IA — immagini, video, audio e altro", @@ -1281,6 +1296,10 @@ } } }, + "output/http-response": { + "label": "Risposta HTTP", + "hint": "Definisci cosa restituisce questo workflow ai chiamanti HTTP" + }, "free-tool/image-enhancer": { "label": "Migliora immagine", "hint": "Ingrandisci e migliora le immagini (2×–4×) gratis" @@ -1349,6 +1368,10 @@ "processing/select": { "label": "Seleziona", "hint": "Scegli un elemento da un array per indice" + }, + "control/iterator": { + "label": "Gruppo", + "hint": "Raggruppa i nodi in un sub-workflow per organizzazione" } }, "modelSelector": { @@ -1463,6 +1486,18 @@ "title": "Nodo Concat — Unisci più output", "desc": "Devi passare più immagini a un input \"images\"? Usa il nodo Concat:\n\n1. Collega l'output image di ogni nodo a monte a value1, value2, value3, ecc. di Concat\n2. Collega l'output di Concat (ora un array) al parametro images del nodo a valle\n\nEsempio:\n[Upload A] → image → Concat → output (array) → [Task AI].images\n[Upload B] → image ↗\n\nFunziona per qualsiasi tipo di output — immagini, video, testo — ogni volta che devi combinare più valori singoli in un unico input array." 
}, + "directoryTrigger": { + "title": "Trigger Directory — Elaborazione batch di file locali", + "desc": "Scansiona automaticamente una cartella locale ed esegui il workflow una volta per file:\n\n• Scegli una directory e un tipo di file (Immagini, Video, Audio o Tutti)\n• Il trigger trova tutti i file corrispondenti e li invia uno alla volta nella pipeline\n• Ideale per l'elaborazione batch — es. migliorare ogni foto in una cartella, convertire tutti i video, ecc.\n\nOgni esecuzione riceve un singolo file, i nodi a valle funzionano esattamente come con un upload manuale." + }, + "httpTrigger": { + "title": "Trigger HTTP — Esegui workflow via API", + "desc": "Trasforma qualsiasi workflow in un endpoint HTTP API:\n\n• Definisci campi di output (es. image, prompt) — ognuno diventa una porta di output sul canvas\n• Quando arriva una richiesta POST, i campi del JSON body vengono estratti e passati ai nodi a valle\n• Abbinalo al nodo HTTP Response per restituire i risultati al chiamante\n\nQuesto ti permette di integrare i workflow in app esterne, script o strumenti di automazione inviando una richiesta HTTP." + }, + "group": { + "title": "Nodo Gruppo — Organizza sub-workflow", + "desc": "Raggruppa più nodi in un contenitore comprimibile:\n\n• Trascina i nodi in un Gruppo per incapsulare un sub-workflow\n• Esponi input/output selezionati sulla superficie del Gruppo per consentire ai nodi esterni di connettersi\n• Clicca \"Modifica sottografo\" per entrare e modificare i nodi interni\n• Importa un workflow esistente in un Gruppo per riutilizzarlo come blocco\n\nI Gruppi mantengono i workflow complessi puliti e modulari — pensali come funzioni che puoi collegare tra loro." 
+ }, "canvas": { "title": "Interazioni della canvas", "desc": "La canvas e il tuo spazio di lavoro:\n• Trascina i nodi per posizionarli\n• Trascina dalle porte di output alle porte di input per creare connessioni\n• Fai clic su un nodo per selezionarlo — i parametri si espandono nel nodo per la modifica\n• Clic destro per menu contestuale (copia, incolla, elimina)\n• Scorri per zoom, trascina lo sfondo per pan\n• Scorciatoie: Ctrl+Z annulla, Ctrl+C/V copia-incolla, Delete per rimuovere" diff --git a/src/i18n/locales/ja.json b/src/i18n/locales/ja.json index c2aee456..8a07d4fa 100644 --- a/src/i18n/locales/ja.json +++ b/src/i18n/locales/ja.json @@ -1163,6 +1163,14 @@ "control": "制御" }, "nodeDefs": { + "trigger/directory": { + "label": "ディレクトリトリガー", + "hint": "ローカルフォルダをスキャン — ファイルごとにワークフローを1回実行" + }, + "trigger/http": { + "label": "HTTPトリガー", + "hint": "ワークフローをHTTP APIとして公開 — 入力フィールドを定義" + }, "ai-task/run": { "label": "WaveSpeed API", "hint": "画像・動画・音声など、あらゆるAIモデルを実行", @@ -1249,6 +1257,10 @@ } } }, + "output/http-response": { + "label": "HTTPレスポンス", + "hint": "このワークフローがHTTP呼び出し元に返す内容を定義" + }, "free-tool/image-enhancer": { "label": "画像強調", "hint": "画像を無料で高解像度化・シャープ化(2×〜4×)" @@ -1317,6 +1329,10 @@ "processing/select": { "label": "選択", "hint": "配列からインデックスで1つの項目を選択" + }, + "control/iterator": { + "label": "グループ", + "hint": "ノードをサブワークフローにグループ化して整理" } }, "modelSelector": { @@ -1401,6 +1417,18 @@ "title": "Concat ノード — 複数の出力を結合", "desc": "複数の画像を「images」入力に渡したい場合は、Concat ノードを使用します:\n\n1. 各上流ノードの image 出力を Concat の value1、value2、value3 などに接続\n2. 
Concat の output(配列)を下流ノードの images パラメータに接続\n\n例:\n[アップロード A] → image → Concat → output(配列)→ [AI タスク].images\n[アップロード B] → image ↗\n\n画像、動画、テキストなど、複数の単一値を1つの配列入力にまとめたい場合に使えます。" }, + "directoryTrigger": { + "title": "ディレクトリトリガー — ローカルファイルの一括処理", + "desc": "ローカルフォルダを自動スキャンし、ファイルごとにワークフローを1回実行します:\n\n• ディレクトリとファイルタイプ(画像、動画、音声、またはすべて)を選択\n• トリガーが一致するすべてのファイルを見つけ、1つずつパイプラインに送ります\n• 一括処理に最適 — フォルダ内のすべての写真を強化、すべての動画を変換など\n\n各実行は1つのファイルを受け取り、下流ノードは手動アップロードと同じように動作します。" + }, + "httpTrigger": { + "title": "HTTP トリガー — API 経由でワークフローを実行", + "desc": "任意のワークフローを HTTP API エンドポイントに変換します:\n\n• 出力フィールドを定義(例:image、prompt)— 各フィールドがキャンバス上の出力ポートになります\n• POST リクエストが届くと、JSON body のフィールドが抽出され下流ノードに渡されます\n• HTTP Response ノードと組み合わせて呼び出し元に結果を返します\n\nHTTP リクエストを送信するだけで、ワークフローを外部アプリ、スクリプト、自動化ツールに統合できます。" + }, + "group": { + "title": "グループノード — サブワークフローの整理", + "desc": "複数のノードを折りたたみ可能なコンテナにまとめます:\n\n• ノードをグループにドラッグしてサブワークフローをカプセル化\n• グループの表面に選択した入出力を公開し、外部ノードが接続できるようにします\n• 「サブグラフを編集」をクリックして内部ノードを編集\n• 既存のワークフローをグループにインポートして再利用\n\nグループは複雑なワークフローを整理しモジュール化します — 互いに接続できる関数のようなものです。" + }, "canvas": { "title": "キャンバス操作", "desc": "キャンバスがワークスペースです:\n• ノードをドラッグして配置\n• 出力ポートから入力ポートにドラッグして接続を作成\n• ノードをクリックして選択 — パラメータがノード内に展開して編集可能に\n• 右クリックでコンテキストメニュー(コピー、貼り付け、削除)\n• スクロールでズーム、背景をドラッグでパン\n• ショートカット:Ctrl+Z 元に戻す、Ctrl+C/V コピー&ペースト、Delete で削除" @@ -1460,6 +1488,13 @@ "exportFailed": "エクスポートに失敗しました", "estimated": "推定コスト", "noExecutions": "実行履歴がまだありません", + "selectAsOutput": "出力として使用", + "selectedAsOutput": "選択済み", + "importWorkflow": "ワークフローをインポート", + "importWorkflowAsSubgraph": "ワークフローをサブグラフとしてインポート", + "noWorkflowsToImport": "インポート可能なワークフローがありません", + "currentWorkflow": "現在", + "importContainsTrigger": "インポートできません:ワークフローにトリガーノードが含まれています。グループ内ではトリガーは使用できません。", "budgetExceeded": "予算超過", "dailySpend": "1日の支出", "perExecutionLimit": "実行ごとの制限", diff --git a/src/i18n/locales/ko.json b/src/i18n/locales/ko.json index 3d8bf0ce..8364bdbe 100644 --- a/src/i18n/locales/ko.json +++ b/src/i18n/locales/ko.json @@ -1138,6 
+1138,13 @@ "modelIdLabel": "모델 ID", "selectNode": "구성할 노드 선택", "noExecutions": "아직 실행 없음", + "selectAsOutput": "출력으로 사용", + "selectedAsOutput": "선택됨", + "importWorkflow": "워크플로 가져오기", + "importWorkflowAsSubgraph": "워크플로를 서브그래프로 가져오기", + "noWorkflowsToImport": "가져올 수 있는 다른 워크플로가 없습니다", + "currentWorkflow": "현재", + "importContainsTrigger": "가져올 수 없습니다: 워크플로에 트리거 노드가 포함되어 있습니다. 그룹 내에서는 트리거를 사용할 수 없습니다.", "budgetExceeded": "예산 초과", "dailySpend": "일일 지출", "perExecutionLimit": "실행당 제한", @@ -1196,6 +1203,14 @@ "control": "제어" }, "nodeDefs": { + "trigger/directory": { + "label": "디렉토리 트리거", + "hint": "로컬 폴더 스캔 — 파일당 한 번 워크플로 실행" + }, + "trigger/http": { + "label": "HTTP 트리거", + "hint": "이 워크플로를 HTTP API로 노출 — 입력 필드 정의" + }, "ai-task/run": { "label": "WaveSpeed API", "hint": "이미지, 비디오, 오디오 등 모든 AI 모델을 실행", @@ -1282,6 +1297,10 @@ } } }, + "output/http-response": { + "label": "HTTP 응답", + "hint": "이 워크플로가 HTTP 호출자에게 반환하는 내용을 정의" + }, "free-tool/image-enhancer": { "label": "이미지 향상", "hint": "이미지를 무료로 업스케일 및 선명하게 (2×–4×)" @@ -1350,6 +1369,10 @@ "processing/select": { "label": "선택", "hint": "배열에서 인덱스로 항목 하나를 선택" + }, + "control/iterator": { + "label": "그룹", + "hint": "노드를 서브 워크플로로 그룹화하여 정리" } }, "modelSelector": { @@ -1434,6 +1457,18 @@ "title": "Concat 노드 — 여러 출력 병합", "desc": "여러 이미지를 \"images\" 입력에 전달해야 하나요? Concat 노드를 사용하세요:\n\n1. 각 업스트림 노드의 image 출력을 Concat의 value1, value2, value3 등에 연결\n2. Concat의 output(배열)을 다운스트림 노드의 images 매개변수에 연결\n\n예시:\n[업로드 A] → image → Concat → output(배열) → [AI 작업].images\n[업로드 B] → image ↗\n\n이미지, 비디오, 텍스트 등 여러 단일 값을 하나의 배열 입력으로 결합해야 할 때 사용합니다." }, + "directoryTrigger": { + "title": "디렉토리 트리거 — 로컬 파일 일괄 처리", + "desc": "로컬 폴더를 자동으로 스캔하고 파일당 한 번씩 워크플로를 실행합니다:\n\n• 디렉토리와 파일 유형(이미지, 비디오, 오디오 또는 전체)을 선택\n• 트리거가 일치하는 모든 파일을 찾아 하나씩 파이프라인에 전달\n• 일괄 처리에 적합 — 폴더의 모든 사진 향상, 모든 비디오 변환 등\n\n각 실행은 단일 파일을 수신하며, 다운스트림 노드는 수동 업로드와 동일하게 작동합니다." 
+ }, + "httpTrigger": { + "title": "HTTP 트리거 — API로 워크플로 실행", + "desc": "모든 워크플로를 HTTP API 엔드포인트로 변환합니다:\n\n• 출력 필드 정의(예: image, prompt) — 각각 캔버스의 출력 포트가 됩니다\n• POST 요청이 도착하면 JSON body 필드가 추출되어 다운스트림 노드로 전달됩니다\n• HTTP Response 노드와 결합하여 호출자에게 결과를 반환\n\nHTTP 요청을 보내 워크플로를 외부 앱, 스크립트 또는 자동화 도구에 통합할 수 있습니다." + }, + "group": { + "title": "그룹 노드 — 서브 워크플로 구성", + "desc": "여러 노드를 접을 수 있는 컨테이너로 그룹화합니다:\n\n• 노드를 그룹으로 드래그하여 서브 워크플로를 캡슐화\n• 그룹 표면에 선택한 입출력을 노출하여 외부 노드가 연결할 수 있도록 합니다\n• \"서브그래프 편집\"을 클릭하여 내부 노드를 편집\n• 기존 워크플로를 그룹에 가져와 빌딩 블록으로 재사용\n\n그룹은 복잡한 워크플로를 깔끔하고 모듈화된 상태로 유지합니다 — 서로 연결할 수 있는 함수라고 생각하세요." + }, "canvas": { "title": "캔버스 상호작용", "desc": "캔버스가 작업 공간입니다:\n• 노드를 드래그하여 배치\n• 출력 포트에서 입력 포트로 드래그하여 연결 생성\n• 노드를 클릭하여 선택 — 매개변수가 노드 내에서 확장되어 편집 가능\n• 우클릭으로 컨텍스트 메뉴 (복사, 붙여넣기, 삭제)\n• 스크롤로 확대/축소, 배경 드래그로 이동\n• 단축키: Ctrl+Z 실행 취소, Ctrl+C/V 복사/붙여넣기, Delete로 삭제" diff --git a/src/i18n/locales/ms.json b/src/i18n/locales/ms.json index 214d73d4..f42ad93c 100644 --- a/src/i18n/locales/ms.json +++ b/src/i18n/locales/ms.json @@ -1137,6 +1137,13 @@ "modelIdLabel": "ID model", "selectNode": "Pilih nod untuk dikonfigurasi", "noExecutions": "Belum ada pelaksanaan", + "selectAsOutput": "Guna sebagai output", + "selectedAsOutput": "Dipilih", + "importWorkflow": "Import Aliran Kerja", + "importWorkflowAsSubgraph": "Import Aliran Kerja sebagai Subgraf", + "noWorkflowsToImport": "Tiada aliran kerja lain tersedia", + "currentWorkflow": "semasa", + "importContainsTrigger": "Tidak dapat mengimport: aliran kerja mengandungi nod pencetus. 
Pencetus tidak dibenarkan dalam kumpulan.", "budgetExceeded": "Budget terlampaui", "dailySpend": "Perbelanjaan harian", "perExecutionLimit": "Had per pelaksanaan", @@ -1195,6 +1202,14 @@ "control": "Kawalan" }, "nodeDefs": { + "trigger/directory": { + "label": "Pencetus Direktori", + "hint": "Imbas folder tempatan — aliran kerja berjalan sekali bagi setiap fail" + }, + "trigger/http": { + "label": "Pencetus HTTP", + "hint": "Dedahkan aliran kerja ini sebagai HTTP API — tentukan medan input" + }, "ai-task/run": { "label": "WaveSpeed API", "hint": "Jalankan mana-mana model AI — imej, video, audio dan lain-lain", @@ -1281,6 +1296,10 @@ } } }, + "output/http-response": { + "label": "Respons HTTP", + "hint": "Tentukan apa yang dikembalikan aliran kerja ini kepada pemanggil HTTP" + }, "free-tool/image-enhancer": { "label": "Penambah imej", "hint": "Besarkan dan pertajam imej (2×–4×) secara percuma" @@ -1349,6 +1368,10 @@ "processing/select": { "label": "Pilih", "hint": "Pilih satu item dari tatasusunan mengikut indeks" + }, + "control/iterator": { + "label": "Kumpulan", + "hint": "Kumpulkan nod ke dalam sub-aliran kerja untuk penyusunan" } }, "modelSelector": { @@ -1463,6 +1486,18 @@ "title": "Nod Concat — Gabungkan Pelbagai Output", "desc": "Perlu menghantar beberapa imej ke input \"images\"? Gunakan nod Concat:\n\n1. Sambungkan output image dari setiap nod huluan ke value1, value2, value3, dll. Concat\n2. Sambungkan output Concat (kini satu tatasusunan) ke parameter images nod hiliran\n\nContoh:\n[Muat Naik A] → image → Concat → output (tatasusunan) → [Tugas AI].images\n[Muat Naik B] → image ↗\n\nIni berfungsi untuk semua jenis output — imej, video, teks — bila-bila masa anda perlu menggabungkan beberapa nilai tunggal menjadi satu input tatasusunan." 
}, + "directoryTrigger": { + "title": "Pencetus Direktori — Pemprosesan Kelompok Fail Tempatan", + "desc": "Imbas folder tempatan secara automatik dan jalankan aliran kerja sekali bagi setiap fail:\n\n• Pilih direktori dan jenis fail (Imej, Video, Audio, atau Semua)\n• Pencetus mencari semua fail yang sepadan dan menghantarnya satu demi satu ke saluran paip\n• Sesuai untuk pemprosesan kelompok — cth. tingkatkan setiap foto dalam folder, tukar semua video, dll.\n\nSetiap pelaksanaan menerima satu fail, nod hiliran berfungsi sama seperti muat naik manual." + }, + "httpTrigger": { + "title": "Pencetus HTTP — Jalankan Aliran Kerja melalui API", + "desc": "Tukar mana-mana aliran kerja menjadi titik akhir HTTP API:\n\n• Tentukan medan output (cth. image, prompt) — setiap satu menjadi port output di kanvas\n• Apabila permintaan POST tiba, medan JSON body diekstrak dan dihantar ke nod hiliran\n• Pasangkan dengan nod HTTP Response untuk mengembalikan hasil kepada pemanggil\n\nIni membolehkan anda mengintegrasikan aliran kerja ke dalam aplikasi luaran, skrip, atau alat automasi dengan menghantar permintaan HTTP." + }, + "group": { + "title": "Nod Kumpulan — Susun Sub-Aliran Kerja", + "desc": "Kumpulkan beberapa nod ke dalam bekas yang boleh dilipat:\n\n• Seret nod ke dalam Kumpulan untuk mengkapsulkan sub-aliran kerja\n• Dedahkan input/output terpilih di permukaan Kumpulan supaya nod luaran boleh bersambung\n• Klik \"Edit Subgraph\" untuk masuk dan mengedit nod dalaman\n• Import aliran kerja sedia ada ke dalam Kumpulan untuk digunakan semula sebagai blok binaan\n\nKumpulan memastikan aliran kerja kompleks kekal kemas dan modular — anggap ia seperti fungsi yang boleh anda sambungkan antara satu sama lain." 
+ }, "canvas": { "title": "Interaksi Kanvas", "desc": "Kanvas ialah ruang kerja anda:\n• Seret nod untuk meletakkannya\n• Seret dari port output ke port input untuk membuat sambungan\n• Klik nod untuk memilihnya — parameter berkembang dalam nod untuk penyuntingan\n• Klik kanan untuk menu konteks (salin, tampal, padam)\n• Skrol untuk zum, seret latar belakang untuk pan\n• Pintasan: Ctrl+Z undur, Ctrl+C/V salin-tampal, Delete untuk buang" diff --git a/src/i18n/locales/pt.json b/src/i18n/locales/pt.json index af085bb2..e7c19fb4 100644 --- a/src/i18n/locales/pt.json +++ b/src/i18n/locales/pt.json @@ -1137,6 +1137,13 @@ "modelIdLabel": "ID do modelo", "selectNode": "Selecione um nó para configurar", "noExecutions": "Ainda sem execuções", + "selectAsOutput": "Usar como saída", + "selectedAsOutput": "Selecionado", + "importWorkflow": "Importar fluxo de trabalho", + "importWorkflowAsSubgraph": "Importar fluxo de trabalho como subgrafo", + "noWorkflowsToImport": "Nenhum outro fluxo de trabalho disponível", + "currentWorkflow": "atual", + "importContainsTrigger": "Não é possível importar: o fluxo de trabalho contém nós de gatilho. 
Gatilhos não são permitidos dentro de grupos.", "budgetExceeded": "Orçamento excedido", "dailySpend": "Gasto diário", "perExecutionLimit": "Limite por execução", @@ -1195,6 +1202,14 @@ "control": "Controlo" }, "nodeDefs": { + "trigger/directory": { + "label": "Gatilho de Diretório", + "hint": "Escanear pasta local — workflow executa uma vez por arquivo" + }, + "trigger/http": { + "label": "Gatilho HTTP", + "hint": "Expor este workflow como HTTP API — definir campos de entrada" + }, "ai-task/run": { "label": "WaveSpeed API", "hint": "Executar qualquer modelo de IA — imagem, vídeo, áudio e mais", @@ -1281,6 +1296,10 @@ } } }, + "output/http-response": { + "label": "Resposta HTTP", + "hint": "Defina o que este fluxo de trabalho retorna aos chamadores HTTP" + }, "free-tool/image-enhancer": { "label": "Melhorador de imagem", "hint": "Ampliar e melhorar imagens (2×–4×) gratuitamente" @@ -1349,6 +1368,10 @@ "processing/select": { "label": "Selecionar", "hint": "Escolher um item de um array por índice" + }, + "control/iterator": { + "label": "Grupo", + "hint": "Agrupar nós em um sub-workflow para organização" } }, "modelSelector": { @@ -1463,6 +1486,18 @@ "title": "Nó Concat — Mesclar múltiplas saídas", "desc": "Precisa passar múltiplas imagens para uma entrada \"images\"? Use o nó Concat:\n\n1. Conecte a saída image de cada nó anterior ao value1, value2, value3, etc. do Concat\n2. Conecte a saída output do Concat (agora um array) ao parâmetro images do nó seguinte\n\nExemplo:\n[Upload A] → image → Concat → output (array) → [Tarefa AI].images\n[Upload B] → image ↗\n\nFunciona para qualquer tipo de saída — imagens, vídeos, texto — sempre que precisar combinar múltiplos valores individuais em uma entrada de array." 
}, + "directoryTrigger": { + "title": "Gatilho de Diretório — Processamento em lote de arquivos locais", + "desc": "Escaneie automaticamente uma pasta local e execute o workflow uma vez por arquivo:\n\n• Escolha um diretório e tipo de arquivo (Imagens, Vídeos, Áudio ou Todos)\n• O gatilho encontra todos os arquivos correspondentes e os envia um a um para o pipeline\n• Ótimo para processamento em lote — ex. melhorar cada foto de uma pasta, converter todos os vídeos, etc.\n\nCada execução recebe um único arquivo, os nós seguintes funcionam exatamente como com upload manual." + }, + "httpTrigger": { + "title": "Gatilho HTTP — Executar workflows via API", + "desc": "Transforme qualquer workflow em um endpoint HTTP API:\n\n• Defina campos de saída (ex. image, prompt) — cada um se torna uma porta de saída no canvas\n• Quando uma requisição POST chega, os campos do JSON body são extraídos e passados aos nós seguintes\n• Combine com o nó HTTP Response para retornar resultados ao chamador\n\nIsso permite integrar workflows em apps externos, scripts ou ferramentas de automação enviando uma requisição HTTP." + }, + "group": { + "title": "Nó Grupo — Organizar sub-workflows", + "desc": "Agrupe múltiplos nós em um contêiner recolhível:\n\n• Arraste nós para um Grupo para encapsular um sub-workflow\n• Exponha entradas/saídas selecionadas na superfície do Grupo para que nós externos possam se conectar\n• Clique em \"Editar subgrafo\" para entrar e editar os nós internos\n• Importe um workflow existente para um Grupo para reutilizá-lo como bloco\n\nGrupos mantêm workflows complexos limpos e modulares — pense neles como funções que você pode conectar entre si." 
+ }, "canvas": { "title": "Interacoes do canvas", "desc": "O canvas e seu espaco de trabalho:\n• Arraste nos para posiciona-los\n• Arraste de portas de saida para portas de entrada para criar conexoes\n• Clique em um no para seleciona-lo — os parametros se expandem dentro do no para edicao\n• Clique com o botao direito para menu de contexto (copiar, colar, excluir)\n• Role para zoom e arraste o fundo para mover\n• Atalhos: Ctrl+Z desfazer, Ctrl+C/V copiar-colar, Delete para remover" diff --git a/src/i18n/locales/ru.json b/src/i18n/locales/ru.json index 91285582..1336da3f 100644 --- a/src/i18n/locales/ru.json +++ b/src/i18n/locales/ru.json @@ -1137,6 +1137,13 @@ "modelIdLabel": "ID модели", "selectNode": "Выберите узел для настройки", "noExecutions": "Пока нет выполнений", + "selectAsOutput": "Использовать как выход", + "selectedAsOutput": "Выбрано", + "importWorkflow": "Импорт рабочего процесса", + "importWorkflowAsSubgraph": "Импорт рабочего процесса как подграфа", + "noWorkflowsToImport": "Нет других доступных рабочих процессов", + "currentWorkflow": "текущий", + "importContainsTrigger": "Невозможно импортировать: рабочий процесс содержит узлы-триггеры. 
Триггеры не допускаются внутри групп.", "budgetExceeded": "Превышен бюджет", "dailySpend": "Ежедневные расходы", "perExecutionLimit": "Лимит на выполнение", @@ -1195,6 +1202,14 @@ "control": "Управление" }, "nodeDefs": { + "trigger/directory": { + "label": "Триггер каталога", + "hint": "Сканировать локальную папку — рабочий процесс запускается один раз для каждого файла" + }, + "trigger/http": { + "label": "HTTP-триггер", + "hint": "Открыть рабочий процесс как HTTP API — определите поля ввода для вызывающих" + }, "ai-task/run": { "label": "WaveSpeed API", "hint": "Запуск любой ИИ-модели — изображения, видео, аудио и другое", @@ -1281,6 +1296,10 @@ } } }, + "output/http-response": { + "label": "HTTP-ответ", + "hint": "Определите, что рабочий процесс возвращает HTTP-вызывающим" + }, "free-tool/image-enhancer": { "label": "Улучшение изображения", "hint": "Увеличение и улучшение изображений (2×–4×) бесплатно" @@ -1349,6 +1368,10 @@ "processing/select": { "label": "Выбрать", "hint": "Выбрать один элемент из массива по индексу" + }, + "control/iterator": { + "label": "Группа", + "hint": "Группировка узлов в подпроцесс для организации" } }, "modelSelector": { @@ -1463,6 +1486,18 @@ "title": "Узел Concat — Объединение нескольких выходов", "desc": "Нужно передать несколько изображений во вход \"images\"? Используйте узел Concat:\n\n1. Подключите выход image каждого предыдущего узла к value1, value2, value3 и т.д. узла Concat\n2. Подключите выход output Concat (теперь массив) к параметру images следующего узла\n\nПример:\n[Загрузка A] → image → Concat → output (массив) → [AI задача].images\n[Загрузка B] → image ↗\n\nРаботает для любого типа выхода — изображения, видео, текст — когда нужно объединить несколько отдельных значений в один массив." 
}, + "directoryTrigger": { + "title": "Триггер каталога — Пакетная обработка локальных файлов", + "desc": "Автоматическое сканирование локальной папки и запуск рабочего процесса один раз для каждого файла:\n\n• Выберите каталог и тип файлов (Изображения, Видео, Аудио или Все)\n• Триггер находит все подходящие файлы и передаёт их по одному в конвейер\n• Отлично подходит для пакетной обработки — например, улучшение каждого фото в папке, конвертация всех видео и т.д.\n\nКаждое выполнение получает один файл, последующие узлы работают так же, как при ручной загрузке." + }, + "httpTrigger": { + "title": "HTTP-триггер — Запуск рабочих процессов через API", + "desc": "Превратите любой рабочий процесс в HTTP API эндпоинт:\n\n• Определите поля вывода (например, image, prompt) — каждое становится выходным портом на холсте\n• При поступлении POST-запроса поля JSON body извлекаются и передаются последующим узлам\n• Используйте с узлом HTTP Response для возврата результатов вызывающей стороне\n\nЭто позволяет интегрировать рабочие процессы во внешние приложения, скрипты или инструменты автоматизации, отправляя HTTP-запрос." + }, + "group": { + "title": "Узел Группа — Организация подпроцессов", + "desc": "Объедините несколько узлов в сворачиваемый контейнер:\n\n• Перетащите узлы в Группу для инкапсуляции подпроцесса\n• Откройте выбранные входы/выходы на поверхности Группы, чтобы внешние узлы могли подключаться\n• Нажмите «Редактировать подграф» для входа и редактирования внутренних узлов\n• Импортируйте существующий рабочий процесс в Группу для повторного использования\n\nГруппы поддерживают сложные рабочие процессы чистыми и модульными — думайте о них как о функциях, которые можно соединять друг с другом." 
+ }, "canvas": { "title": "Взаимодействие с холстом", "desc": "Холст — ваше рабочее пространство:\n• Перетаскивайте узлы, чтобы размещать их\n• Тяните от выходных портов к входным, чтобы создать связи\n• Нажмите на узел, чтобы выбрать его — параметры раскрываются внутри узла для редактирования\n• Щелкните правой кнопкой для контекстного меню (копировать, вставить, удалить)\n• Прокрутка — масштаб, перетаскивание фона — панорамирование\n• Горячие клавиши: Ctrl+Z отмена, Ctrl+C/V копировать-вставить, Delete удалить" diff --git a/src/i18n/locales/th.json b/src/i18n/locales/th.json index 03300ce1..70febe03 100644 --- a/src/i18n/locales/th.json +++ b/src/i18n/locales/th.json @@ -1137,6 +1137,13 @@ "modelIdLabel": "รหัสโมเดล", "selectNode": "เลือกโหนดเพื่อกำหนดค่า", "noExecutions": "ยังไม่มีการดำเนินการ", + "selectAsOutput": "ใช้เป็นเอาต์พุต", + "selectedAsOutput": "เลือกแล้ว", + "importWorkflow": "นำเข้าเวิร์กโฟลว์", + "importWorkflowAsSubgraph": "นำเข้าเวิร์กโฟลว์เป็นซับกราฟ", + "noWorkflowsToImport": "ไม่มีเวิร์กโฟลว์อื่นที่พร้อมใช้งาน", + "currentWorkflow": "ปัจจุบัน", + "importContainsTrigger": "ไม่สามารถนำเข้าได้: เวิร์กโฟลว์มีโหนดทริกเกอร์ ไม่อนุญาตให้ใช้ทริกเกอร์ภายในกลุ่ม", "budgetExceeded": "เกินงบประมาณ", "dailySpend": "การใช้จ่ายรายวัน", "perExecutionLimit": "ขีดจำกัดต่อการดำเนินการ", @@ -1195,6 +1202,14 @@ "control": "ควบคุม" }, "nodeDefs": { + "trigger/directory": { + "label": "ทริกเกอร์ไดเรกทอรี", + "hint": "สแกนโฟลเดอร์ในเครื่อง — เวิร์กโฟลว์ทำงานหนึ่งครั้งต่อไฟล์" + }, + "trigger/http": { + "label": "ทริกเกอร์ HTTP", + "hint": "เปิดเวิร์กโฟลว์เป็น HTTP API — กำหนดฟิลด์อินพุตที่ผู้เรียกต้องระบุ" + }, "ai-task/run": { "label": "WaveSpeed API", "hint": "เรียกใช้โมเดล AI ใดก็ได้ — รูปภาพ วิดีโอ เสียง และอื่นๆ", @@ -1281,6 +1296,10 @@ } } }, + "output/http-response": { + "label": "การตอบกลับ HTTP", + "hint": "กำหนดสิ่งที่เวิร์กโฟลว์ส่งกลับไปยังผู้เรียก HTTP" + }, "free-tool/image-enhancer": { "label": "ปรับปรุงรูปภาพ", "hint": "ขยายและเพิ่มความคมชัดของรูปภาพ 
(2×–4×) ฟรี" @@ -1349,6 +1368,10 @@ "processing/select": { "label": "เลือก", "hint": "เลือกรายการหนึ่งจากอาร์เรย์ตามดัชนี" + }, + "control/iterator": { + "label": "กลุ่ม", + "hint": "จัดกลุ่มโหนดเป็นเวิร์กโฟลว์ย่อยเพื่อการจัดระเบียบ" } }, "modelSelector": { @@ -1463,6 +1486,18 @@ "title": "โหนด Concat — รวมเอาต์พุตหลายรายการ", "desc": "ต้องการส่งหลายภาพเข้าอินพุต \"images\"? ใช้โหนด Concat:\n\n1. เชื่อมต่อเอาต์พุต image ของแต่ละโหนดต้นทางไปยัง value1, value2, value3 ฯลฯ ของ Concat\n2. เชื่อมต่อเอาต์พุต output ของ Concat (ตอนนี้เป็นอาร์เรย์) ไปยังพารามิเตอร์ images ของโหนดปลายทาง\n\nตัวอย่าง:\n[อัปโหลด A] → image → Concat → output (อาร์เรย์) → [AI Task].images\n[อัปโหลด B] → image ↗\n\nใช้ได้กับเอาต์พุตทุกประเภท — ภาพ วิดีโอ ข้อความ — เมื่อใดก็ตามที่คุณต้องการรวมหลายค่าเดี่ยวเป็นอินพุตอาร์เรย์เดียว" }, + "directoryTrigger": { + "title": "ทริกเกอร์ไดเรกทอรี — ประมวลผลไฟล์ในเครื่องเป็นชุด", + "desc": "สแกนโฟลเดอร์ในเครื่องโดยอัตโนมัติและรันเวิร์กโฟลว์หนึ่งครั้งต่อไฟล์:\n\n• เลือกไดเรกทอรีและประเภทไฟล์ (ภาพ, วิดีโอ, เสียง หรือทั้งหมด)\n• ทริกเกอร์จะค้นหาไฟล์ที่ตรงกันทั้งหมดและส่งทีละไฟล์เข้าไปในไปป์ไลน์\n• เหมาะสำหรับการประมวลผลเป็นชุด — เช่น ปรับปรุงทุกรูปในโฟลเดอร์ แปลงวิดีโอทั้งหมด ฯลฯ\n\nแต่ละการรันจะรับไฟล์เดียว โหนดปลายทางทำงานเหมือนกับการอัปโหลดด้วยตนเอง" + }, + "httpTrigger": { + "title": "ทริกเกอร์ HTTP — รันเวิร์กโฟลว์ผ่าน API", + "desc": "เปลี่ยนเวิร์กโฟลว์ใดก็ได้เป็น HTTP API endpoint:\n\n• กำหนดฟิลด์เอาต์พุต (เช่น image, prompt) — แต่ละฟิลด์จะกลายเป็นพอร์ตเอาต์พุตบนแคนวาส\n• เมื่อคำขอ POST มาถึง ฟิลด์ JSON body จะถูกดึงออกและส่งไปยังโหนดปลายทาง\n• จับคู่กับโหนด HTTP Response เพื่อส่งผลลัพธ์กลับไปยังผู้เรียก\n\nช่วยให้คุณรวมเวิร์กโฟลว์เข้ากับแอปภายนอก สคริปต์ หรือเครื่องมืออัตโนมัติโดยส่งคำขอ HTTP" + }, + "group": { + "title": "โหนดกลุ่ม — จัดระเบียบเวิร์กโฟลว์ย่อย", + "desc": "จัดกลุ่มหลายโหนดเข้าในคอนเทนเนอร์ที่พับได้:\n\n• ลากโหนดเข้ากลุ่มเพื่อห่อหุ้มเวิร์กโฟลว์ย่อย\n• เปิดเผยอินพุต/เอาต์พุตที่เลือกบนพื้นผิวกลุ่มเพื่อให้โหนดภายนอกเชื่อมต่อได้\n• คลิก 
\"แก้ไขซับกราฟ\" เพื่อเข้าไปแก้ไขโหนดภายใน\n• นำเข้าเวิร์กโฟลว์ที่มีอยู่เข้ากลุ่มเพื่อนำกลับมาใช้ใหม่\n\nกลุ่มช่วยให้เวิร์กโฟลว์ที่ซับซ้อนเป็นระเบียบและเป็นโมดูล — คิดว่าเป็นฟังก์ชันที่คุณสามารถเชื่อมต่อเข้าด้วยกัน" + }, "canvas": { "title": "การโต้ตอบบนแคนวาส", "desc": "แคนวาสคือพื้นที่ทำงานของคุณ:\n• ลากโหนดเพื่อวางตำแหน่ง\n• ลากจากพอร์ตเอาต์พุตไปยังพอร์ตอินพุตเพื่อเชื่อมต่อ\n• คลิกโหนดเพื่อเลือก — พารามิเตอร์จะขยายภายในโหนดเพื่อแก้ไข\n• คลิกขวาเพื่อเปิดเมนูบริบท (คัดลอก วาง ลบ)\n• เลื่อนเพื่อซูม ลากพื้นหลังเพื่อแพน\n• คีย์ลัด: Ctrl+Z ย้อนกลับ, Ctrl+C/V คัดลอก-วาง, Delete เพื่อลบ" diff --git a/src/i18n/locales/tr.json b/src/i18n/locales/tr.json index eeaddf71..9f006fd9 100644 --- a/src/i18n/locales/tr.json +++ b/src/i18n/locales/tr.json @@ -1137,6 +1137,13 @@ "modelIdLabel": "Model ID", "selectNode": "Yapılandırmak için düğüm seçin", "noExecutions": "Henüz yürütme yok", + "selectAsOutput": "Çıktı olarak kullan", + "selectedAsOutput": "Seçildi", + "importWorkflow": "İş akışını içe aktar", + "importWorkflowAsSubgraph": "İş akışını alt grafik olarak içe aktar", + "noWorkflowsToImport": "Başka iş akışı mevcut değil", + "currentWorkflow": "mevcut", + "importContainsTrigger": "İçe aktarılamıyor: iş akışı tetikleyici düğümler içeriyor. 
Gruplar içinde tetikleyicilere izin verilmez.", "budgetExceeded": "Bütçe aşıldı", "dailySpend": "Günlük harcama", "perExecutionLimit": "Yürütme başına limit", @@ -1195,6 +1202,14 @@ "control": "Kontrol" }, "nodeDefs": { + "trigger/directory": { + "label": "Dizin Tetikleyici", + "hint": "Yerel klasörü tara — iş akışı her dosya için bir kez çalışır" + }, + "trigger/http": { + "label": "HTTP Tetikleyici", + "hint": "Bu iş akışını HTTP API olarak aç — arayanların sağlayacağı giriş alanlarını tanımlayın" + }, "ai-task/run": { "label": "WaveSpeed API", "hint": "Herhangi bir AI modelini çalıştırın — görsel, video, ses ve daha fazlası", @@ -1281,6 +1296,10 @@ } } }, + "output/http-response": { + "label": "HTTP Yanıtı", + "hint": "Bu iş akışının HTTP arayanlarına ne döndüreceğini tanımlayın" + }, "free-tool/image-enhancer": { "label": "Görsel geliştirici", "hint": "Görselleri ücretsiz olarak büyütün ve netleştirin (2×–4×)" @@ -1349,6 +1368,10 @@ "processing/select": { "label": "Seç", "hint": "Bir diziden indekse göre bir öğe seçin" + }, + "control/iterator": { + "label": "Grup", + "hint": "Düğümleri organizasyon için bir alt iş akışında grupla" } }, "modelSelector": { @@ -1463,6 +1486,18 @@ "title": "Concat Dugumu — Birden Fazla Ciktiyi Birlestir", "desc": "Birden fazla gorseli \"images\" girisine aktarmaniz mi gerekiyor? Concat dugumunu kullanin:\n\n1. Her ust dugumun image ciktisini Concat'in value1, value2, value3 vb. girislerine baglayin\n2. Concat'in output ciktisini (artik bir dizi) alt dugumun images parametresine baglayin\n\nOrnek:\n[Yukleme A] → image → Concat → output (dizi) → [AI Gorevi].images\n[Yukleme B] → image ↗\n\nBu, herhangi bir cikti turu icin calisir — gorsel, video, metin — birden fazla tekil degeri tek bir dizi girisinde birlestirmeniz gerektiginde." 
}, + "directoryTrigger": { + "title": "Dizin Tetikleyicisi — Yerel Dosyalari Toplu Isleme", + "desc": "Yerel bir klasoru otomatik olarak tarayin ve her dosya icin is akisini bir kez calistirin:\n\n• Bir dizin ve dosya turu secin (Gorseller, Videolar, Ses veya Tumu)\n• Tetikleyici eslesen tum dosyalari bulur ve bunlari birer birer boru hattina gonderir\n• Toplu isleme icin idealdir — ornegin bir klasordeki her fotoyu iyilestirme, tum videolari donusturme vb.\n\nHer calistirma tek bir dosya alir, alt dugumler manuel yuklemeyle ayni sekilde calisir." + }, + "httpTrigger": { + "title": "HTTP Tetikleyicisi — API ile Is Akisi Calistirma", + "desc": "Herhangi bir is akisini HTTP API uç noktasina donusturun:\n\n• Cikti alanlari tanimlayin (ornegin image, prompt) — her biri tuvalde bir cikti portu olur\n• POST istegi geldiginde, JSON body alanlari cikarilir ve alt dugumlere iletilir\n• Sonuclari arayana dondurmek icin HTTP Response dugumu ile eslestirin\n\nBu, HTTP istegi gondererek is akislarini harici uygulamalara, betiklere veya otomasyon araclarına entegre etmenizi saglar." + }, + "group": { + "title": "Grup Dugumu — Alt Is Akislarini Duzenleme", + "desc": "Birden fazla dugumu katlanabilir bir kapsayicida gruplayin:\n\n• Dugumleri bir Gruba surukleyerek alt is akisini kapsulleyin\n• Dis dugumlerin baglanabilmesi icin secili giris/cikislari Grup yuzeyinde aciga cikarin\n• \"Alt grafi duzenle\" ye tiklayarak ic dugumleri duzenleyin\n• Mevcut bir is akisini yeniden kullanmak icin bir Gruba aktarin\n\nGruplar karmasik is akislarini temiz ve moduler tutar — birbirine baglayabileceginiz fonksiyonlar gibi dusunun." 
+ }, "canvas": { "title": "Tuval Etkilesimleri", "desc": "Tuval calisma alaninizdir:\n• Dugumleri surukleyip konumlandirin\n• Baglanti olusturmak icin cikis portundan giris portuna surukleyin\n• Bir dugume tiklayarak secin — parametreler dugum icinde genisleyerek duzenleme yapilabilir\n• Baglam menusu icin sag tiklayin (kopyala, yapistir, sil)\n• Zum icin kaydirin, kaydirmak icin arka plani surukleyin\n• Kisayollar: Ctrl+Z geri al, Ctrl+C/V kopyala-yapistir, Delete kaldir" diff --git a/src/i18n/locales/vi.json b/src/i18n/locales/vi.json index f912478b..afa9a2d4 100644 --- a/src/i18n/locales/vi.json +++ b/src/i18n/locales/vi.json @@ -1137,6 +1137,13 @@ "modelIdLabel": "ID mô hình", "selectNode": "Chọn nút để cấu hình", "noExecutions": "Chưa có lần thực thi", + "selectAsOutput": "Dùng làm đầu ra", + "selectedAsOutput": "Đã chọn", + "importWorkflow": "Nhập quy trình", + "importWorkflowAsSubgraph": "Nhập quy trình dưới dạng đồ thị con", + "noWorkflowsToImport": "Không có quy trình nào khác", + "currentWorkflow": "hiện tại", + "importContainsTrigger": "Không thể nhập: quy trình chứa nút kích hoạt. 
Không cho phép kích hoạt bên trong nhóm.", "budgetExceeded": "Vượt ngân sách", "dailySpend": "Chi tiêu hàng ngày", "perExecutionLimit": "Giới hạn mỗi lần thực thi", @@ -1195,6 +1202,14 @@ "control": "Điều khiển" }, "nodeDefs": { + "trigger/directory": { + "label": "Kích hoạt thư mục", + "hint": "Quét thư mục cục bộ — quy trình chạy một lần cho mỗi tệp" + }, + "trigger/http": { + "label": "Kích hoạt HTTP", + "hint": "Mở quy trình làm việc dưới dạng HTTP API — xác định các trường đầu vào mà người gọi cung cấp" + }, "ai-task/run": { "label": "WaveSpeed API", "hint": "Chạy bất kỳ mô hình AI nào — hình ảnh, video, âm thanh và hơn thế", @@ -1281,6 +1296,10 @@ } } }, + "output/http-response": { + "label": "Phản hồi HTTP", + "hint": "Xác định nội dung quy trình trả về cho người gọi HTTP" + }, "free-tool/image-enhancer": { "label": "Tăng cường hình ảnh", "hint": "Phóng to và làm sắc nét hình ảnh (2×–4×) miễn phí" @@ -1349,6 +1368,10 @@ "processing/select": { "label": "Chọn", "hint": "Chọn một mục từ mảng theo chỉ mục" + }, + "control/iterator": { + "label": "Nhóm", + "hint": "Nhóm các node thành workflow con để tổ chức" } }, "modelSelector": { @@ -1463,6 +1486,18 @@ "title": "Node Concat — Gop Nhieu Dau Ra", "desc": "Can truyen nhieu hinh anh vao dau vao \"images\"? Su dung node Concat:\n\n1. Ket noi dau ra image cua moi node phia truoc voi value1, value2, value3, v.v. cua Concat\n2. Ket noi dau ra output cua Concat (bay gio la mang) voi tham so images cua node phia sau\n\nVi du:\n[Upload A] → image → Concat → output (mang) → [AI Task].images\n[Upload B] → image ↗\n\nDieu nay hoat dong voi bat ky loai dau ra nao — hinh anh, video, van ban — bat cu khi nao ban can gop nhieu gia tri don le thanh mot dau vao mang." 
}, + "directoryTrigger": { + "title": "Trigger Thu Muc — Xu Ly Hang Loat File Cuc Bo", + "desc": "Tu dong quet thu muc cuc bo va chay workflow mot lan cho moi file:\n\n• Chon thu muc va loai file (Hinh anh, Video, Am thanh hoac Tat ca)\n• Trigger tim tat ca file phu hop va gui tung file mot vao pipeline\n• Tuyet voi cho xu ly hang loat — vi du nang cap moi anh trong thu muc, chuyen doi tat ca video, v.v.\n\nMoi lan chay nhan mot file duy nhat, cac node phia sau hoat dong giong nhu upload thu cong." + }, + "httpTrigger": { + "title": "Trigger HTTP — Chay Workflow qua API", + "desc": "Bien bat ky workflow nao thanh HTTP API endpoint:\n\n• Dinh nghia cac truong dau ra (vi du: image, prompt) — moi truong tro thanh mot cong dau ra tren canvas\n• Khi yeu cau POST den, cac truong JSON body duoc trich xuat va chuyen den cac node phia sau\n• Ket hop voi node HTTP Response de tra ket qua cho nguoi goi\n\nDieu nay cho phep ban tich hop workflow vao ung dung ben ngoai, script hoac cong cu tu dong hoa bang cach gui yeu cau HTTP." + }, + "group": { + "title": "Node Nhom — To Chuc Workflow Con", + "desc": "Nhom nhieu node vao mot container co the gap lai:\n\n• Keo node vao Nhom de dong goi workflow con\n• Hien thi cac dau vao/dau ra duoc chon tren be mat Nhom de cac node ben ngoai co the ket noi\n• Nhan \"Chinh sua subgraph\" de vao va chinh sua cac node ben trong\n• Nhap workflow co san vao Nhom de tai su dung nhu khoi xay dung\n\nNhom giu cho workflow phuc tap gon gang va module — hay nghi chung nhu cac ham ma ban co the noi voi nhau." 
+ }, "canvas": { "title": "Tuong tac Canvas", "desc": "Canvas la khong gian lam viec cua ban:\n• Keo node de dat vi tri\n• Keo tu cong dau ra sang cong dau vao de tao ket noi\n• Nhan vao node de chon — tham so se mo rong trong node de chinh sua\n• Nhap chuot phai de mo menu ngu canh (sao chep, dan, xoa)\n• Cuon de zoom, keo nen de pan\n• Phim tat: Ctrl+Z hoan tac, Ctrl+C/V sao chep-dan, Delete de xoa" diff --git a/src/i18n/locales/zh-CN.json b/src/i18n/locales/zh-CN.json index d9315d3f..ad6f3026 100644 --- a/src/i18n/locales/zh-CN.json +++ b/src/i18n/locales/zh-CN.json @@ -1147,6 +1147,29 @@ "zoomOut": "缩小", "autoLayout": "整理布局", "freeToolModelHint": "首次运行时将自动下载 AI 模型,请耐心等待", + "triggerHint": "此触发器每次触发时都会重复运行下游工作流", + "dropToAddToGroup": "松开以添加到 Group", + "dropToRemoveFromGroup": "松开以从 Group 移出", + "editSubgraph": "编辑子图", + "mainWorkflow": "主工作流", + "exitSubgraph": "退出子图 (ESC)", + "editingSubgraph": "正在编辑子图", + "childNodesCount": "{{count}} 个子节点", + "groupInput": "组输入", + "groupOutput": "组输出", + "noExposedPorts": "暂无暴露端口", + "doubleClickToRename": "双击重命名", + "clickToSetAlias": "点击设置显示名称", + "setAlias": "设置显示名称...", + "aliasPlaceholder": "主图上的显示名称...", + "mappedAs": "映射为:", + "group": "分组", + "importWorkflow": "导入工作流", + "importWorkflowAsSubgraph": "导入工作流为子图", + "noWorkflowsToImport": "没有其他可导入的工作流", + "currentWorkflow": "当前", + "importContainsTrigger": "无法导入:工作流包含触发器节点,触发器不能在组内运行。", + "nodeCountLabel": "{{count}} 个节点", "more": "更多", "previousImage": "上一张图片", "nextImage": "下一张图片", @@ -1162,6 +1185,9 @@ "open": "打开", "paste": "粘贴", "addNode": "添加节点", + "addDownstreamNode": "添加下游节点", + "triggerLimitTitle": "只允许一个触发器", + "triggerLimitDesc": "每个工作流只能有一个触发器节点,请先删除现有的触发器。", "addNote": "添加备注", "note": "备注", "deleteConnection": "删除连接", @@ -1176,6 +1202,8 @@ "modelIdLabel": "模型 ID", "selectNode": "选择节点进行配置", "noExecutions": "暂无执行记录", + "selectAsOutput": "选为输出", + "selectedAsOutput": "已选为输出", "budgetExceeded": "超出预算", "dailySpend": "今日花费", "perExecutionLimit": "单次执行限额", @@ 
-1211,6 +1239,11 @@ "hideRun": "隐藏本次运行", "output": "输出", "outputLowercase": "输出", + "httpTriggerFields": "API 输入字段", + "httpResponseFields": "API 响应字段", + "addField": "添加", + "noFieldsHint": "暂无字段,点击添加创建。", + "statusCode": "状态码", "expandNode": "展开", "collapseNode": "收起", "collapseAll": "全部收起", @@ -1226,6 +1259,7 @@ "noNodesAvailable": "暂无可用节点", "nodeCategory": { "recent": "最近使用", + "trigger": "触发器", "input": "输入", "ai-task": "AI 任务", "free-tool": "免费工具", @@ -1234,6 +1268,14 @@ "control": "控制" }, "nodeDefs": { + "trigger/directory": { + "label": "目录触发器", + "hint": "扫描本地文件夹,每个文件执行一次工作流" + }, + "trigger/http": { + "label": "HTTP 触发", + "hint": "将工作流发布为 HTTP API,定义调用方需要提供的输入字段" + }, "ai-task/run": { "label": "WaveSpeed API", "hint": "运行平台上的任意 AI 模型——图像、视频、音频等", @@ -1320,6 +1362,10 @@ } } }, + "output/http-response": { + "label": "HTTP 响应", + "hint": "定义工作流通过 HTTP 返回的内容" + }, "free-tool/image-enhancer": { "label": "图片增强", "hint": "免费放大和锐化图片(2×–4×)" @@ -1388,6 +1434,14 @@ "processing/select": { "label": "选择", "hint": "按索引从数组中取出一个元素" + }, + "control/iterator": { + "label": "分组", + "hint": "将节点分组为子工作流,便于组织管理" + }, + "control/group": { + "label": "分组", + "hint": "将节点分组为子工作流,便于组织管理" } }, "modelSelector": { @@ -1472,6 +1526,18 @@ "title": "Concat 节点 — 合并多个输出", "desc": "需要将多张图片传入「images」参数?使用 Concat 节点:\n\n1. 将每个上游节点的 image 输出分别连接到 Concat 的 value1、value2、value3 等输入口\n2. 
将 Concat 的 output(此时为数组)连接到下游节点的 images 参数\n\n示例:\n[上传 A] → image → Concat → output(数组)→ [AI 任务].images\n[上传 B] → image ↗\n\n适用于任何类型的输出——图片、视频、文本——只要你需要将多个单值合并为一个数组输入。" }, + "directoryTrigger": { + "title": "目录触发器 — 批量处理本地文件", + "desc": "自动扫描本地文件夹,对每个文件执行一次工作流:\n\n• 选择目录和文件类型(图片、视频、音频或全部)\n• 触发器会找到所有匹配的文件,逐个送入下游流水线\n• 非常适合批量处理——例如增强文件夹中的每张照片、转换所有视频等\n\n每次执行只接收一个文件,下游节点的工作方式与手动上传完全一致。" + }, + "httpTrigger": { + "title": "HTTP 触发器 — 通过 API 运行工作流", + "desc": "将任意工作流变成 HTTP API 端点:\n\n• 定义输出字段(如 image、prompt)——每个字段会成为画布上的一个输出端口\n• 当 POST 请求到达时,JSON body 中的字段会被提取并传递给下游节点\n• 搭配 HTTP Response 节点可将结果返回给调用方\n\n这样你就可以通过发送 HTTP 请求,将工作流集成到外部应用、脚本或自动化工具中。" + }, + "group": { + "title": "Group 节点 — 组织子工作流", + "desc": "将多个节点归入一个可折叠的容器:\n\n• 将节点拖入 Group 以封装子工作流\n• 在 Group 表面暴露选定的输入/输出,供外部节点连接\n• 点击「编辑子图」进入 Group 内部编辑节点\n• 可导入已有工作流到 Group 中作为构建模块复用\n\nGroup 让复杂工作流保持整洁和模块化——可以把它们看作可以互相连接的函数。" + }, "canvas": { "title": "画布操作", "desc": "画布是你的工作区:\n• 拖拽节点到画布上定位\n• 从输出端口拖拽到输入端口创建连接\n• 点击节点选中——参数会在节点内展开供编辑\n• 右键打开上下文菜单(复制、粘贴、删除)\n• 滚轮缩放,拖拽背景平移\n• 快捷键:Ctrl+Z 撤销、Ctrl+C/V 复制粘贴、Delete 删除" diff --git a/src/i18n/locales/zh-TW.json b/src/i18n/locales/zh-TW.json index 2fbeeffa..00eb78fb 100644 --- a/src/i18n/locales/zh-TW.json +++ b/src/i18n/locales/zh-TW.json @@ -1160,6 +1160,14 @@ "control": "控制" }, "nodeDefs": { + "trigger/directory": { + "label": "目錄觸發器", + "hint": "掃描本機資料夾,每個檔案執行一次工作流" + }, + "trigger/http": { + "label": "HTTP 觸發器", + "hint": "將工作流發布為 HTTP API,定義呼叫方需要提供的輸入欄位" + }, "ai-task/run": { "label": "WaveSpeed API", "hint": "運行平台上的任意 AI 模型——圖片、影片、音訊等", @@ -1246,6 +1254,10 @@ } } }, + "output/http-response": { + "label": "HTTP 回應", + "hint": "定義工作流透過 HTTP 回傳的內容" + }, "free-tool/image-enhancer": { "label": "圖片增強", "hint": "免費放大和銳化圖片(2×–4×)" @@ -1314,6 +1326,10 @@ "processing/select": { "label": "選擇", "hint": "按索引從陣列中取出一個元素" + }, + "control/iterator": { + "label": "群組", + "hint": "將節點分組為子工作流,便於組織管理" } }, "modelSelector": { @@ -1413,6 +1429,13 @@ "exportFailed": "導出失敗", "estimated": "費用", 
"noExecutions": "暫無執行記錄", + "selectAsOutput": "選為輸出", + "selectedAsOutput": "已選為輸出", + "importWorkflow": "匯入工作流程", + "importWorkflowAsSubgraph": "匯入工作流程為子圖", + "noWorkflowsToImport": "沒有其他可匯入的工作流程", + "currentWorkflow": "目前", + "importContainsTrigger": "無法匯入:工作流程包含觸發器節點,觸發器不能在群組內運行。", "budgetExceeded": "超出預算", "dailySpend": "每日支出", "perExecutionLimit": "每次執行限制", @@ -1465,6 +1488,18 @@ "title": "Concat 節點 — 合併多個輸出", "desc": "需要將多張圖片傳入「images」參數?使用 Concat 節點:\n\n1. 將每個上游節點的 image 輸出分別連接到 Concat 的 value1、value2、value3 等輸入口\n2. 將 Concat 的 output(此時為陣列)連接到下游節點的 images 參數\n\n範例:\n[上傳 A] → image → Concat → output(陣列)→ [AI 任務].images\n[上傳 B] → image ↗\n\n適用於任何類型的輸出——圖片、影片、文字——只要你需要將多個單值合併為一個陣列輸入。" }, + "directoryTrigger": { + "title": "目錄觸發器 — 批次處理本機檔案", + "desc": "自動掃描本機資料夾,對每個檔案執行一次工作流:\n\n• 選擇目錄和檔案類型(圖片、影片、音訊或全部)\n• 觸發器會找到所有符合的檔案,逐一送入下游流水線\n• 非常適合批次處理——例如增強資料夾中的每張照片、轉換所有影片等\n\n每次執行只接收一個檔案,下游節點的運作方式與手動上傳完全一致。" + }, + "httpTrigger": { + "title": "HTTP 觸發器 — 透過 API 執行工作流", + "desc": "將任意工作流變成 HTTP API 端點:\n\n• 定義輸出欄位(如 image、prompt)——每個欄位會成為畫布上的一個輸出埠\n• 當 POST 請求到達時,JSON body 中的欄位會被擷取並傳遞給下游節點\n• 搭配 HTTP Response 節點可將結果回傳給呼叫方\n\n這樣你就可以透過發送 HTTP 請求,將工作流整合到外部應用、腳本或自動化工具中。" + }, + "group": { + "title": "Group 節點 — 組織子工作流", + "desc": "將多個節點歸入一個可摺疊的容器:\n\n• 將節點拖入 Group 以封裝子工作流\n• 在 Group 表面暴露選定的輸入/輸出,供外部節點連接\n• 點擊「編輯子圖」進入 Group 內部編輯節點\n• 可匯入已有工作流到 Group 中作為建構模組複用\n\nGroup 讓複雜工作流保持整潔和模組化——可以把它們看作可以互相連接的函式。" + }, "canvas": { "title": "畫布操作", "desc": "畫布是你的工作區:\n• 拖曳節點到畫布上定位\n• 從輸出埠拖曳到輸入埠建立連線\n• 點擊節點選取——參數會在節點內展開供編輯\n• 右鍵開啟內容選單(複製、貼上、刪除)\n• 滾輪縮放,拖曳背景平移\n• 快捷鍵:Ctrl+Z 復原、Ctrl+C/V 複製貼上、Delete 刪除" diff --git a/src/index.css b/src/index.css index 172fb247..1d58f1c4 100644 --- a/src/index.css +++ b/src/index.css @@ -423,6 +423,20 @@ animation: pulse-subtle 1.8s ease-in-out infinite; } +/* Group drop-target breathe animation */ +@keyframes group-breathe { + 0%, + 100% { + transform: scale(1); + } + 50% { + transform: scale(1.004); + } +} +.animate-group-breathe { + animation: 
group-breathe 0.8s ease-in-out infinite; +} + /* Carousel slide animations for results panel */ @keyframes carousel-slide-left { from { diff --git a/src/pages/SmartPlaygroundPage.tsx b/src/pages/SmartPlaygroundPage.tsx index 69f8b2f2..6ac92b2d 100644 --- a/src/pages/SmartPlaygroundPage.tsx +++ b/src/pages/SmartPlaygroundPage.tsx @@ -825,7 +825,6 @@ export function SmartPlaygroundPage() { error={error} isLoading={isRunning} modelId={resolvedVariantId} - gridLayout={outputs.length > 1} />
diff --git a/src/stores/modelsStore.ts b/src/stores/modelsStore.ts index 6e844dd3..67e0971a 100644 --- a/src/stores/modelsStore.ts +++ b/src/stores/modelsStore.ts @@ -53,6 +53,7 @@ interface ModelsState { favorites: Set; showFavoritesOnly: boolean; hasFetched: boolean; + typeFiltersOpen: boolean; fetchModels: (force?: boolean) => Promise; setSearchQuery: (query: string) => void; setSelectedType: (type: string | null) => void; @@ -62,6 +63,7 @@ interface ModelsState { toggleFavorite: (modelId: string) => void; isFavorite: (modelId: string) => boolean; setShowFavoritesOnly: (show: boolean) => void; + setTypeFiltersOpen: (open: boolean) => void; getFilteredModels: () => Model[]; getModelById: (modelId: string) => Model | undefined; } @@ -77,6 +79,7 @@ export const useModelsStore = create((set, get) => ({ favorites: loadFavorites(), showFavoritesOnly: false, hasFetched: false, + typeFiltersOpen: true, fetchModels: async (force = false) => { if (get().hasFetched && !force) return; @@ -140,6 +143,10 @@ export const useModelsStore = create((set, get) => ({ set({ showFavoritesOnly: show }); }, + setTypeFiltersOpen: (open: boolean) => { + set({ typeFiltersOpen: open }); + }, + getFilteredModels: () => { const { models, diff --git a/src/types/model.ts b/src/types/model.ts index 396646a5..ca1b195f 100644 --- a/src/types/model.ts +++ b/src/types/model.ts @@ -18,6 +18,9 @@ export interface SchemaProperty { maxItems?: number; properties?: Record; required?: string[]; + enum?: string[]; + "x-enum"?: string[]; + "x-order-properties"?: string[]; }; minItems?: number; maxItems?: number; @@ -28,11 +31,18 @@ export interface SchemaProperty { }>; // Extended UI hints step?: number; - "x-ui-component"?: "slider" | "uploader" | "loras" | "select"; + "x-ui-component"?: + | "slider" + | "uploader" + | "uploaders" + | "loras" + | "select" + | "array"; "x-accept"?: string; "x-placeholder"?: string; "x-hidden"?: boolean; nullable?: boolean; + "x-enum"?: string[]; } export interface Model { 
diff --git a/src/workflow/WorkflowPage.tsx b/src/workflow/WorkflowPage.tsx index 05699d98..b35763e8 100644 --- a/src/workflow/WorkflowPage.tsx +++ b/src/workflow/WorkflowPage.tsx @@ -223,6 +223,8 @@ export function WorkflowPage() { const { cancelAll, activeExecutions } = useExecutionStore(); const initListeners = useExecutionStore((s) => s.initListeners); const wasRunning = useExecutionStore((s) => s._wasRunning); + const lastRunType = useExecutionStore((s) => s._lastRunType); + const lastRunNodeLabel = useExecutionStore((s) => s._lastRunNodeLabel); const nodeStatuses = useExecutionStore((s) => s.nodeStatuses); const isRunning = activeExecutions.size > 0; const [lastSavedAt, setLastSavedAt] = useState(null); @@ -281,7 +283,9 @@ export function WorkflowPage() { ? blobMediaType : previewTypeBase; const previewIsImage = previewType === "image"; - const canNavigatePreview = previewIsImage && previewItems.length > 1; + const previewIsVideo = previewType === "video"; + const canNavigatePreview = + (previewIsImage || previewIsVideo) && previewItems.length > 1; useEffect(() => { if (!previewSrc || !isActive) return; @@ -381,6 +385,8 @@ export function WorkflowPage() { >["edges"], isDirty: target.isDirty, }); + // Exit subgraph editing when switching tabs + useUIStore.getState().exitGroupEdit(); setActiveTabId(tabId); }, [activeTabId, tabs, saveCurrentTabSnapshot], @@ -420,6 +426,8 @@ export function WorkflowPage() { edges, isDirty: false, }); + // Exit subgraph editing when creating a new tab + useUIStore.getState().exitGroupEdit(); setActiveTabId(newTabId); // Auto-scroll to show the newly created tab requestAnimationFrame(() => { @@ -646,6 +654,8 @@ export function WorkflowPage() { edges, isDirty: false, }); + // Exit subgraph editing when closing the last tab + useUIStore.getState().exitGroupEdit(); setTabs([ { tabId: newTabId, @@ -675,6 +685,8 @@ export function WorkflowPage() { >["edges"], isDirty: target.isDirty, }); + // Exit subgraph editing when closing the active 
tab + useUIStore.getState().exitGroupEdit(); setActiveTabId(target.tabId); } }, @@ -735,6 +747,8 @@ export function WorkflowPage() { >["edges"], isDirty: target.isDirty, }); + // Exit subgraph editing when closing multiple tabs including the active one + useUIStore.getState().exitGroupEdit(); setActiveTabId(target.tabId); } }, @@ -1047,16 +1061,33 @@ export function WorkflowPage() { useEffect(() => { if (prevWasRunning.current && !wasRunning && !isRunning) { const hasError = Object.values(nodeStatuses).some((s) => s === "error"); + const nodeName = + lastRunType === "single" && lastRunNodeLabel ? lastRunNodeLabel : null; setExecToast({ type: hasError ? "error" : "success", msg: hasError - ? "Workflow completed with errors" - : "All nodes executed successfully", + ? nodeName + ? `${nodeName} executed with errors` + : "Workflow completed with errors" + : nodeName + ? `${nodeName} executed successfully` + : "All nodes executed successfully", }); setTimeout(() => setExecToast(null), 4000); } prevWasRunning.current = wasRunning; - }, [wasRunning, isRunning, nodeStatuses]); + }, [wasRunning, isRunning, nodeStatuses, lastRunType, lastRunNodeLabel]); + + // Listen for workflow:toast events dispatched from the store (e.g. cycle detection) + useEffect(() => { + const handler = (e: Event) => { + const { type, msg } = (e as CustomEvent).detail; + setExecToast({ type, msg }); + setTimeout(() => setExecToast(null), 4000); + }; + window.addEventListener("workflow:toast", handler); + return () => window.removeEventListener("workflow:toast", handler); + }, []); // Model loading state const [modelSyncStatus, setModelSyncStatus] = @@ -1246,6 +1277,7 @@ export function WorkflowPage() { const runAllInBrowser = useExecutionStore.getState().runAllInBrowser; const browserNodes = latestNodes.map((n) => ({ id: n.id, + parentNode: n.parentNode, data: { nodeType: n.data?.nodeType ?? "", params: { @@ -1802,7 +1834,7 @@ export function WorkflowPage() { {nodes.length === 0 ? 
t("workflow.addNodesToRun", "Add nodes to run") - : t("workflow.runWorkflow", "Run")} + : t("workflow.runWorkflowHint", "Run Workflow")} {/* Run count */} @@ -1827,7 +1859,7 @@ export function WorkflowPage() { - {t("workflow.runCount", "Run count")} + {t("workflow.runCount", "Run Count")} @@ -1839,7 +1871,7 @@ export function WorkflowPage() { className="h-7 w-7 rounded-lg flex items-center justify-center bg-red-900/60 text-red-300 hover:bg-red-800/70 transition-colors" onClick={() => { runCancelRef.current = true; - if (workflowId) cancelAll(workflowId); + cancelAll(workflowId || "browser"); }} > ) : previewType === "video" ? ( -