diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..9e595c9 --- /dev/null +++ b/.env.example @@ -0,0 +1,42 @@ +# Copy this file to .env and fill in real values. + +# Backend / LiteLLM +ROBOT_LITELLM_API_KEY= +GITLAB_CLIENT_ID= +ROBOT_LITELLM_BASE_URL= +GITLAB_BASE_URL= +GITLAB_REDIRECT_URI= +BILLING_GATEWAY_URL_TEMPLATE= +TASK_SHARE_BASE_URL= + +# Database +LINKWORK_DB_URL= + +# Agent runtime config.json placeholders +OPENAI_API_KEY= +LITELLM_API_KEY= +LITELLM_BASE_URL= +ANTHROPIC_AUTH_TOKEN= +ANTHROPIC_API_KEY= +ANTHROPIC_BASE_URL= + +# Image / Registry +SCHEDULE_IMAGE_REGISTRY= +SCHEDULE_AGENT_IMAGE= +RUNNER_IMAGE= +DEFAULT_AGENT_BASE_IMAGE= +COMPOSE_BASE_IMAGE= +IMAGE_REGISTRY= + +# Runtime gateway routes +API_BASE_URL= +WS_BASE_URL= +LLM_GATEWAY_URL= +LINKWORK_AGENT_MCP_GATEWAY_AGENT_BASE_URL= +LINKWORK_AGENT_MCP_GATEWAY_PROXY_BASE_URL= + +# Other endpoints +MILVUS_URI= + +# Optional local Maven/GitHub packages +GITHUB_TOKEN= diff --git a/.gitignore b/.gitignore index 140d456..045e358 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,12 @@ # OS .DS_Store Thumbs.db +/db/migration/ + +# Local secrets +.env +.env.* +!.env.example +/back/target +/target + diff --git a/back/pom.xml b/back/pom.xml new file mode 100644 index 0000000..52af13d --- /dev/null +++ b/back/pom.xml @@ -0,0 +1,239 @@ + + + 4.0.0 + + com.linkwork + linkwork-backend-service + 1.0.0-SNAPSHOT + jar + + LinkWork Backend Service + AI 员工执行环境 - 后端 Web 服务 + + + org.springframework.boot + spring-boot-starter-parent + 3.2.5 + + + + + 21 + 0.1.0-SNAPSHOT + + + + + + org.springframework.boot + spring-boot-starter-web + + + + + org.springframework.boot + spring-boot-starter-websocket + + + + + com.fasterxml.jackson.core + jackson-databind + + + com.fasterxml.jackson.datatype + jackson-datatype-jsr310 + + + + + org.projectlombok + lombok + 1.18.36 + true + + + + + org.springframework.boot + spring-boot-starter-test + test + + + + + org.springframework.boot + 
spring-boot-starter-data-redis + + + + + com.mysql + mysql-connector-j + runtime + + + + + com.baomidou + mybatis-plus-spring-boot3-starter + 3.5.5 + + + + + org.springframework.boot + spring-boot-starter-validation + + + + + io.jsonwebtoken + jjwt-api + 0.12.5 + + + io.jsonwebtoken + jjwt-impl + 0.12.5 + runtime + + + io.jsonwebtoken + jjwt-jackson + 0.12.5 + runtime + + + + + org.springframework.security + spring-security-crypto + + + + + io.fabric8 + kubernetes-client + 6.10.0 + + + + + io.linkwork + linkwork-k8s-starter + ${linkwork.server.version} + + + io.linkwork + linkwork-storage-starter + ${linkwork.server.version} + + + io.linkwork + linkwork-skill-starter + ${linkwork.server.version} + + + io.linkwork + linkwork-mcp-starter + ${linkwork.server.version} + + + + + com.github.docker-java + docker-java-core + 3.3.4 + + + + com.github.docker-java + docker-java-transport-zerodep + 3.3.4 + + + + + com.google.guava + guava + 32.1.3-jre + + + + + com.fasterxml.jackson.dataformat + jackson-dataformat-yaml + + + + + io.milvus + milvus-sdk-java + 2.5.15 + + + + + org.apache.tika + tika-core + 3.2.3 + + + org.apache.tika + tika-parsers-standard-package + 3.2.3 + + + org.apache.commons + commons-compress + 1.27.1 + + + + commons-io + commons-io + 2.16.1 + + + org.apache.commons + commons-lang3 + 3.16.0 + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.13.0 + + 21 + 21 + + + org.projectlombok + lombok + 1.18.36 + + + + + + org.springframework.boot + spring-boot-maven-plugin + + + + org.projectlombok + lombok + + + + + + + diff --git a/back/src/main/java/com/linkwork/LinkWorkApplication.java b/back/src/main/java/com/linkwork/LinkWorkApplication.java new file mode 100644 index 0000000..19526b2 --- /dev/null +++ b/back/src/main/java/com/linkwork/LinkWorkApplication.java @@ -0,0 +1,18 @@ +package com.linkwork; + +import org.mybatis.spring.annotation.MapperScan; +import org.springframework.boot.SpringApplication; +import 
org.springframework.boot.autoconfigure.SpringBootApplication; +import org.springframework.scheduling.annotation.EnableScheduling; + +import java.util.TimeZone; + +@SpringBootApplication +@MapperScan("com.linkwork.mapper") +@EnableScheduling +public class LinkWorkApplication { + public static void main(String[] args) { + TimeZone.setDefault(TimeZone.getTimeZone("Asia/Shanghai")); + SpringApplication.run(LinkWorkApplication.class, args); + } +} diff --git a/back/src/main/java/com/linkwork/common/ApiResponse.java b/back/src/main/java/com/linkwork/common/ApiResponse.java new file mode 100644 index 0000000..c6ff9aa --- /dev/null +++ b/back/src/main/java/com/linkwork/common/ApiResponse.java @@ -0,0 +1,48 @@ +package com.linkwork.common; + +import lombok.Data; + +import java.time.Instant; +import java.util.UUID; + +/** + * 统一 API 响应结构 + */ +@Data +public class ApiResponse { + + private Integer code; + private String msg; + private T data; + private String traceId; + + private String timestamp; + + private ApiResponse(Integer code, String msg, T data) { + this.code = code; + this.msg = msg; + this.data = data; + this.traceId = UUID.randomUUID().toString(); + this.timestamp = Instant.now().toString(); + } + + public static ApiResponse success(T data) { + return new ApiResponse<>(0, "success", data); + } + + public static ApiResponse success() { + return new ApiResponse<>(0, "success", null); + } + + public static ApiResponse error(Integer code, String msg) { + return new ApiResponse<>(code, msg, null); + } + + public static ApiResponse error(String msg) { + return new ApiResponse<>(50000, msg, null); + } + + public static ApiResponse error(Integer code, String msg, T data) { + return new ApiResponse<>(code, msg, data); + } +} diff --git a/back/src/main/java/com/linkwork/common/ClientIpResolver.java b/back/src/main/java/com/linkwork/common/ClientIpResolver.java new file mode 100644 index 0000000..58774a1 --- /dev/null +++ 
b/back/src/main/java/com/linkwork/common/ClientIpResolver.java @@ -0,0 +1,55 @@ +package com.linkwork.common; + +import org.springframework.util.StringUtils; + +import jakarta.servlet.http.HttpServletRequest; + +/** + * Resolve client IP from proxy headers and servlet request. + */ +public final class ClientIpResolver { + + private static final String[] IP_HEADER_CANDIDATES = { + "X-Forwarded-For", + "X-Real-IP", + "Proxy-Client-IP", + "WL-Proxy-Client-IP", + "HTTP_CLIENT_IP", + "HTTP_X_FORWARDED_FOR" + }; + + private ClientIpResolver() { + } + + public static String resolve(HttpServletRequest request) { + if (request == null) { + return null; + } + + for (String header : IP_HEADER_CANDIDATES) { + String value = normalize(request.getHeader(header)); + if (StringUtils.hasText(value)) { + return value; + } + } + + return normalize(request.getRemoteAddr()); + } + + private static String normalize(String rawIp) { + if (!StringUtils.hasText(rawIp)) { + return null; + } + + String first = rawIp.split(",")[0].trim(); + if (!StringUtils.hasText(first) || "unknown".equalsIgnoreCase(first)) { + return null; + } + + if ("0:0:0:0:0:0:0:1".equals(first) || "::1".equals(first)) { + return "127.0.0.1"; + } + + return first.length() > 64 ? 
first.substring(0, 64) : first; + } +} diff --git a/back/src/main/java/com/linkwork/common/FileConflictException.java b/back/src/main/java/com/linkwork/common/FileConflictException.java new file mode 100644 index 0000000..d2de3e0 --- /dev/null +++ b/back/src/main/java/com/linkwork/common/FileConflictException.java @@ -0,0 +1,37 @@ +package com.linkwork.common; + +import lombok.Getter; + +import java.time.LocalDateTime; +import java.util.HashMap; +import java.util.Map; + +@Getter +public class FileConflictException extends RuntimeException { + + private final String conflictType; + private final Map existingNode; + + public FileConflictException(String message, String fileId, String name, + String entryType, Long fileSize, LocalDateTime updatedAt) { + super(message); + this.conflictType = "NAME_EXISTS"; + this.existingNode = new HashMap<>(); + this.existingNode.put("fileId", fileId); + this.existingNode.put("name", name); + this.existingNode.put("entryType", entryType); + if (fileSize != null) { + this.existingNode.put("fileSize", fileSize); + } + if (updatedAt != null) { + this.existingNode.put("updatedAt", updatedAt.toString()); + } + } + + public Map toResponseData() { + Map data = new HashMap<>(); + data.put("conflictType", conflictType); + data.put("existingNode", existingNode); + return data; + } +} diff --git a/back/src/main/java/com/linkwork/common/ForbiddenOperationException.java b/back/src/main/java/com/linkwork/common/ForbiddenOperationException.java new file mode 100644 index 0000000..4e80dc0 --- /dev/null +++ b/back/src/main/java/com/linkwork/common/ForbiddenOperationException.java @@ -0,0 +1,11 @@ +package com.linkwork.common; + +/** + * 禁止访问异常 + */ +public class ForbiddenOperationException extends RuntimeException { + + public ForbiddenOperationException(String message) { + super(message); + } +} diff --git a/back/src/main/java/com/linkwork/common/GlobalExceptionHandler.java b/back/src/main/java/com/linkwork/common/GlobalExceptionHandler.java new file 
mode 100644 index 0000000..f93165c --- /dev/null +++ b/back/src/main/java/com/linkwork/common/GlobalExceptionHandler.java @@ -0,0 +1,100 @@ +package com.linkwork.common; + +import lombok.extern.slf4j.Slf4j; +import org.springframework.http.HttpStatus; +import org.springframework.http.ResponseEntity; +import org.springframework.validation.FieldError; +import org.springframework.web.method.annotation.MethodArgumentTypeMismatchException; +import org.springframework.web.bind.MethodArgumentNotValidException; +import org.springframework.web.bind.MissingServletRequestParameterException; +import org.springframework.web.bind.annotation.ExceptionHandler; +import org.springframework.web.bind.annotation.ResponseStatus; +import org.springframework.web.bind.annotation.RestControllerAdvice; +import org.springframework.web.servlet.resource.NoResourceFoundException; + +import java.util.Map; +import java.util.stream.Collectors; + +/** + * 全局异常处理器 + */ +@Slf4j +@RestControllerAdvice +public class GlobalExceptionHandler { + + @ExceptionHandler(MethodArgumentNotValidException.class) + @ResponseStatus(HttpStatus.BAD_REQUEST) + public ApiResponse handleValidationException(MethodArgumentNotValidException e) { + String errorMsg = e.getBindingResult().getFieldErrors().stream() + .map(FieldError::getDefaultMessage) + .collect(Collectors.joining(", ")); + log.warn("参数校验失败: {}", errorMsg); + return ApiResponse.error(40000, errorMsg); + } + + @ExceptionHandler(IllegalArgumentException.class) + @ResponseStatus(HttpStatus.BAD_REQUEST) + public ApiResponse handleIllegalArgumentException(IllegalArgumentException e) { + log.warn("参数错误: {}", e.getMessage()); + return ApiResponse.error(40000, e.getMessage()); + } + + @ExceptionHandler(MissingServletRequestParameterException.class) + @ResponseStatus(HttpStatus.BAD_REQUEST) + public ApiResponse handleMissingServletRequestParameterException(MissingServletRequestParameterException e) { + String errorMsg = "缺少必填参数: " + e.getParameterName(); + 
log.warn("参数缺失: {}", errorMsg); + return ApiResponse.error(40000, errorMsg); + } + + @ExceptionHandler(MethodArgumentTypeMismatchException.class) + @ResponseStatus(HttpStatus.BAD_REQUEST) + public ApiResponse handleMethodArgumentTypeMismatchException(MethodArgumentTypeMismatchException e) { + String errorMsg = "参数类型错误: " + e.getName(); + log.warn("参数类型错误: {}", e.getMessage()); + return ApiResponse.error(40000, errorMsg); + } + + @ExceptionHandler(NoResourceFoundException.class) + @ResponseStatus(HttpStatus.NOT_FOUND) + public ApiResponse handleNoResourceFoundException(NoResourceFoundException e) { + log.warn("接口不存在: {}", e.getResourcePath()); + return ApiResponse.error(40400, "接口不存在: " + e.getResourcePath()); + } + + + @ExceptionHandler(ForbiddenOperationException.class) + @ResponseStatus(HttpStatus.FORBIDDEN) + public ApiResponse handleForbiddenOperationException(ForbiddenOperationException e) { + log.warn("禁止访问: {}", e.getMessage()); + return ApiResponse.error(40300, e.getMessage()); + } + + @ExceptionHandler(FileConflictException.class) + public ResponseEntity>> handleFileConflictException(FileConflictException e) { + log.info("文件同名冲突: {}", e.getMessage()); + ApiResponse> response = ApiResponse.error(40901, e.getMessage(), e.toResponseData()); + return ResponseEntity.status(HttpStatus.CONFLICT).body(response); + } + + @ExceptionHandler(ResourceNotFoundException.class) + @ResponseStatus(HttpStatus.NOT_FOUND) + public ApiResponse handleResourceNotFoundException(ResourceNotFoundException e) { + log.warn("资源不存在: {}", e.getMessage()); + return ApiResponse.error(40400, e.getMessage()); + } + + @ExceptionHandler(RuntimeException.class) + @ResponseStatus(HttpStatus.INTERNAL_SERVER_ERROR) + public ApiResponse handleRuntimeException(RuntimeException e) { + log.error("系统异常", e); + return ApiResponse.error(50000, "服务器内部错误"); + } + + @ExceptionHandler(Exception.class) + @ResponseStatus(HttpStatus.INTERNAL_SERVER_ERROR) + public ApiResponse handleException(Exception e) { + 
log.error("未知异常", e); + return ApiResponse.error(50000, "服务器内部错误"); + } +} diff --git a/back/src/main/java/com/linkwork/common/ResourceNotFoundException.java b/back/src/main/java/com/linkwork/common/ResourceNotFoundException.java new file mode 100644 index 0000000..74fcd31 --- /dev/null +++ b/back/src/main/java/com/linkwork/common/ResourceNotFoundException.java @@ -0,0 +1,11 @@ +package com.linkwork.common; + +/** + * 资源不存在异常 + */ +public class ResourceNotFoundException extends RuntimeException { + + public ResourceNotFoundException(String message) { + super(message); + } +} diff --git a/back/src/main/java/com/linkwork/common/SnowflakeIdGenerator.java b/back/src/main/java/com/linkwork/common/SnowflakeIdGenerator.java new file mode 100644 index 0000000..1e376e2 --- /dev/null +++ b/back/src/main/java/com/linkwork/common/SnowflakeIdGenerator.java @@ -0,0 +1,122 @@ +package com.linkwork.common; + +import org.springframework.stereotype.Component; + +import java.net.NetworkInterface; +import java.util.Enumeration; + +/** + * 雪花算法 ID 生成器 + * + * 生成分布式全局唯一 64 位 ID: + * - 1 bit: 符号位(固定 0) + * - 41 bits: 时间戳(毫秒级,可用约 69 年) + * - 10 bits: 机器 ID(支持 1024 个节点) + * - 12 bits: 序列号(每毫秒最多 4096 个 ID) + */ +@Component +public class SnowflakeIdGenerator { + + // 起始时间戳 (2024-01-01 00:00:00 UTC) + private static final long EPOCH = 1704067200000L; + + // 机器 ID 位数 + private static final long WORKER_ID_BITS = 10L; + // 序列号位数 + private static final long SEQUENCE_BITS = 12L; + + // 最大机器 ID + private static final long MAX_WORKER_ID = ~(-1L << WORKER_ID_BITS); + // 最大序列号 + private static final long MAX_SEQUENCE = ~(-1L << SEQUENCE_BITS); + + // 时间戳左移位数 + private static final long TIMESTAMP_SHIFT = WORKER_ID_BITS + SEQUENCE_BITS; + // 机器 ID 左移位数 + private static final long WORKER_ID_SHIFT = SEQUENCE_BITS; + + private final long workerId; + private long sequence = 0L; + private long lastTimestamp = -1L; + + public SnowflakeIdGenerator() { + this.workerId = generateWorkerId(); + } + + /** + * 
生成下一个分布式唯一 ID + */ + public synchronized long nextId() { + long timestamp = System.currentTimeMillis(); + + // 时钟回拨检测 + if (timestamp < lastTimestamp) { + throw new RuntimeException("时钟回拨,拒绝生成 ID: " + (lastTimestamp - timestamp) + " ms"); + } + + if (timestamp == lastTimestamp) { + // 同一毫秒内,序列号递增 + sequence = (sequence + 1) & MAX_SEQUENCE; + if (sequence == 0) { + // 序列号溢出,等待下一毫秒 + timestamp = waitNextMillis(lastTimestamp); + } + } else { + // 新的毫秒,序列号重置 + sequence = 0L; + } + + lastTimestamp = timestamp; + + return ((timestamp - EPOCH) << TIMESTAMP_SHIFT) + | (workerId << WORKER_ID_SHIFT) + | sequence; + } + + /** + * 生成字符串格式的 ID(用于 taskNo) + */ + public String nextIdStr() { + return String.valueOf(nextId()); + } + + /** + * 生成带前缀的任务编号 + */ + public String nextTaskNo() { + return "TSK-" + nextId(); + } + + /** + * 等待下一毫秒 + */ + private long waitNextMillis(long lastTimestamp) { + long timestamp = System.currentTimeMillis(); + while (timestamp <= lastTimestamp) { + timestamp = System.currentTimeMillis(); + } + return timestamp; + } + + /** + * 基于机器 MAC 地址生成 workerId + */ + private long generateWorkerId() { + try { + Enumeration interfaces = NetworkInterface.getNetworkInterfaces(); + while (interfaces.hasMoreElements()) { + NetworkInterface network = interfaces.nextElement(); + byte[] mac = network.getHardwareAddress(); + if (mac != null && mac.length > 0) { + // 取 MAC 地址后两个字节计算 workerId + long id = ((0x000000FF & (long) mac[mac.length - 2]) + | (0x0000FF00 & (((long) mac[mac.length - 1]) << 8))) & MAX_WORKER_ID; + return id; + } + } + } catch (Exception e) { + // 获取失败时使用随机值 + } + return (long) (Math.random() * MAX_WORKER_ID); + } +} diff --git a/back/src/main/java/com/linkwork/config/AsyncConfig.java b/back/src/main/java/com/linkwork/config/AsyncConfig.java new file mode 100644 index 0000000..0a94c5f --- /dev/null +++ b/back/src/main/java/com/linkwork/config/AsyncConfig.java @@ -0,0 +1,36 @@ +package com.linkwork.config; + +import lombok.extern.slf4j.Slf4j; +import 
org.springframework.context.annotation.Configuration; +import org.springframework.scheduling.annotation.AsyncConfigurer; +import org.springframework.scheduling.annotation.EnableAsync; +import org.springframework.scheduling.annotation.EnableScheduling; +import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor; + +import java.util.concurrent.Executor; + +/** + * 异步任务配置 + */ +@Configuration +@EnableAsync +@EnableScheduling +@Slf4j +public class AsyncConfig implements AsyncConfigurer { + + @Override + public Executor getAsyncExecutor() { + ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); + executor.setCorePoolSize(5); + executor.setMaxPoolSize(10); + executor.setQueueCapacity(100); + executor.setThreadNamePrefix("async-build-"); + executor.setWaitForTasksToCompleteOnShutdown(true); + executor.setAwaitTerminationSeconds(60); + executor.initialize(); + + log.info("Async executor initialized: corePoolSize=5, maxPoolSize=10, queueCapacity=100"); + + return executor; + } +} diff --git a/back/src/main/java/com/linkwork/config/BuildQueueConfig.java b/back/src/main/java/com/linkwork/config/BuildQueueConfig.java new file mode 100644 index 0000000..e0308db --- /dev/null +++ b/back/src/main/java/com/linkwork/config/BuildQueueConfig.java @@ -0,0 +1,71 @@ +package com.linkwork.config; + +import lombok.Data; +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.context.annotation.Configuration; + +/** + * 构建队列配置 + */ +@Data +@Configuration +@ConfigurationProperties(prefix = "build-queue") +public class BuildQueueConfig { + + /** + * CPU 使用率阈值 (0.0 ~ 1.0) + * 当系统 CPU 使用率低于此值时,才允许启动新的构建任务 + * 默认 0.7 (70%) + */ + private double cpuThreshold = 0.7; + + /** + * 内存使用率阈值 (0.0 ~ 1.0) + * 当系统内存使用率低于此值时,才允许启动新的构建任务 + * 默认 0.7 (70%) + */ + private double memoryThreshold = 0.7; + + /** + * 硬性并发上限 + * 即使系统资源充足,同时运行的构建任务也不会超过此值 + * 这是一个安全阀,防止资源监控失效时系统过载 + * 默认 3 + */ + private int maxConcurrent = 3; + + /** + * 
队列最大容量 + * 超过此容量时,新任务将被拒绝 + * 默认 50 + */ + private int maxQueueSize = 50; + + /** + * 构建超时时间(秒) + * 单个构建任务的最大执行时间 + * 默认 600 秒 (10 分钟) + */ + private int taskTimeout = 600; + + /** + * 资源检查间隔(毫秒) + * 队列消费线程检查资源的间隔 + * 默认 1000 毫秒 (1 秒) + */ + private long checkInterval = 1000; + + /** + * 残留文件清理阈值(小时) + * 超过此时间的临时构建目录会被清理 + * 默认 1 小时 + */ + private int staleContextHours = 1; + + /** + * 是否启用队列功能 + * 设为 false 时,构建任务将直接异步执行(兼容旧行为) + * 默认 true + */ + private boolean enabled = true; +} diff --git a/back/src/main/java/com/linkwork/config/CronConfig.java b/back/src/main/java/com/linkwork/config/CronConfig.java new file mode 100644 index 0000000..dda43ef --- /dev/null +++ b/back/src/main/java/com/linkwork/config/CronConfig.java @@ -0,0 +1,19 @@ +package com.linkwork.config; + +import lombok.Data; +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.context.annotation.Configuration; + +@Data +@Configuration +@ConfigurationProperties(prefix = "robot.cron") +public class CronConfig { + private boolean enabled = true; + private long scanIntervalMs = 60_000; + private long dispatchLeadMs = 180_000; + private int maxJobsPerUser = 50; + private int maxJobsPerRole = 100; + private int maxRunsPerJob = 100; + private String lockKey = "lock:cron:scanner"; + private int lockTtlSeconds = 55; +} diff --git a/back/src/main/java/com/linkwork/config/DispatchConfig.java b/back/src/main/java/com/linkwork/config/DispatchConfig.java new file mode 100644 index 0000000..82f1e3f --- /dev/null +++ b/back/src/main/java/com/linkwork/config/DispatchConfig.java @@ -0,0 +1,118 @@ +package com.linkwork.config; + +import lombok.Data; +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.context.annotation.Configuration; + +/** + * 任务调度配置(与 momo-worker 对齐) + */ +@Data +@Configuration +@ConfigurationProperties(prefix = "robot.dispatch") +public class DispatchConfig { + + /** 默认 workstationId(仅 roleId 
缺失时兜底) */ + private String workstationId = "test-post-001"; + + /** 日志流 key 前缀 */ + private String logStreamPrefix = "logs"; + + /** 审批 key 前缀 */ + private String approvalKeyPrefix = "approval"; + + // ==================== 派生 Key 方法 ==================== + + /** 任务调度队列 key: workstation:{workstationId}:tasks */ + public String getTaskQueueKey(Long roleId) { + return "workstation:" + resolveWorkstationId(roleId) + ":tasks"; + } + + /** 日志流 key: logs:{workstationId}:{taskId} */ + public String getLogStreamKey(Long roleId, String taskId) { + return logStreamPrefix + ":" + resolveWorkstationId(roleId) + ":" + taskId; + } + + /** 审批请求队列 key: approval:{workstationId} */ + public String getApprovalRequestKey(Long roleId) { + return approvalKeyPrefix + ":" + resolveWorkstationId(roleId); + } + + /** 审批响应 key: approval:{workstationId}:response:{requestId} */ + public String getApprovalResponseKey(Long roleId, String requestId) { + return approvalKeyPrefix + ":" + resolveWorkstationId(roleId) + ":response:" + requestId; + } + + + /** 任务终止控制队列 key: workstation:{workstationId}:control */ + public String getTaskControlQueueKey(Long roleId) { + return "workstation:" + resolveWorkstationId(roleId) + ":control"; + } + + /** approval 请求队列匹配模式(含 response key,调用方需过滤) */ + public String getApprovalRequestKeyPattern() { + return approvalKeyPrefix + ":*"; + } + + /** + * roleId -> workstationId 解析规则: + * 1) roleId 存在时直接使用 roleId(真实岗位链路) + * 2) 否则回退默认配置 + */ + public String resolveWorkstationId(Long roleId) { + if (roleId != null && roleId > 0) { + return String.valueOf(roleId); + } + return workstationId; + } + + // ==================== 兼容旧调用(默认 workstation) ==================== + + /** + * @deprecated use {@link #getTaskQueueKey(Long)}. + */ + @Deprecated + public String getTaskQueueKey() { + return getTaskQueueKey(null); + } + + /** + * @deprecated use {@link #getLogStreamKey(Long, String)}. 
+ */ + @Deprecated + public String getLogStreamKey(String taskId) { + return getLogStreamKey(null, taskId); + } + + /** + * @deprecated use {@link #getApprovalRequestKey(Long)}. + */ + @Deprecated + public String getApprovalRequestKey() { + return getApprovalRequestKey(null); + } + + /** + * @deprecated use {@link #getApprovalResponseKey(Long, String)}. + */ + @Deprecated + public String getApprovalResponseKey(String requestId) { + return getApprovalResponseKey(null, requestId); + } + + /** + * @deprecated use {@link #getTaskControlQueueKey(Long)}. + */ + @Deprecated + public String getTaskControlQueueKey() { + return getTaskControlQueueKey(null); + } + + /** + * @deprecated use {@link #getTaskControlQueueKey(Long)}. + */ + @Deprecated + public String getTaskTerminateQueueKey() { + return getTaskControlQueueKey(null); + } +} diff --git a/back/src/main/java/com/linkwork/config/EnvConfig.java b/back/src/main/java/com/linkwork/config/EnvConfig.java new file mode 100644 index 0000000..32937b4 --- /dev/null +++ b/back/src/main/java/com/linkwork/config/EnvConfig.java @@ -0,0 +1,157 @@ +package com.linkwork.config; + +import com.linkwork.model.dto.ResourceSpec; +import com.linkwork.model.enums.PodMode; +import lombok.Data; +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.context.annotation.Configuration; + +import java.util.HashMap; +import java.util.Map; + +/** + * 系统环境配置(从 YAML 加载) + * + * 设计说明: + * - 所有代码已打入镜像,不需要代码拉取配置 + * - 文件放置配置定义 token 和 ssh-key 的路径和权限 + */ +@Data +@Configuration +@ConfigurationProperties(prefix = "schedule") +public class EnvConfig { + + // 集群配置 + private ClusterConfig cluster = new ClusterConfig(); + + // 镜像配置 + private ImagesConfig images = new ImagesConfig(); + + // Agent 启动脚本配置 + private AgentBootstrapConfig agentBootstrap = new AgentBootstrapConfig(); + + // 文件放置配置 + private FilePlacementConfig filePlacement = new FilePlacementConfig(); + + // 网络配置 + private NetworkConfig network = new 
NetworkConfig(); + + // SSH 配置 + private SshConfig ssh = new SshConfig(); + + // 默认资源配置 + private DefaultResourcesConfig defaultResources = new DefaultResourcesConfig(); + + // NFS 挂载配置(沿用 oss-mount YAML key 以兼容现有 K8s 配置) + private OssMountConfig ossMount = new OssMountConfig(); + + // 模式决策规则 + private PodModeRulesConfig podModeRules = new PodModeRulesConfig(); + + @Data + public static class ClusterConfig { + private String namespace = "ai-worker"; + private String schedulerName = "volcano"; + private String kubeconfigPath; // kubeconfig 文件路径 + } + + @Data + public static class ImagesConfig { + private String registry = ""; + private String agent = "ai-worker/agent-base:v1.0"; // Agent 基础镜像 + private String runner = "ai-worker/runner-base:v1.0"; // Runner 默认镜像(Sidecar 模式) + private Map defaultRunners = new HashMap<>(); // Runner 镜像(代码已内置) + } + + /** + * Agent 启动脚本配置 + * main.py 从链接下载,执行后启动 agent 和 executor 两个进程 + */ + @Data + public static class AgentBootstrapConfig { + private String mainPyUrl; // main.py 下载链接 + } + + /** + * 文件放置配置 + * - token: 仅 executor 用户可访问 + * - ssh-key: agent 和 executor 都可访问 + */ + @Data + public static class FilePlacementConfig { + // token 文件配置(仅 executor 可访问) + private String tokenPath = "/workspace/.credentials/token"; + private String tokenDirMode = "700"; + private String tokenFileMode = "600"; + private String tokenOwner = "executor:executor"; + + // SSH 密钥配置(agent 和 executor 都可访问) + private String sshPath = "/workspace/.ssh"; + private String sshDirMode = "755"; + private String sshKeyMode = "600"; // 私钥 + private String sshPubMode = "644"; // 公钥 + } + + @Data + public static class NetworkConfig { + private String apiBaseUrl; + private String wsBaseUrl; + private String llmGatewayUrl; + private String redisUrl; + } + + @Data + public static class SshConfig { + private Integer port = 2222; + private String keyType = "ed25519"; + } + + @Data + public static class DefaultResourcesConfig { + private ResourceSpec agent = 
ResourceSpec.builder() + .cpuRequest("1").cpuLimit("2") + .memoryRequest("2Gi").memoryLimit("4Gi") + .build(); + private ResourceSpec runner = ResourceSpec.builder() + .cpuRequest("1").cpuLimit("4") + .memoryRequest("2Gi").memoryLimit("8Gi") + .build(); + } + + /** + * OSS 挂载配置 + * 通过 hostPath 将宿主机上的 ossfs 挂载目录映射到容器内 + * + * 节点级挂载 (DaemonSet / ossfs): + * oss://robot-agent-files/system/ → hostPath/system + * oss://robot-agent-files/user-files/ → hostPath/user-files + * oss://robot-agent-files/workstation/ → hostPath/workstation + * + * 容器级挂载 (Pod hostPath → container): + * 1. oss-data: hostPath/system/{wsId} → mountPath (读写,产出物挂载根) + * 2. oss-user-files: hostPath/user-files → /mnt/user-files (读写,个人空间挂载根) + * 3. oss-workstation: hostPath/workstation/{wsId} → /mnt/workstation (读写,岗位空间挂载根) + */ + @Data + public static class OssMountConfig { + /** 是否启用 NFS 挂载 */ + private boolean enabled = false; + /** 宿主机 NFS 挂载根目录 */ + private String hostPath = "/mnt/oss/robot-agent-files"; + /** 容器内主挂载路径(产出物挂载根) */ + private String mountPath = "/data/oss/robot"; + /** 主挂载是否只读 */ + private boolean readOnly = false; + + /** user-files 容器内挂载路径(记忆-个人空间挂载根) */ + private String userFilesMountPath = "/mnt/user-files"; + /** workstation 容器内挂载路径(记忆-岗位空间挂载根) */ + private String workstationMountPath = "/mnt/workstation"; + } + + @Data + public static class PodModeRulesConfig { + private PodMode defaultMode = PodMode.SIDECAR; + private Map overrides = new HashMap<>(); + } +} diff --git a/back/src/main/java/com/linkwork/config/ImageBuildConfig.java b/back/src/main/java/com/linkwork/config/ImageBuildConfig.java new file mode 100644 index 0000000..148f548 --- /dev/null +++ b/back/src/main/java/com/linkwork/config/ImageBuildConfig.java @@ -0,0 +1,142 @@ +package com.linkwork.config; + +import lombok.Data; +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.context.annotation.Configuration; + +/** + * 镜像构建配置 + * + * 设计说明: + * - 仅构建 Agent 镜像,Runner 
由运行时 agent 启动 + */ +@Data +@Configuration +@ConfigurationProperties(prefix = "image-build") +public class ImageBuildConfig { + + /** + * 是否启用镜像构建 + */ + private boolean enabled = false; + + /** + * 是否推送镜像到仓库(K8s 模式) + * 设置为 false 时只构建不推送,适用于测试环境 + */ + private boolean pushEnabled = false; + + /** + * 镜像拉取策略 + * - Always: 总是拉取(默认,需要镜像在仓库中) + * - IfNotPresent: 本地有则不拉取(适合本地构建 + 单节点/共享 Docker) + * - Never: 从不拉取(要求镜像必须已在节点上) + */ + private String imagePullPolicy = "IfNotPresent"; + + /** + * K8s 拉取私有镜像的 Secret 名称 + * 需要在 K8s 中预先创建,或由服务自动创建 + */ + private String imagePullSecret = "robot-registry-secret"; + + /** + * Docker 连接配置 + * 默认使用 unix socket: unix:///var/run/docker.sock + */ + private String dockerHost = "unix:///var/run/docker.sock"; + + /** + * 默认 Agent 基础镜像(K8s 模式构建使用内网 Harbor) + */ + private String defaultAgentBaseImage = "10.30.107.146/robot/rockylinux9-agent@sha256:b49d75f52f6b3c55bbf90427f0df0e97bc8e3f3e03727721cafc2c9d775b8975"; + + /** + * Compose 模式基础镜像(用户本地构建,需要可公开拉取的镜像) + */ + private String composeBaseImage = "rockylinux:9"; + + /** + * 镜像仓库地址 + * K8s 模式下构建的镜像会推送到此仓库 + */ + private String registry = ""; + + /** + * 镜像仓库用户名 + */ + private String registryUsername = ""; + + /** + * 镜像仓库密码 + */ + private String registryPassword = ""; + + /** + * 构建脚本路径 + * 此脚本会在 Dockerfile 中被 COPY 并执行 + */ + private String buildScriptPath = "/opt/scripts/build.sh"; + + /** + * 构建超时时间(秒) + */ + private int buildTimeout = 300; + + /** + * 入口点脚本名称 + */ + private String entrypointScript = "/entrypoint.sh"; + + /** + * 构建上下文临时目录 + */ + private String buildContextDir = "/tmp/docker-build"; + + /** + * 是否启用本地镜像自动同步到 Kind 节点(仅 K8s + 未配置镜像仓库时生效) + */ + private boolean autoLoadToKind = true; + + /** + * 指定 Kind 集群名;为空时自动发现所有 Kind 集群节点 + */ + private String kindClusterName = ""; + + /** + * Kind 节点镜像导入超时时间(秒) + */ + private int kindLoadTimeout = 600; + + /** + * 是否启用本地镜像定期清理 + */ + private boolean localCleanupEnabled = true; + + /** + * 本地构建镜像保留小时数(超过后尝试删除,运行中镜像会跳过) + */ + private 
int localImageRetentionHours = 24; + + /** + * 本地镜像清理 Cron(默认每小时第 40 分钟) + */ + private String localCleanupCron = "0 40 * * * *"; + + /** + * 是否在 Kind 节点执行未使用镜像 prune + */ + private boolean kindPruneEnabled = true; + + /** + * SDK 源码在镜像中的目标路径 + * 从项目内置 build-assets/sdk-source/ 目录拷贝 + */ + private String sdkSourcePath = "/opt/linkwork-agent-build/sdk-source"; + + /** + * zzd 二进制文件在镜像中的目标路径 + * 从项目内置 build-assets/zzd-binaries/ 目录拷贝 + */ + private String zzdBinariesPath = "/opt/linkwork-agent-build/zzd-binaries"; +} diff --git a/back/src/main/java/com/linkwork/config/JacksonConfig.java b/back/src/main/java/com/linkwork/config/JacksonConfig.java new file mode 100644 index 0000000..bed1b91 --- /dev/null +++ b/back/src/main/java/com/linkwork/config/JacksonConfig.java @@ -0,0 +1,40 @@ +package com.linkwork.config; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.SerializationFeature; +import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; +import org.springframework.boot.autoconfigure.jackson.Jackson2ObjectMapperBuilderCustomizer; +import org.springframework.boot.autoconfigure.jackson.JacksonProperties; +import org.springframework.boot.jackson.JsonComponentModule; +import org.springframework.boot.jackson.JsonMixinModule; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.Primary; +import org.springframework.http.converter.json.Jackson2ObjectMapperBuilder; + +@Configuration +public class JacksonConfig { + + @Bean + public Jackson2ObjectMapperBuilderCustomizer jackson2ObjectMapperBuilderCustomizer() { + return builder -> builder + .modulesToInstall(JavaTimeModule.class) + .featuresToDisable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS); + } + + /** + * MCP starter 可能在自动配置阶段提前注册一个裸 ObjectMapper,导致 Web 层缺少 JavaTime 支持。 + * 这里显式提供主 ObjectMapper,确保 MVC 与业务注入都使用支持 LocalDateTime 的配置。 + */ + @Bean + @Primary + public 
ObjectMapper objectMapper(Jackson2ObjectMapperBuilder builder, JacksonProperties properties) { + builder.modules(new JsonComponentModule(), new JsonMixinModule(), new JavaTimeModule()); + ObjectMapper objectMapper = builder.build(); + objectMapper.disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS); + if (properties.getTimeZone() != null) { + objectMapper.setTimeZone(properties.getTimeZone()); + } + return objectMapper; + } +} diff --git a/back/src/main/java/com/linkwork/config/KubernetesConfig.java b/back/src/main/java/com/linkwork/config/KubernetesConfig.java new file mode 100644 index 0000000..19b2b8e --- /dev/null +++ b/back/src/main/java/com/linkwork/config/KubernetesConfig.java @@ -0,0 +1,47 @@ +package com.linkwork.config; + +import io.fabric8.kubernetes.client.Config; +import io.fabric8.kubernetes.client.KubernetesClient; +import io.fabric8.kubernetes.client.KubernetesClientBuilder; +import lombok.extern.slf4j.Slf4j; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; + +/** + * Kubernetes 客户端配置 + */ +@Configuration +@Slf4j +public class KubernetesConfig { + + private final EnvConfig envConfig; + + public KubernetesConfig(EnvConfig envConfig) { + this.envConfig = envConfig; + } + + @Bean + public KubernetesClient kubernetesClient() { + String kubeconfigPath = envConfig.getCluster().getKubeconfigPath(); + + if (kubeconfigPath != null && !kubeconfigPath.isBlank()) { + log.info("Loading kubeconfig from: {}", kubeconfigPath); + try { + String kubeconfigContent = Files.readString(Path.of(kubeconfigPath)); + Config config = Config.fromKubeconfig(kubeconfigContent); + return new KubernetesClientBuilder().withConfig(config).build(); + } catch (IOException e) { + log.error("Failed to load kubeconfig from {}: {}", kubeconfigPath, e.getMessage()); + throw new RuntimeException("Failed to load kubeconfig", e); + } + } 
+ + // 使用默认配置(从环境变量或默认路径) + log.info("Using default Kubernetes configuration"); + return new KubernetesClientBuilder().build(); + } +} diff --git a/back/src/main/java/com/linkwork/config/MemoryConfig.java b/back/src/main/java/com/linkwork/config/MemoryConfig.java new file mode 100644 index 0000000..72bf8c4 --- /dev/null +++ b/back/src/main/java/com/linkwork/config/MemoryConfig.java @@ -0,0 +1,48 @@ +package com.linkwork.config; + +import lombok.Data; +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.context.annotation.Configuration; + +@Data +@Configuration +@ConfigurationProperties(prefix = "memory") +public class MemoryConfig { + + private boolean enabled = true; + private Milvus milvus = new Milvus(); + private Embedding embedding = new Embedding(); + private Index index = new Index(); + private String ossMountPath = "/data/oss"; + + @Data + public static class Milvus { + private String uri = "http://milvus:19530"; + private String token = ""; + } + + @Data + public static class Embedding { + private String model = "text-embedding-3-small"; + private int dimension = 1536; + } + + @Data + public static class Index { + private int maxChunkSize = 1500; + private int overlapLines = 2; + private String queueKey = "memory:index:jobs"; + } + + public String collectionName(String workstationId, String userId) { + return "memory_" + sanitize(workstationId) + "_" + sanitize(userId); + } + + public String userCollectionName(String userId) { + return "memory_user_" + sanitize(userId); + } + + private static String sanitize(String s) { + return s.replaceAll("[^a-zA-Z0-9_]", "_"); + } +} diff --git a/back/src/main/java/com/linkwork/config/MyBatisPlusConfig.java b/back/src/main/java/com/linkwork/config/MyBatisPlusConfig.java new file mode 100644 index 0000000..1eeea89 --- /dev/null +++ b/back/src/main/java/com/linkwork/config/MyBatisPlusConfig.java @@ -0,0 +1,47 @@ +package com.linkwork.config; + +import 
package com.linkwork.config;

import jakarta.annotation.PostConstruct;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Configuration;

import java.nio.file.Files;
import java.nio.file.Path;

/**
 * NFS-backed file storage configuration.
 * The base path is a local mount of the NFS share; switching NFS servers only
 * requires re-mounting and updating {@code basePath}.
 */
@Slf4j
@Data
@Configuration
@ConfigurationProperties(prefix = "nfs.storage")
public class NfsStorageConfig {

    /** Local NFS mount root. */
    private String basePath = "/mnt/oss/robot-agent-files";

    /** URL prefix of the backend file-download API. */
    private String downloadBaseUrl = "/api/v1/files";

    /** URL prefix of the task-output download API. */
    private String taskOutputBaseUrl = "/api/v1/task-outputs";

    /** Logs whether the configured base path exists; a missing path is only warned about, not fatal. */
    @PostConstruct
    public void validate() {
        Path base = Path.of(basePath);
        if (Files.isDirectory(base)) {
            log.info("NFS storage configured: basePath={}", basePath);
        } else {
            log.warn("NFS storage basePath does not exist or is not a directory: {}", basePath);
        }
    }

    /**
     * Resolves a relative path against the storage root.
     *
     * Rejects inputs that would escape the root (e.g. {@code "../../etc/passwd"}
     * or absolute paths outside the mount). The base path backs the download
     * APIs, so callers may pass request-derived paths; an unchecked
     * {@code resolve} would be a path-traversal hole.
     *
     * @param relativePath path relative to {@code basePath}
     * @return the normalized path under the storage root
     * @throws IllegalArgumentException if the path escapes the storage root
     */
    public Path resolve(String relativePath) {
        Path base = Path.of(basePath).normalize();
        Path resolved = base.resolve(relativePath).normalize();
        if (!resolved.startsWith(base)) {
            throw new IllegalArgumentException("Path escapes storage root: " + relativePath);
        }
        return resolved;
    }
}
b/back/src/main/java/com/linkwork/context/UserContext.java new file mode 100644 index 0000000..9dad7bc --- /dev/null +++ b/back/src/main/java/com/linkwork/context/UserContext.java @@ -0,0 +1,61 @@ +package com.linkwork.context; + +/** + * 用户上下文(ThreadLocal) + *

+ * 替代现有的 X-User-Id Header 和 Mock 硬编码。 + * 由 JwtAuthFilter 在请求进入时设置,请求结束时清除。 + * 用户信息全部来自 JWT payload,不查数据库。 + */ +public final class UserContext { + + private static final ThreadLocal HOLDER = new ThreadLocal<>(); + + private UserContext() { + } + + /** + * 设置当前用户(由 Filter 调用) + */ + public static void set(UserInfo userInfo) { + HOLDER.set(userInfo); + } + + /** + * 获取当前用户(完整信息) + */ + public static UserInfo get() { + return HOLDER.get(); + } + + /** + * 获取当前用户 ID + */ + public static String getCurrentUserId() { + UserInfo info = HOLDER.get(); + return info != null ? info.getUserId() : null; + } + + /** + * 获取当前用户姓名 + */ + public static String getCurrentUserName() { + UserInfo info = HOLDER.get(); + return info != null ? info.getName() : null; + } + + /** + * 获取当前用户邮箱 + */ + public static String getCurrentEmail() { + UserInfo info = HOLDER.get(); + return info != null ? info.getEmail() : null; + } + + /** + * 清除当前用户(由 Filter 在 finally 中调用,防止内存泄漏) + */ + public static void clear() { + HOLDER.remove(); + } +} diff --git a/back/src/main/java/com/linkwork/context/UserInfo.java b/back/src/main/java/com/linkwork/context/UserInfo.java new file mode 100644 index 0000000..0efae09 --- /dev/null +++ b/back/src/main/java/com/linkwork/context/UserInfo.java @@ -0,0 +1,36 @@ +package com.linkwork.context; + +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +import java.util.List; + +/** + * 当前登录用户信息(从 JWT payload 解析,不查数据库) + */ +@Data +@Builder +@NoArgsConstructor +@AllArgsConstructor +public class UserInfo { + + /** 唯一用户标识 */ + private String userId; + + /** 姓名 */ + private String name; + + /** 邮箱 */ + private String email; + + /** 工号 */ + private String workId; + + /** 头像 URL */ + private String avatarUrl; + + /** 权限列表 */ + private List permissions; +} diff --git a/back/src/main/java/com/linkwork/controller/ApprovalController.java b/back/src/main/java/com/linkwork/controller/ApprovalController.java new file mode 
100644 index 0000000..df15528 --- /dev/null +++ b/back/src/main/java/com/linkwork/controller/ApprovalController.java @@ -0,0 +1,116 @@ +package com.linkwork.controller; + +import com.baomidou.mybatisplus.extension.plugins.pagination.Page; +import com.linkwork.common.ApiResponse; +import com.linkwork.common.ClientIpResolver; +import com.linkwork.context.UserContext; +import com.linkwork.model.entity.Approval; +import com.linkwork.service.ApprovalService; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.web.bind.annotation.*; +import org.springframework.util.StringUtils; + +import java.util.HashMap; +import jakarta.servlet.http.HttpServletRequest; +import java.util.List; +import java.util.Map; + +/** + * 审批控制器 + */ +@Slf4j +@RestController +@RequestMapping("/api/v1/approvals") +@CrossOrigin(origins = "*") +@RequiredArgsConstructor +public class ApprovalController { + + private final ApprovalService approvalService; + + /** + * 获取审批列表 + * GET /api/v1/approvals + */ + @GetMapping + public ApiResponse> listApprovals( + @RequestParam(required = false) String status, + @RequestParam(defaultValue = "1") Integer page, + @RequestParam(defaultValue = "20") Integer pageSize) { + String userId = UserContext.getCurrentUserId(); + if (!StringUtils.hasText(userId)) { + throw new IllegalStateException("用户未登录或登录态失效"); + } + log.info("获取审批列表: status={}, page={}, pageSize={}, userId={}", status, page, pageSize, userId); + + Page approvalPage = approvalService.listApprovals(status, page, pageSize, userId); + List> items = approvalService.toResponseList(approvalPage.getRecords()); + + Map pagination = new HashMap<>(); + pagination.put("page", approvalPage.getCurrent()); + pagination.put("pageSize", approvalPage.getSize()); + pagination.put("total", approvalPage.getTotal()); + pagination.put("totalPages", approvalPage.getPages()); + + Map result = new HashMap<>(); + result.put("items", items); + result.put("pagination", pagination); + 
package com.linkwork.controller;

import com.linkwork.common.ApiResponse;
import com.linkwork.context.UserContext;
import com.linkwork.context.UserInfo;
import com.linkwork.service.AuthService;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import jakarta.servlet.http.Cookie;
import jakarta.servlet.http.HttpServletResponse;
import jakarta.validation.Valid;
import jakarta.validation.constraints.NotBlank;
import lombok.Data;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.web.bind.annotation.*;

import java.util.HashMap;
import java.util.Map;

/**
 * Authentication controller: login, token verification, current-user lookup
 * and logout. Tokens are issued by {@link AuthService} and mirrored into an
 * HttpOnly cookie.
 */
@Slf4j
@RestController
@RequestMapping("/api/v1/auth")
@CrossOrigin(origins = "*")
@RequiredArgsConstructor
public class AuthController {

    private final AuthService authService;

    public static final String COOKIE_NAME = "robot_token";

    /**
     * Token lifetime in seconds (24h). Single source of truth for the cookie
     * Max-Age and the {@code expiresIn} value reported to the client, which
     * previously duplicated the magic number 86400.
     */
    private static final int TOKEN_TTL_SECONDS = 86400;

    /**
     * Password login.
     * POST /api/v1/auth/login
     *
     * On success, returns the token in the body and also sets it as an
     * HttpOnly cookie.
     */
    @PostMapping("/login")
    public ApiResponse<Map<String, Object>> login(@Valid @RequestBody LoginRequest request, HttpServletResponse response) {
        log.info("登录请求");

        // Validate the password against the configured hash.
        if (!authService.validatePassword(request.getPassword())) {
            log.warn("密码验证失败");
            return ApiResponse.error(40100, "密码错误");
        }

        // Issue the token.
        String token = authService.generateToken("robot-user");

        // NOTE(review): cookie is HttpOnly but neither Secure nor SameSite is
        // set — confirm the TLS termination / cross-site posture before production.
        Cookie cookie = new Cookie(COOKIE_NAME, token);
        cookie.setPath("/");
        cookie.setHttpOnly(true);
        cookie.setMaxAge(TOKEN_TTL_SECONDS);
        response.addCookie(cookie);

        Map<String, Object> result = new HashMap<>();
        result.put("token", token);
        result.put("expiresIn", TOKEN_TTL_SECONDS);

        log.info("登录成功");
        return ApiResponse.success(result);
    }

    /**
     * Verifies a bearer token.
     * POST /api/v1/auth/verify
     */
    @PostMapping("/verify")
    public ApiResponse<Map<String, Object>> verify(@RequestHeader(value = "Authorization", required = false) String authHeader) {
        if (authHeader == null || !authHeader.startsWith("Bearer ")) {
            return ApiResponse.error(40101, "未提供有效的 Token");
        }

        // Strip the "Bearer " prefix.
        String token = authHeader.substring(7);

        if (!authService.validateToken(token)) {
            return ApiResponse.error(40101, "Token 无效或已过期");
        }

        Map<String, Object> result = new HashMap<>();
        result.put("valid", true);
        result.put("subject", authService.getSubjectFromToken(token));

        return ApiResponse.success(result);
    }

    /**
     * Generates a password hash (tooling endpoint).
     * GET /api/v1/auth/encode?password=xxx
     *
     * NOTE(review): this endpoint echoes the plaintext password back and
     * should be disabled in production — confirm it is gated by deployment
     * configuration.
     */
    @GetMapping("/encode")
    public ApiResponse<Map<String, Object>> encodePassword(@RequestParam String password) {
        String hash = authService.encodePassword(password);
        Map<String, Object> result = new HashMap<>();
        result.put("password", password);
        result.put("hash", hash);
        return ApiResponse.success(result);
    }

    /**
     * Returns the current logged-in user, parsed from the JWT payload
     * (no database lookup).
     */
    @GetMapping("/me")
    public ApiResponse<Map<String, Object>> me() {
        UserInfo userInfo = UserContext.get();
        if (userInfo == null) {
            return ApiResponse.error(40100, "未登录");
        }

        Map<String, Object> data = new HashMap<>();
        data.put("userId", userInfo.getUserId());
        data.put("name", userInfo.getName());
        data.put("email", userInfo.getEmail());
        data.put("workId", userInfo.getWorkId());
        data.put("avatarUrl", userInfo.getAvatarUrl());
        data.put("permissions", userInfo.getPermissions());
        return ApiResponse.success(data);
    }

    /**
     * Logout: expires the token cookie.
     */
    @PostMapping("/logout")
    public ApiResponse<Map<String, Object>> logout(HttpServletResponse response) {
        // Max-Age 0 instructs the browser to delete the cookie.
        Cookie cookie = new Cookie(COOKIE_NAME, "");
        cookie.setPath("/");
        cookie.setHttpOnly(true);
        cookie.setMaxAge(0);
        response.addCookie(cookie);

        Map<String, Object> data = new HashMap<>();
        data.put("logoutUrl", "/login");
        return ApiResponse.success(data);
    }

    /** Login request body; unknown JSON fields are ignored. */
    @Data
    @JsonIgnoreProperties(ignoreUnknown = true)
    public static class LoginRequest {
        private String username;

        @NotBlank(message = "密码不能为空")
        private String password;
    }
}
package com.linkwork.controller;

import com.linkwork.service.BuildLogBuffer;
import com.linkwork.service.BuildRecordService;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.http.MediaType;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.servlet.mvc.method.annotation.SseEmitter;

import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;

/**
 * Build-log SSE controller.
 * Streams log lines captured directly from the build.sh execution output
 * via an in-memory {@link BuildLogBuffer}; does not depend on a Redis Stream.
 */
@Slf4j
@RestController
@RequestMapping("/api/v1/build-logs")
@RequiredArgsConstructor
public class BuildLogController {

    private final BuildLogBuffer logBuffer;
    private final BuildRecordService buildRecordService;

    /**
     * Legacy query entry point: fetch logs by buildId, or by roleId (which
     * resolves to that role's most recent build).
     *
     * @param buildId    build id; takes precedence when present
     * @param roleId     role id used to look up the latest build when buildId is absent
     * @param afterIndex only return log entries after this index (for polling)
     * @throws IllegalArgumentException when neither buildId nor roleId is given
     */
    @GetMapping
    public Map<String, Object> getLogsByQuery(
            @RequestParam(required = false) String buildId,
            @RequestParam(required = false) Long roleId,
            @RequestParam(defaultValue = "0") int afterIndex) {

        String targetBuildId = buildId;
        if ((targetBuildId == null || targetBuildId.isBlank()) && roleId != null) {
            var latest = buildRecordService.getLatestByRoleId(roleId);
            if (latest == null || latest.getBuildNo() == null || latest.getBuildNo().isBlank()) {
                // No build exists for this role: report an empty, already-"completed" result.
                return Map.of(
                        "buildId", "",
                        "logs", List.of(),
                        "totalCount", 0,
                        "completed", true,
                        "success", false
                );
            }
            targetBuildId = latest.getBuildNo();
        }

        if (targetBuildId == null || targetBuildId.isBlank()) {
            throw new IllegalArgumentException("buildId 或 roleId 至少传一个");
        }
        return getLogs(targetBuildId, afterIndex);
    }

    /**
     * SSE endpoint: streams build logs in real time.
     *
     * Flow: replay buffered history, short-circuit if the build already
     * finished, otherwise subscribe to the buffer for live entries.
     *
     * @param buildId build id
     * @return SSE event stream ("log" events, then one "complete" event)
     */
    @GetMapping(value = "/{buildId}/stream", produces = MediaType.TEXT_EVENT_STREAM_VALUE)
    public SseEmitter streamLogs(@PathVariable String buildId) {
        log.info("SSE connection opened for buildId: {}", buildId);

        // 10-minute emitter timeout.
        SseEmitter emitter = new SseEmitter(TimeUnit.MINUTES.toMillis(10));

        // Replay buffered history first. Send failures are logged and skipped
        // so one broken event does not abort the replay.
        List<BuildLogBuffer.LogEntry> history = logBuffer.getHistory(buildId);
        for (BuildLogBuffer.LogEntry entry : history) {
            try {
                emitter.send(SseEmitter.event()
                        .name("log")
                        .data(Map.of(
                                "timestamp", entry.timestamp(),
                                "level", entry.level(),
                                "message", entry.message()
                        )));
            } catch (IOException e) {
                log.debug("Failed to send history log: {}", e.getMessage());
            }
        }

        // If the build already completed, emit the completion event and close.
        if (logBuffer.isCompleted(buildId)) {
            try {
                Boolean success = logBuffer.getCompletionStatus(buildId);
                emitter.send(SseEmitter.event().name("complete").data(Map.of(
                        "success", success != null ? success : false,
                        "message", success != null && success ? "构建成功" : "构建失败"
                )));
                emitter.complete();
            } catch (IOException e) {
                log.debug("Failed to send complete event: {}", e.getMessage());
            }
            return emitter;
        }

        // Subscribe for live entries.
        // NOTE(review): entries appended between getHistory() and subscribe()
        // could be missed — confirm BuildLogBuffer covers this window.
        java.util.function.Consumer<BuildLogBuffer.LogEntry> subscriber = entry -> {
            try {
                emitter.send(SseEmitter.event()
                        .name("log")
                        .data(Map.of(
                                "timestamp", entry.timestamp(),
                                "level", entry.level(),
                                "message", entry.message()
                        )));

                // Completion is only re-checked after each delivered entry.
                // NOTE(review): a build that completes without emitting a
                // trailing log entry may never receive "complete" here —
                // verify the buffer pushes a final entry on completion.
                if (logBuffer.isCompleted(buildId)) {
                    Boolean success = logBuffer.getCompletionStatus(buildId);
                    emitter.send(SseEmitter.event().name("complete").data(Map.of(
                            "success", success != null ? success : false,
                            "message", success != null && success ? "构建成功" : "构建失败"
                    )));
                    emitter.complete();
                }
            } catch (IOException e) {
                log.debug("Failed to send log via SSE: {}", e.getMessage());
            }
        };

        logBuffer.subscribe(buildId, subscriber);

        // Unsubscribe on every terminal emitter state to avoid leaking subscribers.
        emitter.onCompletion(() -> {
            logBuffer.unsubscribe(buildId, subscriber);
            log.debug("SSE connection completed for buildId: {}", buildId);
        });

        emitter.onTimeout(() -> {
            logBuffer.unsubscribe(buildId, subscriber);
            log.debug("SSE connection timeout for buildId: {}", buildId);
        });

        emitter.onError(e -> {
            logBuffer.unsubscribe(buildId, subscriber);
            log.debug("SSE connection error for buildId: {}", buildId);
        });

        return emitter;
    }

    /**
     * Polling fallback: returns buffered logs after the given index plus
     * overall completion status.
     *
     * @param buildId    build id
     * @param afterIndex only return log entries after this index
     */
    @GetMapping("/{buildId}")
    public Map<String, Object> getLogs(
            @PathVariable String buildId,
            @RequestParam(defaultValue = "0") int afterIndex) {

        List<BuildLogBuffer.LogEntry> logs = logBuffer.getLogsAfter(buildId, afterIndex);
        List<BuildLogBuffer.LogEntry> allLogs = logBuffer.getHistory(buildId);
        boolean completed = logBuffer.isCompleted(buildId);
        Boolean success = logBuffer.getCompletionStatus(buildId);

        return Map.of(
                "buildId", buildId,
                "logs", logs.stream().map(e -> Map.of(
                        "timestamp", e.timestamp(),
                        "level", e.level(),
                        "message", e.message()
                )).toList(),
                "totalCount", allLogs.size(),
                "completed", completed,
                "success", success != null ? success : false
        );
    }
}
package com.linkwork.controller;

import com.linkwork.model.entity.BuildRecordEntity;
import com.linkwork.service.BuildRecordService;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.web.bind.annotation.*;

import java.time.LocalDateTime;
import java.util.HashMap;
import java.util.Map;

/**
 * Build record controller: query endpoints for the build history.
 * Responses use a hand-rolled {code/msg/data/timestamp} envelope.
 */
@Slf4j
@RestController
@RequestMapping("/api/v1/build-records")
@CrossOrigin(origins = "*")
@RequiredArgsConstructor
public class BuildRecordController {

    private final BuildRecordService buildRecordService;

    /** Builds a success envelope carrying the given payload (payload may be null). */
    private static Map<String, Object> success(Object data) {
        Map<String, Object> envelope = new HashMap<>();
        envelope.put("code", 0);
        envelope.put("msg", "success");
        envelope.put("timestamp", LocalDateTime.now().toString());
        envelope.put("data", data);
        return envelope;
    }

    /** Builds an error envelope; deliberately carries no "data" key. */
    private static Map<String, Object> failure(int code, String msg) {
        Map<String, Object> envelope = new HashMap<>();
        envelope.put("code", code);
        envelope.put("msg", msg);
        envelope.put("timestamp", LocalDateTime.now().toString());
        return envelope;
    }

    /**
     * Lists build records.
     *
     * @param roleId   optional role id; when absent, all roles are queried
     * @param status   optional status filter (only applied to the all-roles query)
     * @param page     page number
     * @param pageSize page size
     */
    @GetMapping
    public Map<String, Object> listBuildRecords(
            @RequestParam(required = false) Long roleId,
            @RequestParam(required = false) String status,
            @RequestParam(defaultValue = "1") int page,
            @RequestParam(defaultValue = "20") int pageSize) {

        Map<String, Object> pageData = (roleId != null)
                ? buildRecordService.listByRoleId(roleId, page, pageSize)
                : buildRecordService.listRecent(page, pageSize, status);
        return success(pageData);
    }

    /**
     * Fetches the detail of one build record by its build number.
     * Returns a 404 envelope (code 404, no "data" key) when not found.
     */
    @GetMapping("/{buildNo}")
    public Map<String, Object> getBuildRecord(@PathVariable String buildNo) {
        BuildRecordEntity record = buildRecordService.getByBuildNo(buildNo);
        if (record == null) {
            return failure(404, "Build record not found: " + buildNo);
        }

        Map<String, Object> detail = new HashMap<>();
        detail.put("id", record.getId().toString());
        detail.put("buildNo", record.getBuildNo());
        detail.put("roleId", record.getRoleId() != null ? record.getRoleId().toString() : null);
        detail.put("roleName", record.getRoleName());
        detail.put("status", record.getStatus());
        detail.put("imageTag", record.getImageTag());
        detail.put("durationMs", record.getDurationMs());
        detail.put("errorMessage", record.getErrorMessage());
        detail.put("configSnapshot", record.getConfigSnapshot());
        detail.put("creatorId", record.getCreatorId());
        detail.put("creatorName", record.getCreatorName());
        detail.put("createdAt", record.getCreatedAt() != null ? record.getCreatedAt().toString() : null);
        detail.put("updatedAt", record.getUpdatedAt() != null ? record.getUpdatedAt().toString() : null);

        return success(detail);
    }

    /**
     * Fetches the latest build record of a role; data is null when the role
     * has no builds yet.
     */
    @GetMapping("/role/{roleId}/latest")
    public Map<String, Object> getLatestBuildRecord(@PathVariable Long roleId) {
        Map<String, Object> pageData = buildRecordService.listByRoleId(roleId, 1, 1);
        @SuppressWarnings("unchecked")
        java.util.List<Map<String, Object>> items = (java.util.List<Map<String, Object>>) pageData.get("items");

        Object latest = (items == null || items.isEmpty()) ? null : items.get(0);
        return success(latest);
    }
}
org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.PostMapping; +import org.springframework.web.bind.annotation.PutMapping; +import org.springframework.web.bind.annotation.RequestBody; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RequestParam; +import org.springframework.web.bind.annotation.RestController; + +import java.util.Map; + +@RestController +@RequestMapping("/api/v1/cron-jobs") +@RequiredArgsConstructor +public class CronJobController { + + private final CronJobService cronJobService; + private final CronJobExecutor cronJobExecutor; + + @PostMapping + public ApiResponse create(@Valid @RequestBody CronJobCreateRequest request) { + String userId = UserContext.getCurrentUserId(); + String userName = UserContext.getCurrentUserName(); + return ApiResponse.success(cronJobService.create(request, userId, userName)); + } + + @PostMapping("/preview") + public ApiResponse> preview(@RequestBody CronSchedulePreviewRequest request) { + return ApiResponse.success(Map.of( + "nextFireTimes", + cronJobService.previewSchedule(request.getScheduleType(), request.getCronExpr(), request.getIntervalMs(), + request.getRunAt(), request.getTimezone(), request.getLimit()) + )); + } + + @GetMapping + public ApiResponse> listMine( + @RequestParam(value = "scope", defaultValue = "mine") String scope, + @RequestParam(value = "roleId", required = false) Long roleId, + @RequestParam(value = "enabled", required = false) Boolean enabled, + @RequestParam(value = "scheduleType", required = false) String scheduleType, + @RequestParam(value = "keyword", required = false) String keyword, + @RequestParam(value = "page", defaultValue = "1") Integer page, + @RequestParam(value = "pageSize", defaultValue = "20") Integer pageSize) { + if (StringUtils.hasText(scope) && !"mine".equalsIgnoreCase(scope.trim())) { + throw new IllegalArgumentException("MVP 仅支持 scope=mine"); + } + return 
ApiResponse.success(cronJobService.listMine( + UserContext.getCurrentUserId(), roleId, enabled, scheduleType, keyword, page, pageSize)); + } + + @GetMapping("/{id}") + public ApiResponse detail(@PathVariable Long id) { + return ApiResponse.success(cronJobService.getDetail(id, UserContext.getCurrentUserId())); + } + + @PutMapping("/{id}") + public ApiResponse update(@PathVariable Long id, + @Valid @RequestBody CronJobUpdateRequest request) { + return ApiResponse.success(cronJobService.update(id, request, UserContext.getCurrentUserId())); + } + + @PutMapping("/{id}/toggle") + public ApiResponse toggle(@PathVariable Long id, + @Valid @RequestBody CronJobToggleRequest request) { + return ApiResponse.success(cronJobService.toggle(id, request, UserContext.getCurrentUserId())); + } + + @PostMapping("/{id}/trigger") + public ApiResponse trigger(@PathVariable Long id) { + CronJob job = cronJobService.getOwnedJob(id, UserContext.getCurrentUserId()); + CronJobRun run = cronJobExecutor.dispatchManual(job); + return ApiResponse.success(cronJobService.toRunResponse(run)); + } + + @GetMapping("/{id}/runs") + public ApiResponse> listRuns(@PathVariable Long id, + @RequestParam(value = "page", defaultValue = "1") Integer page, + @RequestParam(value = "pageSize", defaultValue = "20") Integer pageSize) { + return ApiResponse.success(cronJobService.listRuns(id, UserContext.getCurrentUserId(), page, pageSize)); + } + + @DeleteMapping("/{id}") + public ApiResponse delete(@PathVariable Long id) { + cronJobService.delete(id, UserContext.getCurrentUserId()); + return ApiResponse.success(); + } +} diff --git a/back/src/main/java/com/linkwork/controller/FileController.java b/back/src/main/java/com/linkwork/controller/FileController.java new file mode 100644 index 0000000..68697d7 --- /dev/null +++ b/back/src/main/java/com/linkwork/controller/FileController.java @@ -0,0 +1,191 @@ +package com.linkwork.controller; + +import com.linkwork.common.ApiResponse; +import 
com.linkwork.context.UserContext; +import com.linkwork.model.dto.CreateFolderRequest; +import com.linkwork.model.dto.FileMentionResponse; +import com.linkwork.model.dto.FileNodeResponse; +import com.linkwork.model.dto.FileResponse; +import com.linkwork.model.dto.FileSpaceSyncRequest; +import com.linkwork.model.dto.FileSpaceSyncResponse; +import com.linkwork.model.dto.FileTransferRequest; +import com.linkwork.service.FileNodeService; +import com.linkwork.service.FileSpaceSyncService; +import com.linkwork.service.FileService; +import com.linkwork.service.NfsStorageService; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.core.io.InputStreamResource; +import org.springframework.core.io.Resource; +import org.springframework.http.HttpHeaders; +import org.springframework.http.MediaType; +import org.springframework.http.ResponseEntity; +import org.springframework.web.bind.annotation.*; +import org.springframework.web.multipart.MultipartFile; + +import java.io.IOException; +import java.io.InputStream; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; +import java.util.Map; + +@Slf4j +@RestController +@RequestMapping("/api/v1/files") +@RequiredArgsConstructor +public class FileController { + + private final FileService fileService; + private final FileNodeService fileNodeService; + private final FileSpaceSyncService fileSpaceSyncService; + private final NfsStorageService nfsStorageService; + + @PostMapping("/upload") + public ApiResponse uploadFile( + @RequestParam("file") MultipartFile file, + @RequestParam("spaceType") String spaceType, + @RequestParam(value = "workstationId", required = false) String workstationId, + @RequestParam(value = "conflictPolicy", required = false) String conflictPolicy, + @RequestParam(value = "parentId", required = false) String parentId) { + String userId = UserContext.getCurrentUserId(); 
+ return ApiResponse.success(fileService.uploadFile(file, spaceType, workstationId, userId, conflictPolicy, parentId)); + } + + @GetMapping("/tree") + public ApiResponse> listTree( + @RequestParam("spaceType") String spaceType, + @RequestParam(value = "workstationId", required = false) String workstationId, + @RequestParam(value = "parentId", required = false) String parentId) { + String userId = UserContext.getCurrentUserId(); + return ApiResponse.success(fileNodeService.listChildren(spaceType, workstationId, parentId, userId)); + } + + @PostMapping("/sync") + public ApiResponse syncSpace(@RequestBody FileSpaceSyncRequest request) { + String userId = UserContext.getCurrentUserId(); + return ApiResponse.success(fileSpaceSyncService.syncSpace(userId, request)); + } + + @PostMapping("/folders") + public ApiResponse createFolder(@RequestBody CreateFolderRequest request) { + String userId = UserContext.getCurrentUserId(); + return ApiResponse.success(fileNodeService.createFolder(request, userId)); + } + + @PutMapping("/nodes/{nodeId}/rename") + public ApiResponse renameNode(@PathVariable String nodeId, + @RequestBody Map body) { + String userId = UserContext.getCurrentUserId(); + fileNodeService.renameNode(nodeId, body.get("name"), userId); + return ApiResponse.success(); + } + + @DeleteMapping("/nodes/{nodeId}") + public ApiResponse deleteNode(@PathVariable String nodeId) { + String userId = UserContext.getCurrentUserId(); + fileNodeService.deleteNode(nodeId, userId); + return ApiResponse.success(); + } + + @GetMapping + public ApiResponse> listFiles( + @RequestParam("spaceType") String spaceType, + @RequestParam(value = "workstationId", required = false) String workstationId, + @RequestParam(value = "fileType", required = false) String fileType, + @RequestParam(value = "keyword", required = false) String keyword, + @RequestParam(value = "page", defaultValue = "1") Integer page, + @RequestParam(value = "pageSize", defaultValue = "20") Integer pageSize) { + return 
ApiResponse.success(fileService.listFiles(spaceType, workstationId, fileType, keyword, page, pageSize, + UserContext.getCurrentUserId())); + } + + @GetMapping("/{fileId}") + public ApiResponse getFileDetail(@PathVariable String fileId) { + return ApiResponse.success(fileService.getFileDetail(fileId, UserContext.getCurrentUserId())); + } + + @GetMapping("/{fileId}/download") + public ResponseEntity downloadFile( + @PathVariable String fileId, + @RequestParam(value = "inline", defaultValue = "false") boolean inline) throws IOException { + String userId = UserContext.getCurrentUserId(); + FileService.DownloadInfo info = fileService.getDownloadInfo(fileId, userId); + + Path filePath = nfsStorageService.getAbsolutePath(info.storagePath()); + if (!Files.exists(filePath)) { + throw new IllegalStateException("文件不存在于存储中: " + info.storagePath()); + } + + String encodedFileName = URLEncoder.encode(info.fileName(), StandardCharsets.UTF_8).replace("+", "%20"); + String contentType = resolveContentType(info.contentType(), info.fileName(), filePath); + String disposition = inline ? "inline" : "attachment"; + InputStream inputStream = Files.newInputStream(filePath); + + return ResponseEntity.ok() + .header(HttpHeaders.CONTENT_DISPOSITION, disposition + "; filename*=UTF-8''" + encodedFileName) + .contentType(MediaType.parseMediaType(contentType)) + .contentLength(Files.size(filePath)) + .body(new InputStreamResource(inputStream)); + } + + private String resolveContentType(String rawContentType, String fileName, Path filePath) { + if (rawContentType != null && !rawContentType.isBlank() + && !"application/octet-stream".equalsIgnoreCase(rawContentType)) { + return rawContentType; + } + try { + String detected = Files.probeContentType(filePath); + if (detected != null && !detected.isBlank()) { + return detected; + } + } catch (IOException ignore) { + // noop, fall through to extension mapping + } + String lowerName = fileName == null ? 
"" : fileName.toLowerCase(java.util.Locale.ROOT); + if (lowerName.endsWith(".pdf")) { + return MediaType.APPLICATION_PDF_VALUE; + } + if (lowerName.endsWith(".md") || lowerName.endsWith(".txt") || lowerName.endsWith(".log")) { + return MediaType.TEXT_PLAIN_VALUE; + } + if (lowerName.endsWith(".json")) { + return MediaType.APPLICATION_JSON_VALUE; + } + return MediaType.APPLICATION_OCTET_STREAM_VALUE; + } + + @DeleteMapping("/{fileId}") + public ApiResponse deleteFile(@PathVariable String fileId) { + fileService.deleteFile(fileId, UserContext.getCurrentUserId()); + return ApiResponse.success(); + } + + @PutMapping("/{fileId}") + public ApiResponse replaceFile( + @PathVariable String fileId, + @RequestParam("file") MultipartFile file) { + return ApiResponse.success(fileService.replaceFile(fileId, file, UserContext.getCurrentUserId())); + } + + @PostMapping("/{fileId}/copy") + public ApiResponse copyFile(@PathVariable String fileId, + @RequestBody FileTransferRequest request) { + return ApiResponse.success(fileService.copyFile(fileId, request, UserContext.getCurrentUserId())); + } + + @PostMapping("/{fileId}/move") + public ApiResponse moveFile(@PathVariable String fileId, + @RequestBody FileTransferRequest request) { + return ApiResponse.success(fileService.moveFile(fileId, request, UserContext.getCurrentUserId())); + } + + @GetMapping("/mention") + public ApiResponse> mentionFiles( + @RequestParam("workstationId") String workstationId, + @RequestParam(value = "keyword", required = false) String keyword) { + return ApiResponse.success(fileService.mentionFiles(workstationId, keyword, UserContext.getCurrentUserId())); + } +} diff --git a/back/src/main/java/com/linkwork/controller/GitLabAuthController.java b/back/src/main/java/com/linkwork/controller/GitLabAuthController.java new file mode 100644 index 0000000..66d205f --- /dev/null +++ b/back/src/main/java/com/linkwork/controller/GitLabAuthController.java @@ -0,0 +1,56 @@ +package com.linkwork.controller; + +import 
com.linkwork.common.ApiResponse; +import com.linkwork.context.UserContext; +import com.linkwork.model.entity.GitLabAuthEntity; +import com.linkwork.service.GitLabAuthService; +import lombok.RequiredArgsConstructor; +import org.springframework.web.bind.annotation.*; + +import java.util.List; +import java.util.Map; + +@RestController +@RequestMapping("/api/v1/auth/gitlab") +@RequiredArgsConstructor +public class GitLabAuthController { + + private final GitLabAuthService gitLabAuthService; + + @GetMapping("/url") + public ApiResponse> getAuthUrl( + @RequestParam(required = false) String redirectUri, + @RequestParam(defaultValue = "write") String scopeType) { + String url = gitLabAuthService.getAuthUrl(redirectUri, scopeType); + return ApiResponse.success(Map.of("url", url)); + } + + @PostMapping("/callback") + public ApiResponse callback(@RequestBody Map body) { + String userId = UserContext.getCurrentUserId(); + String code = body.get("code"); + String redirectUri = body.get("redirectUri"); + String scopeType = body.getOrDefault("scopeType", "write"); + gitLabAuthService.callback(userId, code, redirectUri, scopeType); + return ApiResponse.success(null); + } + + @GetMapping("/users") + public ApiResponse> listUsers() { + String userId = UserContext.getCurrentUserId(); + // We mask the tokens in the response + List list = gitLabAuthService.listUsers(userId); + list.forEach(item -> { + item.setAccessToken(null); + item.setRefreshToken(null); + }); + return ApiResponse.success(list); + } + + @DeleteMapping("/users/{id}") + public ApiResponse deleteUser(@PathVariable String id) { + String userId = UserContext.getCurrentUserId(); + gitLabAuthService.deleteUser(userId, id); + return ApiResponse.success(null); + } +} diff --git a/back/src/main/java/com/linkwork/controller/HealthController.java b/back/src/main/java/com/linkwork/controller/HealthController.java new file mode 100644 index 0000000..286dd95 --- /dev/null +++ 
b/back/src/main/java/com/linkwork/controller/HealthController.java @@ -0,0 +1,20 @@ +package com.linkwork.controller; + +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.RestController; + +import java.util.Map; + +@RestController +public class HealthController { + + /** + * 健康检查接口 + * Docker 健康检查 + 外部监控使用 + * 同时映射 /health 和 /api/v1/health + */ + @GetMapping({"/health", "/api/v1/health"}) + public Map health() { + return Map.of("status", "UP"); + } +} diff --git a/back/src/main/java/com/linkwork/controller/ImageBuildController.java b/back/src/main/java/com/linkwork/controller/ImageBuildController.java new file mode 100644 index 0000000..19a2076 --- /dev/null +++ b/back/src/main/java/com/linkwork/controller/ImageBuildController.java @@ -0,0 +1,80 @@ +package com.linkwork.controller; + +import com.linkwork.common.ApiResponse; +import com.linkwork.service.ImageBuildService; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.web.bind.annotation.*; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +/** + * 镜像构建 Controller + * + * 提供构建相关的配置查询接口 + */ +@Slf4j +@RestController +@RequestMapping({"/api/v1/build", "/api/v1/image-build"}) +@CrossOrigin(origins = "*") +@RequiredArgsConstructor +public class ImageBuildController { + + private final ImageBuildService imageBuildService; + + /** + * 获取可选的构建基础镜像列表 + * + * @return 基础镜像列表 + */ + @GetMapping("/base-images") + public ApiResponse> listBaseImages() { + List images = new ArrayList<>(); + images.add(new BaseImageInfo( + "10.30.107.146/robot/rockylinux9-agent:v1.3", + "Rocky Linux 9 Agent v1.3", + "开发机默认基础镜像(优先使用本地缓存)", + true + )); + images.add(new BaseImageInfo( + "rockylinux/rockylinux:9.6", + "Rocky Linux 9.6", + "公共回退基础镜像(仅当 v1.3 不可用时使用)", + false + )); + images.add(new BaseImageInfo( + 
"10.30.107.146/robot/rockylinux9-agent@sha256:b49d75f52f6b3c55bbf90427f0df0e97bc8e3f3e03727721cafc2c9d775b8975", + "Rocky Linux 9 Agent", + "内网固定 digest 基础镜像(需内网仓库可达)", + false + )); + return ApiResponse.success(images); + } + + /** + * 手动触发一次本地镜像运维动作: + * - 清理过期 service-*-agent 本地镜像 + * - 可选对 Kind 节点执行 crictl prune + */ + @PostMapping("/ops/local-image-maintenance") + public ApiResponse> runLocalImageMaintenance() { + return ApiResponse.success(imageBuildService.runLocalImageMaintenance("manual")); + } + + /** + * 基础镜像信息 DTO + * + * @param id 镜像标识(唯一) + * @param name 镜像显示名称 + * @param description 镜像描述 + * @param isDefault 是否为默认选项 + */ + public record BaseImageInfo( + String id, + String name, + String description, + boolean isDefault + ) {} +} diff --git a/back/src/main/java/com/linkwork/controller/K8sClusterController.java b/back/src/main/java/com/linkwork/controller/K8sClusterController.java new file mode 100644 index 0000000..bca0b7a --- /dev/null +++ b/back/src/main/java/com/linkwork/controller/K8sClusterController.java @@ -0,0 +1,150 @@ +package com.linkwork.controller; + +import com.linkwork.common.ApiResponse; +import com.linkwork.context.UserContext; +import com.linkwork.context.UserInfo; +import com.linkwork.model.dto.*; +import com.linkwork.service.K8sClusterService; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.web.bind.annotation.*; + +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +/** + * K8s 集群监控 API + * 仅允许配置的 workId 用户访问 + */ +@Slf4j +@RestController +@RequestMapping("/api/v1/k8s-monitor") +@RequiredArgsConstructor +public class K8sClusterController { + + private final K8sClusterService k8sClusterService; + + @Value("${schedule.cluster.namespace:ai-worker}") + private String defaultNamespace; + + @Value("${robot.k8s-monitor.allowed-users:}") + private String allowedUsersConfig; + + private Set 
getAllowedUsers() { + Set set = new HashSet<>(); + for (String id : allowedUsersConfig.split(",")) { + String trimmed = id.trim(); + if (!trimmed.isEmpty()) { + set.add(trimmed); + } + } + return set; + } + + private boolean isUserAllowed(UserInfo user) { + if (user == null) return false; + Set allowed = getAllowedUsers(); + if (user.getWorkId() != null && allowed.contains(user.getWorkId().trim())) return true; + if (user.getUserId() != null && allowed.contains(user.getUserId().trim())) return true; + return false; + } + + private String resolveNamespace(String namespace) { + return (namespace == null || namespace.isBlank()) ? defaultNamespace : namespace; + } + + private void checkPermission() { + UserInfo user = UserContext.get(); + if (user == null) { + throw new SecurityException("未登录"); + } + if (!isUserAllowed(user)) { + log.warn("K8s Monitor 访问被拒绝: userId={}, workId={}, name={}", user.getUserId(), user.getWorkId(), user.getName()); + throw new SecurityException("无权访问 K8s 集群监控"); + } + } + + @GetMapping("/access-check") + public ApiResponse checkAccess() { + UserInfo user = UserContext.get(); + if (user == null) return ApiResponse.success(false); + log.debug("K8s Monitor access-check: userId={}, workId={}, name={}", user.getUserId(), user.getWorkId(), user.getName()); + return ApiResponse.success(isUserAllowed(user)); + } + + @GetMapping("/namespaces") + public ApiResponse> namespaces() { + checkPermission(); + return ApiResponse.success(k8sClusterService.listNamespaces()); + } + + @GetMapping("/overview") + public ApiResponse overview(@RequestParam(required = false) String namespace) { + checkPermission(); + return ApiResponse.success(k8sClusterService.getOverview(resolveNamespace(namespace))); + } + + @GetMapping("/nodes") + public ApiResponse> nodes() { + checkPermission(); + return ApiResponse.success(k8sClusterService.listNodes()); + } + + @GetMapping("/pods") + public ApiResponse> pods( + @RequestParam(required = false) String namespace, + 
@RequestParam(required = false) String status, + @RequestParam(required = false) String node, + @RequestParam(required = false) String podGroup) { + checkPermission(); + return ApiResponse.success(k8sClusterService.listPods(resolveNamespace(namespace), status, node, podGroup)); + } + + @GetMapping("/podgroups") + public ApiResponse> podGroups(@RequestParam(required = false) String namespace) { + checkPermission(); + return ApiResponse.success(k8sClusterService.listPodGroups(resolveNamespace(namespace))); + } + + @GetMapping("/pods/{podName}/logs") + public ApiResponse podLogs( + @PathVariable String podName, + @RequestParam(required = false) String namespace, + @RequestParam(required = false) String container, + @RequestParam(defaultValue = "200") int tailLines) { + checkPermission(); + return ApiResponse.success(k8sClusterService.getPodLogs(resolveNamespace(namespace), podName, container, tailLines)); + } + + @GetMapping("/events") + public ApiResponse> events( + @RequestParam(required = false) String namespace, + @RequestParam(defaultValue = "50") int limit) { + checkPermission(); + return ApiResponse.success(k8sClusterService.listEvents(resolveNamespace(namespace), limit)); + } + + @GetMapping("/pods/{podName}/events") + public ApiResponse> podEvents( + @PathVariable String podName, + @RequestParam(required = false) String namespace) { + checkPermission(); + return ApiResponse.success(k8sClusterService.listPodEvents(resolveNamespace(namespace), podName)); + } + + @DeleteMapping("/pods/{podName}") + public ApiResponse deletePod( + @PathVariable String podName, + @RequestParam(required = false) String namespace) { + checkPermission(); + k8sClusterService.deletePod(resolveNamespace(namespace), podName); + return ApiResponse.success("Pod " + podName + " deleted"); + } + + @ExceptionHandler(SecurityException.class) + public ApiResponse handleSecurity(SecurityException e) { + return ApiResponse.error(40300, e.getMessage()); + } +} diff --git 
a/back/src/main/java/com/linkwork/controller/McpInternalController.java b/back/src/main/java/com/linkwork/controller/McpInternalController.java new file mode 100644 index 0000000..6ae274b --- /dev/null +++ b/back/src/main/java/com/linkwork/controller/McpInternalController.java @@ -0,0 +1,91 @@ +package com.linkwork.controller; + +import com.linkwork.common.ApiResponse; +import com.linkwork.model.entity.McpServerEntity; +import com.linkwork.model.entity.McpUserConfigEntity; +import com.linkwork.model.entity.Task; +import com.linkwork.model.enums.TaskStatus; +import com.linkwork.service.McpServerService; +import com.linkwork.service.McpUserConfigService; +import com.linkwork.service.TaskService; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.web.bind.annotation.*; + +import java.time.LocalDateTime; +import java.util.*; + +/** + * MCP Gateway 内部 API — 仅供 Gateway 调用,不经过用户认证 + *

+ * /api/internal/** 路径已在 JwtAuthFilter 中放行 + */ +@Slf4j +@RestController +@RequestMapping("/api/internal") +@RequiredArgsConstructor +public class McpInternalController { + + private final McpServerService mcpServerService; + private final McpUserConfigService mcpUserConfigService; + private final TaskService taskService; + + /** + * Gateway 定时拉取完整 MCP Server 注册表 + */ + @GetMapping("/mcp-servers/registry") + public ApiResponse> registry() { + List allServers = mcpServerService.listByTypes(List.of("http", "sse")); + List> servers = new ArrayList<>(); + + for (McpServerEntity s : allServers) { + Map entry = new LinkedHashMap<>(); + entry.put("name", s.getName()); + entry.put("type", s.getType()); + entry.put("networkZone", s.getNetworkZone() != null ? s.getNetworkZone() : "external"); + entry.put("url", s.getUrl()); + entry.put("headers", s.getHeaders()); + entry.put("healthCheckUrl", s.getHealthCheckUrl()); + entry.put("status", s.getStatus()); + servers.add(entry); + } + + Map result = new LinkedHashMap<>(); + result.put("servers", servers); + result.put("updatedAt", LocalDateTime.now().toString()); + return ApiResponse.success(result); + } + + /** + * Gateway 验证 taskId(即 taskNo)是否有效且未结束 + */ + @GetMapping("/tasks/{taskId}/validate") + public ApiResponse> validateTask(@PathVariable String taskId) { + Map data = new LinkedHashMap<>(); + data.put("taskId", taskId); + + try { + Task task = taskService.getTaskByNo(taskId); + boolean active = task.getStatus() == TaskStatus.PENDING + || task.getStatus() == TaskStatus.RUNNING + || task.getStatus() == TaskStatus.PENDING_AUTH; + data.put("valid", active); + data.put("userId", task.getCreatorId() != null ? 
task.getCreatorId() : ""); + } catch (IllegalArgumentException e) { + data.put("valid", false); + data.put("userId", ""); + } + return ApiResponse.success(data); + } + + /** + * Gateway 查询用户个人 MCP 凭证 + */ + @GetMapping("/mcp-user-configs") + public ApiResponse getUserConfig( + @RequestParam String mcpName, + @RequestParam String userId) { + McpUserConfigEntity config = mcpUserConfigService.getByUserAndMcpName(userId, mcpName); + return ApiResponse.success(config); + } +} diff --git a/back/src/main/java/com/linkwork/controller/McpServerController.java b/back/src/main/java/com/linkwork/controller/McpServerController.java new file mode 100644 index 0000000..4b88414 --- /dev/null +++ b/back/src/main/java/com/linkwork/controller/McpServerController.java @@ -0,0 +1,204 @@ +package com.linkwork.controller; + +import com.linkwork.common.ApiResponse; +import com.linkwork.context.UserContext; +import com.linkwork.model.dto.McpDiscoverResult; +import com.linkwork.model.dto.McpProbeResult; +import com.linkwork.model.entity.McpServerEntity; +import com.linkwork.model.entity.RoleEntity; +import com.linkwork.service.McpDiscoveryService; +import com.linkwork.service.McpHealthChecker; +import com.linkwork.service.McpServerService; +import com.linkwork.service.RoleService; +import com.linkwork.service.AdminAccessService; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.web.bind.annotation.*; + +import java.time.LocalDateTime; +import java.util.*; +import java.util.stream.Collectors; + +/** + * MCP 服务控制器 + */ +@Slf4j +@RestController +@RequestMapping("/api/v1") +@CrossOrigin(origins = "*") +@RequiredArgsConstructor +public class McpServerController { + + private final McpServerService mcpServerService; + private final McpHealthChecker mcpHealthChecker; + private final McpDiscoveryService mcpDiscoveryService; + private final RoleService roleService; + private final AdminAccessService adminAccessService; + + /** + * 获取 MCP 服务列表 + */ 
+ @GetMapping("/mcp-servers") + public ApiResponse> listMcpServers( + @RequestParam(defaultValue = "1") int page, + @RequestParam(defaultValue = "20") int pageSize, + @RequestParam(required = false) String status, + @RequestParam(required = false) String keyword) { + String userId = UserContext.getCurrentUserId(); + Map data = mcpServerService.listMcpServers(page, pageSize, status, keyword, userId); + return ApiResponse.success(data); + } + + /** + * 获取所有可用的 MCP 服务(用于下拉选择) + */ + @GetMapping("/mcp-servers/available") + public ApiResponse>> listAvailable() { + String userId = UserContext.getCurrentUserId(); + List> data = mcpServerService.listAllAvailable(userId); + return ApiResponse.success(data); + } + + /** + * 获取所有 MCP Server 的健康状态 + */ + @GetMapping("/mcp-servers/health") + public ApiResponse> getHealthStatus() { + String userId = UserContext.getCurrentUserId(); + Map data = mcpServerService.getHealthStatus(userId); + return ApiResponse.success(data); + } + + /** + * 获取单个 MCP 服务详情 + */ + @GetMapping("/mcp-servers/{id}") + public ApiResponse> getMcpServer(@PathVariable Long id) { + String userId = UserContext.getCurrentUserId(); + Map entity = mcpServerService.getMcpServerForRead(id, userId); + return ApiResponse.success(entity); + } + + /** + * 测试单个 MCP 服务的连通性 + */ + @PostMapping("/mcp-servers/{id}/test") + public ApiResponse testMcpServer(@PathVariable Long id) { + String userId = UserContext.getCurrentUserId(); + McpServerEntity entity = mcpServerService.getMcpServerForManage(id, userId); + McpProbeResult result = mcpHealthChecker.probeSingle(entity); + // 同时更新 DB 健康状态 + mcpServerService.updateHealth(entity.getId(), result.getStatus(), result.getLatencyMs(), result.getMessage(), + "online".equals(result.getStatus()) ? 0 + : (entity.getConsecutiveFailures() != null ? entity.getConsecutiveFailures() + 1 : 1)); + return ApiResponse.success(result); + } + + /** + * 发现 MCP Server 的工具列表 + *

+ * 对目标 MCP server 发送 JSON-RPC 请求(initialize → initialized → tools/list), + * 获取并返回工具清单。成功时同步更新 configJson。 + */ + @PostMapping("/mcp-servers/{id}/discover") + public ApiResponse discoverTools(@PathVariable Long id) { + String userId = UserContext.getCurrentUserId(); + McpServerEntity entity = mcpServerService.getMcpServerForManage(id, userId); + + McpDiscoverResult result = mcpDiscoveryService.discover(entity, userId); + + // 如果成功,把 tools 存入 config_json + if (result.isSuccess() && result.getTools() != null) { + Map configJson = entity.getConfigJson(); + if (configJson == null) { + configJson = new HashMap<>(); + } + configJson.put("tools", result.getTools()); + configJson.put("serverName", result.getServerName()); + configJson.put("serverVersion", result.getServerVersion()); + configJson.put("lastDiscoveredAt", LocalDateTime.now().toString()); + entity.setConfigJson(configJson); + mcpServerService.updateById(entity); + } + + return ApiResponse.success(result); + } + + /** + * 创建 MCP 服务(支持 type/url/headers/healthCheckUrl/version/tags) + */ + @PostMapping("/mcp-servers") + public ApiResponse> createMcpServer( + @RequestBody Map request) { + String userId = UserContext.getCurrentUserId(); + String userName = UserContext.getCurrentUserName(); + McpServerEntity entity = mcpServerService.createMcpServer(request, userId, userName); + return ApiResponse.success(Map.of("id", entity.getId(), "mcpNo", entity.getMcpNo())); + } + + /** + * 更新 MCP 服务(支持 type/url/headers/healthCheckUrl/version/tags) + */ + @PutMapping("/mcp-servers/{id}") + public ApiResponse> updateMcpServer( + @PathVariable Long id, + @RequestBody Map request) { + String userId = UserContext.getCurrentUserId(); + String userName = UserContext.getCurrentUserName(); + McpServerEntity entity = mcpServerService.updateMcpServer(id, request, userId, userName); + return ApiResponse.success(Map.of("id", entity.getId(), "mcpNo", entity.getMcpNo())); + } + + /** + * 删除 MCP 服务 + */ + @DeleteMapping("/mcp-servers/{id}") + 
public ApiResponse deleteMcpServer(@PathVariable Long id) { + String userId = UserContext.getCurrentUserId(); + mcpServerService.deleteMcpServer(id, userId); + return ApiResponse.success(null); + } + + /** + * 根据岗位生成 mcp.json 配置 + *

+ * 从岗位 configJson.mcp 中获取 MCP ID 列表,查询对应 MCP Server 配置, + * 生成 SDK 兼容的 mcp.json 格式。 + */ + @GetMapping("/roles/{roleId}/mcp-config") + public ApiResponse> getMcpConfigByRole(@PathVariable Long roleId) { + String userId = UserContext.getCurrentUserId(); + RoleEntity role = roleService.getRoleForWrite(roleId, userId); + + // 兼容数字 ID 和名称字符串两种格式 + List mcpIds = new ArrayList<>(); + if (role.getConfigJson() != null && role.getConfigJson().getMcp() != null) { + List mcpNames = new ArrayList<>(); + for (String ref : role.getConfigJson().getMcp()) { + if (ref == null || ref.isBlank()) continue; + try { + mcpIds.add(Long.parseLong(ref)); + } catch (NumberFormatException e) { + mcpNames.add(ref); + } + } + // 按名称查询 + if (!mcpNames.isEmpty()) { + com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper byNameQuery = + new com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper() + .in(McpServerEntity::getName, mcpNames); + if (!adminAccessService.isAdmin(userId)) { + byNameQuery.and(w -> w.eq(McpServerEntity::getCreatorId, userId) + .or().eq(McpServerEntity::getVisibility, "public")); + } + List byNames = mcpServerService.list(byNameQuery); + for (McpServerEntity entity : byNames) { + mcpIds.add(entity.getId()); + } + } + } + + Map config = mcpServerService.generateMcpConfig(mcpIds); + return ApiResponse.success(config); + } +} diff --git a/back/src/main/java/com/linkwork/controller/McpUserConfigController.java b/back/src/main/java/com/linkwork/controller/McpUserConfigController.java new file mode 100644 index 0000000..dd9589b --- /dev/null +++ b/back/src/main/java/com/linkwork/controller/McpUserConfigController.java @@ -0,0 +1,70 @@ +package com.linkwork.controller; + +import com.linkwork.common.ApiResponse; +import com.linkwork.context.UserContext; +import com.linkwork.model.entity.McpUserConfigEntity; +import com.linkwork.service.McpUserConfigService; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import 
org.springframework.web.bind.annotation.*; + +import java.util.*; +import java.util.stream.Collectors; + +@Slf4j +@RestController +@RequestMapping("/api/v1/mcp-user-configs") +@CrossOrigin(origins = "*") +@RequiredArgsConstructor +public class McpUserConfigController { + + private final McpUserConfigService mcpUserConfigService; + + @GetMapping + public ApiResponse>> list() { + String userId = UserContext.getCurrentUserId(); + List configs = mcpUserConfigService.listByUser(userId); + List> result = configs.stream() + .map(c -> { + Map m = new LinkedHashMap<>(); + m.put("id", c.getId()); + m.put("mcpServerId", c.getMcpServerId()); + m.put("hasHeaders", c.getHeaders() != null && !c.getHeaders().isEmpty()); + m.put("hasUrlParams", c.getUrlParams() != null && !c.getUrlParams().isEmpty()); + m.put("createdAt", c.getCreatedAt()); + m.put("updatedAt", c.getUpdatedAt()); + return m; + }) + .collect(Collectors.toList()); + return ApiResponse.success(result); + } + + @GetMapping("/{mcpServerId}/detail") + public ApiResponse> detail(@PathVariable Long mcpServerId) { + String userId = UserContext.getCurrentUserId(); + McpUserConfigEntity config = mcpUserConfigService.getByUserAndServer(userId, mcpServerId); + if (config == null) { + return ApiResponse.success(Map.of("headers", Map.of(), "urlParams", Map.of())); + } + Map result = new LinkedHashMap<>(); + result.put("headers", config.getHeaders() != null ? config.getHeaders() : Map.of()); + result.put("urlParams", config.getUrlParams() != null ? 
config.getUrlParams() : Map.of()); + return ApiResponse.success(result); + } + + @PutMapping("/{mcpServerId}") + public ApiResponse> saveOrUpdate( + @PathVariable Long mcpServerId, + @RequestBody Map request) { + String userId = UserContext.getCurrentUserId(); + McpUserConfigEntity entity = mcpUserConfigService.saveOrUpdate(userId, mcpServerId, request); + return ApiResponse.success(Map.of("id", entity.getId())); + } + + @DeleteMapping("/{mcpServerId}") + public ApiResponse delete(@PathVariable Long mcpServerId) { + String userId = UserContext.getCurrentUserId(); + mcpUserConfigService.deleteConfig(userId, mcpServerId); + return ApiResponse.success(null); + } +} diff --git a/back/src/main/java/com/linkwork/controller/MemoryController.java b/back/src/main/java/com/linkwork/controller/MemoryController.java new file mode 100644 index 0000000..2155dc2 --- /dev/null +++ b/back/src/main/java/com/linkwork/controller/MemoryController.java @@ -0,0 +1,83 @@ +package com.linkwork.controller; + +import com.linkwork.model.dto.MemoryIndexBatchRequest; +import com.linkwork.model.dto.MemoryIngestRequest; +import com.linkwork.model.dto.MemorySearchRequest; +import com.linkwork.service.memory.MemoryService; +import jakarta.validation.Valid; +import lombok.RequiredArgsConstructor; +import org.springframework.http.ResponseEntity; +import org.springframework.web.bind.annotation.*; + +import java.util.List; +import java.util.Map; + +@org.springframework.boot.autoconfigure.condition.ConditionalOnProperty(name = "memory.enabled", havingValue = "true", matchIfMissing = true) +@RestController +@RequestMapping("/api/v1/memory") +@RequiredArgsConstructor +public class MemoryController { + + private final MemoryService memoryService; + + @PostMapping("/search") + public ResponseEntity>> search( + @RequestParam String workstationId, + @RequestParam String userId, + @Valid @RequestBody MemorySearchRequest request) { + List> results = memoryService.search( + workstationId, userId, 
request.getQuery(), request.getTopK()); + return ResponseEntity.ok(results); + } + + @PostMapping("/ingest") + public ResponseEntity> ingest( + @RequestParam String workstationId, + @RequestParam String userId, + @Valid @RequestBody MemoryIngestRequest request) { + memoryService.ingest(workstationId, userId, request.getContent(), request.getSource()); + return ResponseEntity.ok(Map.of("status", "queued")); + } + + @PostMapping("/index/file") + public ResponseEntity> indexFile( + @RequestParam String workstationId, + @RequestParam String userId, + @RequestParam String filePath) { + memoryService.triggerIndexFile(workstationId, userId, filePath); + return ResponseEntity.ok(Map.of("status", "queued")); + } + + @PostMapping("/index/batch") + public ResponseEntity> indexBatch( + @RequestParam String workstationId, + @RequestParam String userId, + @Valid @RequestBody MemoryIndexBatchRequest request) { + int count = memoryService.triggerBatchIndex(workstationId, userId, request.getFilePaths()); + return ResponseEntity.ok(Map.of("status", "queued", "fileCount", count)); + } + + @GetMapping("/recent") + public ResponseEntity>> recent( + @RequestParam String workstationId, + @RequestParam String userId, + @RequestParam(defaultValue = "5") int limit) { + return ResponseEntity.ok(memoryService.recent(workstationId, userId, limit)); + } + + @GetMapping("/stats") + public ResponseEntity> stats( + @RequestParam String workstationId, + @RequestParam String userId) { + return ResponseEntity.ok(memoryService.stats(workstationId, userId)); + } + + @DeleteMapping("/source") + public ResponseEntity> deleteSource( + @RequestParam String workstationId, + @RequestParam String userId, + @RequestParam String source) { + memoryService.deleteSource(workstationId, userId, source); + return ResponseEntity.ok(Map.of("status", "deleted")); + } +} diff --git a/back/src/main/java/com/linkwork/controller/ModelRegistryController.java 
b/back/src/main/java/com/linkwork/controller/ModelRegistryController.java new file mode 100644 index 0000000..171e688 --- /dev/null +++ b/back/src/main/java/com/linkwork/controller/ModelRegistryController.java @@ -0,0 +1,23 @@ +package com.linkwork.controller; + +import com.linkwork.service.ModelRegistryService; +import lombok.RequiredArgsConstructor; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.RestController; + +import java.util.Map; + +/** + * 模型列表接口(同源调用,后端代理模型网关) + */ +@RestController +@RequiredArgsConstructor +public class ModelRegistryController { + + private final ModelRegistryService modelRegistryService; + + @GetMapping("/api/v1/models") + public Map listModels() { + return modelRegistryService.fetchModels(); + } +} diff --git a/back/src/main/java/com/linkwork/controller/PublicTaskController.java b/back/src/main/java/com/linkwork/controller/PublicTaskController.java new file mode 100644 index 0000000..434da90 --- /dev/null +++ b/back/src/main/java/com/linkwork/controller/PublicTaskController.java @@ -0,0 +1,63 @@ +package com.linkwork.controller; + +import com.linkwork.common.ApiResponse; +import com.linkwork.model.dto.TaskResponse; +import com.linkwork.model.entity.Task; +import com.linkwork.service.TaskShareLinkService; +import com.linkwork.service.TaskService; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.web.bind.annotation.CrossOrigin; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RequestParam; +import org.springframework.web.bind.annotation.RestController; + +import java.util.HashMap; +import java.util.Map; + +/** + * 公共任务查询接口(免鉴权最小返回) + */ +@Slf4j +@RestController +@RequestMapping("/api/v1/public/tasks") +@CrossOrigin(origins = "*") 
@RequiredArgsConstructor
public class PublicTaskController {

    private final TaskService taskService;
    private final TaskShareLinkService taskShareLinkService;

    /**
     * Looks up the model used by a task.
     * GET /api/v1/public/tasks/{taskNo}/model
     *
     * <p>NOTE(review): this unauthenticated endpoint exposes the creator's userId —
     * confirm that is acceptable.
     */
    @GetMapping("/{taskNo}/model")
    public ApiResponse<Map<String, Object>> getTaskModel(@PathVariable String taskNo) {
        log.info("公共接口查询任务模型: taskNo={}", taskNo);

        Task task = taskService.getTaskByNo(taskNo);
        Map<String, Object> result = new HashMap<>();
        result.put("taskId", task.getTaskNo());
        result.put("modelId", task.getSelectedModel());
        result.put("userId", task.getCreatorId());
        return ApiResponse.success(result);
    }

    /**
     * Guest view of task details via a share token.
     * GET /api/v1/public/tasks/{taskNo}/share-detail?token=...
     */
    @GetMapping("/{taskNo}/share-detail")
    public ApiResponse<TaskResponse> getSharedTaskDetail(
            @PathVariable String taskNo,
            @RequestParam("token") String token) {
        log.info("公共接口查询任务分享详情: taskNo={}", taskNo);

        // Token must validate before any task data is returned.
        taskShareLinkService.validateShareToken(taskNo, token);
        Task task = taskService.getTaskByNo(taskNo);
        return ApiResponse.success(taskService.toShareResponse(task));
    }
}

// ===== File: back/src/main/java/com/linkwork/controller/ReportExportController.java =====

package com.linkwork.controller;

import com.linkwork.common.ApiResponse;
import com.linkwork.common.ForbiddenOperationException;
import com.linkwork.context.UserContext;
import com.linkwork.context.UserInfo;
import com.linkwork.model.dto.ReportExportFieldResponse;
import com.linkwork.model.dto.ReportExportRequest;
import com.linkwork.service.ReportExportService;
import jakarta.validation.Valid;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.http.ContentDisposition;
import org.springframework.http.HttpHeaders;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.CrossOrigin;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.servlet.mvc.method.annotation.StreamingResponseBody;

import java.nio.charset.StandardCharsets;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.HashSet;
import java.util.Set;

/**
 * Operational report export controller. Access is restricted to an allow-list
 * configured via robot.k8s-monitor.allowed-users.
 */
@Slf4j
@RestController
@RequestMapping("/api/v1/reports")
@CrossOrigin(origins = "*")
@RequiredArgsConstructor
public class ReportExportController {

    private static final DateTimeFormatter FILE_TIME_FORMATTER = DateTimeFormatter.ofPattern("yyyyMMdd_HHmmss");
    private final ReportExportService reportExportService;

    // Comma-separated workIds/userIds allowed to export; empty means nobody.
    @Value("${robot.k8s-monitor.allowed-users:}")
    private String allowedUsersConfig;

    /** Lists exportable fields for the given report type. */
    @GetMapping("/export/fields")
    public ApiResponse<ReportExportFieldResponse> listExportFields(@RequestParam String type) {
        checkPermission();
        return ApiResponse.success(reportExportService.listFields(type));
    }

    /** Streams a CSV export; any type other than "role" is coerced to "task". */
    @PostMapping("/export")
    public ResponseEntity<StreamingResponseBody> export(@Valid @RequestBody ReportExportRequest request) {
        checkPermission();

        String normalizedType = "role".equalsIgnoreCase(request.getType()) ? "role" : "task";
        String fileName = normalizedType + "-report-" + LocalDateTime.now().format(FILE_TIME_FORMATTER) + ".csv";

        // Streaming body avoids buffering the whole CSV in memory.
        StreamingResponseBody responseBody = outputStream -> reportExportService.exportCsv(request, outputStream);

        return ResponseEntity.ok()
                .header(HttpHeaders.CACHE_CONTROL, "no-store, no-cache, must-revalidate, max-age=0")
                .contentType(MediaType.parseMediaType("text/csv;charset=UTF-8"))
                .header(HttpHeaders.CONTENT_DISPOSITION, ContentDisposition.attachment()
                        .filename(fileName, StandardCharsets.UTF_8)
                        .build()
                        .toString())
                .body(responseBody);
    }

    /** Rejects unauthenticated callers and anyone not on the allow-list. */
    private void checkPermission() {
        UserInfo user = UserContext.get();
        if (user == null) {
            throw new ForbiddenOperationException("未登录");
        }
        if (!isUserAllowed(user)) {
            log.warn("报表导出访问被拒绝: userId={}, workId={}, name={}", user.getUserId(), user.getWorkId(), user.getName());
            throw new ForbiddenOperationException("无权访问报表导出功能");
        }
    }

    /** Matches either the workId or the userId against the allow-list. */
    private boolean isUserAllowed(UserInfo user) {
        Set<String> allowed = getAllowedUsers();
        if (user.getWorkId() != null && allowed.contains(user.getWorkId().trim())) {
            return true;
        }
        return user.getUserId() != null && allowed.contains(user.getUserId().trim());
    }

    /** Parses the comma-separated allow-list, trimming and dropping empties. */
    private Set<String> getAllowedUsers() {
        Set<String> set = new HashSet<>();
        for (String id : allowedUsersConfig.split(",")) {
            String trimmed = id.trim();
            if (!trimmed.isEmpty()) {
                set.add(trimmed);
            }
        }
        return set;
    }
}

// ===== File: back/src/main/java/com/linkwork/controller/RoleController.java =====

package com.linkwork.controller;

import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.linkwork.context.UserContext;
import com.linkwork.model.entity.McpServerEntity;
+import com.linkwork.model.entity.RoleEntity; +import com.linkwork.model.entity.SkillEntity; +import com.linkwork.model.enums.DeployMode; +import com.linkwork.service.K8sOrchestrator; +import com.linkwork.service.McpServerService; +import com.linkwork.service.RoleService; +import com.linkwork.service.RuntimeModeService; +import com.linkwork.service.ServiceSnapshotService; +import com.linkwork.service.SkillService; +import lombok.RequiredArgsConstructor; +import org.springframework.util.StringUtils; +import org.springframework.web.bind.annotation.CrossOrigin; +import org.springframework.web.bind.annotation.DeleteMapping; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.PostMapping; +import org.springframework.web.bind.annotation.PutMapping; +import org.springframework.web.bind.annotation.RequestBody; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RequestParam; +import org.springframework.web.bind.annotation.RestController; + +import java.time.LocalDateTime; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +@RestController +@RequestMapping("/api/v1/roles") +@CrossOrigin(origins = "*") +@RequiredArgsConstructor +public class RoleController { + private static final java.util.Set SUPPORTED_ROLE_STATUSES = java.util.Set.of("active", "maintenance", "disabled"); + + private final RoleService roleService; + private final RuntimeModeService runtimeModeService; + private final McpServerService mcpServerService; + private final SkillService skillService; + private final K8sOrchestrator k8sOrchestrator; + private final ServiceSnapshotService snapshotService; + + private String getCurrentUserId() { + return UserContext.getCurrentUserId(); + } + + private String 
getCurrentUsername() { + return UserContext.getCurrentUserName(); + } + + @GetMapping + public Map listRoles( + @RequestParam(defaultValue = "1") int page, + @RequestParam(defaultValue = "20") int pageSize, + @RequestParam(required = false) String query, + @RequestParam(required = false) String category, + @RequestParam(required = false) String status, + @RequestParam(defaultValue = "all") String scope + ) { + String userId = getCurrentUserId(); + Page result = roleService.listRoles(page, pageSize, query, category, scope, status, userId); + Map favoriteCountMap = roleService.queryFavoriteCountMap( + result.getRecords().stream().map(RoleEntity::getId).collect(Collectors.toList()) + ); + + Map response = new HashMap<>(); + response.put("code", 0); + response.put("msg", "success"); + + Map data = new HashMap<>(); + List> items = result.getRecords().stream().map(role -> { + RuntimeModeService.RuntimeSnapshot runtimeSnapshot = runtimeModeService.resolveForRole(role); + + Map map = new HashMap<>(); + map.put("id", role.getId().toString()); + map.put("name", role.getName()); + map.put("description", role.getDescription()); + map.put("category", role.getCategory()); + map.put("icon", role.getIcon()); + map.put("image", role.getImage()); + map.put("status", coerceRoleStatus(role.getStatus())); + map.put("deployMode", resolveDeployMode(role)); + map.put("runtimeMode", runtimeSnapshot.getRuntimeMode()); + map.put("zzMode", runtimeSnapshot.getZzMode()); + map.put("runnerImage", runtimeSnapshot.getRunnerImage()); + map.put("memoryEnabled", resolveMemoryEnabled(role)); + map.put("isMine", userId.equals(role.getCreatorId())); + map.put("isFavorite", roleService.isFavorite(role.getId(), userId)); + map.put("favoriteCount", favoriteCountMap.getOrDefault(role.getId(), 0L)); + map.put("isPublic", Boolean.TRUE.equals(role.getIsPublic())); + map.put("maxEmployees", role.getMaxEmployees()); + + Map resourceCount = new HashMap<>(); + if (role.getConfigJson() != null) { + 
resourceCount.put("mcp", role.getConfigJson().getMcp() != null ? role.getConfigJson().getMcp().size() : 0); + resourceCount.put("skills", role.getConfigJson().getSkills() != null ? role.getConfigJson().getSkills().size() : 0); + } + map.put("resourceCount", resourceCount); + + return map; + }).collect(Collectors.toList()); + + data.put("items", items); + + Map pagination = new HashMap<>(); + pagination.put("page", result.getCurrent()); + pagination.put("pageSize", result.getSize()); + pagination.put("total", result.getTotal()); + pagination.put("totalPages", result.getPages()); + data.put("pagination", pagination); + + response.put("data", data); + response.put("timestamp", LocalDateTime.now().toString()); + + return response; + } + + @GetMapping("/hot") + public Map listHotRoles(@RequestParam(defaultValue = "4") int limit) { + String userId = getCurrentUserId(); + List roles = roleService.listHotRoles(limit, userId); + Map favoriteCountMap = roleService.queryFavoriteCountMap( + roles.stream().map(RoleEntity::getId).collect(Collectors.toList()) + ); + + List> items = roles.stream().map(role -> { + Map map = new HashMap<>(); + map.put("id", role.getId().toString()); + map.put("name", role.getName()); + map.put("description", role.getDescription()); + map.put("category", role.getCategory()); + map.put("status", coerceRoleStatus(role.getStatus())); + map.put("favoriteCount", favoriteCountMap.getOrDefault(role.getId(), 0L)); + map.put("isFavorite", roleService.isFavorite(role.getId(), userId)); + return map; + }).collect(Collectors.toList()); + + Map response = new HashMap<>(); + response.put("code", 0); + response.put("msg", "success"); + response.put("data", Map.of("items", items, "limit", Math.max(1, limit))); + response.put("timestamp", LocalDateTime.now().toString()); + return response; + } + + @GetMapping("/{id:\\d+}") + public Map getRole(@PathVariable Long id) { + String userId = getCurrentUserId(); + RoleEntity role = roleService.getRoleForRead(id, userId); + 
RuntimeModeService.RuntimeSnapshot runtimeSnapshot = runtimeModeService.resolveForRole(role); + + Map response = new HashMap<>(); + response.put("code", 0); + response.put("msg", "success"); + + Map data = new HashMap<>(); + data.put("id", role.getId().toString()); + data.put("name", role.getName()); + data.put("description", role.getDescription()); + data.put("prompt", role.getPrompt()); + data.put("category", role.getCategory()); + data.put("icon", role.getIcon()); + data.put("image", role.getImage()); + data.put("status", coerceRoleStatus(role.getStatus())); + data.put("deployMode", resolveDeployMode(role)); + data.put("runtimeMode", runtimeSnapshot.getRuntimeMode()); + data.put("zzMode", runtimeSnapshot.getZzMode()); + data.put("runnerImage", runtimeSnapshot.getRunnerImage()); + data.put("memoryEnabled", resolveMemoryEnabled(role)); + data.put("isMine", userId.equals(role.getCreatorId())); + data.put("isFavorite", roleService.isFavorite(id, userId)); + data.put("isPublic", Boolean.TRUE.equals(role.getIsPublic())); + data.put("maxEmployees", role.getMaxEmployees()); + data.put("createdAt", role.getCreatedAt() != null ? 
role.getCreatedAt().toString() : null); + + if (role.getConfigJson() != null) { + RoleEntity.RoleConfig config = role.getConfigJson(); + List> mcpModules = resolveMcpModules(config.getMcp()); + data.put("mcpModules", mcpModules); + data.put("mcpServers", mcpModules); // 兼容历史前端字段 + data.put("skills", resolveSkillModules(config.getSkills())); + + data.put("gitRepos", config.getGitRepos()); + data.put("envVars", config.getEnv()); + } + + response.put("data", data); + return response; + } + + private String resolveDeployMode(RoleEntity role) { + if (role == null || role.getConfigJson() == null) { + return DeployMode.K8S.name(); + } + + String rawDeployMode = role.getConfigJson().getDeployMode(); + if (!StringUtils.hasText(rawDeployMode)) { + return DeployMode.K8S.name(); + } + + try { + return DeployMode.valueOf(rawDeployMode.trim().toUpperCase()).name(); + } catch (IllegalArgumentException ex) { + throw new IllegalArgumentException("非法部署模式: " + rawDeployMode + ", roleId=" + role.getId()); + } + } + + private String coerceRoleStatus(String rawStatus) { + if (!StringUtils.hasText(rawStatus)) { + return "active"; + } + String normalized = rawStatus.trim().toLowerCase(); + return SUPPORTED_ROLE_STATUSES.contains(normalized) ? 
normalized : "active"; + } + + private boolean resolveMemoryEnabled(RoleEntity role) { + if (role == null || role.getConfigJson() == null) { + return false; + } + return Boolean.TRUE.equals(role.getConfigJson().getMemoryEnabled()); + } + + private List> resolveMcpModules(List refs) { + if (refs == null || refs.isEmpty()) { + return List.of(); + } + + Map byRef = new HashMap<>(); + List idRefs = new ArrayList<>(); + List nameRefs = new ArrayList<>(); + + for (String ref : refs) { + if (!StringUtils.hasText(ref)) { + continue; + } + String trimmedRef = ref.trim(); + try { + idRefs.add(Long.parseLong(trimmedRef)); + } catch (NumberFormatException ex) { + nameRefs.add(trimmedRef); + } + } + + if (!idRefs.isEmpty()) { + List mcpByIds = mcpServerService.list( + new LambdaQueryWrapper().in(McpServerEntity::getId, idRefs) + ); + for (McpServerEntity entity : mcpByIds) { + byRef.put(String.valueOf(entity.getId()), entity); + } + } + + if (!nameRefs.isEmpty()) { + List mcpByNames = mcpServerService.list( + new LambdaQueryWrapper().in(McpServerEntity::getName, nameRefs) + ); + for (McpServerEntity entity : mcpByNames) { + byRef.put(entity.getName(), entity); + } + } + + List> modules = new ArrayList<>(); + for (String ref : refs) { + if (!StringUtils.hasText(ref)) { + continue; + } + String trimmedRef = ref.trim(); + McpServerEntity entity = byRef.get(trimmedRef); + if (entity != null) { + modules.add(buildModule(String.valueOf(entity.getId()), entity.getName(), entity.getDescription())); + } else { + modules.add(buildModule(trimmedRef, trimmedRef, "MCP 配置不存在")); + } + } + return modules; + } + + private List> resolveSkillModules(List refs) { + if (refs == null || refs.isEmpty()) { + return List.of(); + } + + Map byRef = new HashMap<>(); + List idRefs = new ArrayList<>(); + List nameRefs = new ArrayList<>(); + + for (String ref : refs) { + if (!StringUtils.hasText(ref)) { + continue; + } + String trimmedRef = ref.trim(); + try { + idRefs.add(Long.parseLong(trimmedRef)); + } 
catch (NumberFormatException ex) { + nameRefs.add(trimmedRef); + } + } + + if (!idRefs.isEmpty()) { + List skillsByIds = skillService.list( + new LambdaQueryWrapper().in(SkillEntity::getId, idRefs) + ); + for (SkillEntity entity : skillsByIds) { + byRef.put(String.valueOf(entity.getId()), entity); + } + } + + if (!nameRefs.isEmpty()) { + List skillsByNames = skillService.list( + new LambdaQueryWrapper().in(SkillEntity::getName, nameRefs) + ); + for (SkillEntity entity : skillsByNames) { + byRef.put(entity.getName(), entity); + } + } + + List> modules = new ArrayList<>(); + for (String ref : refs) { + if (!StringUtils.hasText(ref)) { + continue; + } + String trimmedRef = ref.trim(); + SkillEntity entity = byRef.get(trimmedRef); + if (entity != null) { + modules.add(buildModule(String.valueOf(entity.getId()), entity.getName(), entity.getDescription())); + } else { + modules.add(buildModule(trimmedRef, trimmedRef, "Skill 配置不存在")); + } + } + return modules; + } + + private Map buildModule(String id, String name, String desc) { + Map module = new LinkedHashMap<>(); + module.put("id", id); + module.put("name", StringUtils.hasText(name) ? name : id); + module.put("desc", StringUtils.hasText(desc) ? desc : ""); + return module; + } + + /** + * 查询岗位下存活的 Pod 状态 + * + * 返回 runningPods(名称列表)、podCount、maxPodCount、alive(是否有存活 Pod)。 + */ + @GetMapping("/{id:\\d+}/pods") + public Map getRolePods(@PathVariable Long id) { + String serviceId = String.valueOf(id); + + List runningPods; + try { + runningPods = k8sOrchestrator.getRunningPods(serviceId); + } catch (Exception e) { + runningPods = List.of(); + } + + var snapshot = snapshotService.getSnapshot(serviceId); + int maxPodCount = snapshot != null && snapshot.getMaxPodCount() != null + ? 
snapshot.getMaxPodCount() : 0; + + Map data = new LinkedHashMap<>(); + data.put("roleId", id.toString()); + data.put("serviceId", serviceId); + data.put("alive", !runningPods.isEmpty()); + data.put("podCount", runningPods.size()); + data.put("maxPodCount", maxPodCount); + data.put("runningPods", runningPods); + + Map response = new HashMap<>(); + response.put("code", 0); + response.put("msg", "success"); + response.put("data", data); + return response; + } + + @PostMapping + public Map createRole(@RequestBody RoleEntity role) { + RoleEntity created = roleService.createRole(role, getCurrentUserId(), getCurrentUsername()); + + Map response = new HashMap<>(); + response.put("code", 0); + response.put("msg", "success"); + response.put("data", Map.of("id", created.getId().toString())); + return response; + } + + @PutMapping("/{id}") + public Map updateRole(@PathVariable Long id, @RequestBody RoleEntity role) { + roleService.updateRole(id, role, getCurrentUserId()); + + Map response = new HashMap<>(); + response.put("code", 0); + response.put("msg", "success"); + return response; + } + + @DeleteMapping("/{id}") + public Map deleteRole(@PathVariable Long id) { + roleService.deleteRole(id, getCurrentUserId()); + Map response = new HashMap<>(); + response.put("code", 0); + response.put("msg", "success"); + return response; + } + + @PostMapping("/{id}/favorite") + public Map toggleFavorite(@PathVariable Long id, @RequestBody Map body) { + boolean favorite = body.getOrDefault("favorite", false); + roleService.toggleFavorite(id, getCurrentUserId(), favorite); + + Map response = new HashMap<>(); + response.put("code", 0); + response.put("msg", "success"); + return response; + } +} diff --git a/back/src/main/java/com/linkwork/controller/ScheduleController.java b/back/src/main/java/com/linkwork/controller/ScheduleController.java new file mode 100644 index 0000000..1170d96 --- /dev/null +++ b/back/src/main/java/com/linkwork/controller/ScheduleController.java @@ -0,0 +1,319 @@ 
+package com.linkwork.controller; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; +import com.fasterxml.jackson.dataformat.yaml.YAMLGenerator; +import com.linkwork.common.ApiResponse; +import com.linkwork.model.dto.*; +import com.linkwork.model.enums.DeployMode; +import com.linkwork.model.enums.PodMode; +import com.linkwork.service.K8sOrchestrator; +import com.linkwork.service.ServiceResumeService; +import com.linkwork.service.ServiceScaleService; +import com.linkwork.service.ServiceScheduleService; +import com.linkwork.service.ServiceSnapshotService; +import jakarta.validation.Valid; +import lombok.extern.slf4j.Slf4j; +import org.springframework.core.io.ByteArrayResource; +import org.springframework.http.HttpHeaders; +import org.springframework.http.HttpStatus; +import org.springframework.http.MediaType; +import org.springframework.http.ResponseEntity; +import org.springframework.validation.annotation.Validated; +import org.springframework.web.bind.annotation.*; + +/** + * 调度控制器 + */ +@RestController +@RequestMapping("/api/v1/schedule") +@Validated +@Slf4j +public class ScheduleController { + + private final ServiceScheduleService scheduleService; + private final K8sOrchestrator orchestrator; + private final ServiceSnapshotService snapshotService; + private final ServiceResumeService resumeService; + private final ServiceScaleService scaleService; + private final ObjectMapper yamlMapper; + + public ScheduleController(ServiceScheduleService scheduleService, + K8sOrchestrator orchestrator, + ServiceSnapshotService snapshotService, + ServiceResumeService resumeService, + ServiceScaleService scaleService) { + this.scheduleService = scheduleService; + this.orchestrator = orchestrator; + this.snapshotService = snapshotService; + this.resumeService = resumeService; + this.scaleService = scaleService; + this.yamlMapper = new ObjectMapper(new YAMLFactory() + .enable(YAMLGenerator.Feature.MINIMIZE_QUOTES) + 
.disable(YAMLGenerator.Feature.WRITE_DOC_START_MARKER)); + } + + @PostMapping("/build") + public ResponseEntity> build( + @Valid @RequestBody ServiceBuildRequest request) { + + log.info("Received build request for service {}", request.getServiceId()); + + if (request.getDeployMode() == DeployMode.COMPOSE) { + return ResponseEntity.badRequest() + .body(ApiResponse.error("COMPOSE 模式请使用 /compose/generate 接口")); + } + + if (request.getPodMode() == PodMode.SIDECAR && request.getDeployMode() != DeployMode.K8S) { + return ResponseEntity.badRequest() + .body(ApiResponse.error("Sidecar 模式仅支持 K8s 部署")); + } + + ServiceBuildResult result = scheduleService.build(request); + + if (result.isSuccess()) { + // 判断状态:BUILDING 返回 202 Accepted,SUCCESS 返回 200 OK + if ("BUILDING".equals(result.getStatus())) { + log.info("Build task submitted for service {}, returning 202 Accepted", request.getServiceId()); + return ResponseEntity.status(HttpStatus.ACCEPTED) + .body(ApiResponse.success(result)); + } else { + snapshotService.saveSnapshot(request, result); + return ResponseEntity.ok(ApiResponse.success(result)); + } + } else { + return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR) + .body(ApiResponse.error(result.getErrorMessage())); + } + } + + @PostMapping("/preview") + public ResponseEntity> preview( + @Valid @RequestBody ServiceBuildRequest request) { + + log.info("Received preview request for service {}", request.getServiceId()); + + GeneratedSpec spec = scheduleService.preview(request); + return ResponseEntity.ok(ApiResponse.success(spec)); + } + + @PostMapping("/preview/yaml") + public ResponseEntity previewYaml( + @Valid @RequestBody ServiceBuildRequest request) { + + log.info("Received preview YAML request for service {}", request.getServiceId()); + + try { + GeneratedSpec spec = scheduleService.preview(request); + + StringBuilder yaml = new StringBuilder(); + yaml.append("# ========== PodGroup ==========\n"); + 
yaml.append(yamlMapper.writeValueAsString(spec.getPodGroupSpec())); + yaml.append("\n---\n"); + + for (int i = 0; i < spec.getPodSpecs().size(); i++) { + yaml.append("# ========== Pod ").append(i).append(" ==========\n"); + yaml.append(yamlMapper.writeValueAsString(spec.getPodSpecs().get(i))); + if (i < spec.getPodSpecs().size() - 1) { + yaml.append("\n---\n"); + } + } + + return ResponseEntity.ok() + .contentType(MediaType.parseMediaType("text/yaml")) + .body(yaml.toString()); + } catch (Exception e) { + log.error("Failed to generate YAML: {}", e.getMessage()); + return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR) + .body("Error: " + e.getMessage()); + } + } + + @PostMapping("/config") + public ResponseEntity> getMergedConfig( + @Valid @RequestBody ServiceBuildRequest request) { + + log.info("Received config request for service {}", request.getServiceId()); + + MergedConfig config = scheduleService.getMergedConfig(request); + return ResponseEntity.ok(ApiResponse.success(config)); + } + + @GetMapping("/status/{serviceId}") + public ResponseEntity> getStatus( + @PathVariable String serviceId) { + + ServiceStatusResponse status = orchestrator.getServiceStatus(serviceId); + + if (status == null || status.getPodGroupStatus() == null) { + return ResponseEntity.status(HttpStatus.NOT_FOUND) + .body(ApiResponse.error("Service not found: " + serviceId)); + } + + return ResponseEntity.ok(ApiResponse.success(status)); + } + + @PostMapping("/resume/{serviceId}") + public ResponseEntity> resume( + @PathVariable String serviceId) { + + log.info("Received resume request for service {}", serviceId); + + ServiceResumeResult result = resumeService.resume(serviceId); + + if (result.isSuccess()) { + return ResponseEntity.ok(ApiResponse.success(result)); + } else if (result.isRequireFullRequest()) { + return ResponseEntity.status(HttpStatus.NOT_FOUND) + .body(ApiResponse.error(result.getMessage())); + } else { + return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR) 
+ .body(ApiResponse.error(result.getMessage())); + } + } + +// @PostMapping("/{serviceId}/stop") +// public ResponseEntity> stop( +// @PathVariable String serviceId, +// @RequestParam(defaultValue = "true") boolean graceful) { +// +// log.info("Received stop request for service {}, graceful={}", serviceId, graceful); +// +// StopResult result = orchestrator.stopService(serviceId, graceful); +// +// if (result.isSuccess()) { +// snapshotService.onServiceShutdown(serviceId); +// return ResponseEntity.ok(ApiResponse.success(result)); +// } else { +// return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR) +// .body(ApiResponse.error(result.getErrorMessage())); +// } +// } + + @DeleteMapping("/{serviceId}") + public ResponseEntity delete(@PathVariable String serviceId) { + log.info("Received delete request for service {}", serviceId); + + orchestrator.cleanupService(serviceId); + + return ResponseEntity.noContent().build(); + } + + /** + * 生成 Compose 构建包(zip 下载) + * + * 返回 zip 包含 docker-compose.yaml、Dockerfile、build.sh 等构建所需文件。 + * 用户解压后执行 docker compose up --build -d 即可在本地构建镜像并启动服务。 + */ + @PostMapping("/compose/generate") + public ResponseEntity generateCompose( + @Valid @RequestBody ServiceBuildRequest request) { + + log.info("Received compose package request for service {}", request.getServiceId()); + + request.setPodMode(PodMode.ALONE); + request.setDeployMode(DeployMode.COMPOSE); + + ServiceBuildResult result = scheduleService.generateComposePackage(request); + + if (!result.isSuccess()) { + log.error("Compose package generation failed for service {}: {}", + request.getServiceId(), result.getErrorMessage()); + return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR) + .body(ApiResponse.error(result.getErrorMessage())); + } + + String filename = "ai-worker-" + request.getServiceId() + ".tar.gz"; + log.info("Compose package ready for service {}, filename: {}", request.getServiceId(), filename); + + return ResponseEntity.ok() + 
.header(HttpHeaders.CONTENT_DISPOSITION, "attachment; filename=\"" + filename + "\"") + .contentType(MediaType.parseMediaType("application/gzip")) + .body(new ByteArrayResource(result.getComposeTar())); + } + + @PostMapping("/{serviceId}/scale-down") + public ResponseEntity> scaleDown( + @PathVariable String serviceId, + @RequestBody ScaleRequest request) { + + String podName = request != null ? request.getPodName() : null; + String source = request != null ? request.getSource() : "api"; + + if (podName == null || podName.isEmpty()) { + return ResponseEntity.badRequest() + .body(ApiResponse.error("podName is required for scale-down")); + } + + log.info("Received scale-down request for service {}, podName={}, source={}", + serviceId, podName, source); + + ScaleResult result = scaleService.scaleDown(serviceId, podName, source); + + if (result.isSuccess()) { + return ResponseEntity.ok(ApiResponse.success(result)); + } else { + return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR) + .body(ApiResponse.error(result.getErrorMessage())); + } + } + + @PostMapping("/{serviceId}/scale-up") + public ResponseEntity> scaleUp( + @PathVariable String serviceId, + @RequestBody(required = false) ScaleRequest request) { + + Integer targetPodCount = request != null ? request.getTargetPodCount() : null; + String source = request != null ? 
request.getSource() : "api"; + + log.info("Received scale-up request for service {}, targetPodCount={}, source={}", + serviceId, targetPodCount, source); + + ScaleResult result = scaleService.scaleUp(serviceId, targetPodCount, source); + + if (result.isSuccess()) { + return ResponseEntity.ok(ApiResponse.success(result)); + } else { + return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR) + .body(ApiResponse.error(result.getErrorMessage())); + } + } + + @PostMapping("/{serviceId}/scale") + public ResponseEntity> scale( + @PathVariable String serviceId, + @RequestBody ScaleRequest request) { + + if (request.getTargetPodCount() == null || request.getTargetPodCount() < 0) { + return ResponseEntity.badRequest() + .body(ApiResponse.error("targetPodCount is required and must be >= 0")); + } + + String source = request.getSource() != null ? request.getSource() : "api"; + + log.info("Received scale request for service {}, targetPodCount={}, source={}", + serviceId, request.getTargetPodCount(), source); + + ScaleResult result = scaleService.scale(serviceId, request.getTargetPodCount(), source); + + if (result.isSuccess()) { + return ResponseEntity.ok(ApiResponse.success(result)); + } else { + return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR) + .body(ApiResponse.error(result.getErrorMessage())); + } + } + + @GetMapping("/{serviceId}/scale") + public ResponseEntity> getScaleStatus( + @PathVariable String serviceId) { + + ScaleResult result = scaleService.getScaleStatus(serviceId); + return ResponseEntity.ok(ApiResponse.success(result)); + } + + @GetMapping("/health") + public ResponseEntity> health() { + return ResponseEntity.ok(ApiResponse.success("OK")); + } +} diff --git a/back/src/main/java/com/linkwork/controller/SecurityPolicyController.java b/back/src/main/java/com/linkwork/controller/SecurityPolicyController.java new file mode 100644 index 0000000..139dd87 --- /dev/null +++ b/back/src/main/java/com/linkwork/controller/SecurityPolicyController.java 
package com.linkwork.controller;

import com.linkwork.common.ApiResponse;
import com.linkwork.context.UserContext;
import com.linkwork.service.SecurityPolicyService;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.web.bind.annotation.*;

import java.util.List;
import java.util.Map;

/**
 * Security policy controller (CRUD + enable/disable toggle).
 *
 * NOTE(review): generic type parameters were stripped by extraction and have
 * been reconstructed from usage; confirm against the original source.
 */
@Slf4j
@RestController
@RequestMapping("/api/v1/security/policies")
@CrossOrigin(origins = "*") // NOTE(review): wide-open CORS — confirm this is intended for production.
@RequiredArgsConstructor
public class SecurityPolicyController {

    private final SecurityPolicyService policyService;

    /**
     * Lists all security policies.
     * GET /api/v1/security/policies
     */
    @GetMapping
    public ApiResponse<List<Map<String, Object>>> listPolicies() {
        log.info("获取安全策略列表");
        return ApiResponse.success(policyService.listPolicies());
    }

    /**
     * Fetches one policy by id.
     * GET /api/v1/security/policies/{id}
     */
    @GetMapping("/{id}")
    public ApiResponse<Map<String, Object>> getPolicy(@PathVariable Long id) {
        log.info("获取安全策略详情: id={}", id);
        return ApiResponse.success(policyService.getPolicy(id));
    }

    /**
     * Creates a custom policy, attributed to the current user.
     * POST /api/v1/security/policies
     */
    @PostMapping
    public ApiResponse<Map<String, Object>> createPolicy(@RequestBody Map<String, Object> request) {
        String userId = UserContext.getCurrentUserId();
        String userName = UserContext.getCurrentUserName();
        log.info("创建安全策略: name={}, userId={}", request.get("name"), userId);
        return ApiResponse.success(policyService.createPolicy(request, userId, userName));
    }

    /**
     * Updates a policy.
     * PUT /api/v1/security/policies/{id}
     */
    @PutMapping("/{id}")
    public ApiResponse<Map<String, Object>> updatePolicy(
            @PathVariable Long id, @RequestBody Map<String, Object> request) {
        log.info("更新安全策略: id={}", id);
        return ApiResponse.success(policyService.updatePolicy(id, request));
    }

    /**
     * Toggles a policy between enabled and disabled.
     * POST /api/v1/security/policies/{id}/toggle
     */
    @PostMapping("/{id}/toggle")
    public ApiResponse<Map<String, Object>> togglePolicy(@PathVariable Long id) {
        log.info("切换安全策略: id={}", id);
        return ApiResponse.success(policyService.togglePolicy(id));
    }

    /**
     * Deletes a policy.
     * DELETE /api/v1/security/policies/{id}
     */
    @DeleteMapping("/{id}")
    public ApiResponse<Void> deletePolicy(@PathVariable Long id) {
        log.info("删除安全策略: id={}", id);
        policyService.deletePolicy(id);
        return ApiResponse.success(null);
    }
}

// ----- file: back/src/main/java/com/linkwork/controller/SkillController.java -----
package com.linkwork.controller;

import com.linkwork.common.ApiResponse;
import com.linkwork.common.ForbiddenOperationException;
import com.linkwork.common.ResourceNotFoundException;
import com.linkwork.context.UserContext;
import com.linkwork.service.SkillService;
import jakarta.servlet.http.HttpServletRequest;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.util.StringUtils;
import org.springframework.web.bind.annotation.*;

import java.net.URLDecoder;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Map;

/**
 * Skill controller — Git-based skill management.
 *
 * File paths inside a skill are addressed via a "/files/**" wildcard and
 * extracted from the raw request URI (see {@link #extractFilePath}).
 */
@Slf4j
@RestController
@RequestMapping("/api/v1/skills")
@CrossOrigin(origins = "*") // NOTE(review): wide-open CORS — confirm this is intended for production.
@RequiredArgsConstructor
public class SkillController {

    private final SkillService skillService;

    /** Paged skill listing, optionally filtered by status and keyword. */
    @GetMapping
    public ApiResponse<Map<String, Object>> listSkills(
            @RequestParam(defaultValue = "1") int page,
            @RequestParam(defaultValue = "20") int pageSize,
            @RequestParam(required = false) String status,
            @RequestParam(required = false) String keyword) {
        String userId = UserContext.getCurrentUserId();
        Map<String, Object> data = skillService.listSkills(page, pageSize, status, keyword, userId);
        return ApiResponse.success(data);
    }

    /** All available skills — used by the role-configuration dropdown. */
    @GetMapping("/available")
    public ApiResponse<List<Map<String, Object>>> listAvailable() {
        String userId = UserContext.getCurrentUserId();
        List<Map<String, Object>> data = skillService.listAllAvailable(userId);
        return ApiResponse.success(data);
    }

    /** Syncs every skill from the Git repository; returns the number synced. */
    @PostMapping("/sync")
    public ApiResponse<Map<String, Object>> syncFromGit() {
        try {
            int count = skillService.syncAllFromGit();
            return ApiResponse.success(Map.of("synced", count));
        } catch (Exception e) {
            log.error("Failed to sync skills from git", e);
            return ApiResponse.error(500, e.getMessage());
        }
    }

    /** Skill detail including its file tree. */
    @GetMapping("/{name}")
    public ApiResponse<Map<String, Object>> getSkillDetail(@PathVariable String name) {
        try {
            String userId = UserContext.getCurrentUserId();
            Map<String, Object> detail = skillService.getSkillDetail(name, userId);
            return ApiResponse.success(detail);
        } catch (ResourceNotFoundException e) {
            return ApiResponse.error(404, e.getMessage());
        } catch (ForbiddenOperationException e) {
            return ApiResponse.error(403, e.getMessage());
        } catch (Exception e) {
            log.error("Failed to get skill detail: {}", name, e);
            return ApiResponse.error(500, e.getMessage());
        }
    }

    /**
     * Reads a skill file. Also returns the branch head commit id so the
     * frontend can implement optimistic locking on subsequent commits.
     */
    @GetMapping("/{name}/files/**")
    public ApiResponse<Map<String, Object>> getFile(@PathVariable String name, HttpServletRequest request) {
        String path = null;
        try {
            path = extractFilePath(request, name);
            String userId = UserContext.getCurrentUserId();
            String content = skillService.getFile(name, path, userId);
            String commitId = skillService.getLatestCommitId(name, userId);
            return ApiResponse.success(Map.of(
                    "content", content,
                    "path", path,
                    "commitId", commitId != null ? commitId : ""
            ));
        } catch (ResourceNotFoundException e) {
            return ApiResponse.error(404, e.getMessage());
        } catch (ForbiddenOperationException e) {
            return ApiResponse.error(403, e.getMessage());
        } catch (IllegalArgumentException e) {
            return ApiResponse.error(400, e.getMessage());
        } catch (Exception e) {
            // Log the already-extracted path; the original re-invoked
            // extractFilePath here, which can itself throw inside the catch.
            log.error("Failed to get file: {}/{}", name, path, e);
            return ApiResponse.error(500, e.getMessage());
        }
    }

    /** Edits a skill file and commits it; lastCommitId enforces the optimistic lock. */
    @PutMapping("/{name}/files/**")
    public ApiResponse<Map<String, Object>> commitFile(@PathVariable String name,
                                                       @RequestBody Map<String, Object> body,
                                                       HttpServletRequest request) {
        String path = null;
        try {
            path = extractFilePath(request, name);
            String content = (String) body.get("content");
            String commitMessage = (String) body.get("commitMessage");
            String lastCommitId = (String) body.get("lastCommitId");
            if (!StringUtils.hasText(commitMessage)) {
                throw new IllegalArgumentException("commitMessage 不能为空");
            }
            String userId = UserContext.getCurrentUserId();
            Map<String, Object> result = skillService.commitFile(
                    name, path, content, commitMessage, lastCommitId, userId);
            return ApiResponse.success(result);
        } catch (ResourceNotFoundException e) {
            return ApiResponse.error(404, e.getMessage());
        } catch (ForbiddenOperationException e) {
            return ApiResponse.error(403, e.getMessage());
        } catch (IllegalArgumentException e) {
            return ApiResponse.error(400, e.getMessage());
        } catch (Exception e) {
            // See getFile: avoid re-running path extraction inside the catch.
            log.error("Failed to commit file: {}/{}", name, path, e);
            return ApiResponse.error(500, e.getMessage());
        }
    }

    /** Creates a new skill owned by the current user. */
    @PostMapping
    public ApiResponse<Map<String, Object>> createSkill(@RequestBody Map<String, Object> request) {
        try {
            String name = (String) request.get("name");
            String description = (String) request.get("description");
            // isPublic is optional; anything that is not a Boolean is treated as absent.
            Boolean isPublic = request.get("isPublic") instanceof Boolean
                    ? (Boolean) request.get("isPublic")
                    : null;
            String userId = UserContext.getCurrentUserId();
            String userName = UserContext.getCurrentUserName();
            skillService.createSkill(name, description, isPublic, userId, userName);
            return ApiResponse.success(Map.of("name", name));
        } catch (ForbiddenOperationException e) {
            return ApiResponse.error(403, e.getMessage());
        } catch (IllegalArgumentException e) {
            return ApiResponse.error(400, e.getMessage());
        } catch (Exception e) {
            log.error("Failed to create skill", e);
            return ApiResponse.error(500, e.getMessage());
        }
    }

    /** Updates skill metadata (description / isPublic). */
    @PutMapping("/{name}")
    public ApiResponse<Map<String, Object>> updateSkillMeta(@PathVariable String name,
                                                            @RequestBody Map<String, Object> request) {
        try {
            String userId = UserContext.getCurrentUserId();
            String userName = UserContext.getCurrentUserName();
            skillService.updateSkillMeta(name, request, userId, userName);
            return ApiResponse.success(Map.of("name", name));
        } catch (ResourceNotFoundException e) {
            return ApiResponse.error(404, e.getMessage());
        } catch (ForbiddenOperationException e) {
            return ApiResponse.error(403, e.getMessage());
        } catch (IllegalArgumentException e) {
            return ApiResponse.error(400, e.getMessage());
        } catch (Exception e) {
            log.error("Failed to update skill meta: {}", name, e);
            return ApiResponse.error(500, e.getMessage());
        }
    }

    /** Deletes a skill. */
    @DeleteMapping("/{name}")
    public ApiResponse<Void> deleteSkill(@PathVariable String name) {
        try {
            String userId = UserContext.getCurrentUserId();
            skillService.deleteSkill(name, userId);
            return ApiResponse.success(null);
        } catch (ResourceNotFoundException e) {
            return ApiResponse.error(404, e.getMessage());
        } catch (ForbiddenOperationException e) {
            return ApiResponse.error(403, e.getMessage());
        } catch (Exception e) {
            log.error("Failed to delete skill: {}", name, e);
            return ApiResponse.error(500, e.getMessage());
        }
    }

    /** Commit history for a skill. */
    @GetMapping("/{name}/history")
    public ApiResponse<List<Map<String, Object>>> getHistory(@PathVariable String name) {
        try {
            String userId = UserContext.getCurrentUserId();
            List<Map<String, Object>> history = skillService.getHistory(name, userId);
            return ApiResponse.success(history);
        } catch (ResourceNotFoundException e) {
            return ApiResponse.error(404, e.getMessage());
        } catch (ForbiddenOperationException e) {
            return ApiResponse.error(403, e.getMessage());
        } catch (Exception e) {
            log.error("Failed to get history: {}", name, e);
            return ApiResponse.error(500, e.getMessage());
        }
    }

    /** Reverts a skill to a given commit. */
    @PostMapping("/{name}/revert")
    public ApiResponse<Void> revertToCommit(@PathVariable String name,
                                            @RequestBody Map<String, Object> body) {
        try {
            String commitSha = (String) body.get("commitSha");
            String userId = UserContext.getCurrentUserId();
            skillService.revertToCommit(name, commitSha, userId);
            return ApiResponse.success(null);
        } catch (ResourceNotFoundException e) {
            return ApiResponse.error(404, e.getMessage());
        } catch (ForbiddenOperationException e) {
            return ApiResponse.error(403, e.getMessage());
        } catch (IllegalArgumentException e) {
            return ApiResponse.error(400, e.getMessage());
        } catch (Exception e) {
            log.error("Failed to revert skill {} to commit", name, e);
            return ApiResponse.error(500, e.getMessage());
        }
    }

    /**
     * Extracts the wildcard file path from the raw request URI.
     *
     * Decodes up to three times so double-encoded sequences (e.g. "%252e%252e")
     * cannot slip through, strips leading slashes, and — security fix — rejects
     * any remaining ".." traversal segment after full decoding (the original
     * performed no traversal check at all).
     *
     * @throws IllegalArgumentException when the decoded path contains ".."
     */
    private String extractFilePath(HttpServletRequest request, String name) {
        String uri = request.getRequestURI();
        String prefix = "/api/v1/skills/" + name + "/files/";
        // NOTE(review): assumes {name} appears un-re-encoded in the URI; verify
        // behavior when the skill name itself contains URL-escaped characters.
        String encodedPath = uri.substring(uri.indexOf(prefix) + prefix.length());
        String normalized = encodedPath == null ? "" : encodedPath.trim().replace('\\', '/');
        for (int i = 0; i < 3; i++) {
            String decoded = URLDecoder.decode(normalized, StandardCharsets.UTF_8);
            if (decoded.equals(normalized)) {
                break;
            }
            normalized = decoded;
        }
        while (normalized.startsWith("/")) {
            normalized = normalized.substring(1);
        }
        for (String segment : normalized.split("/")) {
            if ("..".equals(segment)) {
                throw new IllegalArgumentException("文件路径参数非法");
            }
        }
        return normalized;
    }
}

// ----- file: back/src/main/java/com/linkwork/controller/StorageController.java -----
package com.linkwork.controller;

import com.linkwork.common.ApiResponse;
import com.linkwork.service.NfsStorageService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

import java.util.HashMap;
import java.util.Map;

/** Reports whether the NFS-backed artifact storage is configured. */
@Slf4j
@RestController
@RequestMapping("/api/v1/storage")
public class StorageController {

    private final NfsStorageService nfsStorageService;

    public StorageController(NfsStorageService nfsStorageService) {
        this.nfsStorageService = nfsStorageService;
    }

    /** Returns {configured: boolean, type: "NFS"}. */
    @GetMapping("/status")
    public ApiResponse<Map<String, Object>> getStatus() {
        Map<String, Object> status = new HashMap<>();
        status.put("configured", nfsStorageService.isConfigured());
        status.put("type", "NFS");
        return ApiResponse.success(status);
    }
}

// ----- file: back/src/main/java/com/linkwork/controller/TaskController.java -----
package com.linkwork.controller;

import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.linkwork.common.ApiResponse;
import
com.linkwork.common.ClientIpResolver; +import com.linkwork.context.UserContext; +import com.linkwork.model.dto.TaskCompleteRequest; +import com.linkwork.model.dto.TaskCreateRequest; +import com.linkwork.model.dto.TaskGitTokenResponse; +import com.linkwork.model.dto.TaskResponse; +import com.linkwork.model.dto.TaskShareCreateRequest; +import com.linkwork.model.dto.TaskShareLinkResponse; +import com.linkwork.model.entity.Task; +import com.linkwork.service.TaskGitTokenService; +import com.linkwork.service.TaskShareLinkService; +import com.linkwork.service.TaskService; +import jakarta.validation.Valid; +import jakarta.servlet.http.HttpServletRequest; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.http.ResponseEntity; +import org.springframework.util.StringUtils; +import org.springframework.web.bind.annotation.CrossOrigin; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.PostMapping; +import org.springframework.web.bind.annotation.RequestBody; +import org.springframework.web.bind.annotation.RequestHeader; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RequestParam; +import org.springframework.web.bind.annotation.RestController; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * 任务控制器 + * 对应 docs/back/api-design.md 3.1 任务执行模块 + */ +@Slf4j +@RestController +@RequestMapping("/api/v1/tasks") +@CrossOrigin(origins = "*") +@RequiredArgsConstructor +public class TaskController { + + private final TaskService taskService; + private final TaskGitTokenService taskGitTokenService; + private final TaskShareLinkService taskShareLinkService; + + /** + * 启动 AI 任务 + * POST /api/v1/tasks + */ + @PostMapping + public ApiResponse> createTask( + @Valid @RequestBody TaskCreateRequest request, + HttpServletRequest 
servletRequest) { + + String userId = UserContext.getCurrentUserId(); + String userName = UserContext.getCurrentUserName(); + String creatorIp = ClientIpResolver.resolve(servletRequest); + log.info("创建任务请求: prompt={}, roleId={}, modelId={}, userId={}, creatorIp={}", + request.getPrompt(), request.getRoleId(), request.getModelId(), userId, creatorIp); + + Task task = taskService.createTask(request, userId, userName, creatorIp); + TaskResponse taskResponse = taskService.toResponse(task); + + Map result = new HashMap<>(); + result.put("taskId", task.getTaskNo()); + result.put("status", task.getStatus().getCode()); + result.put("estimatedOutput", taskResponse.getEstimatedOutput()); + result.put("deliveryMode", taskResponse.getDeliveryMode()); + result.put("branchName", taskResponse.getBranchName()); + + return ApiResponse.success(result); + } + + /** + * 获取任务详情 + * GET /api/v1/tasks/{taskNo} + */ + @GetMapping("/{taskNo}") + public ApiResponse getTask(@PathVariable String taskNo) { + String userId = UserContext.getCurrentUserId(); + if (!StringUtils.hasText(userId)) { + throw new IllegalStateException("用户未登录或登录态失效"); + } + log.info("获取任务详情: taskNo={}, userId={}", taskNo, userId); + + Task task = taskService.getTaskByNo(taskNo, userId); + TaskResponse response = taskService.toResponse(task); + + return ApiResponse.success(response); + } + + /** + * zzd 按任务 ID 获取 Git token(服务到服务) + * GET /api/v1/tasks/{taskNo}/git-token + */ + @GetMapping("/{taskNo}/git-token") + public ResponseEntity> getTaskGitToken( + @PathVariable String taskNo, + @RequestHeader(value = "Authorization", required = false) String authorization) { + + // NOTE(temporary): API 端联调阶段先跳过 zzd 服务鉴权,优先保障任务链路可用。 + // 后续 API 端启用服务鉴权后,恢复 bearer token 校验逻辑。 + TaskGitTokenResponse response = taskGitTokenService.getTaskGitToken(taskNo); + return ResponseEntity.ok(ApiResponse.success(response)); + } + + /** + * 获取任务列表 + * GET /api/v1/tasks + */ + @GetMapping + public ApiResponse> listTasks( + @RequestParam(required = 
false) Long roleId, + @RequestParam(required = false) String status, + @RequestParam(defaultValue = "1") Integer page, + @RequestParam(defaultValue = "20") Integer pageSize) { + String userId = UserContext.getCurrentUserId(); + if (!StringUtils.hasText(userId)) { + throw new IllegalStateException("用户未登录或登录态失效"); + } + + log.info("获取任务列表: roleId={}, status={}, page={}, pageSize={}, userId={}", roleId, status, page, pageSize, userId); + + Page taskPage = taskService.listTasks(roleId, status, page, pageSize, userId); + List items = taskService.toResponseList(taskPage.getRecords()); + + Map pagination = new HashMap<>(); + pagination.put("page", taskPage.getCurrent()); + pagination.put("pageSize", taskPage.getSize()); + pagination.put("total", taskPage.getTotal()); + pagination.put("totalPages", taskPage.getPages()); + + Map result = new HashMap<>(); + result.put("items", items); + result.put("pagination", pagination); + + return ApiResponse.success(result); + } + + /** + * 任务完成回写(Worker 回调) + * POST /api/v1/tasks/{taskNo}/complete + */ + @PostMapping("/{taskNo}/complete") + public ApiResponse> completeTask( + @PathVariable String taskNo, + @Valid @RequestBody TaskCompleteRequest request) { + log.info("任务完成回写: taskNo={}, status={}, tokensUsed={}, durationMs={}", + taskNo, request.getStatus(), request.getTokensUsed(), request.getDurationMs()); + + Task task = taskService.completeTask(taskNo, request); + + Map result = new HashMap<>(); + result.put("taskId", task.getTaskNo()); + result.put("status", task.getStatus().getCode()); + result.put("tokensUsed", task.getTokensUsed()); + result.put("durationMs", task.getDurationMs()); + + return ApiResponse.success(result); + } + + /** + * 终止任务(主接口) + * POST /api/v1/tasks/{taskNo}/terminate + */ + @PostMapping("/{taskNo}/terminate") + public ApiResponse> terminateTask( + @PathVariable String taskNo) { + String userId = UserContext.getCurrentUserId(); + String userName = UserContext.getCurrentUserName(); + if 
(!StringUtils.hasText(userId)) { + throw new IllegalStateException("用户未登录或登录态失效"); + } + log.info("终止任务: {}, userId={}", taskNo, userId); + + Task task = taskService.abortTask(taskNo, userId, userName); + + Map result = new HashMap<>(); + result.put("taskId", task.getTaskNo()); + result.put("status", "terminate_requested"); + + return ApiResponse.success(result); + } + + /** + * 终止任务(兼容接口) + * POST /api/v1/tasks/{taskNo}/abort + */ + @PostMapping("/{taskNo}/abort") + public ApiResponse> abortTask( + @PathVariable String taskNo) { + return terminateTask(taskNo); + } + + /** + * 创建任务临时分享链接 + * POST /api/v1/tasks/{taskNo}/share-link + */ + @PostMapping("/{taskNo}/share-link") + public ApiResponse createTaskShareLink( + @PathVariable String taskNo, + @RequestBody(required = false) TaskShareCreateRequest request) { + String userId = UserContext.getCurrentUserId(); + if (!StringUtils.hasText(userId)) { + throw new IllegalStateException("用户未登录或登录态失效"); + } + Integer expireHours = request == null ? 
null : request.getExpireHours(); + log.info("创建任务分享链接: taskNo={}, userId={}, expireHours={}", taskNo, userId, expireHours); + TaskShareLinkResponse response = taskShareLinkService.createShareLink(taskNo, userId, expireHours); + return ApiResponse.success(response); + } +} diff --git a/back/src/main/java/com/linkwork/controller/TaskOutputController.java b/back/src/main/java/com/linkwork/controller/TaskOutputController.java new file mode 100644 index 0000000..4949a67 --- /dev/null +++ b/back/src/main/java/com/linkwork/controller/TaskOutputController.java @@ -0,0 +1,235 @@ +package com.linkwork.controller; + +import com.linkwork.service.NfsStorageService; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.core.io.InputStreamResource; +import org.springframework.core.io.Resource; +import org.springframework.http.HttpHeaders; +import org.springframework.http.MediaType; +import org.springframework.http.ResponseEntity; +import org.springframework.util.StringUtils; +import org.springframework.web.bind.annotation.*; +import org.springframework.web.servlet.mvc.method.annotation.StreamingResponseBody; + +import java.io.IOException; +import java.io.InputStream; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.zip.ZipEntry; +import java.util.zip.ZipOutputStream; + +/** + * 任务产出物文件下载 — 供前端 WebSocket 事件中的 download_url 使用 + */ +@Slf4j +@RestController +@RequestMapping("/api/v1/task-outputs") +@RequiredArgsConstructor +public class TaskOutputController { + + private final NfsStorageService nfsStorageService; + private static final int ZIP_BUFFER_SIZE = 8192; + + /** + * 任务产出下载(按 objectName 直取)。 + * + * 示例: + * GET /api/v1/task-outputs/file?object=system%2F35%2Flogs%2Fuser-id%2FTSK-xxx%2Freport.md + */ + @GetMapping("/file") + public 
ResponseEntity downloadTaskOutputFile( + @RequestParam("object") String objectName, + @RequestParam(value = "inline", defaultValue = "false") boolean inline) throws IOException { + + String normalizedObject = normalizeObjectName(objectName); + Path filePath = nfsStorageService.getAbsolutePath(normalizedObject).normalize(); + if (!Files.exists(filePath) || !Files.isRegularFile(filePath)) { + log.warn("任务产出物文件未找到: object={}", normalizedObject); + return ResponseEntity.notFound().build(); + } + + String fileName = filePath.getFileName().toString(); + String encodedFileName = URLEncoder.encode(fileName, StandardCharsets.UTF_8).replace("+", "%20"); + String contentType = resolveContentType(fileName, filePath); + String disposition = inline ? "inline" : "attachment"; + InputStream inputStream = Files.newInputStream(filePath); + + return ResponseEntity.ok() + .header(HttpHeaders.CONTENT_DISPOSITION, disposition + "; filename*=UTF-8''" + encodedFileName) + .contentType(MediaType.parseMediaType(contentType)) + .contentLength(Files.size(filePath)) + .body(new InputStreamResource(inputStream)); + } + + @PostMapping(value = "/archive", consumes = MediaType.APPLICATION_JSON_VALUE) + public ResponseEntity downloadTaskOutputArchive( + @RequestBody ArchiveRequest request) throws IOException { + + List sourceFiles = prepareArchiveSourceFiles(request); + if (sourceFiles.isEmpty()) { + throw new IllegalArgumentException("items 参数不能为空"); + } + + String archiveFileName = normalizeArchiveFileName(request.fileName()); + String encodedFileName = URLEncoder.encode(archiveFileName, StandardCharsets.UTF_8).replace("+", "%20"); + + StreamingResponseBody responseBody = outputStream -> { + try (ZipOutputStream zipOutputStream = new ZipOutputStream(outputStream)) { + byte[] buffer = new byte[ZIP_BUFFER_SIZE]; + for (ArchiveSourceFile sourceFile : sourceFiles) { + zipOutputStream.putNextEntry(new ZipEntry(sourceFile.entryName())); + try (InputStream inputStream = 
Files.newInputStream(sourceFile.path())) { + int readSize; + while ((readSize = inputStream.read(buffer)) > 0) { + zipOutputStream.write(buffer, 0, readSize); + } + } + zipOutputStream.closeEntry(); + } + zipOutputStream.finish(); + } + }; + + return ResponseEntity.ok() + .header(HttpHeaders.CONTENT_DISPOSITION, "attachment; filename*=UTF-8''" + encodedFileName) + .contentType(MediaType.parseMediaType("application/zip")) + .body(responseBody); + } + + private List prepareArchiveSourceFiles(ArchiveRequest request) { + if (request == null || request.items() == null || request.items().isEmpty()) { + return List.of(); + } + + List sourceFiles = new ArrayList<>(); + Set usedEntryNames = new HashSet<>(); + + for (ArchiveItem item : request.items()) { + String normalizedObject = normalizeObjectName(item == null ? null : item.object()); + Path sourcePath = nfsStorageService.getAbsolutePath(normalizedObject).normalize(); + if (!Files.exists(sourcePath) || !Files.isRegularFile(sourcePath)) { + throw new IllegalArgumentException("产出物不存在或不可访问: " + normalizedObject); + } + + String fallbackName = sourcePath.getFileName().toString(); + String normalizedEntryName = normalizeZipEntryName(item == null ? null : item.name(), fallbackName); + String dedupedEntryName = ensureUniqueZipEntryName(normalizedEntryName, usedEntryNames); + sourceFiles.add(new ArchiveSourceFile(sourcePath, dedupedEntryName)); + } + + return sourceFiles; + } + + private String normalizeArchiveFileName(String fileName) { + String normalized = fileName == null ? 
"" : fileName.trim();
+        if (!StringUtils.hasText(normalized)) {
+            normalized = "task-artifacts.zip";
+        }
+        // Reject traversal sequences and both kinds of path separator outright.
+        if (normalized.contains("..") || normalized.contains("/") || normalized.contains("\\")) {
+            throw new IllegalArgumentException("fileName 参数非法");
+        }
+        if (!normalized.toLowerCase().endsWith(".zip")) {
+            normalized = normalized + ".zip";
+        }
+        return normalized;
+    }
+
+    /**
+     * Sanitizes a client-supplied ZIP entry name: converts backslashes to
+     * slashes, strips leading/trailing slashes, drops empty and "." segments
+     * and rejects "..". Falls back to the source file name whenever the
+     * result would be empty.
+     */
+    private String normalizeZipEntryName(String entryName, String fallbackName) {
+        String normalized = entryName == null ? "" : entryName.trim();
+        if (!StringUtils.hasText(normalized)) {
+            normalized = fallbackName;
+        }
+        normalized = normalized.replace("\\", "/");
+        while (normalized.startsWith("/")) {
+            normalized = normalized.substring(1);
+        }
+        while (normalized.endsWith("/")) {
+            normalized = normalized.substring(0, normalized.length() - 1);
+        }
+        if (!StringUtils.hasText(normalized)) {
+            normalized = fallbackName;
+        }
+
+        String[] rawSegments = normalized.split("/");
+        List safeSegments = new ArrayList<>();
+        for (String segment : rawSegments) {
+            if (!StringUtils.hasText(segment) || ".".equals(segment)) {
+                continue;
+            }
+            if ("..".equals(segment)) {
+                throw new IllegalArgumentException("name 参数非法");
+            }
+            safeSegments.add(segment);
+        }
+        if (safeSegments.isEmpty()) {
+            return fallbackName;
+        }
+        return String.join("/", safeSegments);
+    }
+
+    /**
+     * Deduplicates ZIP entry names by appending "(1)", "(2)", ... before the
+     * file extension. Mutates {@code usedEntryNames} to record the winner.
+     */
+    private String ensureUniqueZipEntryName(String entryName, Set usedEntryNames) {
+        if (!usedEntryNames.contains(entryName)) {
+            usedEntryNames.add(entryName);
+            return entryName;
+        }
+
+        int dotIndex = entryName.lastIndexOf('.');
+        // dotIndex > 0: a name that is only a leading dot keeps its full name.
+        String baseName = dotIndex > 0 ? entryName.substring(0, dotIndex) : entryName;
+        String extension = dotIndex > 0 ?
entryName.substring(dotIndex) : "";
+        int index = 1;
+        String candidate = baseName + "(" + index + ")" + extension;
+        while (usedEntryNames.contains(candidate)) {
+            index++;
+            candidate = baseName + "(" + index + ")" + extension;
+        }
+        usedEntryNames.add(candidate);
+        return candidate;
+    }
+
+    /**
+     * Normalizes an object key: strips leading slashes and rejects empty
+     * values, ".." sequences and backslashes (path-traversal defence).
+     */
+    private String normalizeObjectName(String objectName) {
+        String normalized = objectName == null ? "" : objectName.trim();
+        while (normalized.startsWith("/")) {
+            normalized = normalized.substring(1);
+        }
+        if (!StringUtils.hasText(normalized)) {
+            throw new IllegalArgumentException("object 参数不能为空");
+        }
+        if (normalized.contains("..") || normalized.contains("\\")) {
+            throw new IllegalArgumentException("object 参数非法");
+        }
+        return normalized;
+    }
+
+    /**
+     * Resolves a Content-Type for the file: OS probe first, then a small
+     * extension map, finally octet-stream.
+     */
+    private String resolveContentType(String fileName, Path filePath) {
+        try {
+            String detected = Files.probeContentType(filePath);
+            if (StringUtils.hasText(detected)) {
+                return detected;
+            }
+        } catch (IOException ignore) {
+            // noop, fall through to extension mapping
+        }
+        String lowerName = fileName == null ?
"" : fileName.toLowerCase(java.util.Locale.ROOT);
+        if (lowerName.endsWith(".pdf")) {
+            return MediaType.APPLICATION_PDF_VALUE;
+        }
+        if (lowerName.endsWith(".md") || lowerName.endsWith(".txt") || lowerName.endsWith(".log")) {
+            return MediaType.TEXT_PLAIN_VALUE;
+        }
+        if (lowerName.endsWith(".json")) {
+            return MediaType.APPLICATION_JSON_VALUE;
+        }
+        return MediaType.APPLICATION_OCTET_STREAM_VALUE;
+    }
+
+    // Request/response payloads for the download and archive endpoints.
+    private record ArchiveRequest(String fileName, List items) {}
+    private record ArchiveItem(String object, String name) {}
+    private record ArchiveSourceFile(Path path, String entryName) {}
+}
diff --git a/back/src/main/java/com/linkwork/controller/UserSoulController.java b/back/src/main/java/com/linkwork/controller/UserSoulController.java
new file mode 100644
index 0000000..3d2725b
--- /dev/null
+++ b/back/src/main/java/com/linkwork/controller/UserSoulController.java
@@ -0,0 +1,50 @@
+package com.linkwork.controller;
+
+import com.linkwork.common.ApiResponse;
+import com.linkwork.context.UserContext;
+import com.linkwork.model.dto.UserSoulResponse;
+import com.linkwork.model.dto.UserSoulUpsertRequest;
+import com.linkwork.service.UserSoulService;
+import jakarta.validation.Valid;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.util.StringUtils;
+import org.springframework.web.bind.annotation.CrossOrigin;
+import org.springframework.web.bind.annotation.GetMapping;
+import org.springframework.web.bind.annotation.PutMapping;
+import org.springframework.web.bind.annotation.RequestBody;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.RestController;
+
+/** Read/update endpoints for the current user's "Soul" profile. */
+@Slf4j
+@RestController
+@RequestMapping({"/api/v1/users/me/soul", "/api/v1/user-soul"})
+@CrossOrigin(origins = "*")
+@RequiredArgsConstructor
+public class UserSoulController {
+
+    private final UserSoulService userSoulService;
+
+    /** Returns the Soul of the currently authenticated user. */
+    @GetMapping
+    public ApiResponse getCurrentUserSoul() {
+        String
userId = UserContext.getCurrentUserId();
+        if (!StringUtils.hasText(userId)) {
+            throw new IllegalStateException("用户未登录或登录态失效");
+        }
+        UserSoulResponse response = userSoulService.getCurrentUserSoul(userId);
+        return ApiResponse.success(response);
+    }
+
+    /** Creates or updates the Soul of the currently authenticated user. */
+    @PutMapping
+    public ApiResponse upsertCurrentUserSoul(
+            @Valid @RequestBody UserSoulUpsertRequest request) {
+        String userId = UserContext.getCurrentUserId();
+        String userName = UserContext.getCurrentUserName();
+        if (!StringUtils.hasText(userId)) {
+            throw new IllegalStateException("用户未登录或登录态失效");
+        }
+        log.info("更新用户 Soul: userId={}, version={}", userId, request.getVersion());
+        UserSoulResponse response = userSoulService.upsertCurrentUserSoul(userId, userName, request);
+        return ApiResponse.success(response);
+    }
+}
diff --git a/back/src/main/java/com/linkwork/controller/WorkspaceController.java b/back/src/main/java/com/linkwork/controller/WorkspaceController.java
new file mode 100644
index 0000000..f15fb3e
--- /dev/null
+++ b/back/src/main/java/com/linkwork/controller/WorkspaceController.java
@@ -0,0 +1,24 @@
+package com.linkwork.controller;
+
+import com.linkwork.model.FileNode;
+import com.linkwork.service.WorkspaceService;
+import org.springframework.web.bind.annotation.*;
+
+import java.util.List;
+
+@RestController
+@RequestMapping("/api/v1/workspace")
+@CrossOrigin(origins = "*") // allow cross-origin calls from the front-end
+public class WorkspaceController {
+
+    private final WorkspaceService workspaceService;
+
+    public WorkspaceController(WorkspaceService workspaceService) {
+        this.workspaceService = workspaceService;
+    }
+
+    /** Lists the workspace file tree for the given task. */
+    @GetMapping("/files")
+    public List listFiles(@RequestParam String taskId) {
+        return workspaceService.listFiles(taskId);
+    }
+}
diff --git a/back/src/main/java/com/linkwork/filter/InternalApiAuditFilter.java b/back/src/main/java/com/linkwork/filter/InternalApiAuditFilter.java
new file mode 100644
index 0000000..0c1e4bb
--- /dev/null
+++ b/back/src/main/java/com/linkwork/filter/InternalApiAuditFilter.java
@@
-0,0 +1,50 @@
+package com.linkwork.filter;
+
+import com.linkwork.common.ClientIpResolver;
+import jakarta.servlet.*;
+import jakarta.servlet.http.HttpServletRequest;
+import jakarta.servlet.http.HttpServletResponse;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.core.annotation.Order;
+import org.springframework.stereotype.Component;
+
+import java.io.IOException;
+
+/**
+ * Audit logging for internal API calls.
+ *
+ * <p>Logs caller IP, HTTP method, path, response status and elapsed time for
+ * every request under /api/internal/**.
+ */
+@Slf4j
+@Component
+@Order(2)
+public class InternalApiAuditFilter implements Filter {
+
+    private static final String INTERNAL_PREFIX = "/api/internal/";
+
+    @Override
+    public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
+            throws IOException, ServletException {
+
+        HttpServletRequest httpRequest = (HttpServletRequest) request;
+        String path = httpRequest.getRequestURI();
+
+        // Non-internal traffic passes through without audit overhead.
+        if (!path.startsWith(INTERNAL_PREFIX)) {
+            chain.doFilter(request, response);
+            return;
+        }
+
+        String clientIp = ClientIpResolver.resolve(httpRequest);
+        String method = httpRequest.getMethod();
+        long start = System.currentTimeMillis();
+
+        try {
+            chain.doFilter(request, response);
+        } finally {
+            // Always log, even when downstream processing throws.
+            int status = ((HttpServletResponse) response).getStatus();
+            long elapsed = System.currentTimeMillis() - start;
+            log.info("[AUDIT] internal-api ip={} method={} path={} status={} elapsed={}ms",
+                    clientIp, method, path, status, elapsed);
+        }
+    }
+}
diff --git a/back/src/main/java/com/linkwork/filter/InternalApiIpFilter.java b/back/src/main/java/com/linkwork/filter/InternalApiIpFilter.java
new file mode 100644
index 0000000..a30db9d
--- /dev/null
+++ b/back/src/main/java/com/linkwork/filter/InternalApiIpFilter.java
@@ -0,0 +1,106 @@
+package com.linkwork.filter;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.linkwork.common.ClientIpResolver;
+import jakarta.servlet.*;
+import jakarta.servlet.http.HttpServletRequest;
+import jakarta.servlet.http.HttpServletResponse;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.core.annotation.Order;
+import org.springframework.stereotype.Component;
+import org.springframework.util.StringUtils;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.util.Map;
+import java.util.Set;
+import
java.util.concurrent.ConcurrentHashMap; + +/** + * 内部 API IP 白名单过滤器 + *

+ * /api/internal/** 路径仅允许集群内网 IP(10.x、172.16-31.x、192.168.x)和 localhost 访问。 + * 可通过 robot.internal-api.extra-ips 配置额外放行的 IP。 + */ +@Slf4j +@Component +@Order(0) +@RequiredArgsConstructor +public class InternalApiIpFilter implements Filter { + + private static final String INTERNAL_PREFIX = "/api/internal/"; + + private final ObjectMapper objectMapper; + + @Value("${robot.internal-api.extra-ips:}") + private String extraIps; + + private final Set extraIpSet = ConcurrentHashMap.newKeySet(); + private volatile boolean initialized = false; + + private void ensureInitialized() { + if (!initialized) { + synchronized (this) { + if (!initialized) { + if (StringUtils.hasText(extraIps)) { + for (String ip : extraIps.split(",")) { + String trimmed = ip.trim(); + if (!trimmed.isEmpty()) { + extraIpSet.add(trimmed); + } + } + } + initialized = true; + } + } + } + } + + @Override + public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) + throws IOException, ServletException { + + HttpServletRequest httpRequest = (HttpServletRequest) request; + String path = httpRequest.getRequestURI(); + + if (!path.startsWith(INTERNAL_PREFIX)) { + chain.doFilter(request, response); + return; + } + + ensureInitialized(); + + String clientIp = ClientIpResolver.resolve(httpRequest); + if (isAllowed(clientIp)) { + chain.doFilter(request, response); + } else { + log.warn("Internal API access denied: ip={}, path={}", clientIp, path); + HttpServletResponse httpResponse = (HttpServletResponse) response; + httpResponse.setStatus(HttpServletResponse.SC_FORBIDDEN); + httpResponse.setContentType("application/json;charset=UTF-8"); + httpResponse.getWriter().write(objectMapper.writeValueAsString( + Map.of("code", 40300, "msg", "Forbidden: IP not in whitelist", "data", Map.of()) + )); + } + } + + private boolean isAllowed(String ip) { + if (!StringUtils.hasText(ip)) { + return false; + } + if ("127.0.0.1".equals(ip) || "0:0:0:0:0:0:0:1".equals(ip) || "::1".equals(ip)) { + 
return true; + } + if (extraIpSet.contains(ip)) { + return true; + } + try { + InetAddress addr = InetAddress.getByName(ip); + return addr.isLoopbackAddress() || addr.isSiteLocalAddress(); + } catch (Exception e) { + return false; + } + } +} diff --git a/back/src/main/java/com/linkwork/filter/JwtAuthFilter.java b/back/src/main/java/com/linkwork/filter/JwtAuthFilter.java new file mode 100644 index 0000000..cffd870 --- /dev/null +++ b/back/src/main/java/com/linkwork/filter/JwtAuthFilter.java @@ -0,0 +1,169 @@ +package com.linkwork.filter; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.linkwork.context.UserContext; +import com.linkwork.context.UserInfo; +import com.linkwork.service.AuthService; +import jakarta.servlet.*; +import jakarta.servlet.http.Cookie; +import jakarta.servlet.http.HttpServletRequest; +import jakarta.servlet.http.HttpServletResponse; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.core.annotation.Order; +import org.springframework.stereotype.Component; + +import java.io.IOException; +import java.util.Map; +import java.util.Set; + +/** + * 全局 JWT 认证过滤器 + *

+ * Extracts the JWT from the "robot_token" cookie or the Authorization
+ * header, resolves the user and stores it in UserContext (a ThreadLocal).
+ * Unauthenticated requests receive a 401 response.
+ */
+@Slf4j
+@Component
+@Order(1)
+@RequiredArgsConstructor
+public class JwtAuthFilter implements Filter {
+
+    private final AuthService authService;
+    private final ObjectMapper objectMapper;
+
+    private static final String AUTH_COOKIE_NAME = "robot_token";
+
+    /** Exact paths that do not require authentication. */
+    private static final Set EXCLUDE_PATHS = Set.of(
+            "/api/v1/auth/login",
+            "/api/v1/auth/verify",
+            "/api/v1/auth/encode",
+            "/api/v1/models",
+            "/health",
+            "/api/v1/health"
+    );
+
+    /** Path suffixes that do not require authentication (internal worker callbacks). */
+    private static final Set EXCLUDE_SUFFIXES = Set.of(
+            "/complete",
+            "/git-token"
+    );
+
+    /** Exact internal paths that do not require authentication. */
+    private static final Set INTERNAL_PATHS = Set.of(
+            "/api/v1/approvals/create"
+    );
+
+    /** Path prefixes that do not require authentication. */
+    private static final Set EXCLUDE_PREFIXES = Set.of(
+            "/ws/",
+            "/api/v1/ws",
+            "/api/internal/"
+    );
+
+    private static final String PUBLIC_TASK_MODEL_PREFIX = "/api/v1/public/tasks/";
+    private static final String PUBLIC_TASK_MODEL_SUFFIX = "/model";
+    private static final String PUBLIC_TASK_SHARE_DETAIL_SUFFIX = "/share-detail";
+
+    @Override
+    public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
+            throws IOException, ServletException {
+
+        HttpServletRequest httpRequest = (HttpServletRequest) request;
+        HttpServletResponse httpResponse = (HttpServletResponse) response;
+
+        String path = httpRequest.getRequestURI();
+
+        // Skip paths that do not require authentication.
+        if (isExcluded(path)) {
+            chain.doFilter(request, response);
+            return;
+        }
+
+        try {
+            // Extract the JWT token from the request.
+            String token = extractToken(httpRequest);
+
+            if (token != null && authService.validateToken(token)) {
+                // Resolve the user and publish it for downstream handlers.
+                UserInfo userInfo = authService.getUserInfoFromToken(token);
+                UserContext.set(userInfo);
+                chain.doFilter(request, response);
+            } else {
+                // Not authenticated.
+                sendUnauthorized(httpResponse, "未登录或 Token 已过期");
+            }
+        } finally {
+            // Clear the ThreadLocal so user state cannot leak across
+            // pooled request threads.
+            UserContext.clear();
+        }
+    }
+
+    /**
+     * Extracts the JWT token from the request.
+     * The cookie takes precedence; the Authorization header is a fallback
+     * for legacy clients.
+     */
+    private String extractToken(HttpServletRequest request) {
+        // 1. From the cookie.
+        Cookie[] cookies = request.getCookies();
+        if (cookies != null) {
+            for (Cookie cookie : cookies) {
+                if (AUTH_COOKIE_NAME.equals(cookie.getName())) {
+                    String value = cookie.getValue();
+                    if (value != null && !value.isBlank()) {
+                        return value;
+                    }
+                }
+            }
+        }
+
+        // 2. From the Authorization header (legacy clients).
+        String authHeader = request.getHeader("Authorization");
+        if (authHeader != null && authHeader.startsWith("Bearer ")) {
+            return authHeader.substring(7);
+        }
+
+        return null;
+    }
+
+    /**
+     * Returns true when the path is exempt from authentication.
+     */
+    private boolean isExcluded(String path) {
+        if (EXCLUDE_PATHS.contains(path) || INTERNAL_PATHS.contains(path)) {
+            return true;
+        }
+        for (String prefix : EXCLUDE_PREFIXES) {
+            if (path.startsWith(prefix)) {
+                return true;
+            }
+        }
+        if (path.startsWith(PUBLIC_TASK_MODEL_PREFIX)
+                && (path.endsWith(PUBLIC_TASK_MODEL_SUFFIX) || path.endsWith(PUBLIC_TASK_SHARE_DETAIL_SUFFIX))) {
+            return true;
+        }
+        // Worker callback endpoints: /api/v1/tasks/{taskNo}/complete etc.
+        for (String suffix : EXCLUDE_SUFFIXES) {
+            if (path.endsWith(suffix) && (path.startsWith("/api/v1/tasks/") || path.startsWith("/api/v1/roles/"))) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    /**
+     * Writes a 401 JSON response body.
+     */
+    private void sendUnauthorized(HttpServletResponse response, String message) throws IOException {
+        response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
+        response.setContentType("application/json;charset=UTF-8");
+        Map body = Map.of(
+                "code", 40100,
+                "msg", message,
+                "data", Map.of()
+        );
+        response.getWriter().write(objectMapper.writeValueAsString(body));
+    }
+}
diff --git a/back/src/main/java/com/linkwork/mapper/ApprovalMapper.java b/back/src/main/java/com/linkwork/mapper/ApprovalMapper.java
new file mode 100644
index 0000000..58f2b56
--- /dev/null
+++
b/back/src/main/java/com/linkwork/mapper/ApprovalMapper.java @@ -0,0 +1,12 @@ +package com.linkwork.mapper; + +import com.baomidou.mybatisplus.core.mapper.BaseMapper; +import com.linkwork.model.entity.Approval; +import org.apache.ibatis.annotations.Mapper; + +/** + * 审批 Mapper + */ +@Mapper +public interface ApprovalMapper extends BaseMapper { +} diff --git a/back/src/main/java/com/linkwork/mapper/BuildRecordMapper.java b/back/src/main/java/com/linkwork/mapper/BuildRecordMapper.java new file mode 100644 index 0000000..0acf197 --- /dev/null +++ b/back/src/main/java/com/linkwork/mapper/BuildRecordMapper.java @@ -0,0 +1,12 @@ +package com.linkwork.mapper; + +import com.baomidou.mybatisplus.core.mapper.BaseMapper; +import com.linkwork.model.entity.BuildRecordEntity; +import org.apache.ibatis.annotations.Mapper; + +/** + * 构建记录 Mapper + */ +@Mapper +public interface BuildRecordMapper extends BaseMapper { +} diff --git a/back/src/main/java/com/linkwork/mapper/CronJobMapper.java b/back/src/main/java/com/linkwork/mapper/CronJobMapper.java new file mode 100644 index 0000000..818de29 --- /dev/null +++ b/back/src/main/java/com/linkwork/mapper/CronJobMapper.java @@ -0,0 +1,9 @@ +package com.linkwork.mapper; + +import com.baomidou.mybatisplus.core.mapper.BaseMapper; +import com.linkwork.model.entity.CronJob; +import org.apache.ibatis.annotations.Mapper; + +@Mapper +public interface CronJobMapper extends BaseMapper { +} diff --git a/back/src/main/java/com/linkwork/mapper/CronJobRunMapper.java b/back/src/main/java/com/linkwork/mapper/CronJobRunMapper.java new file mode 100644 index 0000000..32312ee --- /dev/null +++ b/back/src/main/java/com/linkwork/mapper/CronJobRunMapper.java @@ -0,0 +1,9 @@ +package com.linkwork.mapper; + +import com.baomidou.mybatisplus.core.mapper.BaseMapper; +import com.linkwork.model.entity.CronJobRun; +import org.apache.ibatis.annotations.Mapper; + +@Mapper +public interface CronJobRunMapper extends BaseMapper { +} diff --git 
a/back/src/main/java/com/linkwork/mapper/FileNodeMapper.java b/back/src/main/java/com/linkwork/mapper/FileNodeMapper.java new file mode 100644 index 0000000..77ff261 --- /dev/null +++ b/back/src/main/java/com/linkwork/mapper/FileNodeMapper.java @@ -0,0 +1,9 @@ +package com.linkwork.mapper; + +import com.baomidou.mybatisplus.core.mapper.BaseMapper; +import com.linkwork.model.entity.FileNodeEntity; +import org.apache.ibatis.annotations.Mapper; + +@Mapper +public interface FileNodeMapper extends BaseMapper { +} diff --git a/back/src/main/java/com/linkwork/mapper/GitLabAuthMapper.java b/back/src/main/java/com/linkwork/mapper/GitLabAuthMapper.java new file mode 100644 index 0000000..566e9e9 --- /dev/null +++ b/back/src/main/java/com/linkwork/mapper/GitLabAuthMapper.java @@ -0,0 +1,18 @@ +package com.linkwork.mapper; + +import com.baomidou.mybatisplus.core.mapper.BaseMapper; +import com.linkwork.model.entity.GitLabAuthEntity; +import org.apache.ibatis.annotations.*; + +@Mapper +public interface GitLabAuthMapper extends BaseMapper { + + @Select("SELECT * FROM linkwork_user_auth_gitlab WHERE user_id = #{userId} AND gitlab_id = #{gitlabId} AND scope = #{scope} LIMIT 1") + GitLabAuthEntity selectIncludingDeleted(@Param("userId") String userId, @Param("gitlabId") Long gitlabId, @Param("scope") String scope); + + @Update("UPDATE linkwork_user_auth_gitlab SET user_id=#{userId}, gitlab_id=#{gitlabId}, username=#{username}, " + + "name=#{name}, avatar_url=#{avatarUrl}, access_token=#{accessToken}, refresh_token=#{refreshToken}, " + + "token_alias=#{tokenAlias}, expires_at=#{expiresAt}, scope=#{scope}, " + + "created_at=#{createdAt}, updated_at=#{updatedAt}, is_deleted=#{isDeleted} WHERE id=#{id}") + int updateIncludingDeleted(GitLabAuthEntity entity); +} diff --git a/back/src/main/java/com/linkwork/mapper/McpServerMapper.java b/back/src/main/java/com/linkwork/mapper/McpServerMapper.java new file mode 100644 index 0000000..8e291ae --- /dev/null +++ 
b/back/src/main/java/com/linkwork/mapper/McpServerMapper.java @@ -0,0 +1,12 @@ +package com.linkwork.mapper; + +import com.baomidou.mybatisplus.core.mapper.BaseMapper; +import com.linkwork.model.entity.McpServerEntity; +import org.apache.ibatis.annotations.Mapper; + +/** + * MCP 服务 Mapper + */ +@Mapper +public interface McpServerMapper extends BaseMapper { +} diff --git a/back/src/main/java/com/linkwork/mapper/McpUsageDailyMapper.java b/back/src/main/java/com/linkwork/mapper/McpUsageDailyMapper.java new file mode 100644 index 0000000..f72ee6e --- /dev/null +++ b/back/src/main/java/com/linkwork/mapper/McpUsageDailyMapper.java @@ -0,0 +1,9 @@ +package com.linkwork.mapper; + +import com.baomidou.mybatisplus.core.mapper.BaseMapper; +import com.linkwork.model.entity.McpUsageDailyEntity; +import org.apache.ibatis.annotations.Mapper; + +@Mapper +public interface McpUsageDailyMapper extends BaseMapper { +} diff --git a/back/src/main/java/com/linkwork/mapper/McpUserConfigMapper.java b/back/src/main/java/com/linkwork/mapper/McpUserConfigMapper.java new file mode 100644 index 0000000..7222f68 --- /dev/null +++ b/back/src/main/java/com/linkwork/mapper/McpUserConfigMapper.java @@ -0,0 +1,9 @@ +package com.linkwork.mapper; + +import com.baomidou.mybatisplus.core.mapper.BaseMapper; +import com.linkwork.model.entity.McpUserConfigEntity; +import org.apache.ibatis.annotations.Mapper; + +@Mapper +public interface McpUserConfigMapper extends BaseMapper { +} diff --git a/back/src/main/java/com/linkwork/mapper/RoleMapper.java b/back/src/main/java/com/linkwork/mapper/RoleMapper.java new file mode 100644 index 0000000..d96fcba --- /dev/null +++ b/back/src/main/java/com/linkwork/mapper/RoleMapper.java @@ -0,0 +1,9 @@ +package com.linkwork.mapper; + +import com.baomidou.mybatisplus.core.mapper.BaseMapper; +import com.linkwork.model.entity.RoleEntity; +import org.apache.ibatis.annotations.Mapper; + +@Mapper +public interface RoleMapper extends BaseMapper { +} diff --git 
a/back/src/main/java/com/linkwork/mapper/SecurityPolicyMapper.java b/back/src/main/java/com/linkwork/mapper/SecurityPolicyMapper.java new file mode 100644 index 0000000..a349e06 --- /dev/null +++ b/back/src/main/java/com/linkwork/mapper/SecurityPolicyMapper.java @@ -0,0 +1,12 @@ +package com.linkwork.mapper; + +import com.baomidou.mybatisplus.core.mapper.BaseMapper; +import com.linkwork.model.entity.SecurityPolicy; +import org.apache.ibatis.annotations.Mapper; + +/** + * 安全策略 Mapper + */ +@Mapper +public interface SecurityPolicyMapper extends BaseMapper { +} diff --git a/back/src/main/java/com/linkwork/mapper/SkillMapper.java b/back/src/main/java/com/linkwork/mapper/SkillMapper.java new file mode 100644 index 0000000..ef507c1 --- /dev/null +++ b/back/src/main/java/com/linkwork/mapper/SkillMapper.java @@ -0,0 +1,12 @@ +package com.linkwork.mapper; + +import com.baomidou.mybatisplus.core.mapper.BaseMapper; +import com.linkwork.model.entity.SkillEntity; +import org.apache.ibatis.annotations.Mapper; + +/** + * 技能 Mapper + */ +@Mapper +public interface SkillMapper extends BaseMapper { +} diff --git a/back/src/main/java/com/linkwork/mapper/TaskGitAuthMapper.java b/back/src/main/java/com/linkwork/mapper/TaskGitAuthMapper.java new file mode 100644 index 0000000..f1abca5 --- /dev/null +++ b/back/src/main/java/com/linkwork/mapper/TaskGitAuthMapper.java @@ -0,0 +1,9 @@ +package com.linkwork.mapper; + +import com.baomidou.mybatisplus.core.mapper.BaseMapper; +import com.linkwork.model.entity.TaskGitAuthEntity; +import org.apache.ibatis.annotations.Mapper; + +@Mapper +public interface TaskGitAuthMapper extends BaseMapper { +} diff --git a/back/src/main/java/com/linkwork/mapper/TaskMapper.java b/back/src/main/java/com/linkwork/mapper/TaskMapper.java new file mode 100644 index 0000000..ee4c423 --- /dev/null +++ b/back/src/main/java/com/linkwork/mapper/TaskMapper.java @@ -0,0 +1,12 @@ +package com.linkwork.mapper; + +import com.baomidou.mybatisplus.core.mapper.BaseMapper; +import 
com.linkwork.model.entity.Task; +import org.apache.ibatis.annotations.Mapper; + +/** + * 任务 Mapper + */ +@Mapper +public interface TaskMapper extends BaseMapper { +} diff --git a/back/src/main/java/com/linkwork/mapper/UserFavoriteRoleMapper.java b/back/src/main/java/com/linkwork/mapper/UserFavoriteRoleMapper.java new file mode 100644 index 0000000..8a11205 --- /dev/null +++ b/back/src/main/java/com/linkwork/mapper/UserFavoriteRoleMapper.java @@ -0,0 +1,9 @@ +package com.linkwork.mapper; + +import com.baomidou.mybatisplus.core.mapper.BaseMapper; +import com.linkwork.model.entity.UserFavoriteRoleEntity; +import org.apache.ibatis.annotations.Mapper; + +@Mapper +public interface UserFavoriteRoleMapper extends BaseMapper { +} diff --git a/back/src/main/java/com/linkwork/mapper/UserSoulMapper.java b/back/src/main/java/com/linkwork/mapper/UserSoulMapper.java new file mode 100644 index 0000000..c495f9f --- /dev/null +++ b/back/src/main/java/com/linkwork/mapper/UserSoulMapper.java @@ -0,0 +1,29 @@ +package com.linkwork.mapper; + +import com.baomidou.mybatisplus.core.mapper.BaseMapper; +import com.linkwork.model.entity.UserSoulEntity; +import org.apache.ibatis.annotations.Mapper; +import org.apache.ibatis.annotations.Param; +import org.apache.ibatis.annotations.Select; + +@Mapper +public interface UserSoulMapper extends BaseMapper { + + @Select(""" + SELECT id, + user_id, + soul AS content, + template_id AS preset_id, + creator_id, + creator_name, + created_at, + updated_at, + is_deleted + FROM linkwork_user_soul + WHERE is_deleted = 0 + AND user_id = #{userId} + ORDER BY updated_at DESC, id DESC + LIMIT 1 + """) + UserSoulEntity selectLatestCompatByUserId(@Param("userId") String userId); +} diff --git a/back/src/main/java/com/linkwork/mapper/WorkspaceFileMapper.java b/back/src/main/java/com/linkwork/mapper/WorkspaceFileMapper.java new file mode 100644 index 0000000..dcce77f --- /dev/null +++ b/back/src/main/java/com/linkwork/mapper/WorkspaceFileMapper.java @@ -0,0 +1,9 @@ 
+package com.linkwork.mapper; + +import com.baomidou.mybatisplus.core.mapper.BaseMapper; +import com.linkwork.model.entity.WorkspaceFile; +import org.apache.ibatis.annotations.Mapper; + +@Mapper +public interface WorkspaceFileMapper extends BaseMapper { +} diff --git a/back/src/main/java/com/linkwork/model/FileNode.java b/back/src/main/java/com/linkwork/model/FileNode.java new file mode 100644 index 0000000..c9ac2fa --- /dev/null +++ b/back/src/main/java/com/linkwork/model/FileNode.java @@ -0,0 +1,20 @@ +package com.linkwork.model; + +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +import java.util.List; + +@Data +@Builder +@NoArgsConstructor +@AllArgsConstructor +public class FileNode { + private String name; + private String type; // "file" or "directory" + private String content; + private String size; + private List children; +} diff --git a/back/src/main/java/com/linkwork/model/dto/BuildQueueStatus.java b/back/src/main/java/com/linkwork/model/dto/BuildQueueStatus.java new file mode 100644 index 0000000..ca196ac --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/BuildQueueStatus.java @@ -0,0 +1,102 @@ +package com.linkwork.model.dto; + +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +import java.util.List; + +/** + * 构建队列状态 DTO + */ +@Data +@Builder +@NoArgsConstructor +@AllArgsConstructor +public class BuildQueueStatus { + + /** + * 等待中的任务数量 + */ + private int waitingCount; + + /** + * 正在执行的任务数量 + */ + private int runningCount; + + /** + * 硬性并发上限 + */ + private int maxConcurrent; + + /** + * 队列最大容量 + */ + private int maxQueueSize; + + /** + * 系统资源状态 + */ + private ResourceStatus resourceStatus; + + /** + * CPU 阈值配置 + */ + private double cpuThreshold; + + /** + * 内存阈值配置 + */ + private double memoryThreshold; + + /** + * 是否可以接受新任务 + */ + private boolean canAcceptNewTask; + + /** + * 等待中的任务列表(简要信息) + */ + private List 
waitingTasks; + + /** + * 正在执行的任务列表(简要信息) + */ + private List runningTasks; + + /** + * 任务简要信息 + */ + @Data + @Builder + @NoArgsConstructor + @AllArgsConstructor + public static class TaskInfo { + /** + * 构建 ID + */ + private String buildId; + + /** + * 服务 ID + */ + private String serviceId; + + /** + * 等待时间(毫秒) + */ + private long waitTimeMs; + + /** + * 执行时间(毫秒),仅对正在执行的任务有效 + */ + private long executionTimeMs; + + /** + * 在队列中的位置(从 1 开始) + */ + private int position; + } +} diff --git a/back/src/main/java/com/linkwork/model/dto/BuildTask.java b/back/src/main/java/com/linkwork/model/dto/BuildTask.java new file mode 100644 index 0000000..8eb98c8 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/BuildTask.java @@ -0,0 +1,159 @@ +package com.linkwork.model.dto; + +import lombok.Data; + +import java.time.Instant; +import java.util.concurrent.CompletableFuture; + +/** + * 构建任务 DTO + * 用于队列管理 + */ +@Data +public class BuildTask { + + /** + * 构建唯一标识 + */ + private final String buildId; + + /** + * 服务 ID + */ + private final String serviceId; + + /** + * 原始构建请求 + */ + private final ServiceBuildRequest request; + + /** + * 融合后的配置 + */ + private final MergedConfig config; + + /** + * 任务创建时间(入队时间) + */ + private final Instant createdAt; + + /** + * 任务开始执行时间 + */ + private Instant startedAt; + + /** + * 任务完成时间 + */ + private Instant completedAt; + + /** + * 任务状态 + */ + private TaskState state; + + /** + * 任务执行结果 Future + */ + private CompletableFuture resultFuture; + + /** + * 错误信息(如果失败) + */ + private String errorMessage; + + public BuildTask(ServiceBuildRequest request, MergedConfig config) { + this.buildId = request.getBuildId(); + this.serviceId = request.getServiceId(); + this.request = request; + this.config = config; + this.createdAt = Instant.now(); + this.state = TaskState.WAITING; + } + + /** + * 标记任务开始执行 + */ + public void markStarted() { + this.startedAt = Instant.now(); + this.state = TaskState.RUNNING; + } + + /** + * 标记任务完成 + */ + public void 
markCompleted() { + this.completedAt = Instant.now(); + this.state = TaskState.COMPLETED; + } + + /** + * 标记任务失败 + */ + public void markFailed(String errorMessage) { + this.completedAt = Instant.now(); + this.state = TaskState.FAILED; + this.errorMessage = errorMessage; + } + + /** + * 标记任务取消 + */ + public void markCancelled() { + this.completedAt = Instant.now(); + this.state = TaskState.CANCELLED; + } + + /** + * 获取等待时间(毫秒) + */ + public long getWaitTimeMs() { + if (startedAt != null) { + return startedAt.toEpochMilli() - createdAt.toEpochMilli(); + } + return Instant.now().toEpochMilli() - createdAt.toEpochMilli(); + } + + /** + * 获取执行时间(毫秒) + */ + public long getExecutionTimeMs() { + if (startedAt == null) { + return 0; + } + if (completedAt != null) { + return completedAt.toEpochMilli() - startedAt.toEpochMilli(); + } + return Instant.now().toEpochMilli() - startedAt.toEpochMilli(); + } + + /** + * 任务状态枚举 + */ + public enum TaskState { + /** + * 等待执行 + */ + WAITING, + + /** + * 正在执行 + */ + RUNNING, + + /** + * 执行完成 + */ + COMPLETED, + + /** + * 执行失败 + */ + FAILED, + + /** + * 已取消 + */ + CANCELLED + } +} diff --git a/back/src/main/java/com/linkwork/model/dto/ClusterEventDTO.java b/back/src/main/java/com/linkwork/model/dto/ClusterEventDTO.java new file mode 100644 index 0000000..f460cf9 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/ClusterEventDTO.java @@ -0,0 +1,18 @@ +package com.linkwork.model.dto; + +import lombok.Builder; +import lombok.Data; + +@Data +@Builder +public class ClusterEventDTO { + private String type; + private String reason; + private String message; + private String objectKind; + private String objectName; + private String namespace; + private String firstTimestamp; + private String lastTimestamp; + private Integer count; +} diff --git a/back/src/main/java/com/linkwork/model/dto/ClusterNodeInfo.java b/back/src/main/java/com/linkwork/model/dto/ClusterNodeInfo.java new file mode 100644 index 0000000..0067639 --- /dev/null +++ 
// ===== back/src/main/java/com/linkwork/model/dto/ClusterNodeInfo.java =====
package com.linkwork.model.dto;

import lombok.Builder;
import lombok.Data;

import java.util.List;

/**
 * Per-node snapshot of cluster capacity and usage for the cluster view.
 * Usage percentages may be null when metrics data is unavailable (they are
 * boxed {@code Double}, unlike the primitive capacity/usage counters).
 */
@Data
@Builder
public class ClusterNodeInfo {
    private String name;
    private String status;
    // NOTE(review): element type restored as String (node role names) — generic
    // parameters were stripped in the mangled source; confirm against callers.
    private List<String> roles;
    private String kubeletVersion;
    private long cpuCapacity;
    private long cpuAllocatable;
    private long cpuUsage;
    private Double cpuUsagePercent;
    private long memCapacity;
    private long memAllocatable;
    private long memUsage;
    private Double memUsagePercent;
    private int podCount;
    private int podCapacity;
}

// ===== back/src/main/java/com/linkwork/model/dto/ClusterOverviewDTO.java =====
package com.linkwork.model.dto;

import lombok.Builder;
import lombok.Data;

/**
 * Namespace-level cluster overview: pod phase counts plus aggregated
 * CPU (millicores) and memory (bytes) totals and usage.
 */
@Data
@Builder
public class ClusterOverviewDTO {
    private String namespace;
    private int totalPods;
    private int runningPods;
    private int pendingPods;
    private int failedPods;
    private int succeededPods;
    private long totalCpuMillicores;
    private long usedCpuMillicores;
    private Double cpuUsagePercent;
    private long totalMemoryBytes;
    private long usedMemoryBytes;
    private Double memoryUsagePercent;
    private int podGroupCount;
    private int nodeCount;
}

// ===== back/src/main/java/com/linkwork/model/dto/ClusterPodInfo.java =====
package com.linkwork.model.dto;

import lombok.Builder;
import lombok.Data;

import java.util.List;

/**
 * Detailed information for a single pod shown in the cluster view,
 * including its owning pod group, container statuses, and resource usage.
 */
@Data
@Builder
public class ClusterPodInfo {
    private String name;
    private String namespace;
    private String phase;
    private String nodeName;
    private String podGroupName;
    private String podGroupPhase;
    private String serviceId;
    private String userId;
    // Element type restored as ContainerStatusInfo (mirrors PodStatusInfo.containers);
    // generics were stripped in the mangled source — TODO confirm.
    private List<ContainerStatusInfo> containers;
    private int restartCount;
    private String startTime;
    private String age;
    // Restored as List<String> (container image references) — TODO confirm.
    private List<String> images;
    private ResourceUsageInfo resourceUsage;
}

// ===== back/src/main/java/com/linkwork/model/dto/ContainerStatusInfo.java =====
package com.linkwork.model.dto;

import lombok.Builder;
import lombok.Data;

/**
 * Container status information.
 */
@Data
@Builder
public class ContainerStatusInfo {
    private String name;                     // container name (agent/runner)
    private boolean ready;                   // whether the container is ready
    private String state;                    // waiting/running/terminated
    private String reason;                   // reason for the current state
    private Integer exitCode;                // exit code (when terminated)
    private Integer restartCount;            // restart count

    // Resource usage (from metrics-server), container level.
    private ResourceUsageInfo resourceUsage;
}

// ===== back/src/main/java/com/linkwork/model/dto/CreateFolderRequest.java =====
package com.linkwork.model.dto;

import lombok.Data;

/**
 * Request body for creating a folder inside a file space.
 */
@Data
public class CreateFolderRequest {
    private String name;
    private String spaceType;
    private String workstationId;
    private String parentId;
}

// ===== back/src/main/java/com/linkwork/model/dto/CredentialConfig.java =====
package com.linkwork.model.dto;

import lombok.Builder;
import lombok.Data;

/**
 * Credential configuration: how a stored credential is exposed to a
 * container (mounted at a path or injected as an environment variable).
 */
@Data
@Builder
public class CredentialConfig {
    private String credentialId;
    private String key;
    private String type;
    private String mountPath;
    private String envName;
}

// ===== back/src/main/java/com/linkwork/model/dto/CronJobCreateRequest.java =====
package com.linkwork.model.dto;

import jakarta.validation.constraints.NotBlank;
import jakarta.validation.constraints.NotNull;
import jakarta.validation.constraints.Size;
import lombok.Data;

import java.time.LocalDateTime;

/**
 * Request body for creating a scheduled (cron) job.
 * Exactly one of {@code cronExpr}, {@code intervalMs}, or {@code runAt}
 * is expected depending on {@code scheduleType} — not enforced here.
 */
@Data
public class CronJobCreateRequest {
    @NotBlank(message = "任务名称不能为空")
    private String jobName;

    @NotNull(message = "岗位ID不能为空")
    private Long roleId;

    @NotBlank(message = "模型ID不能为空")
    private String modelId;

    @NotBlank(message = "调度类型不能为空")
    private String scheduleType;

    private String cronExpr;
    private Long intervalMs;
    private LocalDateTime runAt;
    private String timezone;

    @NotBlank(message = "任务指令不能为空")
    @Size(max = 10000, message = "任务指令长度不能超过10000字符")
    private String taskContent;

    private Boolean deleteAfterRun;
    private Integer maxRetry;
    private String notifyMode;
    private String notifyTarget;
}

// ===== back/src/main/java/com/linkwork/model/dto/CronJobResponse.java =====
package com.linkwork.model.dto;

import com.fasterxml.jackson.annotation.JsonFormat;
import lombok.Data;

import java.time.LocalDateTime;
import java.util.List;

/**
 * API response describing a cron job, its schedule, and run statistics.
 */
@Data
public class CronJobResponse {
    private Long id;
    private String jobName;
    private String creatorId;
    private String creatorName;
    private Long roleId;
    private String roleName;
    private String modelId;
    private String scheduleType;
    private String cronExpr;
    private Long intervalMs;
    @JsonFormat(pattern = "yyyy-MM-dd'T'HH:mm:ss")
    private LocalDateTime runAt;
    private String timezone;
    private String taskContent;
    private Boolean enabled;
    private Boolean deleteAfterRun;
    private Integer maxRetry;
    private Integer consecutiveFailures;
    @JsonFormat(pattern = "yyyy-MM-dd'T'HH:mm:ss")
    private LocalDateTime nextFireTime;
    // Upcoming fire-time preview; element type restored as LocalDateTime —
    // generics were stripped in the mangled source, TODO confirm against serializer output.
    private List<LocalDateTime> nextFireTimes;
    private String notifyMode;
    private String notifyTarget;
    private Integer totalRuns;
    @JsonFormat(pattern = "yyyy-MM-dd'T'HH:mm:ss")
    private LocalDateTime lastRunTime;
    private String lastRunStatus;
    @JsonFormat(pattern = "yyyy-MM-dd'T'HH:mm:ss")
    private LocalDateTime createdAt;
    @JsonFormat(pattern = "yyyy-MM-dd'T'HH:mm:ss")
    private LocalDateTime updatedAt;
}

// ===== back/src/main/java/com/linkwork/model/dto/CronJobRunResponse.java =====
package com.linkwork.model.dto;

import com.fasterxml.jackson.annotation.JsonFormat;
import lombok.Data;

import java.time.LocalDateTime;

/**
 * API response describing a single execution (run) of a cron job.
 */
@Data
public class CronJobRunResponse {
    private Long id;
    private Long cronJobId;
    private String taskNo;
    private String creatorId;
    private Long roleId;
    private String status;
    private String triggerType;
    @JsonFormat(pattern = "yyyy-MM-dd'T'HH:mm:ss")
    private LocalDateTime plannedFireTime;
    @JsonFormat(pattern = "yyyy-MM-dd'T'HH:mm:ss")
    private LocalDateTime startedAt;
    @JsonFormat(pattern = "yyyy-MM-dd'T'HH:mm:ss")
    private LocalDateTime finishedAt;
    private Long durationMs;
    private String errorMessage;
    @JsonFormat(pattern = "yyyy-MM-dd'T'HH:mm:ss")
    private LocalDateTime createdAt;
}
// ===== back/src/main/java/com/linkwork/model/dto/CronJobToggleRequest.java =====
package com.linkwork.model.dto;

import jakarta.validation.constraints.NotNull;
import lombok.Data;

/**
 * Request body for enabling/disabling a cron job.
 */
@Data
public class CronJobToggleRequest {
    @NotNull(message = "enabled 不能为空")
    private Boolean enabled;
}

// ===== back/src/main/java/com/linkwork/model/dto/CronJobUpdateRequest.java =====
package com.linkwork.model.dto;

import jakarta.validation.constraints.NotBlank;
import jakarta.validation.constraints.NotNull;
import jakarta.validation.constraints.Size;
import lombok.Data;

import java.time.LocalDateTime;

/**
 * Request body for updating a cron job; field-for-field identical to
 * {@link CronJobCreateRequest}.
 */
@Data
public class CronJobUpdateRequest {
    @NotBlank(message = "任务名称不能为空")
    private String jobName;

    @NotNull(message = "岗位ID不能为空")
    private Long roleId;

    @NotBlank(message = "模型ID不能为空")
    private String modelId;

    @NotBlank(message = "调度类型不能为空")
    private String scheduleType;

    private String cronExpr;
    private Long intervalMs;
    private LocalDateTime runAt;
    private String timezone;

    @NotBlank(message = "任务指令不能为空")
    @Size(max = 10000, message = "任务指令长度不能超过10000字符")
    private String taskContent;

    private Boolean deleteAfterRun;
    private Integer maxRetry;
    private String notifyMode;
    private String notifyTarget;
}

// ===== back/src/main/java/com/linkwork/model/dto/CronSchedulePreviewRequest.java =====
package com.linkwork.model.dto;

import lombok.Data;

import java.time.LocalDateTime;

/**
 * Request body for previewing upcoming fire times of a schedule
 * before the cron job is saved.
 */
@Data
public class CronSchedulePreviewRequest {
    private String scheduleType;
    private String cronExpr;
    private Long intervalMs;
    private LocalDateTime runAt;
    private String timezone;
    private Integer limit;
}

// ===== back/src/main/java/com/linkwork/model/dto/FileMentionResponse.java =====
package com.linkwork.model.dto;

import lombok.Data;

import java.time.LocalDateTime;

/**
 * Lightweight file descriptor returned for @-mention style lookups.
 */
@Data
public class FileMentionResponse {
    private String fileId;
    private String fileName;
    private String fileType;
    private Long fileSize;
    private String spaceType;
    private String workstationId;
    private LocalDateTime createdAt;
}

// ===== back/src/main/java/com/linkwork/model/dto/FileNodeResponse.java =====
package com.linkwork.model.dto;

import lombok.Data;

import java.time.LocalDateTime;

/**
 * A node (file or folder) in the file-space tree.
 */
@Data
public class FileNodeResponse {
    private String nodeId;
    private String parentId;
    private String entryType;
    private String name;
    private String spaceType;
    private String workstationId;
    private String fileId;
    private Long fileSize;
    private String fileType;
    private String parseStatus;
    private String memoryIndexStatus;
    private LocalDateTime createdAt;
    private LocalDateTime updatedAt;
    private boolean hasChildren;
}

// ===== back/src/main/java/com/linkwork/model/dto/FileResponse.java =====
package com.linkwork.model.dto;

import lombok.Data;

import java.time.LocalDateTime;

/**
 * API response describing a stored file and its processing status.
 */
@Data
public class FileResponse {
    private String fileId;
    private String fileName;
    private Long fileSize;
    private String fileType;
    private String contentType;
    private String spaceType;
    private String workstationId;
    private String parseStatus;
    private String memoryIndexStatus;
    private LocalDateTime createdAt;
}

// ===== back/src/main/java/com/linkwork/model/dto/FileSpaceSyncRequest.java =====
package com.linkwork.model.dto;

import lombok.Data;

/**
 * Request body for triggering a file-space synchronization scan.
 */
@Data
public class FileSpaceSyncRequest {
    private String spaceType;
    private String workstationId;
}

// ===== back/src/main/java/com/linkwork/model/dto/FileSpaceSyncResponse.java =====
package com.linkwork.model.dto;

import lombok.AllArgsConstructor;
import lombok.Data;

/**
 * Result of a file-space synchronization: how many entries were
 * scanned, synced, and skipped.
 */
@Data
@AllArgsConstructor
public class FileSpaceSyncResponse {
    private String spaceType;
    private String workstationId;
    private int scannedCount;
    private int syncedCount;
    private int skippedCount;
}

// ===== back/src/main/java/com/linkwork/model/dto/FileTransferRequest.java =====
package com.linkwork.model.dto;

import com.linkwork.model.enums.ConflictPolicy;
import lombok.Data;

/**
 * Request body for moving/copying a file into another space.
 * Conflict handling is resolved via {@link #resolveConflictPolicy()}.
 */
@Data
public class FileTransferRequest {
    private String targetSpaceType;
    private String targetWorkstationId;
    private String targetParentId;
    private String conflictPolicy;
    private String newName;

    /**
     * @deprecated Use {@link #conflictPolicy} = "OVERWRITE" instead.
     */
    @Deprecated
    private Boolean overwrite;

    /**
     * Resolves the effective conflict policy: an explicit non-blank
     * {@code conflictPolicy} wins; otherwise the legacy {@code overwrite}
     * flag maps to OVERWRITE; the default is REJECT.
     */
    public ConflictPolicy resolveConflictPolicy() {
        if (conflictPolicy != null && !conflictPolicy.isBlank()) {
            return ConflictPolicy.fromString(conflictPolicy);
        }
        if (Boolean.TRUE.equals(overwrite)) {
            return ConflictPolicy.OVERWRITE;
        }
        return ConflictPolicy.REJECT;
    }
}

// ===== back/src/main/java/com/linkwork/model/dto/GeneratedSpec.java =====
package com.linkwork.model.dto;

import lombok.Builder;
import lombok.Data;

import java.util.List;
import java.util.Map;

/**
 * Generated spec result (used for preview).
 */
@Data
@Builder
public class GeneratedSpec {
    private String serviceId;
    // Generic types restored (stripped in the mangled source) — TODO confirm:
    // YAML-as-map is the conventional shape for these fields.
    private Map<String, Object> podGroupSpec;        // PodGroup YAML
    private List<Map<String, Object>> podSpecs;      // list of Pod YAMLs
    private String composeYaml;                      // Docker Compose YAML (COMPOSE mode only)
}

// ===== back/src/main/java/com/linkwork/model/dto/ImageBuildResult.java =====
package com.linkwork.model.dto;

import lombok.Builder;
import lombok.Data;

/**
 * Result of an image build.
 */
@Data
@Builder
public class ImageBuildResult {

    /** Whether the build succeeded. */
    private boolean success;

    /**
     * Built agent image address.
     * Format: {registry}/service-{serviceId}-agent:{timestamp}
     */
    private String agentImageTag;

    /**
     * Built runner image address (sidecar mode).
     * Format: {registry}/service-{serviceId}-runner:{timestamp}
     * Null in "alone" mode.
     */
    private String runnerImageTag;

    /** Build duration in milliseconds. */
    private long buildDurationMs;

    /**
     * Whether the image was pushed to a registry.
     * True for K8s mode, false for Compose mode.
     */
    private boolean pushed;

    /** Error message (set on failure). */
    private String errorMessage;

    /** Creates a success result. */
    public static ImageBuildResult success(String agentImageTag, String runnerImageTag,
                                           long buildDurationMs, boolean pushed) {
        return ImageBuildResult.builder()
                .success(true)
                .agentImageTag(agentImageTag)
                .runnerImageTag(runnerImageTag)
                .buildDurationMs(buildDurationMs)
                .pushed(pushed)
                .build();
    }

    /** Creates a failure result. */
    public static ImageBuildResult failed(String errorMessage) {
        return ImageBuildResult.builder()
                .success(false)
                .errorMessage(errorMessage)
                .build();
    }
}

// ===== back/src/main/java/com/linkwork/model/dto/McpDiscoverResult.java =====
package com.linkwork.model.dto;

import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;

import java.util.List;
import java.util.Map;

/**
 * MCP server tool-discovery result DTO.
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class McpDiscoverResult {

    /** Whether discovery succeeded. */
    private boolean success;

    /** Failure reason. */
    private String error;

    /** Name reported by the MCP server. */
    private String serverName;

    /** Version reported by the MCP server. */
    private String serverVersion;

    /** MCP protocol version. */
    private String protocolVersion;

    /** Discovered tools; element type restored as McpTool (generics stripped in source). */
    private List<McpTool> tools;

    /**
     * MCP tool definition.
     */
    @Data
    @Builder
    @NoArgsConstructor
    @AllArgsConstructor
    public static class McpTool {

        /** Tool name. */
        private String name;

        /** Tool description. */
        private String description;

        /** JSON Schema of the input parameters. */
        private Map<String, Object> inputSchema;
    }
}
a/back/src/main/java/com/linkwork/model/dto/McpProbeResult.java b/back/src/main/java/com/linkwork/model/dto/McpProbeResult.java new file mode 100644 index 0000000..656f791 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/McpProbeResult.java @@ -0,0 +1,32 @@ +package com.linkwork.model.dto; + +import lombok.Builder; +import lombok.Data; + +/** + * MCP Server 探活结果 DTO + */ +@Data +@Builder +public class McpProbeResult { + + /** + * 健康状态:online / degraded / offline + */ + private String status; + + /** + * 探活延迟(毫秒) + */ + private int latencyMs; + + /** + * 探活结果描述,例如 "HTTP 200 (123ms)" + */ + private String message; + + /** + * 实际探测的 URL + */ + private String probeUrl; +} diff --git a/back/src/main/java/com/linkwork/model/dto/MemoryIndexBatchRequest.java b/back/src/main/java/com/linkwork/model/dto/MemoryIndexBatchRequest.java new file mode 100644 index 0000000..44ebe52 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/MemoryIndexBatchRequest.java @@ -0,0 +1,12 @@ +package com.linkwork.model.dto; + +import jakarta.validation.constraints.NotEmpty; +import lombok.Data; + +import java.util.List; + +@Data +public class MemoryIndexBatchRequest { + @NotEmpty + private List filePaths; +} diff --git a/back/src/main/java/com/linkwork/model/dto/MemoryIndexJob.java b/back/src/main/java/com/linkwork/model/dto/MemoryIndexJob.java new file mode 100644 index 0000000..d6c3631 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/MemoryIndexJob.java @@ -0,0 +1,31 @@ +package com.linkwork.model.dto; + +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +@Data +@Builder +@NoArgsConstructor +@AllArgsConstructor +public class MemoryIndexJob { + + public enum JobType { + FILE_UPLOAD, + MEMORY_WRITEBACK, + SESSION_SUMMARY + } + + private String jobId; + private String workstationId; + private String userId; + private JobType jobType; + private String filePath; + private String content; + private 
String source; + private String fileType; + private String storageType; + private String objectName; + private String collectionName; +} diff --git a/back/src/main/java/com/linkwork/model/dto/MemoryIngestRequest.java b/back/src/main/java/com/linkwork/model/dto/MemoryIngestRequest.java new file mode 100644 index 0000000..9350a4c --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/MemoryIngestRequest.java @@ -0,0 +1,11 @@ +package com.linkwork.model.dto; + +import jakarta.validation.constraints.NotBlank; +import lombok.Data; + +@Data +public class MemoryIngestRequest { + @NotBlank + private String content; + private String source = ""; +} diff --git a/back/src/main/java/com/linkwork/model/dto/MemorySearchRequest.java b/back/src/main/java/com/linkwork/model/dto/MemorySearchRequest.java new file mode 100644 index 0000000..dffa262 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/MemorySearchRequest.java @@ -0,0 +1,11 @@ +package com.linkwork.model.dto; + +import jakarta.validation.constraints.NotBlank; +import lombok.Data; + +@Data +public class MemorySearchRequest { + @NotBlank + private String query; + private int topK = 10; +} diff --git a/back/src/main/java/com/linkwork/model/dto/MergedConfig.java b/back/src/main/java/com/linkwork/model/dto/MergedConfig.java new file mode 100644 index 0000000..c0f4392 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/MergedConfig.java @@ -0,0 +1,139 @@ +package com.linkwork.model.dto; + +import com.linkwork.config.EnvConfig.FilePlacementConfig; +import com.linkwork.config.EnvConfig.OssMountConfig; +import com.linkwork.model.enums.DeployMode; +import com.linkwork.model.enums.PodMode; +import lombok.Builder; +import lombok.Data; + +import java.util.Map; + +/** + * 融合后的配置(内部使用) + * + * 设计说明: + * - 镜像在 Build 时根据环境变量动态构建 + * - token 写入容器后仅 executor 可访问 + */ +@Data +@Builder +public class MergedConfig { + // 服务标识 + private String serviceId; + private String userId; + + // 岗位信息(用于运行时 MCP 配置拉取) + private 
Long roleId; + + // 模式配置 + private DeployMode deployMode; + private PodMode podMode; + private Integer podCount; + + // K8s 调度配置 + private String namespace; + private String queueName; + private String priorityClassName; + + // ========== 镜像配置 ========== + /** + * Agent 镜像地址 + * 在镜像构建后设置为构建的镜像地址 + */ + private String agentImage; + + /** + * Runner 镜像地址(Sidecar 模式) + * 在镜像构建后设置为构建的镜像地址 + */ + private String runnerImage; + + /** + * 镜像构建耗时(毫秒) + */ + private Long imageBuildDurationMs; + + /** + * 镜像拉取策略(Always/IfNotPresent/Never) + */ + private String imagePullPolicy; + + /** + * K8s 拉取私有镜像的 Secret 名称 + */ + private String imagePullSecret; + + // ========== 构建参数(用于镜像构建)========== + /** + * 环境变量列表(用于镜像构建) + */ + private Map buildEnvVars; + + /** + * Agent 基础镜像 + */ + private String agentBaseImage; + + /** + * Runner 基础镜像(Sidecar 模式) + */ + private String runnerBaseImage; + + /** + * 镜像仓库地址 + */ + private String imageRegistry; + + // Agent 启动脚本配置 + private String mainPyUrl; // main.py 下载链接(启动 agent+executor) + + // 凭证配置 + private String token; // API 凭证(写入容器,仅 executor 可访问) + + // 文件放置配置 + private FilePlacementConfig filePlacement; + + // 资源配置 + private ResourceSpec agentResources; + private ResourceSpec runnerResources; + + // 网络配置 + private String apiBaseUrl; + private String wsBaseUrl; + private String llmGatewayUrl; + private String redisUrl; + + // SSH 配置(仅 Sidecar) + private Integer sshPort; + + // ========== Agent 启动配置(双容器模式)========== + /** + * 工位 ID (WORKSTATION_ID 环境变量,默认使用 serviceId) + */ + private String workstationId; + + /** + * Agent config.json 内容(用于创建 ConfigMap 挂载到 /opt/agent/config.json) + */ + private String configJson; + + // 执行限制 + private Integer maxSteps; + private Integer maxRuntimeSeconds; + private Integer workspaceSizeLimit; + + // 任务元信息 + private String model; + private String runnerType; + private String description; + + // 回调配置 + private String callbackUrl; + + // 快速恢复配置 + private String preferredNode; // 优先调度节点(Resume 时使用) + + // OSS 挂载配置 
+ private OssMountConfig ossMount; +} diff --git a/back/src/main/java/com/linkwork/model/dto/PodGroupStatusInfo.java b/back/src/main/java/com/linkwork/model/dto/PodGroupStatusInfo.java new file mode 100644 index 0000000..f544bc9 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/PodGroupStatusInfo.java @@ -0,0 +1,19 @@ +package com.linkwork.model.dto; + +import lombok.Builder; +import lombok.Data; + +/** + * PodGroup 状态信息 + */ +@Data +@Builder +public class PodGroupStatusInfo { + private String name; // PodGroup 名称 + private String phase; // Pending/Running/Succeeded/Failed/Unknown + private Integer minMember; // 最小成员数 + private Integer running; // 运行中 Pod 数 + private Integer succeeded; // 成功 Pod 数 + private Integer failed; // 失败 Pod 数 + private Integer pending; // 等待中 Pod 数 +} diff --git a/back/src/main/java/com/linkwork/model/dto/PodLogResponseDTO.java b/back/src/main/java/com/linkwork/model/dto/PodLogResponseDTO.java new file mode 100644 index 0000000..ab79a70 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/PodLogResponseDTO.java @@ -0,0 +1,14 @@ +package com.linkwork.model.dto; + +import lombok.Builder; +import lombok.Data; + +@Data +@Builder +public class PodLogResponseDTO { + private String podName; + private String namespace; + private String containerName; + private String logs; + private int tailLines; +} diff --git a/back/src/main/java/com/linkwork/model/dto/PodStatusInfo.java b/back/src/main/java/com/linkwork/model/dto/PodStatusInfo.java new file mode 100644 index 0000000..b0669dd --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/PodStatusInfo.java @@ -0,0 +1,24 @@ +package com.linkwork.model.dto; + +import lombok.Builder; +import lombok.Data; + +import java.time.Instant; +import java.util.List; + +/** + * Pod 状态信息 + */ +@Data +@Builder +public class PodStatusInfo { + private String name; // Pod 名称 + private String phase; // Pending/Running/Succeeded/Failed + private String nodeName; // 所在节点 + private String 
nodeHostname; // 节点主机名 + private Instant startTime; // 启动时间 + private List containers; // 容器状态列表 + + // 资源使用(来自 metrics-server) + private ResourceUsageInfo resourceUsage; // Pod 级别资源使用汇总 +} diff --git a/back/src/main/java/com/linkwork/model/dto/ReportExportFieldOption.java b/back/src/main/java/com/linkwork/model/dto/ReportExportFieldOption.java new file mode 100644 index 0000000..1ad6b3c --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/ReportExportFieldOption.java @@ -0,0 +1,34 @@ +package com.linkwork.model.dto; + +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; + +/** + * 报表字段元数据 + */ +@Data +@NoArgsConstructor +@AllArgsConstructor +public class ReportExportFieldOption { + + /** + * Java 字段名 + */ + private String field; + + /** + * 数据库列名 + */ + private String column; + + /** + * 前端展示名 + */ + private String label; + + /** + * Java 类型名 + */ + private String javaType; +} diff --git a/back/src/main/java/com/linkwork/model/dto/ReportExportFieldResponse.java b/back/src/main/java/com/linkwork/model/dto/ReportExportFieldResponse.java new file mode 100644 index 0000000..1fe5589 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/ReportExportFieldResponse.java @@ -0,0 +1,26 @@ +package com.linkwork.model.dto; + +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; + +import java.util.List; + +/** + * 报表可导出字段响应 + */ +@Data +@NoArgsConstructor +@AllArgsConstructor +public class ReportExportFieldResponse { + + /** + * 导出类型:task / role + */ + private String type; + + /** + * 字段定义列表 + */ + private List fields; +} diff --git a/back/src/main/java/com/linkwork/model/dto/ReportExportRequest.java b/back/src/main/java/com/linkwork/model/dto/ReportExportRequest.java new file mode 100644 index 0000000..41de971 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/ReportExportRequest.java @@ -0,0 +1,41 @@ +package com.linkwork.model.dto; + +import 
jakarta.validation.constraints.NotBlank; +import lombok.Data; + +import java.util.List; + +/** + * 报表导出请求 + */ +@Data +public class ReportExportRequest { + + /** + * 导出类型:task / role + */ + @NotBlank(message = "导出类型不能为空") + private String type; + + /** + * 开始时间,支持 yyyy-MM-dd'T'HH:mm 或 yyyy-MM-dd'T'HH:mm:ss + */ + @NotBlank(message = "开始时间不能为空") + private String startTime; + + /** + * 结束时间,支持 yyyy-MM-dd'T'HH:mm 或 yyyy-MM-dd'T'HH:mm:ss + */ + @NotBlank(message = "结束时间不能为空") + private String endTime; + + /** + * 导出字段(为空时导出全部) + */ + private List fields; + + /** + * 是否附带消息流(仅 task 生效) + */ + private Boolean includeEventStream; +} diff --git a/back/src/main/java/com/linkwork/model/dto/ResourceConfig.java b/back/src/main/java/com/linkwork/model/dto/ResourceConfig.java new file mode 100644 index 0000000..27736f7 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/ResourceConfig.java @@ -0,0 +1,14 @@ +package com.linkwork.model.dto; + +import lombok.Data; + +/** + * 资源配置 + */ +@Data +public class ResourceConfig { + private String cpuRequest; // "1" / "500m" + private String cpuLimit; + private String memoryRequest; // "2Gi" / "512Mi" + private String memoryLimit; +} diff --git a/back/src/main/java/com/linkwork/model/dto/ResourceSpec.java b/back/src/main/java/com/linkwork/model/dto/ResourceSpec.java new file mode 100644 index 0000000..60b4e38 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/ResourceSpec.java @@ -0,0 +1,20 @@ +package com.linkwork.model.dto; + +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +/** + * 资源规格 + */ +@Data +@Builder +@NoArgsConstructor +@AllArgsConstructor +public class ResourceSpec { + private String cpuRequest; + private String cpuLimit; + private String memoryRequest; + private String memoryLimit; +} diff --git a/back/src/main/java/com/linkwork/model/dto/ResourceStatus.java b/back/src/main/java/com/linkwork/model/dto/ResourceStatus.java new file mode 
100644 index 0000000..f53a9ea --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/ResourceStatus.java @@ -0,0 +1,75 @@ +package com.linkwork.model.dto; + +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +/** + * 系统资源状态 DTO + */ +@Data +@Builder +@NoArgsConstructor +@AllArgsConstructor +public class ResourceStatus { + + /** + * CPU 使用率 (0.0 ~ 1.0) + * -1 表示无法获取 + */ + private double cpuUsage; + + /** + * 内存使用率 (0.0 ~ 1.0) + * -1 表示无法获取 + */ + private double memoryUsage; + + /** + * 总内存(字节) + */ + private long totalMemory; + + /** + * 可用内存(字节) + */ + private long freeMemory; + + /** + * 可用处理器数量 + */ + private int availableProcessors; + + /** + * 获取 CPU 使用率百分比字符串 + */ + public String getCpuUsagePercent() { + if (cpuUsage < 0) return "N/A"; + return String.format("%.1f%%", cpuUsage * 100); + } + + /** + * 获取内存使用率百分比字符串 + */ + public String getMemoryUsagePercent() { + if (memoryUsage < 0) return "N/A"; + return String.format("%.1f%%", memoryUsage * 100); + } + + /** + * 获取可读的内存信息 + */ + public String getMemoryInfo() { + return String.format("%s / %s", + formatBytes(totalMemory - freeMemory), + formatBytes(totalMemory)); + } + + private String formatBytes(long bytes) { + if (bytes < 1024) return bytes + " B"; + if (bytes < 1024 * 1024) return String.format("%.1f KB", bytes / 1024.0); + if (bytes < 1024 * 1024 * 1024) return String.format("%.1f MB", bytes / (1024.0 * 1024)); + return String.format("%.1f GB", bytes / (1024.0 * 1024 * 1024)); + } +} diff --git a/back/src/main/java/com/linkwork/model/dto/ResourceUsageInfo.java b/back/src/main/java/com/linkwork/model/dto/ResourceUsageInfo.java new file mode 100644 index 0000000..85a6ed4 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/ResourceUsageInfo.java @@ -0,0 +1,26 @@ +package com.linkwork.model.dto; + +import lombok.Builder; +import lombok.Data; + +/** + * 资源使用信息(来自 metrics-server) + */ +@Data +@Builder +public class 
ResourceUsageInfo { + private String cpuUsage; // CPU 使用量,如 "100m" (100 millicores) + private String memoryUsage; // 内存使用量,如 "256Mi" + private Long cpuMillicores; // CPU 使用量(毫核) + private Long memoryBytes; // 内存使用量(字节) + + // 资源限制 + private String cpuLimit; // CPU 限制 + private String memoryLimit; // 内存限制 + private String cpuRequest; // CPU 请求 + private String memoryRequest; // 内存请求 + + // 使用率(百分比) + private Double cpuUsagePercent; // CPU 使用率 (usage/limit) + private Double memoryUsagePercent; // 内存使用率 (usage/limit) +} diff --git a/back/src/main/java/com/linkwork/model/dto/ScaleRequest.java b/back/src/main/java/com/linkwork/model/dto/ScaleRequest.java new file mode 100644 index 0000000..007b2b7 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/ScaleRequest.java @@ -0,0 +1,27 @@ +package com.linkwork.model.dto; + +import lombok.Data; + +/** + * 伸缩请求 + */ +@Data +public class ScaleRequest { + /** + * 目标 Pod 数量(可选) + * scale-up 时不指定则扩容到 maxPodCount + * scale 时必填 + */ + private Integer targetPodCount; + + /** + * 要删除的 Pod 名称(scale-down 时必填) + * 不指定则返回错误,防止误删 + */ + private String podName; + + /** + * 调用来源(用于日志追踪) + */ + private String source; +} diff --git a/back/src/main/java/com/linkwork/model/dto/ScaleResult.java b/back/src/main/java/com/linkwork/model/dto/ScaleResult.java new file mode 100644 index 0000000..0e311d4 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/ScaleResult.java @@ -0,0 +1,95 @@ +package com.linkwork.model.dto; + +import lombok.Builder; +import lombok.Data; + +import java.util.List; + +/** + * 伸缩结果 + */ +@Data +@Builder +public class ScaleResult { + private String serviceId; + private boolean success; + + /** + * 伸缩类型:SCALE_UP / SCALE_DOWN / NO_CHANGE + */ + private String scaleType; + + /** + * 伸缩前 Pod 数量 + */ + private int previousPodCount; + + /** + * 伸缩后 Pod 数量 + */ + private int currentPodCount; + + /** + * 最大 Pod 数量(初始配置) + */ + private int maxPodCount; + + /** + * 当前运行的 Pod 列表 + */ + private List runningPods; + + /** 
+ * 本次新增的 Pod 列表(扩容时) + */ + private List addedPods; + + /** + * 本次删除的 Pod 列表(缩容时) + */ + private List removedPods; + + /** + * 错误信息 + */ + private String errorMessage; + + public static ScaleResult success(String serviceId, String scaleType, + int previousCount, int currentCount, int maxCount, + List runningPods, + List addedPods, + List removedPods) { + return ScaleResult.builder() + .serviceId(serviceId) + .success(true) + .scaleType(scaleType) + .previousPodCount(previousCount) + .currentPodCount(currentCount) + .maxPodCount(maxCount) + .runningPods(runningPods) + .addedPods(addedPods) + .removedPods(removedPods) + .build(); + } + + public static ScaleResult noChange(String serviceId, int currentCount, int maxCount, + List runningPods) { + return ScaleResult.builder() + .serviceId(serviceId) + .success(true) + .scaleType("NO_CHANGE") + .previousPodCount(currentCount) + .currentPodCount(currentCount) + .maxPodCount(maxCount) + .runningPods(runningPods) + .build(); + } + + public static ScaleResult failed(String serviceId, String errorMessage) { + return ScaleResult.builder() + .serviceId(serviceId) + .success(false) + .errorMessage(errorMessage) + .build(); + } +} diff --git a/back/src/main/java/com/linkwork/model/dto/ScheduleApiResponse.java b/back/src/main/java/com/linkwork/model/dto/ScheduleApiResponse.java new file mode 100644 index 0000000..da617bc --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/ScheduleApiResponse.java @@ -0,0 +1,48 @@ +package com.linkwork.model.dto; + +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +import java.time.Instant; + +/** + * 调度模块通用 API 响应 + */ +@Data +@Builder +@NoArgsConstructor +@AllArgsConstructor +public class ScheduleApiResponse { + private boolean success; + private T data; + private String errorCode; + private String errorMessage; + private Instant timestamp; + + public static ScheduleApiResponse success(T data) { + return 
ScheduleApiResponse.builder() + .success(true) + .data(data) + .timestamp(Instant.now()) + .build(); + } + + public static ScheduleApiResponse error(String message) { + return ScheduleApiResponse.builder() + .success(false) + .errorMessage(message) + .timestamp(Instant.now()) + .build(); + } + + public static ScheduleApiResponse error(String code, String message) { + return ScheduleApiResponse.builder() + .success(false) + .errorCode(code) + .errorMessage(message) + .timestamp(Instant.now()) + .build(); + } +} diff --git a/back/src/main/java/com/linkwork/model/dto/ScheduleEvent.java b/back/src/main/java/com/linkwork/model/dto/ScheduleEvent.java new file mode 100644 index 0000000..64134e9 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/ScheduleEvent.java @@ -0,0 +1,44 @@ +package com.linkwork.model.dto; + +import com.fasterxml.jackson.annotation.JsonProperty; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +/** + * 统一信封格式 + * 遵循 data-format.md 规范定义 + * + * 所有写入 Redis Stream 的消息共享统一的外层结构 + */ +@Data +@Builder +@NoArgsConstructor +@AllArgsConstructor +public class ScheduleEvent { + + /** + * 事件类型枚举,大写下划线分隔 + * 例如:POD_SCHEDULING, POD_SCHEDULED, IMAGE_PULLING 等 + */ + @JsonProperty("event_type") + private String eventType; + + /** + * ISO 8601 带时区,精确到微秒 + * 格式:2026-01-29T10:00:00.000000+00:00 + */ + private String timestamp; + + /** + * 任务标识(对应 serviceId) + */ + @JsonProperty("task_id") + private String taskId; + + /** + * 业务负载,结构由 event_type 决定 + */ + private Object data; +} diff --git a/back/src/main/java/com/linkwork/model/dto/ServiceBuildRequest.java b/back/src/main/java/com/linkwork/model/dto/ServiceBuildRequest.java new file mode 100644 index 0000000..abdbe6f --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/ServiceBuildRequest.java @@ -0,0 +1,109 @@ +package com.linkwork.model.dto; + +import com.linkwork.model.enums.DeployMode; +import com.linkwork.model.enums.PodMode; 
+import com.linkwork.model.enums.ServiceType; +import jakarta.validation.constraints.NotBlank; +import jakarta.validation.constraints.NotEmpty; +import jakarta.validation.constraints.NotNull; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +import java.util.Map; + +/** + * 服务构建请求(Build 服务输入) + * + * 设计说明: + * - Build 服务会根据传入的环境变量实时构建镜像 + * - 构建流程:拉取基础镜像 → 注入环境变量 → 执行 build.sh → 推送镜像仓库 + * - K8s 模式:构建后推送到镜像仓库,然后创建 PodGroup/Pod + * - Compose 模式:本地构建,返回 docker-compose.yaml + * - token 写入容器后仅 executor 用户可访问 + * - ssh-key 在容器启动时生成,agent 和 executor 都可访问 + */ +@Data +@Builder +@NoArgsConstructor +@AllArgsConstructor +public class ServiceBuildRequest { + + // ========== 服务信息(必填)========== + @NotBlank + private String serviceId; // 服务唯一标识 + + @NotBlank + private String userId; // 用户 ID + + // ========== 构建追踪(可选,由前端生成)========== + /** + * 构建唯一标识,由前端生成 (UUID 格式) + * 用于关联构建记录和 Redis Stream 事件 + */ + private String buildId; + + /** + * 关联的岗位 ID + */ + private Long roleId; + + /** + * 岗位名称(用于构建记录快照) + */ + private String roleName; + + // ========== 服务信息(可选)========== + private String description; // 服务描述(可选) + + private ServiceType serviceType; // CODE_WRITING / TASK_EXECUTION(可选) + + // ========== 部署配置(必填)========== + @NotNull + private DeployMode deployMode; // K8S / COMPOSE + + // ========== 镜像构建配置(必填)========== + /** + * 环境变量列表(必填)- 用于镜像构建 + * 这些环境变量会被写入 Dockerfile 的 ENV 指令,并在 build.sh 中使用 + * 注意:token 也应放入此 Map 中,在 build.sh 执行前会 export + */ + @NotEmpty + private Map buildEnvVars; + + // ========== 镜像配置(可选)========== + /** + * Runner 基础镜像地址(Sidecar 模式使用) + * K8s + Sidecar 模式下,Runner 容器使用此镜像 + * 为空时使用系统默认配置:schedule.images.runner + * 注意:非 K8s 模式或 Alone 模式不使用此参数 + */ + private String runnerBaseImage; + + /** + * 镜像仓库地址(可选) + * 为空时仅本地构建,不推送远端仓库 + * K8s 模式下构建的镜像会推送到此仓库 + */ + private String imageRegistry; + + // ========== 可选配置 ========== + private PodMode podMode; // SIDECAR / ALONE,为空时使用默认值 + private Integer 
podCount; // Pod 数量,默认 1,最大 10(仅 K8s) + private Integer priority; // 0-100,默认 50 + private ResourceConfig resourceConfig; // 自定义资源配置 + + // ========== 工作目录配置 ========== + private Integer workspaceSizeLimit; // GB,默认 10 + + // ========== 回调配置(仅 K8s)========== + private String callbackUrl; // 状态回调 URL + + // ========== 快速恢复相关(Resume 时使用)========== + /** + * 优先调度节点(Resume 时设置) + * 设置后会在 Pod Spec 中添加节点亲和性,优先调度到原节点以命中镜像缓存 + */ + private String preferredNode; +} diff --git a/back/src/main/java/com/linkwork/model/dto/ServiceBuildResult.java b/back/src/main/java/com/linkwork/model/dto/ServiceBuildResult.java new file mode 100644 index 0000000..9116747 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/ServiceBuildResult.java @@ -0,0 +1,192 @@ +package com.linkwork.model.dto; + +import lombok.Builder; +import lombok.Data; + +import java.util.List; + +/** + * 服务构建结果(Build 服务输出) + * + * 包含镜像构建结果和 K8s 资源创建结果 + */ +@Data +@Builder +public class ServiceBuildResult { + private String serviceId; + private boolean success; + private String status; // 状态:SUCCESS, FAILED, BUILDING + private String podGroupName; // 创建的 PodGroup 名称(仅 K8s 模式) + private List podNames; // 创建的 Pod 名称列表(仅 K8s 模式,BUILDING 时为空) + private String queueName; // 分配的队列名称(仅 K8s 模式) + private String createdAt; + + // ========== 构建追踪 ========== + /** + * 构建 ID(用于订阅日志和查询状态) + */ + private String buildId; + + /** + * 提示信息(BUILDING 状态时的说明) + */ + private String message; + + // ========== 镜像构建结果 ========== + /** + * 构建的 Agent 镜像地址 + * 格式:{registry}/service-{serviceId}-agent:{timestamp} + * BUILDING 状态时为 null + */ + private String builtAgentImage; + + /** + * 镜像构建耗时(毫秒) + * BUILDING 状态时为 null + */ + private Long buildDurationMs; + + // ========== Compose 模式返回 ========== + /** + * docker-compose.yaml 内容(仅 Compose 模式,已废弃) + * @deprecated 使用 composeArchive 代替 + */ + @Deprecated + private String composeYaml; + + /** + * Compose 构建包 tar.gz 字节(仅 Compose 模式) + * 包含 
docker-compose.yaml、Dockerfile、build.sh、config.json、start.sh、README.md 等 + */ + private byte[] composeTar; + + // 调度信息(用于快速恢复) + private String scheduledNode; // 实际调度到的节点,用于保存到 Snapshot(BUILDING 时为 null) + + // 失败时 + private String errorCode; + private String errorMessage; + + /** + * K8s 模式构建成功 + */ + public static ServiceBuildResult successK8s(String serviceId, String podGroupName, + List podNames, String queueName, + String builtAgentImage, Long buildDurationMs) { + return ServiceBuildResult.builder() + .serviceId(serviceId) + .success(true) + .status("SUCCESS") + .podGroupName(podGroupName) + .podNames(podNames) + .queueName(queueName) + .builtAgentImage(builtAgentImage) + .buildDurationMs(buildDurationMs) + .createdAt(java.time.Instant.now().toString()) + .build(); + } + + /** + * K8s 模式构建成功(带调度节点) + */ + public static ServiceBuildResult successK8s(String serviceId, String podGroupName, + List podNames, String queueName, + String scheduledNode, + String builtAgentImage, Long buildDurationMs) { + return ServiceBuildResult.builder() + .serviceId(serviceId) + .success(true) + .status("SUCCESS") + .podGroupName(podGroupName) + .podNames(podNames) + .queueName(queueName) + .scheduledNode(scheduledNode) + .builtAgentImage(builtAgentImage) + .buildDurationMs(buildDurationMs) + .createdAt(java.time.Instant.now().toString()) + .build(); + } + + /** + * Compose 模式构建成功(返回 tar.gz 构建包) + */ + public static ServiceBuildResult successCompose(String serviceId, byte[] composeTar) { + return ServiceBuildResult.builder() + .serviceId(serviceId) + .success(true) + .status("SUCCESS") + .composeTar(composeTar) + .createdAt(java.time.Instant.now().toString()) + .build(); + } + + /** + * 兼容旧版本:K8s 模式构建成功(不含镜像信息) + * @deprecated 使用 successK8s 方法 + */ + @Deprecated + public static ServiceBuildResult success(String serviceId, String podGroupName, + List podNames, String queueName) { + return ServiceBuildResult.builder() + .serviceId(serviceId) + .success(true) + 
.podGroupName(podGroupName) + .podNames(podNames) + .queueName(queueName) + .createdAt(java.time.Instant.now().toString()) + .build(); + } + + /** + * 兼容旧版本:K8s 模式构建成功(带调度节点,不含镜像信息) + * @deprecated 使用 successK8s 方法 + */ + @Deprecated + public static ServiceBuildResult success(String serviceId, String podGroupName, + List podNames, String queueName, + String scheduledNode) { + return ServiceBuildResult.builder() + .serviceId(serviceId) + .success(true) + .podGroupName(podGroupName) + .podNames(podNames) + .queueName(queueName) + .scheduledNode(scheduledNode) + .createdAt(java.time.Instant.now().toString()) + .build(); + } + + public static ServiceBuildResult failed(String serviceId, String errorCode, String errorMessage) { + return ServiceBuildResult.builder() + .serviceId(serviceId) + .success(false) + .status("FAILED") + .errorCode(errorCode) + .errorMessage(errorMessage) + .createdAt(java.time.Instant.now().toString()) + .build(); + } + + /** + * 构建中(任务已提交,异步执行中) + * + * @param serviceId 服务 ID + * @param buildId 构建 ID(用于订阅日志) + * @param podGroupName PodGroup 名称 + * @param queueName 队列名称 + * @param message 提示信息 + */ + public static ServiceBuildResult building(String serviceId, String buildId, + String podGroupName, String queueName, String message) { + return ServiceBuildResult.builder() + .serviceId(serviceId) + .success(true) + .status("BUILDING") + .buildId(buildId) + .podGroupName(podGroupName) + .queueName(queueName) + .message(message) + .createdAt(java.time.Instant.now().toString()) + .build(); + } +} diff --git a/back/src/main/java/com/linkwork/model/dto/ServiceResumeResult.java b/back/src/main/java/com/linkwork/model/dto/ServiceResumeResult.java new file mode 100644 index 0000000..1333c94 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/ServiceResumeResult.java @@ -0,0 +1,79 @@ +package com.linkwork.model.dto; + +import com.linkwork.model.enums.ResumeType; +import lombok.Builder; +import lombok.Data; + +import java.util.List; + +/** + * 
服务恢复结果(Resume 服务输出) + */ +@Data +@Builder +public class ServiceResumeResult { + + private String serviceId; + private boolean success; + + // 恢复类型 + private ResumeType resumeType; // WARM / COLD + + // 如果快照不存在或已过期,需要完整请求重新构建 + private boolean requireFullRequest; + + // 成功时的信息 + private String podGroupName; + private List podNames; + private String scheduledNode; // 实际调度节点 + private boolean imageCached; // 是否命中镜像缓存 + private Integer estimatedReadySeconds; // 预计就绪时间(秒) + + // 失败时的信息 + private String errorCode; + private String message; + + /** + * 快照不存在或已过期 + */ + public static ServiceResumeResult snapshotNotFound(String serviceId) { + return ServiceResumeResult.builder() + .serviceId(serviceId) + .success(false) + .resumeType(ResumeType.COLD) + .requireFullRequest(true) + .errorCode("SNAPSHOT_NOT_FOUND") + .message("快照不存在或已过期,需要完整的 Build 请求") + .build(); + } + + /** + * 恢复成功 + */ + public static ServiceResumeResult success(String serviceId, String podGroupName, + List podNames, String scheduledNode, + boolean imageCached) { + return ServiceResumeResult.builder() + .serviceId(serviceId) + .success(true) + .resumeType(imageCached ? ResumeType.WARM : ResumeType.COLD) + .podGroupName(podGroupName) + .podNames(podNames) + .scheduledNode(scheduledNode) + .imageCached(imageCached) + .estimatedReadySeconds(imageCached ? 
20 : 90) + .build(); + } + + /** + * 恢复失败 + */ + public static ServiceResumeResult failed(String serviceId, String errorCode, String message) { + return ServiceResumeResult.builder() + .serviceId(serviceId) + .success(false) + .errorCode(errorCode) + .message(message) + .build(); + } +} diff --git a/back/src/main/java/com/linkwork/model/dto/ServiceSnapshot.java b/back/src/main/java/com/linkwork/model/dto/ServiceSnapshot.java new file mode 100644 index 0000000..74d5d8d --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/ServiceSnapshot.java @@ -0,0 +1,57 @@ +package com.linkwork.model.dto; + +import com.linkwork.model.enums.SnapshotStatus; +import lombok.Builder; +import lombok.Data; + +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.Set; + +/** + * 服务快照(用于快速重启) + * + * 设计说明: + * - Runner 由运行时 agent 启动,快照不保存 runner 相关信息 + */ +@Data +@Builder +public class ServiceSnapshot { + + // ========== 服务信息 ========== + private String serviceId; + private String userId; + + // ========== 原始请求(完整保存,用于恢复)========== + private String originalRequestJson; // ServiceBuildRequest JSON + + // ========== 镜像信息 ========== + private String agentImage; // Agent 镜像地址 + private String podMode; // SIDECAR / ALONE + + // ========== 调度信息(关键:用于节点亲和)========== + private String lastScheduledNode; // ★ 上次运行的节点 + + // ========== 伸缩信息 ========== + private Integer maxPodCount; // 最大 Pod 数量(初始配置) + private Integer currentPodCount; // 当前 Pod 数量 + private Set runningPodNames; // 当前运行中的 Pod 名称列表 + private Integer nextPodIndex; // 下一个 Pod 序号(用于生成唯一 Pod 名称) + + // ========== 时间信息 ========== + private Instant createdAt; // 首次创建时间 + private Instant lastActiveAt; // 最后活跃时间 + private Instant shutdownAt; // 关闭时间 + + // ========== 快照状态 ========== + private SnapshotStatus status; // ACTIVE / EXPIRED + private Integer resumeCount; // 恢复次数 + + /** + * 快照 24 小时后过期 + */ + public boolean isExpired() { + return shutdownAt != null && + shutdownAt.plus(24, 
ChronoUnit.HOURS).isBefore(Instant.now()); + } +} diff --git a/back/src/main/java/com/linkwork/model/dto/ServiceStatusResponse.java b/back/src/main/java/com/linkwork/model/dto/ServiceStatusResponse.java new file mode 100644 index 0000000..fef5744 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/ServiceStatusResponse.java @@ -0,0 +1,20 @@ +package com.linkwork.model.dto; + +import lombok.Builder; +import lombok.Data; + +import java.time.Instant; +import java.util.List; + +/** + * 服务状态响应(Status 服务输出) + */ +@Data +@Builder +public class ServiceStatusResponse { + private String serviceId; + private PodGroupStatusInfo podGroupStatus; // PodGroup 整体状态 + private List pods; // 各 Pod 详细状态 + private Instant createdAt; + private Instant updatedAt; +} diff --git a/back/src/main/java/com/linkwork/model/dto/StopResult.java b/back/src/main/java/com/linkwork/model/dto/StopResult.java new file mode 100644 index 0000000..ad26d7e --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/StopResult.java @@ -0,0 +1,30 @@ +package com.linkwork.model.dto; + +import lombok.Builder; +import lombok.Data; + +/** + * 停止结果 + */ +@Data +@Builder +public class StopResult { + private String serviceId; + private boolean success; + private String errorMessage; + + public static StopResult success(String serviceId) { + return StopResult.builder() + .serviceId(serviceId) + .success(true) + .build(); + } + + public static StopResult failed(String serviceId, String errorMessage) { + return StopResult.builder() + .serviceId(serviceId) + .success(false) + .errorMessage(errorMessage) + .build(); + } +} diff --git a/back/src/main/java/com/linkwork/model/dto/TaskCompleteRequest.java b/back/src/main/java/com/linkwork/model/dto/TaskCompleteRequest.java new file mode 100644 index 0000000..cd35fe9 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/TaskCompleteRequest.java @@ -0,0 +1,80 @@ +package com.linkwork.model.dto; + +import jakarta.validation.constraints.NotBlank; +import 
jakarta.validation.constraints.NotNull; +import lombok.Data; + +import java.util.List; + +/** + * 任务完成回写请求 DTO + * + * Worker 在任务执行完毕后,通过此接口回写执行结果。 + * 调用方:momo-worker (Workstation 执行器) + */ +@Data +public class TaskCompleteRequest { + + /** + * 最终任务状态 + * 允许值:COMPLETED, FAILED + */ + @NotBlank(message = "任务状态不能为空") + private String status; + + /** + * 消耗的 Token 总数 + */ + @NotNull(message = "Token 消耗不能为空") + private Integer tokensUsed; + + /** + * 任务执行时长(毫秒) + */ + @NotNull(message = "执行时长不能为空") + private Long durationMs; + + /** + * 任务报告(可选) + */ + private Report report; + + @Data + public static class Report { + /** + * 执行摘要 + */ + private String summary; + + /** + * 完成度百分比 (0-100) + */ + private Integer completion; + + /** + * 审计评级 (A/B/C/D) + */ + private String audit; + + /** + * 产出物列表 + */ + private List artifacts; + + /** + * Git 分支名 + */ + private String branch; + + /** + * Git 提交哈希 + */ + private String commit; + } + + @Data + public static class Artifact { + private String name; + private String url; + } +} diff --git a/back/src/main/java/com/linkwork/model/dto/TaskCreateRequest.java b/back/src/main/java/com/linkwork/model/dto/TaskCreateRequest.java new file mode 100644 index 0000000..02af06f --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/TaskCreateRequest.java @@ -0,0 +1,39 @@ +package com.linkwork.model.dto; + +import jakarta.validation.constraints.NotBlank; +import jakarta.validation.constraints.NotNull; +import lombok.Data; + +import java.util.List; + +/** + * 创建任务请求 DTO + * + * 简化版本:前端只需传递核心参数,其他配置由后端根据岗位自动填充 + */ +@Data +public class TaskCreateRequest { + + /** + * 用户输入的任务指令 + */ + @NotBlank(message = "任务指令不能为空") + private String prompt; + + /** + * 执行岗位 ID + */ + @NotNull(message = "岗位 ID 不能为空") + private Long roleId; + + /** + * 选择的模型 ID(如 claude-opus-4-5, deepseek-v3 等) + */ + @NotBlank(message = "模型 ID 不能为空") + private String modelId; + + /** + * 用户上传的文档文件 ID 列表(OSS 文件 key) + */ + private List fileIds; +} diff --git 
a/back/src/main/java/com/linkwork/model/dto/TaskDispatchMessage.java b/back/src/main/java/com/linkwork/model/dto/TaskDispatchMessage.java new file mode 100644 index 0000000..3a66d90 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/TaskDispatchMessage.java @@ -0,0 +1,79 @@ +package com.linkwork.model.dto; + +import lombok.Builder; +import lombok.Data; + +import java.time.LocalDateTime; +import java.util.List; +import java.util.Map; + +/** + * 任务调度消息 DTO + * 用于 Redis 队列传输 + */ +@Data +@Builder +public class TaskDispatchMessage { + + /** + * 任务编号 + */ + private String taskNo; + + /** + * 岗位 ID + */ + private Long roleId; + + /** + * 岗位名称 + */ + private String roleName; + + /** + * 任务指令 + */ + private String prompt; + + /** + * 任务配置 + */ + private TaskConfig config; + + /** + * 创建者 ID + */ + private String creatorId; + + /** + * 创建者名称 + */ + private String creatorName; + + /** + * 创建时间 + */ + private LocalDateTime createdAt; + + /** + * 任务配置 + */ + @Data + @Builder + public static class TaskConfig { + private String image; + private String selectedModel; + private List mcp; + private List skills; + private List knowledge; + private List gitRepos; + private Map env; + } + + @Data + @Builder + public static class GitRepo { + private String id; + private String branch; + } +} diff --git a/back/src/main/java/com/linkwork/model/dto/TaskGitTokenResponse.java b/back/src/main/java/com/linkwork/model/dto/TaskGitTokenResponse.java new file mode 100644 index 0000000..2cae61c --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/TaskGitTokenResponse.java @@ -0,0 +1,25 @@ +package com.linkwork.model.dto; + +import lombok.Data; + +/** + * zzd 获取任务 Git token 响应 + */ +@Data +public class TaskGitTokenResponse { + + private String provider; + + private String tokenType; + + private String token; + + private String expiresAt; + + /** + * git commit 需要的提交身份(由 token 对应 Git 用户解析) + */ + private String commitUserName; + + private String commitUserEmail; +} diff --git 
a/back/src/main/java/com/linkwork/model/dto/TaskResponse.java b/back/src/main/java/com/linkwork/model/dto/TaskResponse.java new file mode 100644 index 0000000..af5b884 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/TaskResponse.java @@ -0,0 +1,92 @@ +package com.linkwork.model.dto; + +import com.linkwork.model.enums.TaskStatus; +import lombok.Data; + +import java.math.BigDecimal; +import java.util.List; +import java.util.Map; + +/** + * 任务响应 DTO + */ +@Data +public class TaskResponse { + + /** + * 任务编号(对外展示) + */ + private String taskNo; + + /** + * 关联岗位 ID + */ + private Long roleId; + + /** + * 岗位名称 + */ + private String roleName; + + private String prompt; + + private TaskStatus status; + + private String source; + + private String image; + + private String selectedModel; + + private String creator; + + private String createdAt; + + private Usage usage; + + private List estimatedOutput; + + private Report report; + + // ---- 从 configJson 解析的字段 ---- + private String runtimeMode; + private String zzMode; + private String runnerImage; + private String repo; + private String branch; + private String branchName; + private String deliveryMode; + private List mcp; + private List skills; + private List knowledge; + private Map env; + + @Data + public static class Usage { + private Integer tokensUsed; + private Integer inputTokens; + private Integer outputTokens; + private Integer requestCount; + private Long tokenLimit; + private BigDecimal usagePercent; + private String duration; + } + + @Data + public static class Report { + private String summary; + private Integer tokens; + private String duration; + private Integer completion; + private String audit; + private List artifacts; + private String branch; + private String commit; + + @Data + public static class Artifact { + private String name; + private String url; + } + } +} diff --git a/back/src/main/java/com/linkwork/model/dto/TaskShareCreateRequest.java 
b/back/src/main/java/com/linkwork/model/dto/TaskShareCreateRequest.java new file mode 100644 index 0000000..c14cb7a --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/TaskShareCreateRequest.java @@ -0,0 +1,15 @@ +package com.linkwork.model.dto; + +import lombok.Data; + +/** + * 创建任务分享链接请求 + */ +@Data +public class TaskShareCreateRequest { + + /** + * 过期时长(小时) + */ + private Integer expireHours; +} diff --git a/back/src/main/java/com/linkwork/model/dto/TaskShareLinkResponse.java b/back/src/main/java/com/linkwork/model/dto/TaskShareLinkResponse.java new file mode 100644 index 0000000..cf6ad37 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/TaskShareLinkResponse.java @@ -0,0 +1,19 @@ +package com.linkwork.model.dto; + +import com.fasterxml.jackson.annotation.JsonFormat; +import lombok.Data; + +import java.time.LocalDateTime; + +/** + * 任务分享链接响应 + */ +@Data +public class TaskShareLinkResponse { + private String taskId; + private String token; + private String shareUrl; + + @JsonFormat(pattern = "yyyy-MM-dd'T'HH:mm:ss'Z'") + private LocalDateTime expiresAt; +} diff --git a/back/src/main/java/com/linkwork/model/dto/UserSoulResponse.java b/back/src/main/java/com/linkwork/model/dto/UserSoulResponse.java new file mode 100644 index 0000000..e0acbed --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/UserSoulResponse.java @@ -0,0 +1,17 @@ +package com.linkwork.model.dto; + +import lombok.Data; + +import java.time.LocalDateTime; + +@Data +public class UserSoulResponse { + + private String content; + + private String presetId; + + private Long version; + + private LocalDateTime updatedAt; +} diff --git a/back/src/main/java/com/linkwork/model/dto/UserSoulUpsertRequest.java b/back/src/main/java/com/linkwork/model/dto/UserSoulUpsertRequest.java new file mode 100644 index 0000000..e7509bd --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/UserSoulUpsertRequest.java @@ -0,0 +1,17 @@ +package com.linkwork.model.dto; + +import 
jakarta.validation.constraints.NotBlank; +import jakarta.validation.constraints.NotNull; +import lombok.Data; + +@Data +public class UserSoulUpsertRequest { + + @NotBlank(message = "Soul 内容不能为空") + private String content; + + private String presetId; + + @NotNull(message = "version 不能为空") + private Long version; +} diff --git a/back/src/main/java/com/linkwork/model/dto/event/BuildEventData.java b/back/src/main/java/com/linkwork/model/dto/event/BuildEventData.java new file mode 100644 index 0000000..d00ee4a --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/event/BuildEventData.java @@ -0,0 +1,76 @@ +package com.linkwork.model.dto.event; + +import com.fasterxml.jackson.annotation.JsonProperty; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +/** + * 构建事件数据 + * 用于 BUILD_STARTED / BUILD_PROGRESS / BUILD_COMPLETED / BUILD_FAILED / BUILD_PUSHING / BUILD_PUSHED 事件 + */ +@Data +@Builder +@NoArgsConstructor +@AllArgsConstructor +public class BuildEventData { + + /** + * 构建编号 + */ + @JsonProperty("build_no") + private String buildNo; + + /** + * 岗位 ID + */ + @JsonProperty("role_id") + private Long roleId; + + /** + * 岗位名称 + */ + @JsonProperty("role_name") + private String roleName; + + /** + * 基础镜像 + */ + @JsonProperty("base_image") + private String baseImage; + + /** + * 构建产物镜像标签 + */ + @JsonProperty("image_tag") + private String imageTag; + + /** + * 进度消息(用于 BUILD_PROGRESS) + */ + private String message; + + /** + * 进度步骤(用于 BUILD_PROGRESS) + */ + private String step; + + /** + * 错误码(用于 BUILD_FAILED) + */ + @JsonProperty("error_code") + private String errorCode; + + /** + * 错误消息(用于 BUILD_FAILED) + */ + @JsonProperty("error_message") + private String errorMessage; + + /** + * 构建耗时(毫秒) + */ + @JsonProperty("duration_ms") + private Long durationMs; +} diff --git a/back/src/main/java/com/linkwork/model/dto/event/ContainerEventData.java 
b/back/src/main/java/com/linkwork/model/dto/event/ContainerEventData.java new file mode 100644 index 0000000..f3457f6 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/event/ContainerEventData.java @@ -0,0 +1,34 @@ +package com.linkwork.model.dto.event; + +import com.fasterxml.jackson.annotation.JsonProperty; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +/** + * CONTAINER_STARTING / CONTAINER_READY 事件数据 + */ +@Data +@Builder +@NoArgsConstructor +@AllArgsConstructor +public class ContainerEventData { + + /** + * Pod 名称 + */ + @JsonProperty("pod_name") + private String podName; + + /** + * 容器名称 + */ + @JsonProperty("container_name") + private String containerName; + + /** + * 是否就绪(CONTAINER_READY 时填充) + */ + private Boolean ready; +} diff --git a/back/src/main/java/com/linkwork/model/dto/event/EnvSetupData.java b/back/src/main/java/com/linkwork/model/dto/event/EnvSetupData.java new file mode 100644 index 0000000..1b2285a --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/event/EnvSetupData.java @@ -0,0 +1,34 @@ +package com.linkwork.model.dto.event; + +import com.fasterxml.jackson.annotation.JsonProperty; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +/** + * ENV_SETUP / WORKSPACE_INIT 事件数据 + */ +@Data +@Builder +@NoArgsConstructor +@AllArgsConstructor +public class EnvSetupData { + + /** + * Pod 名称 + */ + @JsonProperty("pod_name") + private String podName; + + /** + * 步骤标识 + * 例如:code_clone, workspace_setup, ssh_config 等 + */ + private String step; + + /** + * 步骤描述消息 + */ + private String message; +} diff --git a/back/src/main/java/com/linkwork/model/dto/event/ImageEventData.java b/back/src/main/java/com/linkwork/model/dto/event/ImageEventData.java new file mode 100644 index 0000000..04adb68 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/event/ImageEventData.java @@ -0,0 +1,34 @@ +package 
com.linkwork.model.dto.event; + +import com.fasterxml.jackson.annotation.JsonProperty; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +/** + * IMAGE_PULLING / IMAGE_PULLED 事件数据 + */ +@Data +@Builder +@NoArgsConstructor +@AllArgsConstructor +public class ImageEventData { + + /** + * Pod 名称 + */ + @JsonProperty("pod_name") + private String podName; + + /** + * 容器名称 + */ + @JsonProperty("container_name") + private String containerName; + + /** + * 镜像地址 + */ + private String image; +} diff --git a/back/src/main/java/com/linkwork/model/dto/event/InitCompleteData.java b/back/src/main/java/com/linkwork/model/dto/event/InitCompleteData.java new file mode 100644 index 0000000..4c67fbe --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/event/InitCompleteData.java @@ -0,0 +1,53 @@ +package com.linkwork.model.dto.event; + +import com.fasterxml.jackson.annotation.JsonProperty; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +/** + * INIT_COMPLETE / INIT_FAILED 事件数据 + */ +@Data +@Builder +@NoArgsConstructor +@AllArgsConstructor +public class InitCompleteData { + + /** + * Pod 名称 + */ + @JsonProperty("pod_name") + private String podName; + + /** + * PodGroup 名称 + */ + @JsonProperty("pod_group_name") + private String podGroupName; + + /** + * 就绪 Pod 数量 + */ + @JsonProperty("ready_pods") + private Integer readyPods; + + /** + * 总 Pod 数量 + */ + @JsonProperty("total_pods") + private Integer totalPods; + + /** + * 错误信息(INIT_FAILED 时填充) + */ + @JsonProperty("error_message") + private String errorMessage; + + /** + * 错误码(INIT_FAILED 时填充) + */ + @JsonProperty("error_code") + private String errorCode; +} diff --git a/back/src/main/java/com/linkwork/model/dto/event/PodSchedulingData.java b/back/src/main/java/com/linkwork/model/dto/event/PodSchedulingData.java new file mode 100644 index 0000000..6e58e21 --- /dev/null +++ 
b/back/src/main/java/com/linkwork/model/dto/event/PodSchedulingData.java @@ -0,0 +1,41 @@ +package com.linkwork.model.dto.event; + +import com.fasterxml.jackson.annotation.JsonProperty; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +/** + * POD_SCHEDULING / POD_SCHEDULED 事件数据 + */ +@Data +@Builder +@NoArgsConstructor +@AllArgsConstructor +public class PodSchedulingData { + + /** + * Pod 名称 + */ + @JsonProperty("pod_name") + private String podName; + + /** + * Pod 索引 + */ + @JsonProperty("pod_index") + private Integer podIndex; + + /** + * 节点名称(POD_SCHEDULED 时填充) + */ + @JsonProperty("node_name") + private String nodeName; + + /** + * 队列名称 + */ + @JsonProperty("queue_name") + private String queueName; +} diff --git a/back/src/main/java/com/linkwork/model/dto/event/SessionEventData.java b/back/src/main/java/com/linkwork/model/dto/event/SessionEventData.java new file mode 100644 index 0000000..5bd8069 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/dto/event/SessionEventData.java @@ -0,0 +1,46 @@ +package com.linkwork.model.dto.event; + +import com.fasterxml.jackson.annotation.JsonProperty; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +/** + * SESSION_START / SESSION_END 事件数据 + */ +@Data +@Builder +@NoArgsConstructor +@AllArgsConstructor +public class SessionEventData { + + /** + * PodGroup 名称 + */ + @JsonProperty("pod_group_name") + private String podGroupName; + + /** + * Pod 数量 + */ + @JsonProperty("pod_count") + private Integer podCount; + + /** + * 是否优雅停止(SESSION_END 时填充) + */ + private Boolean graceful; + + /** + * 队列名称 + */ + @JsonProperty("queue_name") + private String queueName; + + /** + * 节点名称 + */ + @JsonProperty("node_name") + private String nodeName; +} diff --git a/back/src/main/java/com/linkwork/model/entity/Approval.java b/back/src/main/java/com/linkwork/model/entity/Approval.java new file mode 100644 
// ==== file: back/src/main/java/com/linkwork/model/entity/Approval.java ====
package com.linkwork.model.entity;

import com.baomidou.mybatisplus.annotation.*;
import lombok.Data;

import java.time.LocalDateTime;

/**
 * Approval record entity, mapped to table {@code linkwork_approval}.
 *
 * <p>One row per human-approval request raised by a running task
 * (e.g. a risky shell command that needs sign-off).
 */
@Data
@TableName("linkwork_approval")
public class Approval {

    @TableId(type = IdType.AUTO)
    private Long id;

    /** Approval number (business key). */
    private String approvalNo;

    /** Task number this approval belongs to. */
    private String taskNo;

    /** momo-worker approval request ID, used to write the response back to a Redis List. */
    private String requestId;

    /** Snapshot of the task title at approval time. */
    private String taskTitle;

    /** The action awaiting approval. */
    private String action;

    /** Risk description shown to the approver. */
    private String description;

    /** Risk level: low / medium / high / critical. */
    private String riskLevel;

    /** Status: pending / approved / rejected / expired. */
    private String status;

    /** Final decision: approved / rejected. */
    private String decision;

    /** Approver's comment. */
    private String comment;

    /** ID of the user who decided. */
    private String operatorId;

    /** Display name of the user who decided. */
    private String operatorName;

    /** Source IP of the approval operation. */
    private String operatorIp;

    /** Expiry time after which the request is auto-expired. */
    private LocalDateTime expiredAt;

    /** Time the decision was made. */
    private LocalDateTime decidedAt;

    /** Creator user ID. */
    private String creatorId;

    /** Creator display name. */
    private String creatorName;

    @TableField(fill = FieldFill.INSERT)
    private LocalDateTime createdAt;

    @TableField(fill = FieldFill.INSERT_UPDATE)
    private LocalDateTime updatedAt;

    // NOTE(review): Integer here while sibling entities use Boolean for the
    // same logical-delete flag — confirm the column types and unify if possible.
    @TableLogic
    private Integer isDeleted;
}

// ==== file: back/src/main/java/com/linkwork/model/entity/BuildRecordEntity.java ====
package com.linkwork.model.entity;

import com.baomidou.mybatisplus.annotation.*;
import com.baomidou.mybatisplus.extension.handlers.JacksonTypeHandler;
import lombok.Data;

import java.time.LocalDateTime;
import java.util.Map;

/**
 * Image build record entity, mapped to {@code linkwork_build_record}.
 *
 * <p>Captures the full lifecycle of a single image build: status,
 * timing, error, log location and a config snapshot.
 */
@Data
@TableName(value = "linkwork_build_record", autoResultMap = true)
public class BuildRecordEntity {

    @TableId(type = IdType.AUTO)
    private Long id;

    /**
     * Unique build number, format {@code build-{timestamp}-{random}}.
     * Generated by the frontend and used to correlate Redis Stream events.
     */
    private String buildNo;

    /** Associated role (workstation) ID. */
    private Long roleId;

    /** Snapshot of the role name at build time. */
    private String roleName;

    /** Build status: PENDING, BUILDING, SUCCESS, FAILED, CANCELLED. */
    private String status;

    /** Resulting image tag. */
    private String imageTag;

    /** Build duration in milliseconds. */
    private Long durationMs;

    /** Failure reason, if any. */
    private String errorMessage;

    /** URL of the build log file (OSS). */
    private String logUrl;

    /**
     * Config snapshot at build time (JSON column): baseImage, envVars,
     * mcpModules, skills, knowledgeBases, etc.
     * Value type is Object since the snapshot is heterogeneous — TODO confirm.
     */
    @TableField(typeHandler = JacksonTypeHandler.class)
    private Map<String, Object> configSnapshot;

    /** Creator user ID. */
    private String creatorId;

    /** Creator display name. */
    private String creatorName;

    @TableField(fill = FieldFill.INSERT)
    private LocalDateTime createdAt;

    @TableField(fill = FieldFill.INSERT_UPDATE)
    private LocalDateTime updatedAt;

    @TableLogic
    private Boolean isDeleted;

    // ========== Status constants ==========
    public static final String STATUS_PENDING = "PENDING";
    public static final String STATUS_BUILDING = "BUILDING";
    public static final String STATUS_SUCCESS = "SUCCESS";
    public static final String STATUS_FAILED = "FAILED";
    public static final String STATUS_CANCELLED = "CANCELLED";
}

// ==== file: back/src/main/java/com/linkwork/model/entity/CronJob.java ====
package com.linkwork.model.entity;

import com.baomidou.mybatisplus.annotation.IdType;
import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.annotation.TableName;
import lombok.Data;

import java.time.LocalDateTime;

/**
 * Scheduled job definition, mapped to {@code linkwork_cron_job}.
 */
@Data
@TableName("linkwork_cron_job")
public class CronJob {
    @TableId(type = IdType.AUTO)
    private Long id;
    /** Display name of the job. */
    private String jobName;
    private String creatorId;
    private String creatorName;
    /** Role (workstation) used to run the job. */
    private Long roleId;
    private String roleName;
    private String modelId;
    /** JSON-encoded list of attached file IDs. */
    private String fileIdsJson;
    /** Schedule kind — presumably cron / interval / once; see cronExpr/intervalMs/runAt. */
    private String scheduleType;
    private String cronExpr;
    private Long intervalMs;
    private LocalDateTime runAt;
    private String timezone;
    /** Task prompt content submitted on each run. */
    private String taskContent;
    private Integer enabled;
    private Integer deleteAfterRun;
    private Integer maxRetry;
    private Integer consecutiveFailures;
    private LocalDateTime nextFireTime;
    private String notifyMode;
    private String notifyTarget;
    private Integer totalRuns;
    private LocalDateTime lastRunTime;
    private String lastRunStatus;
    private LocalDateTime createdAt;
    private LocalDateTime updatedAt;
    private Integer isDeleted;
}

// ==== file: back/src/main/java/com/linkwork/model/entity/CronJobRun.java ====
package com.linkwork.model.entity;

import com.baomidou.mybatisplus.annotation.IdType;
import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.annotation.TableName;
import lombok.Data;

import java.time.LocalDateTime;

/**
 * One execution of a {@link CronJob}, mapped to {@code linkwork_cron_job_run}.
 */
@Data
@TableName("linkwork_cron_job_run")
public class CronJobRun {
    @TableId(type = IdType.AUTO)
    private Long id;
    /** Owning cron job ID. */
    private Long cronJobId;
    /** Task number created by this run. */
    private String taskNo;
    private String creatorId;
    private Long roleId;
    private String status;
    private String triggerType;
    private LocalDateTime plannedFireTime;
    private LocalDateTime startedAt;
    private LocalDateTime finishedAt;
    private Long durationMs;
    private String errorMessage;
    private LocalDateTime createdAt;
}

// ==== file: back/src/main/java/com/linkwork/model/entity/FileNodeEntity.java ====
package com.linkwork.model.entity;

import com.baomidou.mybatisplus.annotation.FieldFill;
import com.baomidou.mybatisplus.annotation.IdType;
import com.baomidou.mybatisplus.annotation.TableField;
import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.annotation.TableName;
import lombok.Data;

import java.time.LocalDateTime;

/**
 * Virtual filesystem node (folder/file entry), mapped to {@code linkwork_file_node}.
 */
@Data
@TableName("linkwork_file_node")
public class FileNodeEntity {

    @TableId(type = IdType.AUTO)
    private Long id;

    /** Business node ID. */
    private String nodeId;

    /** Parent node ID. */
    private String parentId;

    /** Entry type — presumably file/folder; TODO confirm allowed values. */
    private String entryType;

    private String name;

    private String spaceType;

    private String workstationId;

    private String userId;

    /** Backing file ID when the entry is a file. */
    private String fileId;

    @TableField(fill = FieldFill.INSERT)
    private LocalDateTime createdAt;

    // NOTE(review): fill is INSERT, not INSERT_UPDATE — updatedAt will not
    // auto-refresh on updates. Confirm whether that is intentional.
    @TableField(fill = FieldFill.INSERT)
    private LocalDateTime updatedAt;

    /** Soft-delete timestamp (null = alive); no @TableLogic here. */
    private LocalDateTime deletedAt;
}

// ==== file: back/src/main/java/com/linkwork/model/entity/GitLabAuthEntity.java ====
package com.linkwork.model.entity;

import com.baomidou.mybatisplus.annotation.*;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import com.fasterxml.jackson.databind.ser.std.ToStringSerializer;
import lombok.Data;

import java.time.LocalDateTime;

/**
 * GitLab OAuth credential for a user, mapped to {@code linkwork_user_auth_gitlab}.
 *
 * <p>NOTE(review): accessToken/refreshToken appear to be persisted as-is —
 * verify they are encrypted at rest and never serialized to API responses.
 */
@Data
@TableName("linkwork_user_auth_gitlab")
public class GitLabAuthEntity {

    /** Snowflake ID; serialized as a string to avoid JS precision loss. */
    @TableId(type = IdType.ASSIGN_ID)
    @JsonSerialize(using = ToStringSerializer.class)
    private Long id;

    private String userId;

    /** Numeric GitLab user ID. */
    private Long gitlabId;

    private String username;

    private String name;

    private String avatarUrl;

    private String accessToken;

    private String refreshToken;

    private String tokenAlias;

    private LocalDateTime expiresAt;

    private String scope;

    @TableField(fill = FieldFill.INSERT)
    private LocalDateTime createdAt;

    @TableField(fill = FieldFill.INSERT_UPDATE)
    private LocalDateTime updatedAt;

    @TableLogic
    private Boolean isDeleted;
}

// ==== file: back/src/main/java/com/linkwork/model/entity/McpServerEntity.java ====
package com.linkwork.model.entity;

import com.baomidou.mybatisplus.annotation.*;
import com.baomidou.mybatisplus.extension.handlers.JacksonTypeHandler;
import lombok.Data;

import java.time.LocalDateTime;
import java.util.List;
import java.util.Map;

/**
 * MCP server registration entity, mapped to {@code linkwork_mcp_server}.
 *
 * <p>Nullable text columns use {@code FieldStrategy.ALWAYS} so that clearing
 * them (setting null) is written through on update.
 */
@Data
@TableName(value = "linkwork_mcp_server", autoResultMap = true)
public class McpServerEntity {

    @TableId(type = IdType.AUTO)
    private Long id;

    /** MCP number, format: MCP-{timestamp}. */
    private String mcpNo;

    /** Service name. */
    private String name;

    /** Service endpoint address. */
    @TableField(updateStrategy = FieldStrategy.ALWAYS)
    private String endpoint;

    /** Service description. */
    @TableField(updateStrategy = FieldStrategy.ALWAYS)
    private String description;

    /** Visibility: public / private. */
    private String visibility;

    /** Status: online / offline / degraded / unknown. */
    private String status;

    /** MCP transport type: http / sse. */
    private String type;

    /** Service URL (http/sse). */
    @TableField(updateStrategy = FieldStrategy.ALWAYS)
    private String url;

    /** HTTP request headers (JSON column). */
    @TableField(typeHandler = JacksonTypeHandler.class, updateStrategy = FieldStrategy.ALWAYS)
    private Map<String, String> headers;

    /** Network zone: internal (server LAN), office, external (internet). */
    private String networkZone;

    /** Health-check URL. */
    @TableField(updateStrategy = FieldStrategy.ALWAYS)
    private String healthCheckUrl;

    /** Latest probe latency in milliseconds. */
    private Integer healthLatencyMs;

    /** Latest probe message. */
    private String healthMessage;

    /** Consecutive health-check failures. */
    private Integer consecutiveFailures;

    /** Version string. */
    @TableField(updateStrategy = FieldStrategy.ALWAYS)
    private String version;

    /** Tags (JSON column). */
    @TableField(typeHandler = JacksonTypeHandler.class, updateStrategy = FieldStrategy.ALWAYS)
    private List<String> tags;

    /** Last health-check time. */
    private LocalDateTime lastHealthAt;

    /** Service configuration (JSON column); heterogeneous values — TODO confirm. */
    @TableField(typeHandler = JacksonTypeHandler.class, updateStrategy = FieldStrategy.ALWAYS)
    private Map<String, Object> configJson;

    private String creatorId;

    private String creatorName;

    private String updaterId;

    private String updaterName;

    @TableField(fill = FieldFill.INSERT)
    private LocalDateTime createdAt;

    @TableField(fill = FieldFill.INSERT_UPDATE)
    private LocalDateTime updatedAt;

    @TableLogic
    private Boolean isDeleted;
}

// ==== file: back/src/main/java/com/linkwork/model/entity/McpUsageDailyEntity.java ====
package com.linkwork.model.entity;

import com.baomidou.mybatisplus.annotation.*;
import lombok.Data;

import java.time.LocalDate;
import java.time.LocalDateTime;

/**
 * Daily MCP usage rollup per user/server, mapped to {@code linkwork_mcp_usage_daily}.
 */
@Data
@TableName("linkwork_mcp_usage_daily")
public class McpUsageDailyEntity {

    @TableId(type = IdType.AUTO)
    private Long id;

    /** Aggregation day. */
    private LocalDate date;

    private String userId;

    private String mcpName;

    /** Number of calls made that day. */
    private Integer callCount;

    /** Total request bytes. */
    private Long reqBytes;

    /** Total response bytes. */
    private Long respBytes;

    @TableField(fill = FieldFill.INSERT)
    private LocalDateTime createdAt;

    @TableField(fill = FieldFill.INSERT_UPDATE)
    private LocalDateTime updatedAt;
}

// ==== file: back/src/main/java/com/linkwork/model/entity/McpUserConfigEntity.java ====
package com.linkwork.model.entity;

import com.baomidou.mybatisplus.annotation.*;
import com.baomidou.mybatisplus.extension.handlers.JacksonTypeHandler;
import lombok.Data;

import java.time.LocalDateTime;
import java.util.Map;

/**
 * Per-user MCP server overrides (headers / URL params),
 * mapped to {@code linkwork_mcp_user_config}.
 */
@Data
@TableName(value = "linkwork_mcp_user_config", autoResultMap = true)
public class McpUserConfigEntity {

    @TableId(type = IdType.AUTO)
    private Long id;

    private String userId;

    private Long mcpServerId;

    /** Extra HTTP headers for this user (JSON column). */
    @TableField(typeHandler = JacksonTypeHandler.class)
    private Map<String, String> headers;

    /** Extra URL parameters for this user (JSON column). */
    @TableField(typeHandler = JacksonTypeHandler.class)
    private Map<String, String> urlParams;

    @TableField(fill = FieldFill.INSERT)
    private LocalDateTime createdAt;

    @TableField(fill = FieldFill.INSERT_UPDATE)
    private LocalDateTime updatedAt;

    @TableLogic
    private Boolean isDeleted;
}

// ==== file: back/src/main/java/com/linkwork/model/entity/RoleEntity.java ====
package com.linkwork.model.entity;

import com.baomidou.mybatisplus.annotation.*;
import com.baomidou.mybatisplus.extension.handlers.JacksonTypeHandler;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import lombok.Data;

import java.time.LocalDateTime;
import java.util.List;

/**
 * Role (workstation) entity, mapped to {@code linkwork_workstation}.
 */
@Data
@TableName(value = "linkwork_workstation", autoResultMap = true)
public class RoleEntity {

    @TableId(type = IdType.AUTO)
    private Long id;

    private String roleNo;

    private String name;

    private String description;

    /** Category: devops, security, developer, research. */
    private String category;

    private String icon;

    /** Agent image used by this role. */
    private String image;

    /** System prompt for the role. */
    private String prompt;

    /** Status: active, maintenance, disabled. */
    private String status;

    /** Structured role configuration (JSON column). */
    @TableField(typeHandler = JacksonTypeHandler.class)
    private RoleConfig configJson;

    private Boolean isPublic;

    private Integer maxEmployees;

    private String creatorId;

    private String creatorName;

    private String updaterId;

    private String updaterName;

    @TableField(fill = FieldFill.INSERT)
    private LocalDateTime createdAt;

    @TableField(fill = FieldFill.INSERT_UPDATE)
    private LocalDateTime updatedAt;

    @TableLogic
    private Boolean isDeleted;

    /**
     * Nested role configuration persisted as JSON.
     * Unknown JSON properties are ignored for forward compatibility.
     */
    @Data
    @JsonIgnoreProperties(ignoreUnknown = true)
    public static class RoleConfig {
        /** MCP server IDs — element type inferred from "List of IDs"; TODO confirm. */
        private List<Long> mcp;
        /** Skill IDs/names. */
        private List<String> skills;
        /** Knowledge-base IDs/names. */
        private List<String> knowledge;
        /** Deploy mode: K8S / COMPOSE. */
        private String deployMode;
        /** Runtime mode: SIDECAR / ALONE. */
        private String runtimeMode;
        /** Runner image; effective only in SIDECAR mode. */
        private String runnerImage;
        /** Role-level memory switch. */
        private Boolean memoryEnabled;
        private List<GitRepo> gitRepos;
        private List<EnvVar> env;

        /** Git repository reference (url + branch). */
        @Data
        public static class GitRepo {
            private String url;
            private String branch;
        }

        /** Environment variable key/value pair. */
        @Data
        public static class EnvVar {
            private String key;
            private String value;
        }
    }
}

// ==== file: back/src/main/java/com/linkwork/model/entity/SecurityPolicy.java ====
package com.linkwork.model.entity;

import com.baomidou.mybatisplus.annotation.*;
import lombok.Data;

import java.time.LocalDateTime;

/**
 * Security policy entity, mapped to {@code linkwork_security_policy}.
 */
@Data
@TableName("linkwork_security_policy")
public class SecurityPolicy {

    @TableId(type = IdType.AUTO)
    private Long id;

    /** Policy name. */
    private String name;

    /** Policy description. */
    private String description;

    /** Type: system / custom. */
    private String type;

    /** Whether the policy is enabled. */
    private Boolean enabled;

    /** Rule list as raw JSON text. */
    private String rulesJson;

    private String creatorId;

    private String creatorName;

    @TableField(fill = FieldFill.INSERT)
    private LocalDateTime createdAt;

    @TableField(fill = FieldFill.INSERT_UPDATE)
    private LocalDateTime updatedAt;

    // NOTE(review): Integer vs Boolean logical-delete flag inconsistency
    // across entities — confirm column type.
    @TableLogic
    private Integer isDeleted;
}

// ==== file: back/src/main/java/com/linkwork/model/entity/SkillEntity.java ====
package com.linkwork.model.entity;

import com.baomidou.mybatisplus.annotation.*;
import lombok.Data;

import java.time.LocalDateTime;

/**
 * Skill entity, mapped to {@code linkwork_skill}.
 */
@Data
@TableName(value = "linkwork_skill", autoResultMap = true)
public class SkillEntity {

    @TableId(type = IdType.AUTO)
    private Long id;

    /** Skill number, format: SKL-{timestamp}. */
    private String skillNo;

    /** Unique skill identifier name. */
    private String name;

    /** Display name. */
    private String displayName;

    /** Skill description. */
    private String description;

    /** Skill implementation code. */
    private String implementation;

    /** Status: draft / ready / disabled. */
    private String status;

    /** Whether the skill is public. */
    private Boolean isPublic;

    /** Git branch name. */
    private String branchName;

    /** Latest commit SHA. */
    private String latestCommit;

    /** Last sync time. */
    private LocalDateTime lastSyncedAt;

    private String creatorId;

    private String creatorName;

    private String updaterId;

    private String updaterName;

    @TableField(fill = FieldFill.INSERT)
    private LocalDateTime createdAt;

    @TableField(fill = FieldFill.INSERT_UPDATE)
    private LocalDateTime updatedAt;

    @TableLogic
    private Boolean isDeleted;
}
// ==== file: back/src/main/java/com/linkwork/model/entity/Task.java ====
package com.linkwork.model.entity;

import com.baomidou.mybatisplus.annotation.*;
import com.linkwork.model.enums.TaskStatus;
import lombok.Data;

import java.math.BigDecimal;
import java.time.LocalDateTime;

/**
 * Task entity, mapped to table {@code linkwork_task}.
 *
 * <p>One row per agent task execution; tracks prompt, status, token
 * accounting and the final report.
 */
@Data
@TableName("linkwork_task")
public class Task {

    /** Auto-increment primary key. */
    @TableId(type = IdType.AUTO)
    private Long id;

    /**
     * Task number, format {@code MSN-{yyyyMMddHHmmssSSS}}.
     * This is the externally visible identifier.
     */
    private String taskNo;

    /** Associated role (workstation) ID. */
    private Long roleId;

    /** Snapshot of the role name at creation time. */
    private String roleName;

    /** User prompt submitted for the task. */
    private String prompt;

    /** Execution status (enum persisted via its code). */
    private TaskStatus status;

    /** Container image used for the run. */
    private String image;

    /** Model selected for the run. */
    private String selectedModel;

    private Long assemblyId;

    /** Raw JSON configuration blob. */
    private String configJson;

    /** Origin of the task: MANUAL / CRON. */
    private String source;

    /** {@code linkwork_cron_job.id} when the task was created by a cron job. */
    private Long cronJobId;

    private String creatorId;

    private String creatorName;

    private String creatorIp;

    private String updaterId;

    private String updaterName;

    /** Total tokens consumed. */
    private Integer tokensUsed;

    private Integer inputTokens;

    private Integer outputTokens;

    /** Number of LLM requests made. */
    private Integer requestCount;

    /** Token budget for the task. */
    private Long tokenLimit;

    /** Percentage of the token budget consumed. */
    private BigDecimal usagePercent;

    /** Wall-clock duration in milliseconds. */
    private Long durationMs;

    /** Final report as raw JSON text. */
    private String reportJson;

    @TableField(fill = FieldFill.INSERT)
    private LocalDateTime createdAt;

    @TableField(fill = FieldFill.INSERT_UPDATE)
    private LocalDateTime updatedAt;

    @TableLogic
    private Integer isDeleted;
}
// ==== file: back/src/main/java/com/linkwork/model/entity/TaskGitAuthEntity.java ====
package com.linkwork.model.entity;

import com.baomidou.mybatisplus.annotation.FieldFill;
import com.baomidou.mybatisplus.annotation.IdType;
import com.baomidou.mybatisplus.annotation.TableField;
import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.annotation.TableLogic;
import com.baomidou.mybatisplus.annotation.TableName;
import lombok.Data;

import java.time.LocalDateTime;

/**
 * Mapping between a task and the Git credentials it runs with,
 * mapped to {@code linkwork_task_git_auth}.
 */
@Data
@TableName("linkwork_task_git_auth")
public class TaskGitAuthEntity {

    /** Task ID is the primary key; supplied by the caller (IdType.INPUT). */
    @TableId(value = "task_id", type = IdType.INPUT)
    private String taskId;

    private String userId;

    /** Git provider — only GitLab is referenced here; TODO confirm values. */
    private String provider;

    /** Foreign key to {@code linkwork_user_auth_gitlab.id}. */
    private Long gitlabAuthId;

    /** Credential expiry. */
    private LocalDateTime expiresAt;

    @TableField(fill = FieldFill.INSERT)
    private LocalDateTime createdAt;

    @TableField(fill = FieldFill.INSERT_UPDATE)
    private LocalDateTime updatedAt;

    @TableLogic
    private Boolean isDeleted;
}

// ==== file: back/src/main/java/com/linkwork/model/entity/UserFavoriteRoleEntity.java ====
package com.linkwork.model.entity;

import com.baomidou.mybatisplus.annotation.FieldFill;
import com.baomidou.mybatisplus.annotation.IdType;
import com.baomidou.mybatisplus.annotation.TableField;
import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.annotation.TableName;
import lombok.Data;

import java.time.LocalDateTime;

/**
 * User's favorited role (workstation), mapped to
 * {@code linkwork_user_favorite_workstation}.
 */
@Data
@TableName("linkwork_user_favorite_workstation")
public class UserFavoriteRoleEntity {

    @TableId(type = IdType.AUTO)
    private Long id;

    private String userId;

    /** Favorited role (workstation) ID. */
    private Long roleId;

    @TableField(fill = FieldFill.INSERT)
    private LocalDateTime createdAt;
}
// ==== file: back/src/main/java/com/linkwork/model/entity/UserSoulEntity.java ====
package com.linkwork.model.entity;

import com.baomidou.mybatisplus.annotation.FieldFill;
import com.baomidou.mybatisplus.annotation.IdType;
import com.baomidou.mybatisplus.annotation.TableField;
import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.annotation.TableLogic;
import com.baomidou.mybatisplus.annotation.TableName;
import lombok.Data;

import java.time.LocalDateTime;

/**
 * Per-user "soul" (persona) record, mapped to {@code linkwork_user_soul}.
 *
 * <p>Several fields are marked {@code exist = false}: they are API-level
 * attributes with no backing column.
 */
@Data
@TableName("linkwork_user_soul")
public class UserSoulEntity {

    @TableId(type = IdType.AUTO)
    private Long id;

    private String userId;

    /** Persona text; stored in column {@code soul}. */
    @TableField("soul")
    private String content;

    /** Preset/template identifier; stored in column {@code template_id}. */
    @TableField("template_id")
    private String presetId;

    /** Not persisted — transient version marker. */
    @TableField(exist = false)
    private Long version;

    private String creatorId;

    private String creatorName;

    /** Not persisted. */
    @TableField(exist = false)
    private String updaterId;

    /** Not persisted. */
    @TableField(exist = false)
    private String updaterName;

    @TableField(fill = FieldFill.INSERT)
    private LocalDateTime createdAt;

    @TableField(fill = FieldFill.INSERT_UPDATE)
    private LocalDateTime updatedAt;

    @TableLogic
    private Integer isDeleted;
}

// ==== file: back/src/main/java/com/linkwork/model/entity/WorkspaceFile.java ====
package com.linkwork.model.entity;

import com.baomidou.mybatisplus.annotation.FieldFill;
import com.baomidou.mybatisplus.annotation.IdType;
import com.baomidou.mybatisplus.annotation.TableField;
import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.annotation.TableName;
import lombok.Data;

import java.time.LocalDateTime;

/**
 * Uploaded workspace file metadata, mapped to {@code linkwork_file}.
 */
@Data
@TableName("linkwork_file")
public class WorkspaceFile {

    @TableId(type = IdType.AUTO)
    private Long id;

    /** Business file ID. */
    private String fileId;

    private String fileName;

    /** Size in bytes. */
    private Long fileSize;

    private String fileType;

    private String contentType;

    private String spaceType;

    private String workstationId;

    private String userId;

    /** Object-storage path of the raw file. */
    private String ossPath;

    /** Object-storage path of the parsed artifact. */
    private String parsedOssPath;

    private String parseStatus;

    private String memoryIndexStatus;

    /** Content hash — presumably for dedup; algorithm not visible here. */
    private String fileHash;

    @TableField(fill = FieldFill.INSERT)
    private LocalDateTime createdAt;

    // NOTE(review): fill is INSERT, not INSERT_UPDATE — confirm updatedAt is
    // maintained manually elsewhere.
    @TableField(fill = FieldFill.INSERT)
    private LocalDateTime updatedAt;

    /** Soft-delete timestamp (null = alive). */
    private LocalDateTime deletedAt;
}

// ==== file: back/src/main/java/com/linkwork/model/enums/ConflictPolicy.java ====
package com.linkwork.model.enums;

import java.util.Locale;

/**
 * Policy applied when a file/node name conflict occurs.
 */
public enum ConflictPolicy {
    REJECT,
    OVERWRITE,
    RENAME;

    /**
     * Parses a policy name, case-insensitively.
     * Null, blank, or unrecognized input falls back to {@link #REJECT}.
     */
    public static ConflictPolicy fromString(String value) {
        if (value == null || value.isBlank()) {
            return REJECT;
        }
        try {
            return valueOf(value.toUpperCase(Locale.ROOT));
        } catch (IllegalArgumentException e) {
            // Unknown policy names are treated as the safest default.
            return REJECT;
        }
    }
}

// ==== file: back/src/main/java/com/linkwork/model/enums/ContainerEventType.java ====
package com.linkwork.model.enums;

/**
 * Container log event types, following the data-format.md specification.
 */
public enum ContainerEventType {

    // ---- Scheduling phase ----
    /** Pod is waiting to be scheduled. */
    POD_SCHEDULING,
    /** Pod has been assigned to a node. */
    POD_SCHEDULED,

    // ---- Image phase ----
    /** Container image pull in progress. */
    IMAGE_PULLING,
    /** Container image pull finished. */
    IMAGE_PULLED,

    // ---- Startup phase ----
    /** Container is starting. */
    CONTAINER_STARTING,
    /** Container is ready (health check passed). */
    CONTAINER_READY,

    // ---- Environment phase ----
    /** Environment initialization step. */
    ENV_SETUP,
    /** Workspace initialization. */
    WORKSPACE_INIT,

    // ---- Completion phase ----
    /** All initialization complete. */
    INIT_COMPLETE,
    /** Initialization failed. */
    INIT_FAILED,

    // ---- Lifecycle events ----
    /** Session started (agent up). */
    SESSION_START,
    /** Session ended (service stopped). */
    SESSION_END,

    // ---- Image-build phase ----
    /** Build started. */
    BUILD_STARTED,
    /** Build progress (stage status). */
    BUILD_PROGRESS,
    /** Build log output (live Docker log line). */
    BUILD_LOG,
    /** Build completed successfully. */
    BUILD_COMPLETED,
    /** Build failed. */
    BUILD_FAILED,
    /** Pushing image to registry. */
    BUILD_PUSHING,
    /** Image push finished. */
    BUILD_PUSHED
}

// ==== file: back/src/main/java/com/linkwork/model/enums/DeployMode.java ====
package com.linkwork.model.enums;

/**
 * Deployment mode for a role runtime.
 */
public enum DeployMode {
    /** Kubernetes deployment. */
    K8S,
    /** Docker Compose deployment (only supports Alone mode). */
    COMPOSE
}

// ==== file: back/src/main/java/com/linkwork/model/enums/PodMode.java ====
package com.linkwork.model.enums;

/**
 * Pod composition mode.
 */
public enum PodMode {
    /** Two containers: Agent + Runner. */
    SIDECAR,
    /** Single all-in-one container. */
    ALONE
}
a/back/src/main/java/com/linkwork/model/enums/ResumeType.java b/back/src/main/java/com/linkwork/model/enums/ResumeType.java new file mode 100644 index 0000000..bc6345f --- /dev/null +++ b/back/src/main/java/com/linkwork/model/enums/ResumeType.java @@ -0,0 +1,16 @@ +package com.linkwork.model.enums; + +/** + * 恢复类型 + */ +public enum ResumeType { + /** + * 温启动:命中镜像缓存,~20s + */ + WARM, + + /** + * 冷启动:未命中,需重新拉取镜像,~90s + */ + COLD +} diff --git a/back/src/main/java/com/linkwork/model/enums/ServiceType.java b/back/src/main/java/com/linkwork/model/enums/ServiceType.java new file mode 100644 index 0000000..5bd3613 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/enums/ServiceType.java @@ -0,0 +1,9 @@ +package com.linkwork.model.enums; + +/** + * 服务类型 + */ +public enum ServiceType { + CODE_WRITING, // 代码编写 + TASK_EXECUTION // 任务执行 +} diff --git a/back/src/main/java/com/linkwork/model/enums/SnapshotStatus.java b/back/src/main/java/com/linkwork/model/enums/SnapshotStatus.java new file mode 100644 index 0000000..163d766 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/enums/SnapshotStatus.java @@ -0,0 +1,16 @@ +package com.linkwork.model.enums; + +/** + * 快照状态 + */ +public enum SnapshotStatus { + /** + * 可用于恢复 + */ + ACTIVE, + + /** + * 已过期 + */ + EXPIRED +} diff --git a/back/src/main/java/com/linkwork/model/enums/TaskOutputType.java b/back/src/main/java/com/linkwork/model/enums/TaskOutputType.java new file mode 100644 index 0000000..d3e35a3 --- /dev/null +++ b/back/src/main/java/com/linkwork/model/enums/TaskOutputType.java @@ -0,0 +1,133 @@ +package com.linkwork.model.enums; + +import lombok.Getter; + +import java.util.Locale; +import java.util.Optional; + +/** + * 任务产出预估类型枚举 + */ +@Getter +public enum TaskOutputType { + GIT_BRANCH("git_branch", "code_delivery", "代码改动预计会落到 Git 分支"), + PULL_REQUEST("pull_request", "code_delivery", "预计会产出 PR/MR 链接"), + PYTHON_FILE("python_file", "code_delivery", "预计产出 Python 源码文件"), + JAVA_FILE("java_file", "code_delivery", 
"预计产出 Java 源码文件"), + JAVASCRIPT_FILE("javascript_file", "code_delivery", "预计产出 JavaScript 源码文件"), + TYPESCRIPT_FILE("typescript_file", "code_delivery", "预计产出 TypeScript 源码文件"), + SQL_FILE("sql_file", "code_delivery", "预计产出 SQL 脚本"), + SHELL_SCRIPT("shell_script", "code_delivery", "预计产出 Shell 脚本"), + CONFIG_FILE("config_file", "code_delivery", "预计产出配置文件"), + + TXT("txt", "document_delivery", "预计产出纯文本文件"), + MARKDOWN("markdown", "document_delivery", "预计产出 Markdown 文档"), + WORD("word", "document_delivery", "预计产出 Word 文档"), + EXCEL("excel", "document_delivery", "预计产出 Excel 表格"), + PPT("ppt", "document_delivery", "预计产出 PPT 文档"), + PDF("pdf", "document_delivery", "预计产出 PDF 文档"), + + JSON("json", "data_media_delivery", "预计产出 JSON 数据文件"), + CSV("csv", "data_media_delivery", "预计产出 CSV 数据文件"), + PNG("png", "data_media_delivery", "预计产出图片文件"), + + ARCHIVE("archive", "package_delivery", "预计产出压缩包"), + API_CALL_RESULT("api_call_result", "integration_delivery", "仅通过 API 调用产生结果"), + DIALOG_CONCLUSION("dialog_conclusion", "dialog_delivery", "无文件产出,仅对话结论"); + + private final String code; + private final String domain; + private final String description; + + TaskOutputType(String code, String domain, String description) { + this.code = code; + this.domain = domain; + this.description = description; + } + + public static Optional fromCode(String code) { + if (code == null || code.isBlank()) { + return Optional.empty(); + } + for (TaskOutputType type : values()) { + if (type.code.equalsIgnoreCase(code)) { + return Optional.of(type); + } + } + return Optional.empty(); + } + + public static Optional fromFileName(String fileName) { + if (fileName == null || fileName.isBlank()) { + return Optional.empty(); + } + + String lower = fileName.toLowerCase(Locale.ROOT); + + if (hasAnySuffix(lower, ".zip", ".tar", ".tar.gz", ".tgz", ".7z", ".rar")) { + return Optional.of(ARCHIVE); + } + + if (hasAnySuffix(lower, ".xlsx", ".xls")) { + return Optional.of(EXCEL); + } + if (hasAnySuffix(lower, ".csv")) 
{ + return Optional.of(CSV); + } + if (hasAnySuffix(lower, ".docx", ".doc")) { + return Optional.of(WORD); + } + if (hasAnySuffix(lower, ".ppt", ".pptx")) { + return Optional.of(PPT); + } + if (hasAnySuffix(lower, ".pdf")) { + return Optional.of(PDF); + } + if (hasAnySuffix(lower, ".md", ".markdown")) { + return Optional.of(MARKDOWN); + } + if (hasAnySuffix(lower, ".txt", ".log")) { + return Optional.of(TXT); + } + + if (hasAnySuffix(lower, ".py")) { + return Optional.of(PYTHON_FILE); + } + if (hasAnySuffix(lower, ".java")) { + return Optional.of(JAVA_FILE); + } + if (hasAnySuffix(lower, ".ts", ".tsx")) { + return Optional.of(TYPESCRIPT_FILE); + } + if (hasAnySuffix(lower, ".js", ".jsx", ".mjs", ".cjs")) { + return Optional.of(JAVASCRIPT_FILE); + } + if (hasAnySuffix(lower, ".sql")) { + return Optional.of(SQL_FILE); + } + if (hasAnySuffix(lower, ".sh", ".bash", ".zsh")) { + return Optional.of(SHELL_SCRIPT); + } + + if (hasAnySuffix(lower, ".yaml", ".yml", ".toml", ".ini", ".properties", ".env", ".conf", ".xml")) { + return Optional.of(CONFIG_FILE); + } + if (hasAnySuffix(lower, ".json")) { + return Optional.of(JSON); + } + if (hasAnySuffix(lower, ".png", ".jpg", ".jpeg", ".svg", ".webp")) { + return Optional.of(PNG); + } + + return Optional.empty(); + } + + private static boolean hasAnySuffix(String value, String... 
suffixes) {  // varargs continuation of hasAnySuffix(String value, String... suffixes) — signature begins on the previous line
+        for (String suffix : suffixes) {
+            if (value.endsWith(suffix)) {
+                return true;
+            }
+        }
+        return false;
+    }
+}
diff --git a/back/src/main/java/com/linkwork/model/enums/TaskStatus.java b/back/src/main/java/com/linkwork/model/enums/TaskStatus.java
new file mode 100644
index 0000000..d2480fe
--- /dev/null
+++ b/back/src/main/java/com/linkwork/model/enums/TaskStatus.java
@@ -0,0 +1,28 @@
+package com.linkwork.model.enums;
+
+import com.baomidou.mybatisplus.annotation.EnumValue;
+import com.fasterxml.jackson.annotation.JsonValue;
+import lombok.Getter;
+
+/**
+ * Task status enum: {@code code} is persisted by MyBatis-Plus (@EnumValue) and serialized by Jackson (@JsonValue).
+ */
+@Getter
+public enum TaskStatus {
+    PENDING("pending", "等待执行"),
+    RUNNING("running", "执行中"),
+    COMPLETED("completed", "已完成"),
+    FAILED("failed", "执行失败"),
+    ABORTED("aborted", "已终止"),
+    PENDING_AUTH("pending_auth", "等待人工授权");
+
+    @EnumValue
+    @JsonValue
+    private final String code;
+    private final String description;
+
+    TaskStatus(String code, String description) {
+        this.code = code;
+        this.description = description;
+    }
+}
diff --git a/back/src/main/java/com/linkwork/service/AdminAccessService.java b/back/src/main/java/com/linkwork/service/AdminAccessService.java
new file mode 100644
index 0000000..b8af703
--- /dev/null
+++ b/back/src/main/java/com/linkwork/service/AdminAccessService.java
@@ -0,0 +1,33 @@
+package com.linkwork.service;
+
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.stereotype.Service;
+import org.springframework.util.StringUtils;
+
+import java.util.Arrays;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * Centralized admin-access check (single source of truth, avoids per-module config drift).
+ */
+@Service
+public class AdminAccessService {
+
+    @Value("${robot.admin-user-ids:}")
+    private String adminUserIdsConfig;  // comma-separated admin user ids; empty default => nobody is admin
+
+    public boolean isAdmin(String userId) {  // blank/null userId is never admin; ids are compared after trim
+        return StringUtils.hasText(userId) && getAdminUserIds().contains(userId.trim());
+    }
+
+    private Set getAdminUserIds() {  // NOTE(review): generic type arguments (e.g. Set<String>) appear stripped by the paste/mangling — confirm against the original file
+        if (!StringUtils.hasText(adminUserIdsConfig)) {
+            return Set.of();
+        }
+        return Arrays.stream(adminUserIdsConfig.split(","))
+                .map(String::trim)
+                .filter(StringUtils::hasText)
+                .collect(Collectors.toSet());
+    }
+}
diff --git a/back/src/main/java/com/linkwork/service/ApprovalRequestConsumer.java b/back/src/main/java/com/linkwork/service/ApprovalRequestConsumer.java
new file mode 100644
index 0000000..3b7d351
--- /dev/null
+++ b/back/src/main/java/com/linkwork/service/ApprovalRequestConsumer.java
@@ -0,0 +1,269 @@
+package com.linkwork.service;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.linkwork.config.DispatchConfig;
+import com.linkwork.model.entity.Approval;
+import jakarta.annotation.PreDestroy;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.boot.context.event.ApplicationReadyEvent;
+import org.springframework.context.event.EventListener;
+import org.springframework.data.redis.connection.stream.StreamRecords;
+import org.springframework.data.redis.core.StringRedisTemplate;
+import org.springframework.stereotype.Component;
+
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * Approval-request consumer.
+ *
+ * <p>
+ * 监听 momo-worker 写入的审批请求队列 (Redis List), + * 自动创建审批记录到数据库。 + *
+ * <p>
+ * 队列 Key: approval:{workstationId} + * 消息格式: + *
+ * <pre>
+ * {
+ *   "request_id": "req-uuid",
+ *   "task_id": "test-task-001",
+ *   "tool_name": "Bash",
+ *   "command": "sudo whoami",
+ *   "risk_level": "high",
+ *   "timestamp": "2026-02-10T10:00:00Z"
+ * }
+ * 
+ */ +@Slf4j +@Component +@RequiredArgsConstructor +public class ApprovalRequestConsumer { + + private final StringRedisTemplate redisTemplate; + private final DispatchConfig dispatchConfig; + private final ApprovalService approvalService; + private final ObjectMapper objectMapper; + private final TaskService taskService; + + private final AtomicBoolean running = new AtomicBoolean(false); + private final ExecutorService executor = Executors.newSingleThreadExecutor(r -> { + Thread t = new Thread(r, "approval-consumer"); + t.setDaemon(true); + return t; + }); + + @EventListener(ApplicationReadyEvent.class) + public void onApplicationReady() { + running.set(true); + executor.submit(this::consumeLoop); + log.info("审批请求消费者已启动,监听队列模式: {}", dispatchConfig.getApprovalRequestKeyPattern()); + } + + @PreDestroy + public void shutdown() { + running.set(false); + executor.shutdownNow(); + log.info("审批请求消费者已停止"); + } + + private void consumeLoop() { + while (running.get() && !Thread.currentThread().isInterrupted()) { + try { + boolean consumed = false; + for (String queueKey : resolveApprovalRequestQueues()) { + String message = redisTemplate.opsForList().rightPop(queueKey); + if (message == null) { + continue; + } + processApprovalRequest(message); + consumed = true; + break; + } + if (!consumed) { + Thread.sleep(500); + } + } catch (Exception e) { + if (e instanceof InterruptedException) { + Thread.currentThread().interrupt(); + break; + } + if (running.get()) { + log.error("消费审批请求异常,5秒后重试", e); + try { Thread.sleep(5000); } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + break; + } + } + } + } + } + + private void processApprovalRequest(String message) { + try { + JsonNode json = objectMapper.readTree(message); + + String requestId = json.has("request_id") ? json.get("request_id").asText() : null; + String taskId = json.has("task_id") ? json.get("task_id").asText() : null; + String command = json.has("command") ? 
json.get("command").asText() : ""; + String reason = json.has("reason") ? json.get("reason").asText() : ""; + + // tool_name 优先,否则从 command_type 推导 + String toolName = json.has("tool_name") ? json.get("tool_name").asText() : null; + if (toolName == null || toolName.isEmpty()) { + String commandType = json.has("command_type") ? json.get("command_type").asText() : ""; + toolName = mapCommandTypeToToolName(commandType); + } + + // risk_level: 兼容整数 (momo-worker) 和字符串两种格式 + String riskLevel = mapRiskLevel(json.get("risk_level")); + + if (requestId == null) { + log.warn("审批请求缺少 request_id,跳过: {}", message); + return; + } + + log.info("收到审批请求: requestId={}, taskId={}, toolName={}, command={}, riskLevel={}", + requestId, taskId, toolName, command, riskLevel); + + // 构建操作描述 + String action = String.format("%s: %s", toolName, command); + String description = (reason != null && !reason.isEmpty()) + ? reason + : String.format("momo-worker 请求审批: %s", action); + + // 创建审批记录(通过 ApprovalService) + Approval approval = approvalService.createApproval( + taskId, // taskNo + "任务 " + taskId, // taskTitle + action, // action + description, // description + riskLevel, // riskLevel + "momo-worker", // creatorId + "AI 执行器" // creatorName + ); + + // 保存 requestId 到审批记录(用于后续回写响应) + approval.setRequestId(requestId); + approvalService.updateRequestId(approval.getApprovalNo(), requestId); + + log.info("审批记录已创建: approvalNo={}, requestId={}", approval.getApprovalNo(), requestId); + + // 向任务日志 Stream 写入 USER_CONFIRM_REQUEST 事件,驱动 WebSocket 通知前端 + publishApprovalEvent(taskId, approval, requestId, command, riskLevel, reason); + + } catch (Exception e) { + log.error("处理审批请求失败: {}", message, e); + } + } + + /** + * risk_level 映射:兼容整数 (momo-worker) 和字符串格式 + */ + private String mapRiskLevel(JsonNode node) { + if (node == null || node.isNull()) return "medium"; + if (node.isTextual()) { + String text = node.asText().toLowerCase(); + if (text.matches("low|medium|high|critical")) return text; + } + if 
(node.isNumber()) { + return switch (node.asInt()) { + case 1 -> "low"; + case 2 -> "medium"; + case 3 -> "high"; + case 4 -> "critical"; + default -> "high"; // 0 或未知 → high(策略触发的审批默认高风险) + }; + } + return "medium"; + } + + /** + * command_type → tool_name 映射 + */ + private String mapCommandTypeToToolName(String commandType) { + if (commandType == null || commandType.isEmpty()) return "Shell"; + return switch (commandType.toLowerCase()) { + case "shell", "bash" -> "Bash"; + case "python" -> "Python"; + default -> commandType; + }; + } + + /** + * 向任务日志 Stream 写入 USER_CONFIRM_REQUEST 事件 + * 让 TaskWebSocketHandler 能推送到前端 + */ + private void publishApprovalEvent(String taskId, Approval approval, + String requestId, String command, + String riskLevel, String reason) { + try { + Long roleId = resolveRoleIdByTaskNo(taskId); + String streamKey = dispatchConfig.getLogStreamKey(roleId, taskId); + + Map eventData = new LinkedHashMap<>(); + eventData.put("approval_no", approval.getApprovalNo()); + eventData.put("request_id", requestId); + eventData.put("task_id", taskId); + eventData.put("command", command); + eventData.put("risk_level", riskLevel); + eventData.put("reason", reason); + eventData.put("expired_at", approval.getExpiredAt() != null + ? 
approval.getExpiredAt().toString() : ""); + + Map fields = new LinkedHashMap<>(); + fields.put("event_type", "USER_CONFIRM_REQUEST"); + fields.put("timestamp", Instant.now().toString()); + fields.put("session_id", "backend"); + fields.put("data", objectMapper.writeValueAsString(eventData)); + + redisTemplate.opsForStream().add( + StreamRecords.string(fields).withStreamKey(streamKey)); + + log.info("USER_CONFIRM_REQUEST 事件已写入 Stream: key={}, approvalNo={}", + streamKey, approval.getApprovalNo()); + + } catch (Exception e) { + log.error("写入 USER_CONFIRM_REQUEST 事件失败(不影响审批创建): {}", e.getMessage()); + } + } + + private List resolveApprovalRequestQueues() { + Set keySet = redisTemplate.keys(dispatchConfig.getApprovalRequestKeyPattern()); + if (keySet == null || keySet.isEmpty()) { + return List.of(dispatchConfig.getApprovalRequestKey()); + } + List queues = new ArrayList<>(); + for (String key : keySet) { + if (key == null || key.contains(":response:")) { + continue; + } + queues.add(key); + } + if (queues.isEmpty()) { + queues.add(dispatchConfig.getApprovalRequestKey()); + } + Collections.sort(queues); + return queues; + } + + private Long resolveRoleIdByTaskNo(String taskNo) { + if (taskNo == null || taskNo.isEmpty()) { + return null; + } + try { + return taskService.getTaskByNo(taskNo).getRoleId(); + } catch (Exception e) { + log.debug("审批请求链路解析任务 roleId 失败,回退默认 workstation: taskNo={}", taskNo); + return null; + } + } +} diff --git a/back/src/main/java/com/linkwork/service/ApprovalService.java b/back/src/main/java/com/linkwork/service/ApprovalService.java new file mode 100644 index 0000000..c072bd1 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/ApprovalService.java @@ -0,0 +1,295 @@ +package com.linkwork.service; + +import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper; +import com.baomidou.mybatisplus.extension.plugins.pagination.Page; +import com.fasterxml.jackson.databind.ObjectMapper; +import 
com.linkwork.common.SnowflakeIdGenerator; +import com.linkwork.config.DispatchConfig; +import com.linkwork.mapper.ApprovalMapper; +import com.linkwork.model.entity.Approval; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.data.redis.core.StringRedisTemplate; +import org.springframework.stereotype.Service; +import org.springframework.transaction.annotation.Transactional; +import org.springframework.util.StringUtils; + +import java.time.Duration; +import java.time.Instant; +import java.time.LocalDateTime; +import java.util.*; + +import org.springframework.data.redis.connection.stream.StreamRecords; + +/** + * 审批服务 + */ +@Slf4j +@Service +@RequiredArgsConstructor +public class ApprovalService { + + private final ApprovalMapper approvalMapper; + private final StringRedisTemplate redisTemplate; + private final SnowflakeIdGenerator idGenerator; + private final DispatchConfig dispatchConfig; + private final ObjectMapper objectMapper; + private final TaskService taskService; + + /** + * 获取审批列表(分页) + */ + public Page listApprovals(String status, Integer page, Integer pageSize) { + return listApprovals(status, page, pageSize, null); + } + + /** + * 获取审批列表(分页,按创建人隔离) + */ + public Page listApprovals(String status, Integer page, Integer pageSize, String creatorId) { + LambdaQueryWrapper wrapper = new LambdaQueryWrapper<>(); + if (StringUtils.hasText(creatorId)) { + wrapper.eq(Approval::getCreatorId, creatorId); + } + if (StringUtils.hasText(status) && !"all".equalsIgnoreCase(status)) { + wrapper.eq(Approval::getStatus, status); + } + wrapper.orderByDesc(Approval::getCreatedAt); + return approvalMapper.selectPage(new Page<>(page, pageSize), wrapper); + } + + /** + * 获取审批统计 + */ + public Map getStats() { + return getStats(null); + } + + /** + * 获取审批统计(按创建人隔离) + */ + public Map getStats(String creatorId) { + Map stats = new LinkedHashMap<>(); + + LambdaQueryWrapper pendingWrapper = new LambdaQueryWrapper<>(); + if 
(StringUtils.hasText(creatorId)) { + pendingWrapper.eq(Approval::getCreatorId, creatorId); + } + pendingWrapper.eq(Approval::getStatus, "pending"); + stats.put("pending", approvalMapper.selectCount(pendingWrapper)); + + LambdaQueryWrapper approvedWrapper = new LambdaQueryWrapper<>(); + if (StringUtils.hasText(creatorId)) { + approvedWrapper.eq(Approval::getCreatorId, creatorId); + } + approvedWrapper.eq(Approval::getStatus, "approved"); + stats.put("approved", approvalMapper.selectCount(approvedWrapper)); + + LambdaQueryWrapper rejectedWrapper = new LambdaQueryWrapper<>(); + if (StringUtils.hasText(creatorId)) { + rejectedWrapper.eq(Approval::getCreatorId, creatorId); + } + rejectedWrapper.eq(Approval::getStatus, "rejected"); + stats.put("rejected", approvalMapper.selectCount(rejectedWrapper)); + + LambdaQueryWrapper totalWrapper = new LambdaQueryWrapper<>(); + if (StringUtils.hasText(creatorId)) { + totalWrapper.eq(Approval::getCreatorId, creatorId); + } + stats.put("total", approvalMapper.selectCount(totalWrapper)); + return stats; + } + + /** + * 提交审批决策 + */ + @Transactional + public Approval decide(String approvalNo, String decision, String comment, + String operatorId, String operatorName, String operatorIp) { + LambdaQueryWrapper wrapper = new LambdaQueryWrapper<>(); + wrapper.eq(Approval::getApprovalNo, approvalNo); + Approval approval = approvalMapper.selectOne(wrapper); + + if (approval == null) { + throw new IllegalArgumentException("审批记录不存在: " + approvalNo); + } + if (!"pending".equals(approval.getStatus())) { + throw new IllegalArgumentException("审批已处理,当前状态: " + approval.getStatus()); + } + if (!"approved".equals(decision) && !"rejected".equals(decision)) { + throw new IllegalArgumentException("无效的决策: " + decision + ",允许值: approved/rejected"); + } + + approval.setStatus(decision); + approval.setDecision(decision); + approval.setComment(comment); + approval.setOperatorId(operatorId); + approval.setOperatorName(operatorName); + 
approval.setOperatorIp(operatorIp); + approval.setDecidedAt(LocalDateTime.now()); + approval.setUpdatedAt(LocalDateTime.now()); + approvalMapper.updateById(approval); + + // 通知 momo-worker:通过 Redis String 回写审批结果 + // key: approval:{workstationId}:response:{requestId} (String, TTL 120s) + String requestId = approval.getRequestId(); + if (StringUtils.hasText(requestId)) { + try { + Long roleId = resolveRoleIdByTaskNo(approval.getTaskNo()); + String responseKey = dispatchConfig.getApprovalResponseKey(roleId, requestId); + Map response = new LinkedHashMap<>(); + response.put("request_id", requestId); + response.put("status", decision); + if ("approved".equals(decision)) { + response.put("approved_by", operatorName != null ? operatorName : "system"); + } else { + response.put("rejected_by", operatorName != null ? operatorName : "system"); + } + response.put("comment", comment != null ? comment : ""); + response.put("responded_at", Instant.now().toString()); + String responseJson = objectMapper.writeValueAsString(response); + redisTemplate.opsForValue().set(responseKey, responseJson, Duration.ofSeconds(120)); + log.info("审批结果已回写 Redis (String, TTL=120s): key={}, decision={}", responseKey, decision); + } catch (Exception e) { + log.error("审批结果回写 Redis 失败: requestId={}", requestId, e); + } + } + + // 向任务日志 Stream 写入 USER_CONFIRM_RESOLVED 事件,通知前端审批已处理 + publishResolvedEvent(approval); + + // 兼容旧模式:通过 Redis Pub/Sub 发送审批结果 + if ("approved".equals(decision) && StringUtils.hasText(approval.getTaskNo())) { + String channel = "approval:" + approval.getTaskNo(); + redisTemplate.convertAndSend(channel, "approved:" + approvalNo); + log.info("审批通过,已通知 Agent (Pub/Sub): taskNo={}, approvalNo={}", approval.getTaskNo(), approvalNo); + } + + log.info("审批决策完成: approvalNo={}, decision={}, operator={}, operatorIp={}", + approvalNo, decision, operatorName, operatorIp); + return approval; + } + + /** + * 创建审批请求(由 Agent/Worker 触发) + */ + @Transactional + public Approval createApproval(String 
taskNo, String taskTitle, String action, + String description, String riskLevel, + String creatorId, String creatorName) { + String approvalNo = "AUTH-" + idGenerator.nextId(); + + Approval approval = new Approval(); + approval.setApprovalNo(approvalNo); + approval.setTaskNo(taskNo); + approval.setTaskTitle(taskTitle); + approval.setAction(action); + approval.setDescription(description); + approval.setRiskLevel(riskLevel != null ? riskLevel : "medium"); + approval.setStatus("pending"); + approval.setCreatorId(creatorId); + approval.setCreatorName(creatorName); + // 默认 30 分钟过期 + approval.setExpiredAt(LocalDateTime.now().plusMinutes(30)); + approval.setCreatedAt(LocalDateTime.now()); + approval.setUpdatedAt(LocalDateTime.now()); + approval.setIsDeleted(0); + approvalMapper.insert(approval); + + log.info("审批请求创建: approvalNo={}, taskNo={}, action={}, riskLevel={}", + approvalNo, taskNo, action, riskLevel); + + return approval; + } + + /** + * 更新审批记录的 requestId(momo-worker 审批请求 ID) + */ + public void updateRequestId(String approvalNo, String requestId) { + LambdaQueryWrapper wrapper = new LambdaQueryWrapper<>(); + wrapper.eq(Approval::getApprovalNo, approvalNo); + Approval approval = approvalMapper.selectOne(wrapper); + if (approval != null) { + approval.setRequestId(requestId); + approvalMapper.updateById(approval); + } + } + + /** + * 转换为响应格式 + */ + public Map toResponse(Approval approval) { + Map map = new LinkedHashMap<>(); + map.put("id", approval.getApprovalNo()); + map.put("taskNo", approval.getTaskNo()); + map.put("taskTitle", approval.getTaskTitle()); + map.put("action", approval.getAction()); + map.put("description", approval.getDescription()); + map.put("riskLevel", approval.getRiskLevel()); + map.put("status", approval.getStatus()); + map.put("decision", approval.getDecision()); + map.put("comment", approval.getComment()); + map.put("operatorName", approval.getOperatorName()); + map.put("expiredAt", approval.getExpiredAt()); + map.put("decidedAt", 
approval.getDecidedAt()); + map.put("creatorName", approval.getCreatorName()); + map.put("createdAt", approval.getCreatedAt()); + return map; + } + + public List> toResponseList(List approvals) { + List> list = new ArrayList<>(); + for (Approval approval : approvals) { + list.add(toResponse(approval)); + } + return list; + } + + /** + * 向任务日志 Stream 写入 USER_CONFIRM_RESOLVED 事件 + * 让 WebSocket 能推送审批结果到前端任务执行面板 + */ + private void publishResolvedEvent(Approval approval) { + String taskNo = approval.getTaskNo(); + if (taskNo == null || taskNo.isEmpty()) return; + + try { + Long roleId = resolveRoleIdByTaskNo(taskNo); + String streamKey = dispatchConfig.getLogStreamKey(roleId, taskNo); + + Map eventData = new LinkedHashMap<>(); + eventData.put("approval_no", approval.getApprovalNo()); + eventData.put("request_id", approval.getRequestId()); + eventData.put("task_id", taskNo); + eventData.put("decision", approval.getDecision()); + eventData.put("operator", approval.getOperatorName()); + eventData.put("comment", approval.getComment()); + + Map fields = new LinkedHashMap<>(); + fields.put("event_type", "USER_CONFIRM_RESOLVED"); + fields.put("timestamp", Instant.now().toString()); + fields.put("session_id", "backend"); + fields.put("data", objectMapper.writeValueAsString(eventData)); + + redisTemplate.opsForStream().add( + StreamRecords.string(fields).withStreamKey(streamKey)); + + log.info("USER_CONFIRM_RESOLVED 事件已写入 Stream: key={}, decision={}", + streamKey, approval.getDecision()); + } catch (Exception e) { + log.error("写入 USER_CONFIRM_RESOLVED 事件失败: {}", e.getMessage()); + } + } + + private Long resolveRoleIdByTaskNo(String taskNo) { + if (!StringUtils.hasText(taskNo)) { + return null; + } + try { + return taskService.getTaskByNo(taskNo).getRoleId(); + } catch (Exception e) { + log.debug("审批链路解析任务 roleId 失败,回退默认 workstation: taskNo={}", taskNo); + return null; + } + } +} diff --git a/back/src/main/java/com/linkwork/service/AuthService.java 
b/back/src/main/java/com/linkwork/service/AuthService.java new file mode 100644 index 0000000..fe1622f --- /dev/null +++ b/back/src/main/java/com/linkwork/service/AuthService.java @@ -0,0 +1,168 @@ +package com.linkwork.service; + +import com.linkwork.context.UserInfo; +import io.jsonwebtoken.Claims; +import io.jsonwebtoken.Jwts; +import io.jsonwebtoken.security.Keys; +import jakarta.annotation.PostConstruct; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.security.crypto.bcrypt.BCryptPasswordEncoder; +import org.springframework.stereotype.Service; + +import javax.crypto.SecretKey; +import java.nio.charset.StandardCharsets; +import java.util.*; + +/** + * 认证服务 + * 处理密码验证和 JWT Token 生成/验证 + */ +@Slf4j +@Service +public class AuthService { + + private final BCryptPasswordEncoder passwordEncoder = new BCryptPasswordEncoder(); + + @Value("${robot.auth.password:}") + private String configuredPasswordHash; + + @Value("${robot.auth.jwt-secret:}") + private String jwtSecret; + + @Value("${robot.auth.jwt-expiration:86400000}") + private long jwtExpiration; // 默认 24 小时 + + private SecretKey secretKey; + + @PostConstruct + public void init() { + if (jwtSecret == null || jwtSecret.isBlank()) { + throw new IllegalStateException("robot.auth.jwt-secret 未配置"); + } + + if (configuredPasswordHash == null || configuredPasswordHash.isBlank()) { + throw new IllegalStateException("robot.auth.password 未配置(需提供 BCrypt 哈希)"); + } + + // 初始化 JWT 密钥 + secretKey = Keys.hmacShaKeyFor(jwtSecret.getBytes(StandardCharsets.UTF_8)); + } + + /** + * 验证密码 + */ + public boolean validatePassword(String rawPassword) { + return passwordEncoder.matches(rawPassword, configuredPasswordHash); + } + + /** + * 生成 JWT Token + */ + public String generateToken(String subject) { + Date now = new Date(); + Date expiry = new Date(now.getTime() + jwtExpiration); + + return Jwts.builder() + .subject(subject) + .issuedAt(now) + 
.expiration(expiry) + .signWith(secretKey) + .compact(); + } + + /** + * 验证 JWT Token + */ + public boolean validateToken(String token) { + try { + Jwts.parser() + .verifyWith(secretKey) + .build() + .parseSignedClaims(token); + return true; + } catch (Exception e) { + log.debug("Token 验证失败: {}", e.getMessage()); + return false; + } + } + + /** + * 从 Token 获取主题 + */ + public String getSubjectFromToken(String token) { + Claims claims = Jwts.parser() + .verifyWith(secretKey) + .build() + .parseSignedClaims(token) + .getPayload(); + return claims.getSubject(); + } + + /** + * 工具方法:生成密码的 BCrypt 哈希(用于配置新密码) + */ + public String encodePassword(String rawPassword) { + return passwordEncoder.encode(rawPassword); + } + + /** + * 基于用户信息生成 JWT Token + * JWT payload 承载完整用户信息,后续请求直接从 JWT 解析,不查库 + */ + @SuppressWarnings("unchecked") + public String generateTokenForUser(UserInfo userInfo) { + Date now = new Date(); + Date expiry = new Date(now.getTime() + jwtExpiration); + + Map claims = new HashMap<>(); + claims.put("name", userInfo.getName()); + if (userInfo.getEmail() != null) { + claims.put("email", userInfo.getEmail()); + } + if (userInfo.getWorkId() != null) { + claims.put("workId", userInfo.getWorkId()); + } + if (userInfo.getAvatarUrl() != null) { + claims.put("avatarUrl", userInfo.getAvatarUrl()); + } + if (userInfo.getPermissions() != null) { + claims.put("permissions", userInfo.getPermissions()); + } + + return Jwts.builder() + .subject(userInfo.getUserId()) + .claims(claims) + .issuedAt(now) + .expiration(expiry) + .signWith(secretKey) + .compact(); + } + + /** + * 从 JWT Token 解析完整用户信息 + */ + @SuppressWarnings("unchecked") + public UserInfo getUserInfoFromToken(String token) { + Claims claims = Jwts.parser() + .verifyWith(secretKey) + .build() + .parseSignedClaims(token) + .getPayload(); + + List permissions = null; + Object permObj = claims.get("permissions"); + if (permObj instanceof List) { + permissions = (List) permObj; + } + + return UserInfo.builder() + 
.userId(claims.getSubject()) + .name(claims.get("name", String.class)) + .email(claims.get("email", String.class)) + .workId(claims.get("workId", String.class)) + .avatarUrl(claims.get("avatarUrl", String.class)) + .permissions(permissions) + .build(); + } +} diff --git a/back/src/main/java/com/linkwork/service/BuildExecutor.java b/back/src/main/java/com/linkwork/service/BuildExecutor.java new file mode 100644 index 0000000..0ba2364 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/BuildExecutor.java @@ -0,0 +1,211 @@ +package com.linkwork.service; + +import com.linkwork.config.ImageBuildConfig; +import com.linkwork.model.dto.BuildTask; +import com.linkwork.model.dto.ImageBuildResult; +import com.linkwork.model.dto.MergedConfig; +import com.linkwork.model.dto.ServiceBuildRequest; +import com.linkwork.model.dto.ServiceBuildResult; +import com.linkwork.model.enums.DeployMode; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Component; +import org.springframework.util.StringUtils; + +/** + * 构建执行器 + * + * 封装实际的构建逻辑,由 BuildQueueService 调用 + */ +@Component +@Slf4j +public class BuildExecutor { + + private final ImageBuildService imageBuildService; + private final ImageBuildConfig imageBuildConfig; + private final K8sOrchestrator orchestrator; + private final BuildRecordService buildRecordService; + private final ScheduleEventPublisher eventPublisher; + private final BuildLogBuffer buildLogBuffer; + + public BuildExecutor(ImageBuildService imageBuildService, + ImageBuildConfig imageBuildConfig, + K8sOrchestrator orchestrator, + BuildRecordService buildRecordService, + ScheduleEventPublisher eventPublisher, + BuildLogBuffer buildLogBuffer) { + this.imageBuildService = imageBuildService; + this.imageBuildConfig = imageBuildConfig; + this.orchestrator = orchestrator; + this.buildRecordService = buildRecordService; + this.eventPublisher = eventPublisher; + this.buildLogBuffer = buildLogBuffer; + } + + /** + * 执行构建任务 + * + * @param task 构建任务 
+ * @return 构建结果 + */ + public ServiceBuildResult execute(BuildTask task) { + ServiceBuildRequest request = task.getRequest(); + MergedConfig config = task.getConfig(); + String serviceId = request.getServiceId(); + String buildId = request.getBuildId(); + Long roleId = request.getRoleId(); + String roleName = request.getRoleName(); + long startTime = System.currentTimeMillis(); + + try { + log.info("开始执行构建: serviceId={}, buildId={}", serviceId, buildId); + addBuildLog(buildId, "INFO", "========== 开始构建 =========="); + addBuildLog(buildId, "INFO", "服务ID: " + serviceId); + addBuildLog(buildId, "INFO", "构建ID: " + buildId); + + // 更新构建记录状态为 BUILDING + if (roleId != null && StringUtils.hasText(buildId)) { + buildRecordService.markBuilding(buildId); + } + + // 1. 镜像构建(如果启用,仅构建 Agent 镜像) + if (imageBuildConfig.isEnabled() && request.getDeployMode() == DeployMode.K8S) { + log.info("镜像构建已启用,开始构建: serviceId={}", serviceId); + addBuildLog(buildId, "INFO", "========== 镜像构建阶段 =========="); + + // 发布 BUILD_STARTED 事件 + if (StringUtils.hasText(buildId)) { + eventPublisher.publishBuildStarted(buildId, buildId, roleId, roleName, + config.getAgentImage()); + eventPublisher.publishBuildProgress(buildId, buildId, "dockerfile", + "Generating Dockerfile..."); + } + + ImageBuildResult buildResult = imageBuildService.buildImages(request); + + if (!buildResult.isSuccess()) { + log.error("镜像构建失败: serviceId={}, error={}", serviceId, buildResult.getErrorMessage()); + addBuildLog(buildId, "ERROR", "镜像构建失败: " + buildResult.getErrorMessage()); + + // 发布 BUILD_FAILED 事件并更新记录 + long durationMs = System.currentTimeMillis() - startTime; + finishBuild(buildId, false, durationMs); + if (StringUtils.hasText(buildId)) { + eventPublisher.publishBuildFailed(buildId, buildId, "BUILD_ERROR", + buildResult.getErrorMessage(), durationMs); + if (roleId != null) { + buildRecordService.markFailed(buildId, buildResult.getErrorMessage(), durationMs); + } + } + return ServiceBuildResult.failed(serviceId, 
"BUILD_ERROR", buildResult.getErrorMessage()); + } + + // 只更新 Agent 镜像地址 + config.setAgentImage(buildResult.getAgentImageTag()); + config.setImageBuildDurationMs(buildResult.getBuildDurationMs()); + + // 发布镜像推送事件(如果已推送) + if (buildResult.isPushed() && StringUtils.hasText(buildId)) { + eventPublisher.publishBuildPushed(buildId, buildId, buildResult.getAgentImageTag()); + } + + log.info("镜像构建完成: serviceId={}, agentImage={}, duration={}ms", + serviceId, buildResult.getAgentImageTag(), buildResult.getBuildDurationMs()); + addBuildLog(buildId, "INFO", "镜像构建完成: " + buildResult.getAgentImageTag()); + } else { + log.info("镜像构建已禁用或非 K8s 模式,使用基础镜像: serviceId={}", serviceId); + addBuildLog(buildId, "INFO", "使用基础镜像: " + config.getAgentImage()); + } + + // 2. 创建 K8s 资源 + addBuildLog(buildId, "INFO", "========== K8s 资源创建阶段 =========="); + addBuildLog(buildId, "INFO", "命名空间: " + config.getNamespace()); + addBuildLog(buildId, "INFO", "Pod 数量: " + config.getPodCount()); + addBuildLog(buildId, "INFO", "Pod 模式: " + config.getPodMode()); + + ServiceBuildResult result = orchestrator.buildService(config); + + long durationMs = System.currentTimeMillis() - startTime; + + if (result.isSuccess()) { + log.info("构建完成: serviceId={}, duration={}ms", serviceId, durationMs); + addBuildLog(buildId, "INFO", "========== 构建成功 =========="); + addBuildLog(buildId, "INFO", "PodGroup: " + result.getPodGroupName()); + addBuildLog(buildId, "INFO", "Pods: " + result.getPodNames()); + addBuildLog(buildId, "INFO", "调度节点: " + result.getScheduledNode()); + addBuildLog(buildId, "INFO", "总耗时: " + durationMs + "ms"); + finishBuild(buildId, true, durationMs); + + // 发布 BUILD_COMPLETED 事件并更新记录 + if (StringUtils.hasText(buildId)) { + eventPublisher.publishBuildCompleted(buildId, buildId, config.getAgentImage(), durationMs); + if (roleId != null) { + buildRecordService.markSuccess(buildId, config.getAgentImage(), durationMs); + } + } + } else { + log.error("K8s 资源创建失败: serviceId={}, error={}", serviceId, 
result.getErrorMessage()); + addBuildLog(buildId, "ERROR", "========== K8s 资源创建失败 =========="); + addBuildLog(buildId, "ERROR", "错误码: " + result.getErrorCode()); + addBuildLog(buildId, "ERROR", "错误信息: " + result.getErrorMessage()); + addBuildLog(buildId, "INFO", "总耗时: " + durationMs + "ms"); + finishBuild(buildId, false, durationMs); + + // 发布 BUILD_FAILED 事件并更新记录 + if (StringUtils.hasText(buildId)) { + eventPublisher.publishBuildFailed(buildId, buildId, "K8S_ERROR", + result.getErrorMessage(), durationMs); + if (roleId != null) { + buildRecordService.markFailed(buildId, result.getErrorMessage(), durationMs); + } + } + } + + return result; + + } catch (Throwable t) { + log.error("构建执行异常: serviceId={}, error={}", serviceId, t.getMessage(), t); + + long durationMs = System.currentTimeMillis() - startTime; + + addBuildLog(buildId, "ERROR", "========== 构建执行异常 =========="); + addBuildLog(buildId, "ERROR", "异常类型: " + t.getClass().getSimpleName()); + addBuildLog(buildId, "ERROR", "异常信息: " + t.getMessage()); + addBuildLog(buildId, "INFO", "总耗时: " + durationMs + "ms"); + finishBuild(buildId, false, durationMs); + + // 发布 BUILD_FAILED 事件并更新记录 + if (StringUtils.hasText(buildId)) { + eventPublisher.publishBuildFailed(buildId, buildId, "INTERNAL_ERROR", + t.getMessage(), durationMs); + if (roleId != null) { + buildRecordService.markFailed(buildId, t.getMessage(), durationMs); + } + } + + return ServiceBuildResult.failed(serviceId, "INTERNAL_ERROR", t.getMessage()); + } + } + + /** + * 添加构建日志 + */ + private void addBuildLog(String buildId, String level, String message) { + if (StringUtils.hasText(buildId)) { + buildLogBuffer.addLog(buildId, level, message); + } + } + + /** + * 完成构建(标记完成并调度清理) + */ + private void finishBuild(String buildId, boolean success, long durationMs) { + if (StringUtils.hasText(buildId)) { + // 先标记完成状态 + buildLogBuffer.markCompleted(buildId, success); + // 添加最终状态日志(触发订阅者发送 complete 事件) + addBuildLog(buildId, "SYSTEM", success ? 
package com.linkwork.service;

import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Component;

import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;

/**
 * In-memory buffer for build logs, supporting SSE push.
 *
 * <p>Stores log entries per build id, pushes new entries to registered
 * subscribers, records the final build outcome, and lazily cleans up each
 * buffer a configurable delay after the build completes.
 *
 * <p>Thread-safety: the maps are {@link ConcurrentHashMap}s and the per-build
 * lists are copy-on-write, so concurrent readers and writers never block.
 */
@Slf4j
@Component
public class BuildLogBuffer {

    /**
     * Timestamp format for plain-text export. Cached as a constant because
     * DateTimeFormatter is immutable and thread-safe — rebuilding it per
     * export call (as before) is wasted work.
     */
    private static final DateTimeFormatter EXPORT_FORMATTER =
            DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS");

    /**
     * A single log line: epoch-millis timestamp, level tag, and message text.
     */
    public record LogEntry(long timestamp, String level, String message) {}

    /**
     * Per-build log buffers, keyed by build id.
     */
    private final Map<String, CopyOnWriteArrayList<LogEntry>> buffers = new ConcurrentHashMap<>();

    /**
     * Per-build subscribers that receive each new entry (used for SSE push).
     */
    private final Map<String, CopyOnWriteArrayList<Consumer<LogEntry>>> subscribers = new ConcurrentHashMap<>();

    /**
     * Marks builds whose execution has finished.
     */
    private final Map<String, Boolean> completed = new ConcurrentHashMap<>();

    /**
     * Final build outcome (success = true, failure = false).
     */
    private final Map<String, Boolean> completionStatus = new ConcurrentHashMap<>();

    /**
     * Appends a log entry and pushes it to all current subscribers.
     *
     * @param buildId build id (entry is dropped if null)
     * @param level   log level tag; defaults to "INFO" if null
     * @param message log text (entry is dropped if null)
     */
    public void addLog(String buildId, String level, String message) {
        if (buildId == null || message == null) return;

        // Default the level so a null can never propagate into export/rendering.
        LogEntry entry = new LogEntry(System.currentTimeMillis(),
                level != null ? level : "INFO", message);

        // Store in the buffer.
        buffers.computeIfAbsent(buildId, k -> new CopyOnWriteArrayList<>()).add(entry);

        // Push to every subscriber; one broken subscriber (e.g. a closed SSE
        // connection) must not prevent delivery to the others.
        CopyOnWriteArrayList<Consumer<LogEntry>> subs = subscribers.get(buildId);
        if (subs != null) {
            for (Consumer<LogEntry> sub : subs) {
                try {
                    sub.accept(entry);
                } catch (Exception e) {
                    log.debug("Failed to push log to subscriber: {}", e.getMessage());
                }
            }
        }
    }

    /**
     * Returns an immutable snapshot of all entries recorded for the build.
     */
    public List<LogEntry> getHistory(String buildId) {
        CopyOnWriteArrayList<LogEntry> buffer = buffers.get(buildId);
        return buffer != null ? List.copyOf(buffer) : List.of();
    }

    /**
     * Returns the entries recorded at or after the given index.
     * A negative index is treated as 0.
     */
    public List<LogEntry> getLogsAfter(String buildId, int afterIndex) {
        CopyOnWriteArrayList<LogEntry> buffer = buffers.get(buildId);
        int from = Math.max(afterIndex, 0);
        if (buffer == null || from >= buffer.size()) {
            return List.of();
        }
        return List.copyOf(buffer.subList(from, buffer.size()));
    }

    /**
     * Registers a subscriber for live entries (SSE).
     */
    public void subscribe(String buildId, Consumer<LogEntry> subscriber) {
        subscribers.computeIfAbsent(buildId, k -> new CopyOnWriteArrayList<>()).add(subscriber);
    }

    /**
     * Removes a previously registered subscriber.
     */
    public void unsubscribe(String buildId, Consumer<LogEntry> subscriber) {
        CopyOnWriteArrayList<Consumer<LogEntry>> subs = subscribers.get(buildId);
        if (subs != null) {
            subs.remove(subscriber);
        }
    }

    /**
     * Marks the build as finished with the given outcome.
     *
     * @param buildId build id
     * @param success whether the build succeeded
     */
    public void markCompleted(String buildId, boolean success) {
        completed.put(buildId, true);
        completionStatus.put(buildId, success);
    }

    /**
     * Returns whether the build has finished.
     */
    public boolean isCompleted(String buildId) {
        return Boolean.TRUE.equals(completed.get(buildId));
    }

    /**
     * Returns the build outcome.
     *
     * @return true = success, false = failure, null = not finished yet
     */
    public Boolean getCompletionStatus(String buildId) {
        if (!isCompleted(buildId)) {
            return null;
        }
        return completionStatus.get(buildId);
    }

    /**
     * Exports the build's log as plain text, one "[timestamp] [LEVEL] message"
     * line per entry.
     *
     * @param buildId build id
     * @return the formatted log, or "" when there are no entries
     */
    public String exportAsText(String buildId) {
        List<LogEntry> entries = getHistory(buildId);
        if (entries.isEmpty()) {
            return "";
        }

        StringBuilder sb = new StringBuilder();
        for (LogEntry entry : entries) {
            LocalDateTime time = Instant.ofEpochMilli(entry.timestamp())
                    .atZone(ZoneId.systemDefault())
                    .toLocalDateTime();
            sb.append(String.format("[%s] [%s] %s%n",
                    EXPORT_FORMATTER.format(time),
                    // Locale.ROOT keeps level tags stable regardless of the JVM's default locale.
                    entry.level().toUpperCase(Locale.ROOT),
                    entry.message()));
        }

        return sb.toString();
    }

    /**
     * Schedules removal of all state for the build after the given delay.
     * Implemented with a throwaway virtual thread; interruption aborts the
     * cleanup and restores the interrupt flag.
     */
    public void scheduleCleanup(String buildId, long delayMinutes) {
        Thread.startVirtualThread(() -> {
            try {
                TimeUnit.MINUTES.sleep(delayMinutes);
                buffers.remove(buildId);
                subscribers.remove(buildId);
                completed.remove(buildId);
                completionStatus.remove(buildId);
                log.debug("Cleaned up build log buffer for: {}", buildId);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
    }
}
package com.linkwork.service;

import com.linkwork.config.BuildQueueConfig;
import com.linkwork.model.dto.BuildQueueStatus;
import com.linkwork.model.dto.BuildTask;
import com.linkwork.model.dto.MergedConfig;
import com.linkwork.model.dto.ServiceBuildRequest;
import com.linkwork.model.dto.ServiceBuildResult;
import jakarta.annotation.PostConstruct;
import jakarta.annotation.PreDestroy;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Service;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;

/**
 * Build queue service.
 *
 * <p>Throttles concurrent image builds: tasks wait in a bounded FIFO queue and
 * are promoted by a single scheduler thread only when both the hard concurrency
 * cap and the live system-resource thresholds (CPU / memory) allow it.
 */
@Service
@Slf4j
public class BuildQueueService {

    private final SystemResourceMonitor resourceMonitor;
    private final BuildQueueConfig config;
    private final BuildExecutor buildExecutor;

    // Bounded FIFO of tasks waiting to run.
    private final LinkedBlockingQueue<BuildTask> waitingQueue;

    // Tasks currently executing, keyed by build id.
    private final ConcurrentHashMap<String, BuildTask> runningTasks = new ConcurrentHashMap<>();

    // Single-threaded scheduler that drives processQueue().
    private ScheduledExecutorService scheduler;

    // Pool that actually runs the build tasks.
    private ExecutorService taskExecutor;

    // Set on shutdown; processQueue() becomes a no-op afterwards.
    private final AtomicBoolean stopped = new AtomicBoolean(false);

    public BuildQueueService(SystemResourceMonitor resourceMonitor,
                             BuildQueueConfig config,
                             BuildExecutor buildExecutor) {
        this.resourceMonitor = resourceMonitor;
        this.config = config;
        this.buildExecutor = buildExecutor;
        this.waitingQueue = new LinkedBlockingQueue<>(config.getMaxQueueSize());
    }

    /**
     * Starts the scheduler and worker pool (no-op when the queue is disabled).
     */
    @PostConstruct
    public void start() {
        if (!config.isEnabled()) {
            log.info("BuildQueueService is disabled");
            return;
        }

        scheduler = Executors.newSingleThreadScheduledExecutor(r -> {
            Thread t = new Thread(r, "build-queue-scheduler");
            t.setDaemon(true);
            return t;
        });

        taskExecutor = Executors.newFixedThreadPool(config.getMaxConcurrent(), r -> {
            Thread t = new Thread(r, "build-task-executor");
            t.setDaemon(true);
            return t;
        });

        // scheduleWithFixedDelay so a slow cycle never overlaps the next one.
        scheduler.scheduleWithFixedDelay(
                this::processQueue,
                0,
                config.getCheckInterval(),
                TimeUnit.MILLISECONDS
        );

        log.info("BuildQueueService started: maxConcurrent={}, cpuThreshold={}, memoryThreshold={}, checkInterval={}ms",
                config.getMaxConcurrent(), config.getCpuThreshold(), config.getMemoryThreshold(), config.getCheckInterval());
    }

    /**
     * Graceful shutdown: stop the scheduler quickly, then give running builds
     * up to 60s to finish before forcing the executor down.
     */
    @PreDestroy
    public void stop() {
        stopped.set(true);

        if (scheduler != null) {
            scheduler.shutdown();
            try {
                if (!scheduler.awaitTermination(5, TimeUnit.SECONDS)) {
                    scheduler.shutdownNow();
                }
            } catch (InterruptedException e) {
                scheduler.shutdownNow();
                Thread.currentThread().interrupt();
            }
        }

        if (taskExecutor != null) {
            taskExecutor.shutdown();
            try {
                if (!taskExecutor.awaitTermination(60, TimeUnit.SECONDS)) {
                    taskExecutor.shutdownNow();
                }
            } catch (InterruptedException e) {
                taskExecutor.shutdownNow();
                Thread.currentThread().interrupt();
            }
        }

        log.info("BuildQueueService stopped");
    }

    /**
     * Enqueues a build task.
     *
     * @param request      the build request
     * @param mergedConfig the merged configuration
     * @return the queued task
     * @throws IllegalStateException if the queue is disabled or full
     */
    public BuildTask submit(ServiceBuildRequest request, MergedConfig mergedConfig) {
        if (!config.isEnabled()) {
            throw new IllegalStateException("Build queue is disabled");
        }

        BuildTask task = new BuildTask(request, mergedConfig);

        boolean offered = waitingQueue.offer(task);
        if (!offered) {
            throw new IllegalStateException("Build queue is full (max: " + config.getMaxQueueSize() + ")");
        }

        log.info("任务入队: buildId={}, serviceId={}, 当前队列长度={}",
                task.getBuildId(), task.getServiceId(), waitingQueue.size());

        return task;
    }

    /**
     * One scheduler cycle: dispatch the head of the queue if the concurrency
     * cap and the resource thresholds both permit it.
     */
    private void processQueue() {
        if (stopped.get()) {
            return;
        }

        try {
            // Hard concurrency cap.
            if (runningTasks.size() >= config.getMaxConcurrent()) {
                log.debug("达到并发上限: {}/{}", runningTasks.size(), config.getMaxConcurrent());
                return;
            }

            // Live CPU / memory headroom check.
            if (!resourceMonitor.hasAvailableResources(
                    config.getCpuThreshold(),
                    config.getMemoryThreshold())) {
                log.debug("系统资源不足,等待中...");
                return;
            }

            // Dispatch at most one task per cycle to let resource readings settle.
            BuildTask task = waitingQueue.poll();
            if (task != null) {
                executeTask(task);
            }
        } catch (Exception e) {
            log.error("处理队列时发生错误", e);
        }
    }

    /**
     * Runs a task asynchronously on the worker pool with a per-task timeout.
     *
     * <p>Fix: the timeout/exceptionally stage is now the future stored on the
     * task. Previously the derived stage was discarded and setResultFuture
     * received the raw future, so callers awaiting the result never observed
     * the TIMEOUT ServiceBuildResult produced by the handler.
     */
    private void executeTask(BuildTask task) {
        runningTasks.put(task.getBuildId(), task);
        task.markStarted();

        long waitTime = task.getWaitTimeMs();
        log.info("开始执行任务: buildId={}, serviceId={}, 等待时间={}ms, 当前并发={}",
                task.getBuildId(), task.getServiceId(), waitTime, runningTasks.size());

        CompletableFuture<ServiceBuildResult> future = CompletableFuture.supplyAsync(() -> {
            try {
                return buildExecutor.execute(task);
            } catch (Throwable t) {
                log.error("构建任务执行失败: buildId={}", task.getBuildId(), t);
                String error = t.getMessage() != null ? t.getMessage() : t.getClass().getSimpleName();
                task.markFailed(error);
                return ServiceBuildResult.failed(task.getServiceId(), "BUILD_ERROR", error);
            } finally {
                // Always free the concurrency slot, even on failure/timeout.
                runningTasks.remove(task.getBuildId());
                if (task.getState() == BuildTask.TaskState.RUNNING) {
                    task.markCompleted();
                }
                log.info("任务执行完成: buildId={}, serviceId={}, 执行时间={}ms",
                        task.getBuildId(), task.getServiceId(), task.getExecutionTimeMs());
            }
        }, taskExecutor);

        CompletableFuture<ServiceBuildResult> timedFuture = future
                .orTimeout(config.getTaskTimeout(), TimeUnit.SECONDS)
                .exceptionally(e -> {
                    // Dependent stages may receive the exception wrapped in a
                    // CompletionException — unwrap before classifying it.
                    Throwable cause = (e instanceof CompletionException && e.getCause() != null)
                            ? e.getCause() : e;
                    if (cause instanceof TimeoutException) {
                        log.error("构建任务超时: buildId={}, timeout={}s", task.getBuildId(), config.getTaskTimeout());
                        task.markFailed("Build timeout after " + config.getTaskTimeout() + " seconds");
                    }
                    return ServiceBuildResult.failed(task.getServiceId(), "TIMEOUT", cause.getMessage());
                });

        task.setResultFuture(timedFuture);
    }

    /**
     * Cancels a task that is still waiting in the queue.
     *
     * @param buildId the build id
     * @return true if the task was removed from the waiting queue
     */
    public boolean cancel(String buildId) {
        // A running build cannot be cancelled through the queue.
        if (runningTasks.containsKey(buildId)) {
            log.warn("无法取消正在执行的任务: buildId={}", buildId);
            return false;
        }

        boolean removed = waitingQueue.removeIf(task -> task.getBuildId().equals(buildId));
        if (removed) {
            log.info("任务已取消: buildId={}", buildId);
        }
        return removed;
    }

    /**
     * Returns the task's position in the queue.
     *
     * @param buildId the build id
     * @return 1-based queue position; -1 when running; null when unknown
     */
    public Integer getPosition(String buildId) {
        if (runningTasks.containsKey(buildId)) {
            return -1;
        }

        int position = 1;
        for (BuildTask task : waitingQueue) {
            if (task.getBuildId().equals(buildId)) {
                return position;
            }
            position++;
        }

        return null;
    }

    /**
     * Returns a full snapshot of the queue: waiting/running tasks, limits,
     * live resource status, and whether a new task would be accepted.
     */
    public BuildQueueStatus getStatus() {
        List<BuildQueueStatus.TaskInfo> waitingTaskInfos = new ArrayList<>();
        int position = 1;
        for (BuildTask task : waitingQueue) {
            waitingTaskInfos.add(BuildQueueStatus.TaskInfo.builder()
                    .buildId(task.getBuildId())
                    .serviceId(task.getServiceId())
                    .waitTimeMs(task.getWaitTimeMs())
                    .position(position++)
                    .build());
        }

        List<BuildQueueStatus.TaskInfo> runningTaskInfos = runningTasks.values().stream()
                .map(task -> BuildQueueStatus.TaskInfo.builder()
                        .buildId(task.getBuildId())
                        .serviceId(task.getServiceId())
                        .waitTimeMs(task.getWaitTimeMs())
                        .executionTimeMs(task.getExecutionTimeMs())
                        .position(-1)
                        .build())
                .collect(Collectors.toList());

        boolean canAccept = waitingQueue.remainingCapacity() > 0
                && resourceMonitor.hasAvailableResources(config.getCpuThreshold(), config.getMemoryThreshold());

        return BuildQueueStatus.builder()
                .waitingCount(waitingQueue.size())
                .runningCount(runningTasks.size())
                .maxConcurrent(config.getMaxConcurrent())
                .maxQueueSize(config.getMaxQueueSize())
                .resourceStatus(resourceMonitor.getStatus())
                .cpuThreshold(config.getCpuThreshold())
                .memoryThreshold(config.getMemoryThreshold())
                .canAcceptNewTask(canAccept)
                .waitingTasks(waitingTaskInfos)
                .runningTasks(runningTaskInfos)
                .build();
    }

    /**
     * Returns whether the queue is enabled.
     */
    public boolean isEnabled() {
        return config.isEnabled();
    }

    /**
     * Returns the number of waiting tasks.
     */
    public int getWaitingCount() {
        return waitingQueue.size();
    }

    /**
     * Returns the number of running tasks.
     */
    public int getRunningCount() {
        return runningTasks.size();
    }
}
package com.linkwork.service;

import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl;
import com.linkwork.mapper.BuildRecordMapper;
import com.linkwork.model.entity.BuildRecordEntity;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Service;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

/**
 * Build record service.
 *
 * <p>Manages the full lifecycle of image-build records: creation, status
 * transitions (PENDING → BUILDING → SUCCESS/FAILED/CANCELLED), log URL
 * updates, and paginated history queries.
 */
@Slf4j
@Service
public class BuildRecordService extends ServiceImpl<BuildRecordMapper, BuildRecordEntity> {

    /**
     * Creates a new build record in PENDING state.
     *
     * @param buildNo        build number (generated by the frontend)
     * @param roleId         role id
     * @param roleName       role name
     * @param configSnapshot snapshot of the build configuration
     * @param creatorId      creator id
     * @param creatorName    creator name
     * @return the persisted entity
     */
    public BuildRecordEntity createBuildRecord(String buildNo, Long roleId, String roleName,
                                               Map<String, Object> configSnapshot,
                                               String creatorId, String creatorName) {
        BuildRecordEntity entity = new BuildRecordEntity();
        entity.setBuildNo(buildNo);
        entity.setRoleId(roleId);
        entity.setRoleName(roleName);
        entity.setStatus(BuildRecordEntity.STATUS_PENDING);
        entity.setConfigSnapshot(configSnapshot);
        entity.setCreatorId(creatorId);
        entity.setCreatorName(creatorName);

        this.save(entity);
        log.info("Created build record: {} for role {} by user {}", buildNo, roleId, creatorId);
        return entity;
    }

    /**
     * Transitions the record to BUILDING.
     */
    public void markBuilding(String buildNo) {
        updateStatus(buildNo, BuildRecordEntity.STATUS_BUILDING, null, null, null);
    }

    /**
     * Transitions the record to SUCCESS with the produced image tag and duration.
     */
    public void markSuccess(String buildNo, String imageTag, Long durationMs) {
        updateStatus(buildNo, BuildRecordEntity.STATUS_SUCCESS, imageTag, durationMs, null);
    }

    /**
     * Transitions the record to FAILED with the error message and duration.
     */
    public void markFailed(String buildNo, String errorMessage, Long durationMs) {
        updateStatus(buildNo, BuildRecordEntity.STATUS_FAILED, null, durationMs, errorMessage);
    }

    /**
     * Transitions the record to CANCELLED.
     */
    public void markCancelled(String buildNo) {
        updateStatus(buildNo, BuildRecordEntity.STATUS_CANCELLED, null, null, "Build cancelled by user");
    }

    /**
     * Stores the URL of the archived build log. No-op when the record is missing.
     */
    public void updateLogUrl(String buildNo, String logUrl) {
        BuildRecordEntity entity = getByBuildNo(buildNo);
        if (entity == null) {
            log.warn("Build record not found for log URL update: {}", buildNo);
            return;
        }
        entity.setLogUrl(logUrl);
        this.updateById(entity);
        log.info("Updated build record {} log URL", buildNo);
    }

    /**
     * Read-modify-write status update; optional fields are only written when
     * non-null so prior values are preserved. No-op when the record is missing.
     */
    public void updateStatus(String buildNo, String status, String imageTag,
                             Long durationMs, String errorMessage) {
        BuildRecordEntity entity = getByBuildNo(buildNo);
        if (entity == null) {
            log.warn("Build record not found: {}", buildNo);
            return;
        }

        entity.setStatus(status);
        if (imageTag != null) {
            entity.setImageTag(imageTag);
        }
        if (durationMs != null) {
            entity.setDurationMs(durationMs);
        }
        if (errorMessage != null) {
            entity.setErrorMessage(errorMessage);
        }

        this.updateById(entity);
        log.info("Updated build record {} status to {}", buildNo, status);
    }

    /**
     * Returns the most recent build record for the role, or null when none exists.
     */
    public BuildRecordEntity getLatestByRoleId(Long roleId) {
        LambdaQueryWrapper<BuildRecordEntity> wrapper = new LambdaQueryWrapper<>();
        wrapper.eq(BuildRecordEntity::getRoleId, roleId);
        wrapper.orderByDesc(BuildRecordEntity::getCreatedAt);
        wrapper.last("LIMIT 1");
        // getOne(wrapper, false) tolerates duplicates instead of throwing.
        return this.getOne(wrapper, false);
    }

    /**
     * Looks up a record by its build number.
     */
    public BuildRecordEntity getByBuildNo(String buildNo) {
        LambdaQueryWrapper<BuildRecordEntity> wrapper = new LambdaQueryWrapper<>();
        wrapper.eq(BuildRecordEntity::getBuildNo, buildNo);
        return this.getOne(wrapper);
    }

    /**
     * Returns the role's paginated build history, newest first.
     */
    public Map<String, Object> listByRoleId(Long roleId, int page, int pageSize) {
        Page<BuildRecordEntity> pageObj = new Page<>(page, pageSize);

        LambdaQueryWrapper<BuildRecordEntity> wrapper = new LambdaQueryWrapper<>();
        wrapper.eq(BuildRecordEntity::getRoleId, roleId);
        wrapper.orderByDesc(BuildRecordEntity::getCreatedAt);

        return toPageResponse(this.page(pageObj, wrapper));
    }

    /**
     * Returns recent build records across all roles, newest first, optionally
     * filtered by status.
     */
    public Map<String, Object> listRecent(int page, int pageSize, String status) {
        Page<BuildRecordEntity> pageObj = new Page<>(page, pageSize);

        LambdaQueryWrapper<BuildRecordEntity> wrapper = new LambdaQueryWrapper<>();
        if (status != null && !status.isEmpty()) {
            wrapper.eq(BuildRecordEntity::getStatus, status);
        }
        wrapper.orderByDesc(BuildRecordEntity::getCreatedAt);

        return toPageResponse(this.page(pageObj, wrapper));
    }

    /**
     * Builds the shared items+pagination response envelope from a query page.
     * Extracted because listByRoleId and listRecent previously duplicated this
     * construction verbatim.
     */
    private Map<String, Object> toPageResponse(Page<BuildRecordEntity> result) {
        List<Map<String, Object>> items = result.getRecords().stream()
                .map(this::toResponseMap)
                .collect(Collectors.toList());

        Map<String, Object> response = new HashMap<>();
        response.put("items", items);
        response.put("pagination", Map.of(
                "page", result.getCurrent(),
                "pageSize", result.getSize(),
                "total", result.getTotal(),
                "totalPages", result.getPages()
        ));
        return response;
    }

    /**
     * Flattens an entity into the API response map; ids and timestamps are
     * stringified, nullable fields pass through as null.
     */
    private Map<String, Object> toResponseMap(BuildRecordEntity entity) {
        Map<String, Object> map = new HashMap<>();
        map.put("id", entity.getId().toString());
        map.put("buildNo", entity.getBuildNo());
        map.put("roleId", entity.getRoleId() != null ? entity.getRoleId().toString() : null);
        map.put("roleName", entity.getRoleName());
        map.put("status", entity.getStatus());
        map.put("imageTag", entity.getImageTag());
        map.put("durationMs", entity.getDurationMs());
        map.put("errorMessage", entity.getErrorMessage());
        map.put("configSnapshot", entity.getConfigSnapshot());
        map.put("creatorId", entity.getCreatorId());
        map.put("creatorName", entity.getCreatorName());
        map.put("logUrl", entity.getLogUrl());
        map.put("createdAt", entity.getCreatedAt() != null ? entity.getCreatedAt().toString() : null);
        map.put("updatedAt", entity.getUpdatedAt() != null ? entity.getUpdatedAt().toString() : null);
        return map;
    }
}
package com.linkwork.service;

import com.linkwork.config.EnvConfig;
import com.linkwork.config.ImageBuildConfig;
import com.linkwork.model.dto.MergedConfig;
import com.linkwork.model.dto.ResourceConfig;
import com.linkwork.model.dto.ResourceSpec;
import com.linkwork.model.dto.ServiceBuildRequest;
import com.linkwork.model.enums.DeployMode;
import com.linkwork.model.enums.PodMode;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Service;

import java.util.Optional;

/**
 * Config merge service.
 * Merges request parameters with environment configuration into a MergedConfig.
 *
 * Design notes:
 * - Only the agent image is built; the runner is launched by the agent at runtime.
 * - Tokens go into buildEnvVars and are exported before build.sh executes.
 */
@Service
@Slf4j
public class ConfigMergeService {

    private final EnvConfig envConfig;
    private final QueueSelector queueSelector;
    private final ImageBuildConfig imageBuildConfig;

    public ConfigMergeService(EnvConfig envConfig, QueueSelector queueSelector,
                              ImageBuildConfig imageBuildConfig) {
        this.envConfig = envConfig;
        this.queueSelector = queueSelector;
        this.imageBuildConfig = imageBuildConfig;
    }

    /**
     * Merges the build request with environment defaults into one MergedConfig.
     *
     * @param request the incoming build request
     * @return the fully resolved configuration for this build
     */
    public MergedConfig merge(ServiceBuildRequest request) {
        log.info("Merging config for service {}", request.getServiceId());

        // 1. Decide the PodMode (explicit request > deploy-mode rule > default).
        PodMode podMode = decidePodMode(request);

        // 2. Decide the pod count (clamped; COMPOSE forces 1).
        int podCount = decidePodCount(request);

        // 3. Pick a scheduling queue — only meaningful in K8S deploy mode.
        String queueName = request.getDeployMode() == DeployMode.K8S
                ? queueSelector.selectQueue(request.getPriority(), false)
                : null;

        // 4. Merge resource specs: request values override the env defaults
        //    field by field (see mergeResources).
        ResourceSpec agentResources = mergeResources(
                request.getResourceConfig(), envConfig.getDefaultResources().getAgent());
        ResourceSpec runnerResources = mergeResources(
                request.getResourceConfig(), envConfig.getDefaultResources().getRunner());

        // 5. Agent image always comes from the environment default.
        String agentBaseImage = envConfig.getImages().getAgent();

        // 6. Runner image:
        //    - K8S + SIDECAR: request override or env default
        //    - otherwise: no runner container, so no image
        String runnerImage;
        if (request.getDeployMode() == DeployMode.K8S && podMode == PodMode.SIDECAR) {
            runnerImage = Optional.ofNullable(request.getRunnerBaseImage())
                    .orElse(envConfig.getImages().getRunner());
        } else {
            runnerImage = null; // no runner image needed outside sidecar mode
        }

        // 7. Assemble the merged configuration.
        return MergedConfig.builder()
                // Service identity
                .serviceId(request.getServiceId())
                .userId(request.getUserId())
                .roleId(request.getRoleId())
                // Mode configuration
                .deployMode(request.getDeployMode())
                .podMode(podMode)
                .podCount(podCount)
                // K8s configuration
                .namespace(envConfig.getCluster().getNamespace())
                .queueName(queueName)
                .priorityClassName(getPriorityClass(request.getPriority()))
                // Image configuration
                .agentImage(agentBaseImage)
                .runnerImage(runnerImage)
                .buildEnvVars(request.getBuildEnvVars())
                .agentBaseImage(agentBaseImage)
                .imageRegistry(request.getImageRegistry())
                .imagePullPolicy(imageBuildConfig.getImagePullPolicy())
                .imagePullSecret(imageBuildConfig.getImagePullSecret())
                // Agent bootstrap script
                .mainPyUrl(envConfig.getAgentBootstrap().getMainPyUrl())
                // File placement
                .filePlacement(envConfig.getFilePlacement())
                // Resources
                .agentResources(agentResources)
                .runnerResources(runnerResources)
                // Network endpoints
                .apiBaseUrl(envConfig.getNetwork().getApiBaseUrl())
                .wsBaseUrl(envConfig.getNetwork().getWsBaseUrl())
                .llmGatewayUrl(envConfig.getNetwork().getLlmGatewayUrl())
                .redisUrl(envConfig.getNetwork().getRedisUrl())
                // SSH is only exposed in sidecar mode
                .sshPort(podMode == PodMode.SIDECAR ? envConfig.getSsh().getPort() : null)
                // Agent startup (dual-container mode)
                .workstationId(request.getServiceId())
                // Workspace size limit in GiB (presumably — TODO confirm unit), default 10
                .workspaceSizeLimit(Optional.ofNullable(request.getWorkspaceSizeLimit()).orElse(10))
                // Optional task metadata
                .description(request.getDescription())
                // Callback configuration
                .callbackUrl(request.getCallbackUrl())
                // Fast-recovery scheduling hint
                .preferredNode(request.getPreferredNode())
                // OSS mount configuration
                .ossMount(envConfig.getOssMount())
                .build();
    }

    /**
     * Decides the PodMode: explicit request value wins; COMPOSE deploys are
     * forced to ALONE; otherwise the environment default applies.
     */
    private PodMode decidePodMode(ServiceBuildRequest request) {
        // 1. Explicitly specified by the caller.
        if (request.getPodMode() != null) {
            return request.getPodMode();
        }

        // 2. Compose deploys never get a sidecar.
        if (request.getDeployMode() == DeployMode.COMPOSE) {
            return PodMode.ALONE;
        }

        // 3. Fall back to the configured default.
        return envConfig.getPodModeRules().getDefaultMode();
    }

    /**
     * Decides the pod count: COMPOSE is always 1; otherwise the requested
     * count (default 4) clamped to the range 1..10.
     */
    private int decidePodCount(ServiceBuildRequest request) {
        // Compose mode does not support multiple pods.
        if (request.getDeployMode() == DeployMode.COMPOSE) {
            return 1;
        }

        // Default pod count is 4, clamped to 1..10.
        int count = Optional.ofNullable(request.getPodCount()).orElse(4);
        return Math.min(Math.max(count, 1), 10);
    }

    /**
     * Field-wise merge: each requested value overrides the corresponding
     * default; a null request means "use the defaults unchanged".
     */
    private ResourceSpec mergeResources(ResourceConfig requested, ResourceSpec defaultSpec) {
        if (requested == null) {
            return defaultSpec;
        }
        return ResourceSpec.builder()
                .cpuRequest(Optional.ofNullable(requested.getCpuRequest()).orElse(defaultSpec.getCpuRequest()))
                .cpuLimit(Optional.ofNullable(requested.getCpuLimit()).orElse(defaultSpec.getCpuLimit()))
                .memoryRequest(Optional.ofNullable(requested.getMemoryRequest()).orElse(defaultSpec.getMemoryRequest()))
                .memoryLimit(Optional.ofNullable(requested.getMemoryLimit()).orElse(defaultSpec.getMemoryLimit()))
                .build();
    }

    /**
     * Maps a numeric priority (default 50) onto a K8s priority class name:
     * >=90 critical, >=70 high, >=30 normal, else low.
     */
    private String getPriorityClass(Integer priority) {
        int p = Optional.ofNullable(priority).orElse(50);
        if (p >= 90) return "critical-priority";
        if (p >= 70) return "high-priority";
        if (p >= 30) return "normal-priority";
        return "low-priority";
    }
}
"low-priority"; + } +} diff --git a/back/src/main/java/com/linkwork/service/CronExpressionHelper.java b/back/src/main/java/com/linkwork/service/CronExpressionHelper.java new file mode 100644 index 0000000..56a958f --- /dev/null +++ b/back/src/main/java/com/linkwork/service/CronExpressionHelper.java @@ -0,0 +1,168 @@ +package com.linkwork.service; + +import com.linkwork.model.entity.CronJob; +import org.springframework.scheduling.support.CronExpression; +import org.springframework.stereotype.Component; +import org.springframework.util.StringUtils; + +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; + +@Component +public class CronExpressionHelper { + + public String normalizeScheduleType(String scheduleType) { + if (!StringUtils.hasText(scheduleType)) { + throw new IllegalArgumentException("scheduleType 不能为空"); + } + return scheduleType.trim().toLowerCase(Locale.ROOT); + } + + public String normalizeTimezone(String timezone) { + if (!StringUtils.hasText(timezone)) { + return "Asia/Shanghai"; + } + String tz = timezone.trim(); + try { + ZoneId.of(tz); + } catch (Exception e) { + throw new IllegalArgumentException("无效时区: " + timezone); + } + return tz; + } + + public void validateSchedule(String scheduleType, + String cronExpr, + Long intervalMs, + LocalDateTime runAt, + String timezone) { + String type = normalizeScheduleType(scheduleType); + String tz = normalizeTimezone(timezone); + LocalDateTime now = LocalDateTime.now(ZoneId.of(tz)); + + switch (type) { + case "cron" -> { + if (!StringUtils.hasText(cronExpr)) { + throw new IllegalArgumentException("scheduleType=cron 时 cronExpr 必填"); + } + try { + CronExpression expression = CronExpression.parse(cronExpr.trim()); + ZonedDateTime from = ZonedDateTime.of(now.withSecond(0).withNano(0), ZoneId.of(tz)); + ZonedDateTime next = expression.next(from); + if (next == null) { + throw new 
IllegalArgumentException("cronExpr 无法计算下一次触发时间"); + } + long delta = java.time.Duration.between(from, next).toMillis(); + if (delta < 60_000) { + throw new IllegalArgumentException("cron 表达式最小粒度为 1 分钟"); + } + } catch (IllegalArgumentException e) { + throw e; + } catch (Exception e) { + throw new IllegalArgumentException("cronExpr 不合法: " + e.getMessage()); + } + } + case "every" -> { + if (intervalMs == null) { + throw new IllegalArgumentException("scheduleType=every 时 intervalMs 必填"); + } + if (intervalMs < 60_000) { + throw new IllegalArgumentException("intervalMs 最小为 60000"); + } + } + case "at" -> { + if (runAt == null) { + throw new IllegalArgumentException("scheduleType=at 时 runAt 必填"); + } + if (!runAt.isAfter(now)) { + throw new IllegalArgumentException("runAt 必须是未来时间"); + } + } + default -> throw new IllegalArgumentException("不支持的 scheduleType: " + scheduleType); + } + } + + public LocalDateTime computeFirstFireTime(String scheduleType, + String cronExpr, + Long intervalMs, + LocalDateTime runAt, + String timezone) { + String type = normalizeScheduleType(scheduleType); + ZoneId zoneId = ZoneId.of(normalizeTimezone(timezone)); + LocalDateTime now = LocalDateTime.now(zoneId).withSecond(0).withNano(0); + return switch (type) { + case "cron" -> { + CronExpression expression = CronExpression.parse(cronExpr.trim()); + ZonedDateTime next = expression.next(ZonedDateTime.of(now, zoneId)); + yield next == null ? null : next.toLocalDateTime(); + } + case "every" -> now.plusNanos(intervalMs * 1_000_000); + case "at" -> runAt; + default -> throw new IllegalArgumentException("不支持的 scheduleType: " + scheduleType); + }; + } + + public LocalDateTime computeNextFireTime(CronJob job, LocalDateTime currentFireTime) { + String type = normalizeScheduleType(job.getScheduleType()); + ZoneId zoneId = ZoneId.of(normalizeTimezone(job.getTimezone())); + LocalDateTime base = currentFireTime == null ? 
LocalDateTime.now(zoneId).withSecond(0).withNano(0) : currentFireTime; + + return switch (type) { + case "cron" -> { + CronExpression expression = CronExpression.parse(job.getCronExpr().trim()); + ZonedDateTime next = expression.next(ZonedDateTime.of(base, zoneId)); + yield next == null ? null : next.toLocalDateTime(); + } + case "every" -> base.plusNanos(job.getIntervalMs() * 1_000_000); + case "at" -> null; + default -> null; + }; + } + + public List previewNextFireTimes(String scheduleType, + String cronExpr, + Long intervalMs, + LocalDateTime runAt, + String timezone, + int limit) { + String type = normalizeScheduleType(scheduleType); + ZoneId zoneId = ZoneId.of(normalizeTimezone(timezone)); + List times = new ArrayList<>(); + int size = Math.max(limit, 0); + + if (size == 0) { + return times; + } + + if ("at".equals(type)) { + if (runAt != null) { + times.add(runAt.toString()); + } + return times; + } + + LocalDateTime cursor = LocalDateTime.now(zoneId).withSecond(0).withNano(0); + if ("every".equals(type)) { + for (int i = 0; i < size; i++) { + cursor = cursor.plusNanos(intervalMs * 1_000_000); + times.add(cursor.toString()); + } + return times; + } + + CronExpression expression = CronExpression.parse(cronExpr.trim()); + ZonedDateTime zonedCursor = ZonedDateTime.of(cursor, zoneId); + for (int i = 0; i < size; i++) { + zonedCursor = expression.next(zonedCursor); + if (zonedCursor == null) { + break; + } + times.add(zonedCursor.toLocalDateTime().toString()); + } + return times; + } +} diff --git a/back/src/main/java/com/linkwork/service/CronJobExecutor.java b/back/src/main/java/com/linkwork/service/CronJobExecutor.java new file mode 100644 index 0000000..1b4aace --- /dev/null +++ b/back/src/main/java/com/linkwork/service/CronJobExecutor.java @@ -0,0 +1,120 @@ +package com.linkwork.service; + +import com.linkwork.mapper.CronJobRunMapper; +import com.linkwork.model.dto.TaskCreateRequest; +import com.linkwork.model.entity.CronJob; +import 
package com.linkwork.service;

import com.linkwork.mapper.CronJobRunMapper;
import com.linkwork.model.dto.TaskCreateRequest;
import com.linkwork.model.entity.CronJob;
import com.linkwork.model.entity.CronJobRun;
import com.linkwork.model.entity.Task;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Transactional;

import java.time.LocalDateTime;

/**
 * Executes cron jobs: records a run row, creates the task via TaskService,
 * and tracks the run's dispatch status.
 *
 * <p>Scheduled and manual triggers previously duplicated ~40 lines of
 * identical logic; both now delegate to one private {@link #dispatch} helper
 * and differ only in trigger type, planned fire time, source tag, and log
 * message.
 */
@Slf4j
@Component
@RequiredArgsConstructor
public class CronJobExecutor {

    private final CronJobRunMapper cronJobRunMapper;
    private final TaskService taskService;

    /**
     * Dispatches a run fired by the scheduler at its planned time.
     *
     * @param cronJob         the job to fire
     * @param plannedFireTime the schedule's planned fire time
     * @return the run, in DISPATCHED state
     */
    @Transactional
    public CronJobRun dispatchScheduled(CronJob cronJob, LocalDateTime plannedFireTime) {
        return dispatch(cronJob, "SCHEDULED", plannedFireTime, "cron-scheduler",
                "Cron 调度触发成功: cronJobId={}, runId={}, taskNo={}");
    }

    /**
     * Dispatches a run triggered manually by a user; the planned fire time is
     * the current moment.
     *
     * @param cronJob the job to fire
     * @return the run, in DISPATCHED state
     */
    @Transactional
    public CronJobRun dispatchManual(CronJob cronJob) {
        return dispatch(cronJob, "MANUAL", LocalDateTime.now(), "cron-manual-trigger",
                "Cron 手动触发成功: cronJobId={}, runId={}, taskNo={}");
    }

    /**
     * Shared dispatch flow: insert a PENDING run, create the task, then flip
     * the run to DISPATCHED. On failure the run is marked FAILED and the
     * exception is rethrown.
     */
    private CronJobRun dispatch(CronJob cronJob, String triggerType,
                                LocalDateTime plannedFireTime, String source,
                                String successLog) {
        CronJobRun run = initRun(cronJob, triggerType, plannedFireTime);
        cronJobRunMapper.insert(run);

        try {
            Task task = taskService.createTask(
                    buildTaskCreateRequest(cronJob),
                    cronJob.getCreatorId(),
                    cronJob.getCreatorName(),
                    source,
                    false,
                    "CRON",
                    cronJob.getId());

            // Persist dispatch info on a sparse update object (only changed columns).
            CronJobRun update = new CronJobRun();
            update.setId(run.getId());
            update.setTaskNo(task.getTaskNo());
            update.setStatus("DISPATCHED");
            update.setStartedAt(LocalDateTime.now());
            cronJobRunMapper.updateById(update);

            // Mirror the persisted fields onto the returned instance.
            run.setTaskNo(task.getTaskNo());
            run.setStatus("DISPATCHED");
            run.setStartedAt(update.getStartedAt());
            log.info(successLog, cronJob.getId(), run.getId(), task.getTaskNo());
            return run;
        } catch (Exception e) {
            // NOTE(review): this FAILED update and the initial insert run in the
            // same @Transactional scope; rethrowing marks the transaction
            // rollback-only, so the failure record is likely rolled back and
            // never persisted — consider writing it in a REQUIRES_NEW
            // transaction. Behavior kept as-is here; confirm intent.
            CronJobRun fail = new CronJobRun();
            fail.setId(run.getId());
            fail.setStatus("FAILED");
            fail.setFinishedAt(LocalDateTime.now());
            fail.setErrorMessage(e.getMessage());
            cronJobRunMapper.updateById(fail);
            throw e;
        }
    }

    /**
     * Builds a fresh PENDING run row for the job and trigger.
     */
    private CronJobRun initRun(CronJob cronJob, String triggerType, LocalDateTime plannedFireTime) {
        CronJobRun run = new CronJobRun();
        run.setCronJobId(cronJob.getId());
        run.setCreatorId(cronJob.getCreatorId());
        run.setRoleId(cronJob.getRoleId());
        run.setStatus("PENDING");
        run.setTriggerType(triggerType);
        run.setPlannedFireTime(plannedFireTime);
        run.setCreatedAt(LocalDateTime.now());
        return run;
    }

    /**
     * Maps the job's stored settings onto a task-creation request.
     */
    private TaskCreateRequest buildTaskCreateRequest(CronJob cronJob) {
        TaskCreateRequest request = new TaskCreateRequest();
        request.setRoleId(cronJob.getRoleId());
        request.setModelId(cronJob.getModelId());
        request.setPrompt(cronJob.getTaskContent());
        request.setFileIds(null);
        return request;
    }
}
com.linkwork.common.ResourceNotFoundException; +import com.linkwork.config.CronConfig; +import com.linkwork.mapper.CronJobMapper; +import com.linkwork.mapper.CronJobRunMapper; +import com.linkwork.mapper.RoleMapper; +import com.linkwork.model.dto.CronJobCreateRequest; +import com.linkwork.model.dto.CronJobResponse; +import com.linkwork.model.dto.CronJobRunResponse; +import com.linkwork.model.dto.CronJobToggleRequest; +import com.linkwork.model.dto.CronJobUpdateRequest; +import com.linkwork.model.entity.CronJob; +import com.linkwork.model.entity.CronJobRun; +import com.linkwork.model.entity.RoleEntity; +import com.linkwork.model.entity.Task; +import com.linkwork.model.enums.TaskStatus; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Service; +import org.springframework.transaction.annotation.Transactional; +import org.springframework.util.StringUtils; + +import java.time.Duration; +import java.time.LocalDateTime; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +@Slf4j +@Service +@RequiredArgsConstructor +public class CronJobService { + + private static final String NOTIFY_NONE = "none"; + + private final CronJobMapper cronJobMapper; + private final CronJobRunMapper cronJobRunMapper; + private final RoleMapper roleMapper; + private final CronExpressionHelper cronExpressionHelper; + private final CronConfig cronConfig; + + @Transactional + public CronJobResponse create(CronJobCreateRequest request, String creatorId, String creatorName) { + validateRoleVisible(request.getRoleId(), creatorId); + validateSchedule(request.getScheduleType(), request.getCronExpr(), request.getIntervalMs(), + request.getRunAt(), request.getTimezone()); + enforceQuota(creatorId, request.getRoleId()); + + RoleEntity role = roleMapper.selectById(request.getRoleId()); + CronJob job = new CronJob(); + job.setJobName(request.getJobName().trim()); + job.setCreatorId(creatorId); + 
job.setCreatorName(StringUtils.hasText(creatorName) ? creatorName : creatorId); + job.setRoleId(request.getRoleId()); + job.setRoleName(role.getName()); + job.setModelId(request.getModelId().trim()); + job.setFileIdsJson(null); + job.setScheduleType(cronExpressionHelper.normalizeScheduleType(request.getScheduleType())); + job.setCronExpr(StringUtils.hasText(request.getCronExpr()) ? request.getCronExpr().trim() : null); + job.setIntervalMs(request.getIntervalMs()); + job.setRunAt(request.getRunAt()); + job.setTimezone(cronExpressionHelper.normalizeTimezone(request.getTimezone())); + job.setTaskContent(request.getTaskContent().trim()); + job.setEnabled(1); + job.setDeleteAfterRun(Boolean.TRUE.equals(request.getDeleteAfterRun()) ? 1 : 0); + if ("at".equals(job.getScheduleType()) && request.getDeleteAfterRun() == null) { + job.setDeleteAfterRun(1); + } + job.setMaxRetry(normalizeMaxRetry(request.getMaxRetry())); + job.setConsecutiveFailures(0); + // Cron 通知策略临时下线:固定兼容值,实际结果通知由任务终态链路统一处理。 + job.setNotifyMode(NOTIFY_NONE); + job.setNotifyTarget(null); + job.setNextFireTime(cronExpressionHelper.computeFirstFireTime( + job.getScheduleType(), job.getCronExpr(), job.getIntervalMs(), job.getRunAt(), job.getTimezone())); + job.setTotalRuns(0); + job.setIsDeleted(0); + job.setCreatedAt(LocalDateTime.now()); + job.setUpdatedAt(LocalDateTime.now()); + + cronJobMapper.insert(job); + return toResponse(job, true); + } + + public Map listMine(String creatorId, Long roleId, Boolean enabled, String scheduleType, + String keyword, Integer page, Integer pageSize) { + int pageNum = page == null || page <= 0 ? 1 : page; + int size = pageSize == null || pageSize <= 0 ? 
20 : pageSize; + + Page pageReq = new Page<>(pageNum, size); + LambdaQueryWrapper wrapper = new LambdaQueryWrapper<>(); + wrapper.eq(CronJob::getIsDeleted, 0) + .eq(CronJob::getCreatorId, creatorId) + .orderByDesc(CronJob::getCreatedAt); + if (roleId != null) { + wrapper.eq(CronJob::getRoleId, roleId); + } + if (enabled != null) { + wrapper.eq(CronJob::getEnabled, enabled ? 1 : 0); + } + if (StringUtils.hasText(scheduleType)) { + wrapper.eq(CronJob::getScheduleType, cronExpressionHelper.normalizeScheduleType(scheduleType)); + } + if (StringUtils.hasText(keyword)) { + wrapper.like(CronJob::getJobName, keyword.trim()); + } + + Page result = cronJobMapper.selectPage(pageReq, wrapper); + List items = result.getRecords().stream().map(job -> toResponse(job, false)).toList(); + + Map pagination = new HashMap<>(); + pagination.put("page", result.getCurrent()); + pagination.put("pageSize", result.getSize()); + pagination.put("total", result.getTotal()); + pagination.put("totalPages", result.getPages()); + + Map data = new HashMap<>(); + data.put("items", items); + data.put("pagination", pagination); + return data; + } + + public CronJobResponse getDetail(Long id, String creatorId) { + CronJob job = getOwnedJob(id, creatorId); + return toResponse(job, true); + } + + @Transactional + public CronJobResponse update(Long id, CronJobUpdateRequest request, String creatorId) { + CronJob job = getOwnedJob(id, creatorId); + + validateRoleVisible(request.getRoleId(), creatorId); + validateSchedule(request.getScheduleType(), request.getCronExpr(), request.getIntervalMs(), + request.getRunAt(), request.getTimezone()); + + RoleEntity role = roleMapper.selectById(request.getRoleId()); + job.setJobName(request.getJobName().trim()); + job.setRoleId(request.getRoleId()); + job.setRoleName(role.getName()); + job.setModelId(request.getModelId().trim()); + job.setFileIdsJson(null); + job.setScheduleType(cronExpressionHelper.normalizeScheduleType(request.getScheduleType())); + 
job.setCronExpr(StringUtils.hasText(request.getCronExpr()) ? request.getCronExpr().trim() : null); + job.setIntervalMs(request.getIntervalMs()); + job.setRunAt(request.getRunAt()); + job.setTimezone(cronExpressionHelper.normalizeTimezone(request.getTimezone())); + job.setTaskContent(request.getTaskContent().trim()); + job.setDeleteAfterRun(Boolean.TRUE.equals(request.getDeleteAfterRun()) ? 1 : 0); + if ("at".equals(job.getScheduleType()) && request.getDeleteAfterRun() == null) { + job.setDeleteAfterRun(1); + } + job.setMaxRetry(normalizeMaxRetry(request.getMaxRetry())); + // Cron 通知策略临时下线:固定兼容值,实际结果通知由任务终态链路统一处理。 + job.setNotifyMode(NOTIFY_NONE); + job.setNotifyTarget(null); + job.setConsecutiveFailures(0); + if (job.getEnabled() != null && job.getEnabled() == 1) { + job.setNextFireTime(cronExpressionHelper.computeFirstFireTime( + job.getScheduleType(), job.getCronExpr(), job.getIntervalMs(), job.getRunAt(), job.getTimezone())); + } + job.setUpdatedAt(LocalDateTime.now()); + cronJobMapper.updateById(job); + return toResponse(job, true); + } + + @Transactional + public CronJobResponse toggle(Long id, CronJobToggleRequest request, String creatorId) { + CronJob job = getOwnedJob(id, creatorId); + boolean enabled = Boolean.TRUE.equals(request.getEnabled()); + job.setEnabled(enabled ? 
1 : 0); + if (enabled) { + cronExpressionHelper.validateSchedule(job.getScheduleType(), job.getCronExpr(), job.getIntervalMs(), + job.getRunAt(), job.getTimezone()); + job.setConsecutiveFailures(0); + job.setNextFireTime(cronExpressionHelper.computeFirstFireTime( + job.getScheduleType(), job.getCronExpr(), job.getIntervalMs(), job.getRunAt(), job.getTimezone())); + } else { + job.setNextFireTime(null); + } + job.setUpdatedAt(LocalDateTime.now()); + cronJobMapper.updateById(job); + return toResponse(job, true); + } + + @Transactional + public void delete(Long id, String creatorId) { + CronJob job = getOwnedJob(id, creatorId); + cronJobMapper.deleteById(job.getId()); + } + + public Map listRuns(Long cronJobId, String creatorId, Integer page, Integer pageSize) { + getOwnedJob(cronJobId, creatorId); + int pageNum = page == null || page <= 0 ? 1 : page; + int size = pageSize == null || pageSize <= 0 ? 20 : pageSize; + Page req = new Page<>(pageNum, size); + LambdaQueryWrapper wrapper = new LambdaQueryWrapper() + .eq(CronJobRun::getCronJobId, cronJobId) + .orderByDesc(CronJobRun::getCreatedAt); + Page result = cronJobRunMapper.selectPage(req, wrapper); + + List items = result.getRecords().stream().map(this::toRunResponse).toList(); + Map pagination = new HashMap<>(); + pagination.put("page", result.getCurrent()); + pagination.put("pageSize", result.getSize()); + pagination.put("total", result.getTotal()); + pagination.put("totalPages", result.getPages()); + + Map data = new HashMap<>(); + data.put("items", items); + data.put("pagination", pagination); + return data; + } + + public List findDueJobs(LocalDateTime threshold) { + return cronJobMapper.selectList(new LambdaQueryWrapper() + .eq(CronJob::getEnabled, 1) + .eq(CronJob::getIsDeleted, 0) + .isNotNull(CronJob::getNextFireTime) + .le(CronJob::getNextFireTime, threshold) + .orderByAsc(CronJob::getNextFireTime)); + } + + @Transactional + public void advanceAfterDispatch(CronJob job, LocalDateTime firedAt) { + CronJob 
update = new CronJob(); + update.setId(job.getId()); + update.setTotalRuns((job.getTotalRuns() == null ? 0 : job.getTotalRuns()) + 1); + update.setUpdatedAt(LocalDateTime.now()); + + LocalDateTime next = cronExpressionHelper.computeNextFireTime(job, firedAt); + if (next == null) { + update.setNextFireTime(null); + if (Objects.equals(job.getDeleteAfterRun(), 1) || "at".equals(job.getScheduleType())) { + update.setEnabled(0); + } + } else { + update.setNextFireTime(next); + } + + cronJobMapper.updateById(update); + trimRunHistory(job.getId()); + } + + @Transactional + public void recordDispatchFailure(CronJob job, String error) { + int failures = (job.getConsecutiveFailures() == null ? 0 : job.getConsecutiveFailures()) + 1; + CronJob update = new CronJob(); + update.setId(job.getId()); + update.setConsecutiveFailures(failures); + update.setLastRunTime(LocalDateTime.now()); + update.setLastRunStatus("FAILED"); + update.setUpdatedAt(LocalDateTime.now()); + if (failures >= normalizeMaxRetry(job.getMaxRetry())) { + update.setEnabled(0); + update.setNextFireTime(null); + } + cronJobMapper.updateById(update); + + if (failures >= normalizeMaxRetry(job.getMaxRetry())) { + log.info("Cron 连续失败已自动禁用: cronJobId={}, jobName={}, reason={}", job.getId(), job.getJobName(), truncate(error)); + } + } + + @Transactional + public void onTaskStatusChanged(Task task, TaskStatus status) { + if (task == null || !"CRON".equalsIgnoreCase(task.getSource()) || task.getCronJobId() == null) { + return; + } + + CronJobRun run = cronJobRunMapper.selectOne(new LambdaQueryWrapper() + .eq(CronJobRun::getTaskNo, task.getTaskNo()) + .orderByDesc(CronJobRun::getId) + .last("LIMIT 1")); + if (run == null) { + return; + } + + String mappedStatus = switch (status) { + case RUNNING -> "RUNNING"; + case COMPLETED -> "COMPLETED"; + case FAILED -> "FAILED"; + case ABORTED -> "ABORTED"; + default -> null; + }; + if (mappedStatus == null) { + return; + } + + if (isTerminal(run.getStatus())) { + return; + } + + 
CronJobRun update = new CronJobRun(); + update.setId(run.getId()); + update.setStatus(mappedStatus); + if ("RUNNING".equals(mappedStatus)) { + if (run.getStartedAt() == null) { + update.setStartedAt(LocalDateTime.now()); + } + cronJobRunMapper.updateById(update); + return; + } + + LocalDateTime now = LocalDateTime.now(); + update.setFinishedAt(now); + if (task.getDurationMs() != null && task.getDurationMs() > 0) { + update.setDurationMs(task.getDurationMs()); + } else if (run.getStartedAt() != null) { + update.setDurationMs(Duration.between(run.getStartedAt(), now).toMillis()); + } + if (status == TaskStatus.FAILED) { + update.setErrorMessage(extractTaskError(task)); + } + cronJobRunMapper.updateById(update); + + CronJob job = cronJobMapper.selectById(task.getCronJobId()); + if (job == null || job.getIsDeleted() != null && job.getIsDeleted() == 1) { + return; + } + + CronJob jobUpdate = new CronJob(); + jobUpdate.setId(job.getId()); + jobUpdate.setLastRunTime(now); + jobUpdate.setLastRunStatus(mappedStatus); + jobUpdate.setUpdatedAt(now); + + if (status == TaskStatus.COMPLETED) { + jobUpdate.setConsecutiveFailures(0); + } else if (status == TaskStatus.FAILED) { + int failures = (job.getConsecutiveFailures() == null ? 
0 : job.getConsecutiveFailures()) + 1; + jobUpdate.setConsecutiveFailures(failures); + if (failures >= normalizeMaxRetry(job.getMaxRetry())) { + jobUpdate.setEnabled(0); + jobUpdate.setNextFireTime(null); + } + } + cronJobMapper.updateById(jobUpdate); + + log.debug("Cron 任务终态已同步: cronJobId={}, taskNo={}, status={}", job.getId(), task.getTaskNo(), mappedStatus); + } + + @Transactional + public void disableByRoleId(Long roleId, String reason) { + if (roleId == null) { + return; + } + List jobs = cronJobMapper.selectList(new LambdaQueryWrapper() + .eq(CronJob::getRoleId, roleId) + .eq(CronJob::getIsDeleted, 0) + .eq(CronJob::getEnabled, 1)); + + for (CronJob job : jobs) { + CronJob update = new CronJob(); + update.setId(job.getId()); + update.setEnabled(0); + update.setNextFireTime(null); + update.setUpdatedAt(LocalDateTime.now()); + cronJobMapper.updateById(update); + log.info("Cron 任务因岗位变更被禁用: cronJobId={}, jobName={}, reason={}", job.getId(), job.getJobName(), reason); + } + } + + public CronJob getOwnedJob(Long id, String creatorId) { + CronJob job = cronJobMapper.selectById(id); + if (job == null || (job.getIsDeleted() != null && job.getIsDeleted() == 1)) { + throw new ResourceNotFoundException("定时任务不存在: id=" + id); + } + if (!Objects.equals(job.getCreatorId(), creatorId)) { + throw new ForbiddenOperationException("无权限操作该定时任务"); + } + return job; + } + + public CronJobResponse toResponse(CronJob job, boolean includePreview) { + CronJobResponse response = new CronJobResponse(); + response.setId(job.getId()); + response.setJobName(job.getJobName()); + response.setCreatorId(job.getCreatorId()); + response.setCreatorName(job.getCreatorName()); + response.setRoleId(job.getRoleId()); + response.setRoleName(job.getRoleName()); + response.setModelId(job.getModelId()); + response.setScheduleType(job.getScheduleType()); + response.setCronExpr(job.getCronExpr()); + response.setIntervalMs(job.getIntervalMs()); + response.setRunAt(job.getRunAt()); + 
response.setTimezone(job.getTimezone()); + response.setTaskContent(job.getTaskContent()); + response.setEnabled(job.getEnabled() != null && job.getEnabled() == 1); + response.setDeleteAfterRun(job.getDeleteAfterRun() != null && job.getDeleteAfterRun() == 1); + response.setMaxRetry(job.getMaxRetry()); + response.setConsecutiveFailures(job.getConsecutiveFailures()); + response.setNextFireTime(job.getNextFireTime()); + response.setNotifyMode(job.getNotifyMode()); + response.setNotifyTarget(job.getNotifyTarget()); + response.setTotalRuns(job.getTotalRuns()); + response.setLastRunTime(job.getLastRunTime()); + response.setLastRunStatus(job.getLastRunStatus()); + response.setCreatedAt(job.getCreatedAt()); + response.setUpdatedAt(job.getUpdatedAt()); + + if (includePreview) { + response.setNextFireTimes(cronExpressionHelper.previewNextFireTimes( + job.getScheduleType(), job.getCronExpr(), job.getIntervalMs(), job.getRunAt(), job.getTimezone(), 5)); + } + return response; + } + + public CronJobRunResponse toRunResponse(CronJobRun run) { + CronJobRunResponse response = new CronJobRunResponse(); + response.setId(run.getId()); + response.setCronJobId(run.getCronJobId()); + response.setTaskNo(run.getTaskNo()); + response.setCreatorId(run.getCreatorId()); + response.setRoleId(run.getRoleId()); + response.setStatus(run.getStatus()); + response.setTriggerType(run.getTriggerType()); + response.setPlannedFireTime(run.getPlannedFireTime()); + response.setStartedAt(run.getStartedAt()); + response.setFinishedAt(run.getFinishedAt()); + response.setDurationMs(run.getDurationMs()); + response.setErrorMessage(run.getErrorMessage()); + response.setCreatedAt(run.getCreatedAt()); + return response; + } + + public List previewSchedule(String scheduleType, String cronExpr, Long intervalMs, + LocalDateTime runAt, String timezone, Integer limit) { + cronExpressionHelper.validateSchedule(scheduleType, cronExpr, intervalMs, runAt, timezone); + int size = limit == null || limit <= 0 ? 
5 : Math.min(limit, 10); + return cronExpressionHelper.previewNextFireTimes( + scheduleType, cronExpr, intervalMs, runAt, timezone, size); + } + + private void enforceQuota(String creatorId, Long roleId) { + long userCount = cronJobMapper.selectCount(new LambdaQueryWrapper() + .eq(CronJob::getIsDeleted, 0) + .eq(CronJob::getCreatorId, creatorId)); + if (userCount >= cronConfig.getMaxJobsPerUser()) { + throw new IllegalArgumentException("已达到每用户定时任务上限: " + cronConfig.getMaxJobsPerUser()); + } + + long roleCount = cronJobMapper.selectCount(new LambdaQueryWrapper() + .eq(CronJob::getIsDeleted, 0) + .eq(CronJob::getRoleId, roleId)); + if (roleCount >= cronConfig.getMaxJobsPerRole()) { + throw new IllegalArgumentException("已达到每岗位定时任务上限: " + cronConfig.getMaxJobsPerRole()); + } + } + + private void validateRoleVisible(Long roleId, String creatorId) { + RoleEntity role = roleMapper.selectById(roleId); + if (role == null || Boolean.TRUE.equals(role.getIsDeleted())) { + throw new IllegalArgumentException("岗位不存在: roleId=" + roleId); + } + if (!"active".equalsIgnoreCase(role.getStatus())) { + throw new IllegalArgumentException("岗位不可用: status=" + role.getStatus()); + } + boolean visible = Boolean.TRUE.equals(role.getIsPublic()) || Objects.equals(role.getCreatorId(), creatorId); + if (!visible) { + throw new ForbiddenOperationException("当前用户无权访问该岗位"); + } + } + + private void validateSchedule(String scheduleType, String cronExpr, Long intervalMs, + LocalDateTime runAt, String timezone) { + cronExpressionHelper.validateSchedule(scheduleType, cronExpr, intervalMs, runAt, timezone); + } + + private int normalizeMaxRetry(Integer maxRetry) { + if (maxRetry == null) { + return 3; + } + if (maxRetry < 1 || maxRetry > 20) { + throw new IllegalArgumentException("maxRetry 取值范围为 1~20"); + } + return maxRetry; + } + + private boolean isTerminal(String status) { + return "COMPLETED".equals(status) || "FAILED".equals(status) || "ABORTED".equals(status) || "SKIPPED".equals(status); + } + + 
private void trimRunHistory(Long cronJobId) { + int maxRuns = cronConfig.getMaxRunsPerJob(); + if (maxRuns <= 0) { + return; + } + List runs = cronJobRunMapper.selectList(new LambdaQueryWrapper() + .eq(CronJobRun::getCronJobId, cronJobId) + .orderByDesc(CronJobRun::getCreatedAt)); + if (runs.size() <= maxRuns) { + return; + } + List removeIds = runs.stream().skip(maxRuns).map(CronJobRun::getId).toList(); + for (Long id : removeIds) { + cronJobRunMapper.deleteById(id); + } + } + + private String truncate(String text) { + if (!StringUtils.hasText(text)) { + return "unknown"; + } + return text.length() > 200 ? text.substring(0, 200) + "..." : text; + } + + private String extractTaskError(Task task) { + if (task == null || !StringUtils.hasText(task.getReportJson())) { + return "unknown"; + } + return truncate(task.getReportJson()); + } +} diff --git a/back/src/main/java/com/linkwork/service/CronScheduler.java b/back/src/main/java/com/linkwork/service/CronScheduler.java new file mode 100644 index 0000000..a03c472 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/CronScheduler.java @@ -0,0 +1,56 @@ +package com.linkwork.service; + +import com.linkwork.config.CronConfig; +import com.linkwork.model.entity.CronJob; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.scheduling.annotation.Scheduled; +import org.springframework.stereotype.Component; + +import java.time.LocalDateTime; +import java.util.List; + +@Slf4j +@Component +@RequiredArgsConstructor +public class CronScheduler { + + private final CronConfig cronConfig; + private final DistributedLockService distributedLockService; + private final CronJobService cronJobService; + private final CronJobExecutor cronJobExecutor; + + @Scheduled(fixedDelayString = "${robot.cron.scan-interval-ms:60000}") + public void scanAndDispatch() { + if (!cronConfig.isEnabled()) { + return; + } + + String lockKey = cronConfig.getLockKey(); + String lockValue = 
distributedLockService.tryAcquireLockByKey(lockKey, cronConfig.getLockTtlSeconds(), 1); + if (lockValue == null) { + return; + } + + try { + LocalDateTime threshold = LocalDateTime.now().plusNanos(cronConfig.getDispatchLeadMs() * 1_000_000); + List dueJobs = cronJobService.findDueJobs(threshold); + if (dueJobs.isEmpty()) { + return; + } + + for (CronJob job : dueJobs) { + LocalDateTime firedAt = job.getNextFireTime(); + try { + cronJobExecutor.dispatchScheduled(job, firedAt); + cronJobService.advanceAfterDispatch(job, firedAt); + } catch (Exception e) { + log.error("Cron 调度执行失败: cronJobId={}, error={}", job.getId(), e.getMessage(), e); + cronJobService.recordDispatchFailure(job, e.getMessage()); + } + } + } finally { + distributedLockService.releaseLockByKey(lockKey, lockValue); + } + } +} diff --git a/back/src/main/java/com/linkwork/service/DistributedLockService.java b/back/src/main/java/com/linkwork/service/DistributedLockService.java new file mode 100644 index 0000000..b5b534c --- /dev/null +++ b/back/src/main/java/com/linkwork/service/DistributedLockService.java @@ -0,0 +1,246 @@ +package com.linkwork.service; + +import lombok.extern.slf4j.Slf4j; +import org.springframework.data.redis.core.StringRedisTemplate; +import org.springframework.data.redis.core.script.DefaultRedisScript; +import org.springframework.lang.Nullable; +import org.springframework.stereotype.Component; + +import java.util.Collections; +import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.ReentrantLock; + +/** + * Redis 分布式锁服务 + * + * 用于多实例部署时,保证操作原子性: + * - 扩缩容操作:锁键 scale:lock:{serviceId} + * - OSS 目录操作:锁键 oss:lock:{serviceId}:{userId} + * + * 支持降级:Redis 不可用时自动降级为本地锁(用于测试环境) + */ +@Component +@Slf4j +public class DistributedLockService { + + private final StringRedisTemplate redisTemplate; + + /** + * 锁 Key 前缀 + */ + private static final String LOCK_PREFIX = "scale:lock:"; + + /** + * 锁超时时间(秒)- 防止死锁 + 
*/ + private static final int LOCK_TIMEOUT_SECONDS = 30; + + /** + * 获取锁等待时间(秒) + */ + private static final int LOCK_WAIT_SECONDS = 35; + + /** + * Lua 脚本:原子释放锁(只释放自己持有的锁) + */ + private static final String RELEASE_LOCK_SCRIPT = + "if redis.call('get', KEYS[1]) == ARGV[1] then " + + " return redis.call('del', KEYS[1]) " + + "else " + + " return 0 " + + "end"; + + /** + * 本地锁(用于降级,仅在 Redis 不可用时使用) + */ + private final ConcurrentHashMap localLocks = new ConcurrentHashMap<>(); + + /** + * 本地锁标识前缀 + */ + private static final String LOCAL_LOCK_PREFIX = "LOCAL:"; + + /** + * Redis 是否可用 + */ + private volatile boolean redisAvailable = true; + + public DistributedLockService(@Nullable StringRedisTemplate redisTemplate) { + this.redisTemplate = redisTemplate; + if (redisTemplate == null) { + this.redisAvailable = false; + log.warn("Redis not configured, using local locks only (not suitable for multi-instance deployment)"); + } + } + + /** + * 尝试获取扩缩容分布式锁(按 serviceId) + * + * @param serviceId 服务 ID + * @return lockValue 如果获取成功返回锁标识(用于释放),失败返回 null + */ + public String tryAcquireLock(String serviceId) { + return tryAcquireLockByKey(LOCK_PREFIX + serviceId); + } + + /** + * 尝试获取分布式锁(通用方法,支持自定义锁键) + * + * @param fullKey 完整的锁键(如 "oss:lock:svc1:user1") + * @return lockValue 如果获取成功返回锁标识(用于释放),失败返回 null + */ + public String tryAcquireLockByKey(String fullKey) { + return tryAcquireLockByKey(fullKey, LOCK_TIMEOUT_SECONDS, LOCK_WAIT_SECONDS); + } + + /** + * 尝试获取分布式锁(支持自定义超时时间) + */ + public String tryAcquireLockByKey(String fullKey, int lockTimeoutSeconds, int lockWaitSeconds) { + // 如果 Redis 不可用,降级为本地锁 + if (!redisAvailable || redisTemplate == null) { + return tryAcquireLocalLock(fullKey, lockWaitSeconds); + } + + String lockValue = UUID.randomUUID().toString(); + + long startTime = System.currentTimeMillis(); + long waitMillis = lockWaitSeconds * 1000L; + + while (System.currentTimeMillis() - startTime < waitMillis) { + try { + // SET NX EX:不存在则设置,带过期时间 + Boolean success = 
redisTemplate.opsForValue() + .setIfAbsent(fullKey, lockValue, lockTimeoutSeconds, TimeUnit.SECONDS); + + if (Boolean.TRUE.equals(success)) { + log.debug("Acquired distributed lock key={}, lockValue={}", fullKey, lockValue); + return lockValue; + } + + // 等待 100ms 后重试 + Thread.sleep(100); + + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + log.warn("Interrupted while waiting for lock key={}", fullKey); + return null; + } catch (Exception e) { + // Redis 连接异常,降级为本地锁 + log.warn("Redis error, falling back to local lock key={}: {}", fullKey, e.getMessage()); + redisAvailable = false; + return tryAcquireLocalLock(fullKey, lockWaitSeconds); + } + } + + log.warn("Failed to acquire lock key={} within {} seconds", fullKey, lockWaitSeconds); + return null; + } + + /** + * 获取本地锁(降级方案) + */ + private String tryAcquireLocalLock(String key, int lockWaitSeconds) { + ReentrantLock lock = localLocks.computeIfAbsent(key, k -> new ReentrantLock(true)); + try { + boolean acquired = lock.tryLock(lockWaitSeconds, TimeUnit.SECONDS); + if (acquired) { + String lockValue = LOCAL_LOCK_PREFIX + UUID.randomUUID().toString(); + log.debug("Acquired local lock key={}, lockValue={}", key, lockValue); + return lockValue; + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + log.warn("Interrupted while waiting for local lock key={}", key); + } + return null; + } + + /** + * 释放扩缩容分布式锁(按 serviceId) + * + * @param serviceId 服务 ID + * @param lockValue 锁标识(tryAcquireLock 返回的值) + */ + public void releaseLock(String serviceId, String lockValue) { + releaseLockByKey(LOCK_PREFIX + serviceId, lockValue); + } + + /** + * 释放分布式锁(通用方法,支持自定义锁键) + * + * 使用 Lua 脚本保证原子性:只有锁的持有者才能释放 + * + * @param fullKey 完整的锁键 + * @param lockValue 锁标识(tryAcquireLockByKey 返回的值) + */ + public void releaseLockByKey(String fullKey, String lockValue) { + if (lockValue == null) { + return; + } + + // 如果是本地锁,释放本地锁 + if (lockValue.startsWith(LOCAL_LOCK_PREFIX)) { + 
releaseLocalLock(fullKey); + return; + } + + // 释放 Redis 分布式锁 + if (redisTemplate == null) { + return; + } + + try { + Long result = redisTemplate.execute( + new DefaultRedisScript<>(RELEASE_LOCK_SCRIPT, Long.class), + Collections.singletonList(fullKey), + lockValue + ); + + if (result != null && result == 1) { + log.debug("Released distributed lock key={}", fullKey); + } else { + log.warn("Lock key={} was not held by this instance or already expired", fullKey); + } + } catch (Exception e) { + log.error("Error releasing lock key={}: {}", fullKey, e.getMessage()); + } + } + + /** + * 释放本地锁 + */ + private void releaseLocalLock(String key) { + ReentrantLock lock = localLocks.get(key); + if (lock != null && lock.isHeldByCurrentThread()) { + lock.unlock(); + log.debug("Released local lock key={}", key); + } + } + + /** + * 检查扩缩容锁是否存在(用于测试/诊断) + * + * @param serviceId 服务 ID + * @return true 如果锁存在 + */ + public boolean isLocked(String serviceId) { + return isLockedByKey(LOCK_PREFIX + serviceId); + } + + /** + * 检查指定锁键是否存在(用于测试/诊断) + * + * @param fullKey 完整的锁键 + * @return true 如果锁存在 + */ + public boolean isLockedByKey(String fullKey) { + if (redisTemplate == null) { + ReentrantLock lock = localLocks.get(fullKey); + return lock != null && lock.isLocked(); + } + return Boolean.TRUE.equals(redisTemplate.hasKey(fullKey)); + } +} diff --git a/back/src/main/java/com/linkwork/service/DockerComposeGenerator.java b/back/src/main/java/com/linkwork/service/DockerComposeGenerator.java new file mode 100644 index 0000000..3e65dcf --- /dev/null +++ b/back/src/main/java/com/linkwork/service/DockerComposeGenerator.java @@ -0,0 +1,115 @@ +package com.linkwork.service; + +import com.linkwork.model.dto.MergedConfig; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Component; +import org.springframework.util.StringUtils; + +/** + * Docker Compose 构建包生成器 + * + * 仅支持 Alone 模式,生成用户可本地构建和启动的 docker-compose.yaml。 + * 镜像由用户在本地通过 docker compose up --build 构建,无需服务端推送。 + */ 
+@Component +@Slf4j +public class DockerComposeGenerator { + + /** + * 生成 docker-compose.yaml 内容 + * + * @param config 融合后的配置 + * @return docker-compose.yaml 字符串 + */ + public String generateComposeYaml(MergedConfig config) { + log.info("Generating Docker Compose YAML for service {}", config.getServiceId()); + + StringBuilder yaml = new StringBuilder(); + yaml.append(generateHeader(config)); + yaml.append(generateServices(config)); + yaml.append(generateVolumes()); + return yaml.toString(); + } + + private String generateHeader(MergedConfig config) { + return String.format(""" +# docker-compose.yaml - AI Worker Service +# Service ID: %s +# User ID: %s +# +# 使用方式: +# 1. 首次启动(构建镜像 + 启动容器): +# docker compose up --build -d +# 2. 查看日志: +# docker compose logs -f agent +# 3. 停止服务: +# docker compose down +# 4. 后续启动(已构建过镜像,无需重新构建): +# docker compose up -d +# +# 注意: +# - 首次构建需要拉取基础镜像和安装依赖,耗时较长(约 5-10 分钟) +# - 需要能访问 docker.momo.com 拉取基础镜像 +# - 需要能访问 git.wemomo.com 克隆 SDK 仓库 + +""", config.getServiceId(), config.getUserId()); + } + + private String generateServices(MergedConfig config) { + String workstationId = config.getWorkstationId() != null + ? 
config.getWorkstationId() : config.getServiceId(); + + StringBuilder sb = new StringBuilder(); + sb.append("services:\n"); + sb.append(" agent:\n"); + sb.append(" build:\n"); + sb.append(" context: .\n"); + sb.append(" dockerfile: Dockerfile\n"); + sb.append(String.format(" image: ai-worker-%s:latest\n", config.getServiceId())); + sb.append(String.format(" container_name: ai-worker-%s\n", config.getServiceId())); + sb.append(" user: root\n"); + sb.append(" command: [\"/opt/agent/start-single.sh\"]\n"); + sb.append(" environment:\n"); + + appendEnv(sb, "WORKSTATION_ID", workstationId); + appendEnv(sb, "REDIS_URL", config.getRedisUrl()); + appendEnv(sb, "CONFIG_FILE", "/opt/agent/config.json"); + appendEnv(sb, "IDLE_TIMEOUT", "86400"); + appendEnv(sb, "SERVICE_ID", config.getServiceId()); + appendEnv(sb, "USER_ID", config.getUserId()); + appendEnv(sb, "API_BASE_URL", config.getApiBaseUrl()); + appendEnv(sb, "WS_BASE_URL", config.getWsBaseUrl()); + appendEnv(sb, "LLM_GATEWAY_URL", config.getLlmGatewayUrl()); + if (config.getRoleId() != null) { + appendEnv(sb, "ROLE_ID", String.valueOf(config.getRoleId())); + } + + sb.append(" volumes:\n"); + sb.append(" - workspace:/workspace\n"); + sb.append(" restart: \"no\"\n"); + sb.append(" deploy:\n"); + sb.append(" resources:\n"); + sb.append(" limits:\n"); + sb.append(String.format(" cpus: '%s'\n", config.getAgentResources().getCpuLimit())); + sb.append(String.format(" memory: %s\n", config.getAgentResources().getMemoryLimit())); + sb.append(" reservations:\n"); + sb.append(String.format(" cpus: '%s'\n", config.getAgentResources().getCpuRequest())); + sb.append(String.format(" memory: %s\n", config.getAgentResources().getMemoryRequest())); + + return sb.toString(); + } + + private void appendEnv(StringBuilder sb, String key, String value) { + if (StringUtils.hasText(value)) { + sb.append(String.format(" - %s=%s\n", key, value)); + } + } + + private String generateVolumes() { + return """ + +volumes: + workspace: +"""; + } +} 
// ==== diff --git a/back/src/main/java/com/linkwork/service/FileNodeService.java (new file, index 0000000..37cc9a5) ====
package com.linkwork.service;

import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.linkwork.common.FileConflictException;
import com.linkwork.common.ForbiddenOperationException;
import com.linkwork.common.ResourceNotFoundException;
import com.linkwork.mapper.FileNodeMapper;
import com.linkwork.mapper.WorkspaceFileMapper;
import com.linkwork.model.dto.CreateFolderRequest;
import com.linkwork.model.dto.FileNodeResponse;
import com.linkwork.model.entity.FileNodeEntity;
import com.linkwork.model.entity.WorkspaceFile;
import com.linkwork.service.memory.MemoryService;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.util.StringUtils;

import java.time.LocalDateTime;
import java.util.*;
import java.util.stream.Collectors;

/**
 * Virtual file-tree service: manages {@link FileNodeEntity} rows (DIR/FILE nodes)
 * that organize {@link WorkspaceFile} records into per-user, per-space folder trees.
 *
 * <p>Deletion is a soft delete: nodes get a {@code deletedAt} timestamp and their
 * name is rewritten to a tombstone so a live node with the same name can be
 * recreated without violating name uniqueness.
 */
@Slf4j
@Service
@RequiredArgsConstructor
public class FileNodeService {

    // Maximum DB column length for a node name; tombstone names are trimmed to fit.
    private static final int NODE_NAME_MAX_LENGTH = 512;
    // Marker inserted into soft-deleted node names to free the original name.
    private static final String DELETED_NAME_MARKER = "__deleted__";

    private final FileNodeMapper fileNodeMapper;
    private final WorkspaceFileMapper workspaceFileMapper;
    private final NfsStorageService nfsStorageService;

    // Optional: memory indexing may be disabled, in which case index cleanup is skipped.
    @Autowired(required = false)
    private MemoryService memoryService;

    /**
     * Creates a directory node under the given parent (root when parentId is blank).
     *
     * @throws IllegalArgumentException   on blank name or invalid space/parent
     * @throws FileConflictException      when a sibling with the same name exists
     */
    public FileNodeResponse createFolder(CreateFolderRequest request, String userId) {
        validateSpaceType(request.getSpaceType(), request.getWorkstationId());
        String normalizedSpace = request.getSpaceType().toUpperCase(Locale.ROOT);

        if (!StringUtils.hasText(request.getName()) || request.getName().trim().isEmpty()) {
            throw new IllegalArgumentException("目录名称不能为空");
        }
        String folderName = request.getName().trim();

        validateParentId(request.getParentId(), userId, normalizedSpace, request.getWorkstationId());

        FileNodeEntity existing = findSameNameNode(userId, normalizedSpace,
                request.getWorkstationId(), request.getParentId(), folderName);
        if (existing != null) {
            throw new FileConflictException("目标目录已存在同名节点",
                    existing.getFileId() != null ? existing.getFileId() : existing.getNodeId(),
                    existing.getName(), existing.getEntryType(),
                    null, existing.getUpdatedAt());
        }

        FileNodeEntity node = new FileNodeEntity();
        node.setNodeId(UUID.randomUUID().toString().replace("-", ""));
        node.setParentId(request.getParentId());
        node.setEntryType("DIR");
        node.setName(folderName);
        node.setSpaceType(normalizedSpace);
        // USER-space nodes carry no workstation id.
        node.setWorkstationId("WORKSTATION".equals(normalizedSpace) ? request.getWorkstationId() : null);
        node.setUserId(userId);
        node.setCreatedAt(LocalDateTime.now());
        node.setUpdatedAt(LocalDateTime.now());

        fileNodeMapper.insert(node);
        return toResponse(node, false);
    }

    /**
     * Lists the direct children of a directory (root when parentId is blank),
     * directories first, then by name. File nodes are enriched with size/type/
     * parse-status from the backing {@link WorkspaceFile}, and each directory
     * gets a {@code hasChildren} flag computed in one batched query.
     */
    public List<FileNodeResponse> listChildren(String spaceType, String workstationId,
                                               String parentId, String userId) {
        validateSpaceType(spaceType, workstationId);
        String normalizedSpace = spaceType.toUpperCase(Locale.ROOT);

        LambdaQueryWrapper<FileNodeEntity> wrapper = new LambdaQueryWrapper<FileNodeEntity>()
                .eq(FileNodeEntity::getUserId, userId)
                .eq(FileNodeEntity::getSpaceType, normalizedSpace)
                .isNull(FileNodeEntity::getDeletedAt)
                .orderByAsc(FileNodeEntity::getEntryType)
                .orderByAsc(FileNodeEntity::getName);

        if ("WORKSTATION".equals(normalizedSpace)) {
            wrapper.eq(FileNodeEntity::getWorkstationId, workstationId);
        } else {
            wrapper.isNull(FileNodeEntity::getWorkstationId);
        }

        if (StringUtils.hasText(parentId)) {
            wrapper.eq(FileNodeEntity::getParentId, parentId);
        } else {
            wrapper.isNull(FileNodeEntity::getParentId);
        }

        List<FileNodeEntity> nodes = fileNodeMapper.selectList(wrapper);

        Set<String> dirNodeIds = nodes.stream()
                .filter(n -> "DIR".equals(n.getEntryType()))
                .map(FileNodeEntity::getNodeId)
                .collect(Collectors.toSet());

        // Single grouped query instead of one existence check per directory.
        Set<String> dirsWithChildren = new HashSet<>();
        if (!dirNodeIds.isEmpty()) {
            LambdaQueryWrapper<FileNodeEntity> childCheck = new LambdaQueryWrapper<FileNodeEntity>()
                    .in(FileNodeEntity::getParentId, dirNodeIds)
                    .isNull(FileNodeEntity::getDeletedAt)
                    .select(FileNodeEntity::getParentId)
                    .groupBy(FileNodeEntity::getParentId);
            List<FileNodeEntity> childResults = fileNodeMapper.selectList(childCheck);
            childResults.forEach(c -> dirsWithChildren.add(c.getParentId()));
        }

        Set<String> fileIds = nodes.stream()
                .filter(n -> "FILE".equals(n.getEntryType()) && StringUtils.hasText(n.getFileId()))
                .map(FileNodeEntity::getFileId)
                .collect(Collectors.toSet());

        // Batch-load backing file records for enrichment.
        Map<String, WorkspaceFile> fileMap = new HashMap<>();
        if (!fileIds.isEmpty()) {
            LambdaQueryWrapper<WorkspaceFile> fileWrapper = new LambdaQueryWrapper<WorkspaceFile>()
                    .in(WorkspaceFile::getFileId, fileIds)
                    .isNull(WorkspaceFile::getDeletedAt);
            workspaceFileMapper.selectList(fileWrapper)
                    .forEach(f -> fileMap.put(f.getFileId(), f));
        }

        return nodes.stream().map(node -> {
            boolean hasChildren = dirsWithChildren.contains(node.getNodeId());
            FileNodeResponse resp = toResponse(node, hasChildren);
            if ("FILE".equals(node.getEntryType()) && node.getFileId() != null) {
                WorkspaceFile rf = fileMap.get(node.getFileId());
                if (rf != null) {
                    resp.setFileSize(rf.getFileSize());
                    resp.setFileType(rf.getFileType());
                    resp.setParseStatus(rf.getParseStatus());
                    resp.setMemoryIndexStatus(rf.getMemoryIndexStatus());
                }
            }
            return resp;
        }).toList();
    }

    /**
     * Inserts a FILE node referencing an already-persisted workspace file.
     * Called by upload/copy flows after the file record exists.
     */
    public FileNodeEntity createFileNode(String fileName, String spaceType, String workstationId,
                                         String userId, String fileId, String parentId) {
        validateParentId(parentId, userId, spaceType, workstationId);

        FileNodeEntity node = new FileNodeEntity();
        node.setNodeId(UUID.randomUUID().toString().replace("-", ""));
        node.setParentId(parentId);
        node.setEntryType("FILE");
        node.setName(fileName);
        node.setSpaceType(spaceType.toUpperCase(Locale.ROOT));
        node.setWorkstationId("WORKSTATION".equals(spaceType.toUpperCase(Locale.ROOT)) ? workstationId : null);
        node.setUserId(userId);
        node.setFileId(fileId);
        node.setCreatedAt(LocalDateTime.now());
        node.setUpdatedAt(LocalDateTime.now());
        fileNodeMapper.insert(node);
        return node;
    }

    /**
     * Renames a node owned by the caller; for FILE nodes the backing
     * {@link WorkspaceFile#setFileName} is kept in sync.
     *
     * @throws ForbiddenOperationException when the caller does not own the node
     * @throws FileConflictException       when a sibling already uses the new name
     */
    public void renameNode(String nodeId, String newName, String userId) {
        FileNodeEntity node = findActiveNode(nodeId);
        if (!Objects.equals(node.getUserId(), userId)) {
            throw new ForbiddenOperationException("无权限操作该节点");
        }
        if (!StringUtils.hasText(newName) || newName.trim().isEmpty()) {
            throw new IllegalArgumentException("名称不能为空");
        }
        String trimmedName = newName.trim();

        FileNodeEntity conflict = findSameNameNode(userId, node.getSpaceType(),
                node.getWorkstationId(), node.getParentId(), trimmedName);
        // Renaming a node to its own current name is allowed.
        if (conflict != null && !conflict.getNodeId().equals(nodeId)) {
            throw new FileConflictException("目标目录已存在同名节点",
                    conflict.getFileId() != null ? conflict.getFileId() : conflict.getNodeId(),
                    conflict.getName(), conflict.getEntryType(),
                    null, conflict.getUpdatedAt());
        }

        node.setName(trimmedName);
        node.setUpdatedAt(LocalDateTime.now());
        fileNodeMapper.updateById(node);

        if ("FILE".equals(node.getEntryType()) && StringUtils.hasText(node.getFileId())) {
            WorkspaceFile file = workspaceFileMapper.selectOne(new LambdaQueryWrapper<WorkspaceFile>()
                    .eq(WorkspaceFile::getFileId, node.getFileId())
                    .isNull(WorkspaceFile::getDeletedAt)
                    .last("limit 1"));
            if (file != null) {
                file.setFileName(trimmedName);
                file.setUpdatedAt(LocalDateTime.now());
                workspaceFileMapper.updateById(file);
            }
        }
    }

    /**
     * Soft-deletes a node owned by the caller; directories are deleted recursively
     * together with their backing file resources.
     */
    public void deleteNode(String nodeId, String userId) {
        FileNodeEntity node = findActiveNode(nodeId);
        if (!Objects.equals(node.getUserId(), userId)) {
            throw new ForbiddenOperationException("无权限操作该节点");
        }
        softDeleteRecursive(node);
    }

    /** Persists in-place modifications made by collaborating services. */
    public void updateNode(FileNodeEntity node) {
        fileNodeMapper.updateById(node);
    }

    /** Finds the most recently updated active node for a file, without scoping. */
    public FileNodeEntity findByFileId(String fileId) {
        return findByFileId(fileId, null, null, null);
    }

    /**
     * Finds the most recently updated active node for a file, optionally scoped
     * by owner and space. Returns {@code null} when no match exists.
     */
    public FileNodeEntity findByFileId(String fileId, String userId, String spaceType, String workstationId) {
        LambdaQueryWrapper<FileNodeEntity> wrapper = new LambdaQueryWrapper<FileNodeEntity>()
                .eq(FileNodeEntity::getFileId, fileId)
                .isNull(FileNodeEntity::getDeletedAt)
                .orderByDesc(FileNodeEntity::getUpdatedAt)
                .last("limit 1");

        if (StringUtils.hasText(userId)) {
            wrapper.eq(FileNodeEntity::getUserId, userId);
        }

        if (StringUtils.hasText(spaceType)) {
            String normalized = spaceType.toUpperCase(Locale.ROOT);
            wrapper.eq(FileNodeEntity::getSpaceType, normalized);
            if ("WORKSTATION".equals(normalized) && StringUtils.hasText(workstationId)) {
                wrapper.eq(FileNodeEntity::getWorkstationId, workstationId);
            } else if ("USER".equals(normalized)) {
                wrapper.isNull(FileNodeEntity::getWorkstationId);
            }
        }

        return fileNodeMapper.selectOne(wrapper);
    }

    /**
     * Depth-first soft delete: tombstones the node's name (so the original name
     * is immediately reusable), marks it deleted, cleans up FILE resources, then
     * recurses into active children of directories.
     * NOTE(review): recursion depth equals tree depth — assumed shallow; confirm
     * there is no pathologically deep nesting before relying on this.
     */
    private void softDeleteRecursive(FileNodeEntity node) {
        LocalDateTime now = LocalDateTime.now();
        node.setDeletedAt(now);
        node.setUpdatedAt(now);
        node.setName(buildDeletedTombstoneName(node.getName(), node.getNodeId()));
        fileNodeMapper.updateById(node);

        if ("FILE".equals(node.getEntryType()) && StringUtils.hasText(node.getFileId())) {
            cleanupFileResources(node.getFileId());
        }

        if ("DIR".equals(node.getEntryType())) {
            List<FileNodeEntity> children = fileNodeMapper.selectList(
                    new LambdaQueryWrapper<FileNodeEntity>()
                            .eq(FileNodeEntity::getParentId, node.getNodeId())
                            .isNull(FileNodeEntity::getDeletedAt));
            for (FileNodeEntity child : children) {
                softDeleteRecursive(child);
            }
        }
    }

    /**
     * Builds "{name}__deleted__{nodeId}", trimming the original name so the
     * result never exceeds {@link #NODE_NAME_MAX_LENGTH}. The nodeId suffix
     * guarantees uniqueness among tombstones.
     */
    private String buildDeletedTombstoneName(String originalName, String nodeId) {
        String baseName = StringUtils.hasText(originalName) ? originalName : "node";
        String safeNodeId = StringUtils.hasText(nodeId)
                ? nodeId
                : UUID.randomUUID().toString().replace("-", "");
        String suffix = DELETED_NAME_MARKER + safeNodeId;
        int allowedBaseLength = NODE_NAME_MAX_LENGTH - suffix.length();
        if (allowedBaseLength <= 0) {
            // Degenerate case: even the suffix alone exceeds the column; keep its tail.
            return suffix.substring(Math.max(0, suffix.length() - NODE_NAME_MAX_LENGTH));
        }
        if (baseName.length() > allowedBaseLength) {
            baseName = baseName.substring(0, allowedBaseLength);
        }
        return baseName + suffix;
    }

    /**
     * Best-effort cleanup of a deleted file's resources: soft-delete the record,
     * then remove original + parsed blobs and the memory index. Storage/index
     * failures are logged but never abort the deletion.
     */
    private void cleanupFileResources(String fileId) {
        WorkspaceFile file = workspaceFileMapper.selectOne(new LambdaQueryWrapper<WorkspaceFile>()
                .eq(WorkspaceFile::getFileId, fileId)
                .isNull(WorkspaceFile::getDeletedAt)
                .last("limit 1"));
        if (file == null) return;

        file.setDeletedAt(LocalDateTime.now());
        file.setUpdatedAt(LocalDateTime.now());
        workspaceFileMapper.updateById(file);

        try {
            nfsStorageService.deleteFile(file.getOssPath());
        } catch (Exception e) {
            log.warn("目录级联删除原文件失败: fileId={}, path={}, err={}", fileId, file.getOssPath(), e.getMessage());
        }
        if (StringUtils.hasText(file.getParsedOssPath())) {
            try {
                nfsStorageService.deleteFile(file.getParsedOssPath());
            } catch (Exception e) {
                log.warn("目录级联删除解析文件失败: fileId={}, path={}, err={}", fileId, file.getParsedOssPath(), e.getMessage());
            }
        }
        if (memoryService != null) {
            try {
                // The memory index was built from the parsed text when available.
                String source = StringUtils.hasText(file.getParsedOssPath()) ? file.getParsedOssPath() : file.getOssPath();
                memoryService.deleteSource(file.getWorkstationId(), file.getUserId(), source);
            } catch (Exception e) {
                log.warn("目录级联删除 Memory 索引失败: fileId={}, err={}", fileId, e.getMessage());
            }
        }
    }

    /**
     * Validates a parentId: the node must exist, be a directory, belong to the
     * same user, and match the target space/workstation. A blank parentId means
     * the root directory and passes unconditionally.
     */
    public void validateParentId(String parentId, String userId, String spaceType, String workstationId) {
        if (!StringUtils.hasText(parentId)) return;

        FileNodeEntity parent = findActiveNode(parentId);
        if (!"DIR".equals(parent.getEntryType())) {
            throw new IllegalArgumentException("父节点不是目录");
        }
        if (!Objects.equals(parent.getUserId(), userId)) {
            throw new ForbiddenOperationException("无权在该目录下操作");
        }
        String normalizedSpace = spaceType.toUpperCase(Locale.ROOT);
        if (!parent.getSpaceType().equals(normalizedSpace)) {
            throw new IllegalArgumentException("父节点与目标空间类型不匹配");
        }
        String expectedWs = "WORKSTATION".equals(normalizedSpace) ? workstationId : null;
        if (!Objects.equals(parent.getWorkstationId(), expectedWs)) {
            throw new IllegalArgumentException("父节点与目标工作站不匹配");
        }
    }

    /**
     * Finds an active sibling with the given name under parentId (root when
     * blank) in the caller's space. Package-private: used by FileService for
     * upload/copy conflict detection. Expects spaceType already normalized.
     */
    FileNodeEntity findSameNameNode(String userId, String spaceType, String workstationId,
                                    String parentId, String name) {
        LambdaQueryWrapper<FileNodeEntity> wrapper = new LambdaQueryWrapper<FileNodeEntity>()
                .eq(FileNodeEntity::getUserId, userId)
                .eq(FileNodeEntity::getSpaceType, spaceType)
                .eq(FileNodeEntity::getName, name)
                .isNull(FileNodeEntity::getDeletedAt)
                .last("limit 1");

        if ("WORKSTATION".equals(spaceType)) {
            wrapper.eq(FileNodeEntity::getWorkstationId, workstationId);
        } else {
            wrapper.isNull(FileNodeEntity::getWorkstationId);
        }

        if (StringUtils.hasText(parentId)) {
            wrapper.eq(FileNodeEntity::getParentId, parentId);
        } else {
            wrapper.isNull(FileNodeEntity::getParentId);
        }

        return fileNodeMapper.selectOne(wrapper);
    }

    /**
     * Loads an active (non-deleted) node or throws {@link ResourceNotFoundException}.
     */
    private FileNodeEntity findActiveNode(String nodeId) {
        FileNodeEntity node = fileNodeMapper.selectOne(new LambdaQueryWrapper<FileNodeEntity>()
                .eq(FileNodeEntity::getNodeId, nodeId)
                .isNull(FileNodeEntity::getDeletedAt)
                .last("limit 1"));
        if (node == null) {
            throw new ResourceNotFoundException("节点不存在: " + nodeId);
        }
        return node;
    }

    /** Maps an entity to its API response DTO. */
    private FileNodeResponse toResponse(FileNodeEntity node, boolean hasChildren) {
        FileNodeResponse resp = new FileNodeResponse();
        resp.setNodeId(node.getNodeId());
        resp.setParentId(node.getParentId());
        resp.setEntryType(node.getEntryType());
        resp.setName(node.getName());
        resp.setSpaceType(node.getSpaceType());
        resp.setWorkstationId(node.getWorkstationId());
        resp.setFileId(node.getFileId());
        resp.setCreatedAt(node.getCreatedAt());
        resp.setUpdatedAt(node.getUpdatedAt());
        resp.setHasChildren(hasChildren);
        return resp;
    }

    /**
     * Validates spaceType (case-insensitive USER | WORKSTATION); WORKSTATION
     * additionally requires a workstationId.
     */
    private void validateSpaceType(String spaceType, String workstationId) {
        if (!StringUtils.hasText(spaceType)) {
            throw new IllegalArgumentException("spaceType 不能为空");
        }
        String normalized = spaceType.toUpperCase(Locale.ROOT);
        if (!"USER".equals(normalized) && !"WORKSTATION".equals(normalized)) {
            throw new IllegalArgumentException("spaceType 仅支持 USER 或 WORKSTATION");
        }
        if ("WORKSTATION".equals(normalized) && !StringUtils.hasText(workstationId)) {
            throw new IllegalArgumentException("WORKSTATION 空间必须提供 workstationId");
        }
    }
}
// ==== diff --git a/back/src/main/java/com/linkwork/service/FileParseConsumer.java (new file, index 0000000..f318058) ====
package com.linkwork.service;

import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.linkwork.mapper.WorkspaceFileMapper;
import com.linkwork.model.entity.WorkspaceFile;
import com.linkwork.service.memory.DocumentParserService;
import jakarta.annotation.PostConstruct;
import jakarta.annotation.PreDestroy;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.stereotype.Service;

import java.nio.file.Files;
import java.nio.file.Path;
import java.time.LocalDateTime;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * Background consumer of the Redis list {@code file:parse:jobs}: pops file ids,
 * downloads the blob, extracts text via {@link DocumentParserService}, stores the
 * parsed text beside the original, updates parse status, and triggers memory
 * indexing. Runs on a single daemon thread for the life of the application.
 */
@Slf4j
@Service
@RequiredArgsConstructor
public class FileParseConsumer {

    private static final String FILE_PARSE_QUEUE_KEY = "file:parse:jobs";
    // Sleep between polls when the queue is empty.
    private static final int POLL_INTERVAL_MS = 1000;

    private final StringRedisTemplate redisTemplate;
    private final WorkspaceFileMapper workspaceFileMapper;
    private final NfsStorageService nfsStorageService;
    private final FileService fileService;

    // Optional: when absent, parse jobs are marked FAILED instead of processed.
    @Autowired(required = false)
    private DocumentParserService documentParserService;

    private final AtomicBoolean running = new AtomicBoolean(true);
    private ExecutorService executorService;

    /** Starts the single consumer thread after bean construction. */
    @PostConstruct
    public void start() {
        executorService = Executors.newSingleThreadExecutor(r -> {
            Thread t = new Thread(r, "file-parse-consumer");
            t.setDaemon(true);
            return t;
        });
        executorService.submit(this::consumeLoop);
        log.info("FileParseConsumer started, queue={}", FILE_PARSE_QUEUE_KEY);
    }

    /** Stops the loop and interrupts the worker thread on shutdown. */
    @PreDestroy
    public void stop() {
        running.set(false);
        if (executorService != null) {
            executorService.shutdownNow();
        }
        log.info("FileParseConsumer stopped");
    }

    /**
     * Poll loop: RPOP a file id, process it, sleep when idle; on unexpected
     * errors back off (2x poll interval) so a persistent failure (e.g. Redis
     * down) does not spin the CPU.
     */
    private void consumeLoop() {
        while (running.get()) {
            try {
                String fileId = redisTemplate.opsForList().rightPop(FILE_PARSE_QUEUE_KEY);
                if (fileId == null) {
                    Thread.sleep(POLL_INTERVAL_MS);
                    continue;
                }
                processFile(fileId);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                break;
            } catch (Exception e) {
                log.error("消费文件解析任务失败", e);
                try {
                    Thread.sleep(POLL_INTERVAL_MS * 2L);
                } catch (InterruptedException ex) {
                    Thread.currentThread().interrupt();
                    break;
                }
            }
        }
    }

    /**
     * Processes one parse job: download to a temp file, extract text, upload the
     * parsed text, mark PARSED and trigger memory indexing; any failure marks the
     * record FAILED. The temp file is always removed.
     */
    private void processFile(String fileId) {
        WorkspaceFile file = workspaceFileMapper.selectOne(new LambdaQueryWrapper<WorkspaceFile>()
                .eq(WorkspaceFile::getFileId, fileId)
                .isNull(WorkspaceFile::getDeletedAt)
                .last("limit 1"));
        if (file == null) {
            log.warn("文件解析任务找不到文件记录: fileId={}", fileId);
            return;
        }
        if (documentParserService == null) {
            log.warn("DocumentParserService 未启用,跳过解析: fileId={}", fileId);
            file.setParseStatus("FAILED");
            file.setUpdatedAt(LocalDateTime.now());
            workspaceFileMapper.updateById(file);
            return;
        }

        Path tempPath = null;
        try {
            tempPath = nfsStorageService.downloadToTempFile(file.getOssPath());
            String text = documentParserService.parseFile(tempPath);
            String parsedOssPath = fileService.buildParsedPath(file.getOssPath());
            nfsStorageService.uploadTextToPath(text, parsedOssPath);

            file.setParsedOssPath(parsedOssPath);
            file.setParseStatus("PARSED");
            file.setUpdatedAt(LocalDateTime.now());
            workspaceFileMapper.updateById(file);

            fileService.triggerMemoryIndex(file);
        } catch (Exception e) {
            log.error("文件解析失败: fileId={}", fileId, e);
            file.setParseStatus("FAILED");
            file.setUpdatedAt(LocalDateTime.now());
            workspaceFileMapper.updateById(file);
        } finally {
            if (tempPath != null) {
                try {
                    Files.deleteIfExists(tempPath);
                } catch (Exception e) {
                    log.warn("删除临时文件失败: {}", tempPath, e);
                }
            }
        }
    }
}
// ==== diff --git a/back/src/main/java/com/linkwork/service/FileService.java (new file, index 0000000..280e0fd) — header only; class body continues below ====
package com.linkwork.service;

import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.linkwork.common.FileConflictException;
import com.linkwork.common.ForbiddenOperationException;
import com.linkwork.common.ResourceNotFoundException;
import com.linkwork.mapper.WorkspaceFileMapper;
import com.linkwork.model.enums.ConflictPolicy;
import com.linkwork.model.dto.FileMentionResponse;
import com.linkwork.model.dto.FileResponse;
import com.linkwork.model.dto.FileTransferRequest;
import com.linkwork.model.dto.MemoryIndexJob;
import com.linkwork.model.entity.WorkspaceFile;
import com.linkwork.model.entity.RoleEntity;
import com.linkwork.service.memory.MemoryService;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.stereotype.Service;
import org.springframework.util.StringUtils;
import org.springframework.web.multipart.MultipartFile;

import java.io.IOException;
+import java.io.InputStream; +import java.security.MessageDigest; +import java.time.LocalDateTime; +import java.util.*; +import java.util.concurrent.TimeUnit; +import java.util.stream.Stream; + +@Slf4j +@Service +@RequiredArgsConstructor +public class FileService { + + private static final Set ALLOWED_TYPES = Set.of( + "txt", "md", "csv", "doc", "docx", "pdf", "ppt", "pptx", "xlsx", "xls", + "jpg", "jpeg", "png", "gif" + ); + private static final long MAX_FILE_SIZE = 100L * 1024 * 1024; + private static final Set PARSE_REQUIRED_TYPES = Set.of("doc", "docx", "pdf", "ppt", "pptx"); + private static final Set MEMORY_DIRECT_TYPES = Set.of("txt", "md", "csv"); + private static final Set MEMORY_SKIP_TYPES = Set.of("xlsx", "xls", "jpg", "jpeg", "png", "gif"); + private static final String FILE_PARSE_QUEUE_KEY = "file:parse:jobs"; + private static final String FILE_TRANSFER_DEDUP_KEY_PREFIX = "file:transfer:dedup"; + private static final long FILE_TRANSFER_DEDUP_SECONDS = 5L; + + private final WorkspaceFileMapper workspaceFileMapper; + private final NfsStorageService nfsStorageService; + private final StringRedisTemplate redisTemplate; + private final ObjectMapper objectMapper; + private final RoleService roleService; + private final com.linkwork.config.MemoryConfig memoryConfig; + private final FileNodeService fileNodeService; + + @Autowired(required = false) + private MemoryService memoryService; + + public FileResponse uploadFile(MultipartFile file, String spaceType, String workstationId, String userId) { + return uploadFile(file, spaceType, workstationId, userId, null, null); + } + + public FileResponse uploadFile(MultipartFile file, String spaceType, String workstationId, + String userId, String conflictPolicyStr) { + return uploadFile(file, spaceType, workstationId, userId, conflictPolicyStr, null); + } + + public FileResponse uploadFile(MultipartFile file, String spaceType, String workstationId, + String userId, String conflictPolicyStr, String parentId) { + 
validateUpload(file, spaceType, workstationId, userId); + String normalizedSpace = spaceType.toUpperCase(Locale.ROOT); + fileNodeService.validateParentId(parentId, userId, normalizedSpace, workstationId); + ConflictPolicy policy = ConflictPolicy.fromString(conflictPolicyStr); + String originalName = file.getOriginalFilename(); + + com.linkwork.model.entity.FileNodeEntity existingNode = fileNodeService.findSameNameNode( + userId, normalizedSpace, workstationId, parentId, originalName); + if (existingNode != null) { + switch (policy) { + case REJECT -> { + WorkspaceFile existingFile = "FILE".equals(existingNode.getEntryType()) && existingNode.getFileId() != null + ? findActiveByFileId(existingNode.getFileId()) : null; + throw new FileConflictException( + "目标目录已存在同名" + ("DIR".equals(existingNode.getEntryType()) ? "目录" : "文件"), + existingNode.getFileId() != null ? existingNode.getFileId() : existingNode.getNodeId(), + existingNode.getName(), existingNode.getEntryType(), + existingFile != null ? 
existingFile.getFileSize() : null, + existingNode.getUpdatedAt()); + } + case OVERWRITE -> { + if ("DIR".equals(existingNode.getEntryType())) { + throw new IllegalArgumentException("无法用文件覆盖目录"); + } + WorkspaceFile existingFile = findActiveByFileId(existingNode.getFileId()); + return overwriteUpload(existingFile, file); + } + case RENAME -> originalName = generateUniqueNodeName(userId, normalizedSpace, workstationId, parentId, originalName); + } + } + + String ext = getExtension(originalName); + String fileId = UUID.randomUUID().toString().replace("-", ""); + String ossPath = buildOssPath(normalizedSpace, workstationId, userId, fileId, ext); + + WorkspaceFile workspaceFile = new WorkspaceFile(); + workspaceFile.setFileId(fileId); + workspaceFile.setFileName(originalName); + workspaceFile.setFileSize(file.getSize()); + workspaceFile.setFileType(ext); + workspaceFile.setContentType(file.getContentType()); + workspaceFile.setSpaceType(normalizedSpace); + workspaceFile.setWorkstationId(workstationId); + workspaceFile.setUserId(userId); + workspaceFile.setOssPath(ossPath); + workspaceFile.setMemoryIndexStatus("NONE"); + workspaceFile.setParseStatus(PARSE_REQUIRED_TYPES.contains(ext) ? 
"NONE" : "SKIP"); + workspaceFile.setFileHash(computeSha256(file)); + workspaceFile.setCreatedAt(LocalDateTime.now()); + workspaceFile.setUpdatedAt(LocalDateTime.now()); + + try { + nfsStorageService.uploadFileToPath(file, ossPath); + } catch (IOException e) { + throw new IllegalStateException("上传文件到存储失败", e); + } + workspaceFileMapper.insert(workspaceFile); + + fileNodeService.createFileNode(originalName, normalizedSpace, workstationId, userId, fileId, parentId); + + if ("NONE".equals(workspaceFile.getParseStatus())) { + workspaceFile.setParseStatus("PARSING"); + workspaceFile.setUpdatedAt(LocalDateTime.now()); + workspaceFileMapper.updateById(workspaceFile); + redisTemplate.opsForList().leftPush(FILE_PARSE_QUEUE_KEY, fileId); + } else if (MEMORY_DIRECT_TYPES.contains(ext)) { + triggerMemoryIndex(workspaceFile); + } + + return toResponse(workspaceFile); + } + + private FileResponse overwriteUpload(WorkspaceFile target, MultipartFile newFile) { + String ext = getExtension(newFile.getOriginalFilename()); + try { + nfsStorageService.uploadFileToPath(newFile, target.getOssPath()); + } catch (IOException e) { + throw new IllegalStateException("覆盖上传文件失败", e); + } + + if (StringUtils.hasText(target.getParsedOssPath())) { + try { + nfsStorageService.deleteFile(target.getParsedOssPath()); + } catch (Exception e) { + log.warn("删除旧解析文件失败: fileId={}, err={}", target.getFileId(), e.getMessage()); + } + } + + target.setFileSize(newFile.getSize()); + target.setFileType(ext); + target.setContentType(newFile.getContentType()); + target.setFileHash(computeSha256(newFile)); + target.setParseStatus(PARSE_REQUIRED_TYPES.contains(ext) ? 
"PARSING" : "SKIP"); + target.setMemoryIndexStatus("NONE"); + target.setParsedOssPath(null); + target.setUpdatedAt(LocalDateTime.now()); + workspaceFileMapper.updateById(target); + + if (PARSE_REQUIRED_TYPES.contains(ext)) { + redisTemplate.opsForList().leftPush(FILE_PARSE_QUEUE_KEY, target.getFileId()); + } else if (MEMORY_DIRECT_TYPES.contains(ext)) { + triggerMemoryIndex(target); + } + + return toResponse(target); + } + + public Map listFiles(String spaceType, String workstationId, String fileType, + String keyword, Integer page, Integer pageSize, String userId) { + validateSpaceType(spaceType, workstationId); + int currentPage = page == null || page < 1 ? 1 : page; + int size = pageSize == null || pageSize < 1 ? 20 : pageSize; + if (size > 100) { + size = 100; + } + + LambdaQueryWrapper wrapper = new LambdaQueryWrapper() + .eq(WorkspaceFile::getUserId, userId) + .eq(WorkspaceFile::getSpaceType, spaceType.toUpperCase(Locale.ROOT)) + .isNull(WorkspaceFile::getDeletedAt) + .orderByDesc(WorkspaceFile::getCreatedAt); + + if ("WORKSTATION".equalsIgnoreCase(spaceType)) { + wrapper.eq(WorkspaceFile::getWorkstationId, workstationId); + } + if (StringUtils.hasText(fileType)) { + wrapper.eq(WorkspaceFile::getFileType, fileType.toLowerCase(Locale.ROOT)); + } + if (StringUtils.hasText(keyword)) { + wrapper.like(WorkspaceFile::getFileName, keyword.trim()); + } + + Page result = workspaceFileMapper.selectPage(new Page<>(currentPage, size), wrapper); + List items = result.getRecords().stream().map(this::toResponse).toList(); + + Map pagination = new HashMap<>(); + pagination.put("page", result.getCurrent()); + pagination.put("pageSize", result.getSize()); + pagination.put("total", result.getTotal()); + pagination.put("totalPages", result.getPages()); + + Map payload = new HashMap<>(); + payload.put("items", items); + payload.put("pagination", pagination); + return payload; + } + + public FileResponse getFileDetail(String fileId, String userId) { + WorkspaceFile file = 
findActiveByFileId(fileId); + checkPermission(file, userId); + return toResponse(file); + } + + public DownloadInfo getDownloadInfo(String fileId, String userId) { + WorkspaceFile file = findActiveByFileId(fileId); + checkPermission(file, userId); + return new DownloadInfo(file.getOssPath(), file.getFileName(), file.getContentType()); + } + + public record DownloadInfo(String storagePath, String fileName, String contentType) {} + + public void deleteFile(String fileId, String userId) { + WorkspaceFile file = findActiveByFileId(fileId); + checkPermission(file, userId); + + file.setDeletedAt(LocalDateTime.now()); + file.setUpdatedAt(LocalDateTime.now()); + workspaceFileMapper.updateById(file); + + com.linkwork.model.entity.FileNodeEntity node = fileNodeService.findByFileId( + fileId, userId, file.getSpaceType(), file.getWorkstationId()); + if (node != null) { + fileNodeService.deleteNode(node.getNodeId(), userId); + } + + try { + nfsStorageService.deleteFile(file.getOssPath()); + } catch (Exception e) { + log.warn("删除原文件失败: fileId={}, path={}, err={}", fileId, file.getOssPath(), e.getMessage()); + } + if (StringUtils.hasText(file.getParsedOssPath())) { + try { + nfsStorageService.deleteFile(file.getParsedOssPath()); + } catch (Exception e) { + log.warn("删除解析文件失败: fileId={}, path={}, err={}", fileId, file.getParsedOssPath(), e.getMessage()); + } + } + if (memoryService != null) { + try { + String source = StringUtils.hasText(file.getParsedOssPath()) ? 
file.getParsedOssPath() : file.getOssPath(); + memoryService.deleteSource(file.getWorkstationId(), file.getUserId(), source); + } catch (Exception e) { + log.warn("删除 Memory 索引失败: fileId={}, err={}", fileId, e.getMessage()); + } + } + } + + public FileResponse replaceFile(String fileId, MultipartFile newFile, String userId) { + WorkspaceFile file = findActiveByFileId(fileId); + checkPermission(file, userId); + validateUpload(newFile, file.getSpaceType(), file.getWorkstationId(), userId); + + String ext = getExtension(newFile.getOriginalFilename()); + try { + nfsStorageService.uploadFileToPath(newFile, file.getOssPath()); + } catch (IOException e) { + throw new IllegalStateException("覆盖上传文件失败", e); + } + + if (StringUtils.hasText(file.getParsedOssPath())) { + try { + nfsStorageService.deleteFile(file.getParsedOssPath()); + } catch (Exception e) { + log.warn("删除旧解析文件失败: fileId={}, err={}", fileId, e.getMessage()); + } + } + + file.setFileName(newFile.getOriginalFilename()); + file.setFileSize(newFile.getSize()); + file.setFileType(ext); + file.setContentType(newFile.getContentType()); + file.setFileHash(computeSha256(newFile)); + file.setParseStatus(PARSE_REQUIRED_TYPES.contains(ext) ? 
"PARSING" : "SKIP"); + file.setMemoryIndexStatus("NONE"); + file.setParsedOssPath(null); + file.setUpdatedAt(LocalDateTime.now()); + workspaceFileMapper.updateById(file); + + if (PARSE_REQUIRED_TYPES.contains(ext)) { + redisTemplate.opsForList().leftPush(FILE_PARSE_QUEUE_KEY, file.getFileId()); + } else if (MEMORY_DIRECT_TYPES.contains(ext)) { + triggerMemoryIndex(file); + } + + return toResponse(file); + } + + public FileResponse copyFile(String fileId, FileTransferRequest request, String userId) { + WorkspaceFile source = findActiveByFileId(fileId); + checkPermission(source, userId); + validateSpaceType(request.getTargetSpaceType(), request.getTargetWorkstationId()); + String targetSpaceType = request.getTargetSpaceType().toUpperCase(Locale.ROOT); + String targetParentId = request.getTargetParentId(); + fileNodeService.validateParentId(targetParentId, userId, targetSpaceType, request.getTargetWorkstationId()); + + ConflictPolicy policy = request.resolveConflictPolicy(); + com.linkwork.model.entity.FileNodeEntity conflictNode = fileNodeService.findSameNameNode( + userId, targetSpaceType, request.getTargetWorkstationId(), targetParentId, source.getFileName()); + String targetFileName = source.getFileName(); + + if (conflictNode != null) { + switch (policy) { + case REJECT -> { + WorkspaceFile conflictFile = "FILE".equals(conflictNode.getEntryType()) && conflictNode.getFileId() != null + ? findActiveByFileId(conflictNode.getFileId()) : null; + throw new FileConflictException( + "目标目录已存在同名" + ("DIR".equals(conflictNode.getEntryType()) ? "目录" : "文件"), + conflictNode.getFileId() != null ? conflictNode.getFileId() : conflictNode.getNodeId(), + conflictNode.getName(), conflictNode.getEntryType(), + conflictFile != null ? 
conflictFile.getFileSize() : null, + conflictNode.getUpdatedAt()); + } + case OVERWRITE -> { + if ("DIR".equals(conflictNode.getEntryType())) { + throw new IllegalArgumentException("无法用文件覆盖目录"); + } + WorkspaceFile conflictFile = findActiveByFileId(conflictNode.getFileId()); + acquireTransferDedup(source.getFileId(), userId, "copy", targetSpaceType, + request.getTargetWorkstationId(), policy, targetParentId); + return toResponse(overwriteTargetFile(source, conflictFile)); + } + case RENAME -> { + if (StringUtils.hasText(request.getNewName())) { + com.linkwork.model.entity.FileNodeEntity newNameConflict = fileNodeService.findSameNameNode( + userId, targetSpaceType, request.getTargetWorkstationId(), targetParentId, request.getNewName()); + if (newNameConflict != null) { + WorkspaceFile conflictFile = "FILE".equals(newNameConflict.getEntryType()) && newNameConflict.getFileId() != null + ? findActiveByFileId(newNameConflict.getFileId()) : null; + throw new FileConflictException( + "目标目录已存在同名" + ("DIR".equals(newNameConflict.getEntryType()) ? "目录" : "文件"), + newNameConflict.getFileId() != null ? newNameConflict.getFileId() : newNameConflict.getNodeId(), + newNameConflict.getName(), newNameConflict.getEntryType(), + conflictFile != null ? 
conflictFile.getFileSize() : null, + newNameConflict.getUpdatedAt()); + } + targetFileName = request.getNewName(); + } else { + targetFileName = generateUniqueNodeName(userId, targetSpaceType, + request.getTargetWorkstationId(), targetParentId, source.getFileName()); + } + } + } + } + + acquireTransferDedup(source.getFileId(), userId, "copy", targetSpaceType, + request.getTargetWorkstationId(), policy, targetParentId); + + String newFileId = UUID.randomUUID().toString().replace("-", ""); + String ext = source.getFileType(); + String targetOssPath = buildOssPath(targetSpaceType, request.getTargetWorkstationId(), userId, newFileId, ext); + nfsStorageService.copyObject(source.getOssPath(), targetOssPath); + + String targetParsedPath = null; + if (StringUtils.hasText(source.getParsedOssPath())) { + targetParsedPath = buildParsedPath(targetOssPath); + nfsStorageService.copyObject(source.getParsedOssPath(), targetParsedPath); + } + + WorkspaceFile copied = new WorkspaceFile(); + copied.setFileId(newFileId); + copied.setFileName(targetFileName); + copied.setFileSize(source.getFileSize()); + copied.setFileType(source.getFileType()); + copied.setContentType(source.getContentType()); + copied.setSpaceType(targetSpaceType); + copied.setWorkstationId("WORKSTATION".equals(targetSpaceType) ? 
request.getTargetWorkstationId() : null); + copied.setUserId(userId); + copied.setOssPath(targetOssPath); + copied.setParsedOssPath(targetParsedPath); + copied.setParseStatus(source.getParseStatus()); + copied.setMemoryIndexStatus("NONE"); + copied.setFileHash(source.getFileHash()); + copied.setCreatedAt(LocalDateTime.now()); + copied.setUpdatedAt(LocalDateTime.now()); + + try { + workspaceFileMapper.insert(copied); + } catch (Exception e) { + try { nfsStorageService.deleteFile(targetOssPath); } catch (Exception ignored) { } + if (targetParsedPath != null) { + try { nfsStorageService.deleteFile(targetParsedPath); } catch (Exception ignored) { } + } + throw e; + } + + fileNodeService.createFileNode(targetFileName, targetSpaceType, request.getTargetWorkstationId(), + userId, newFileId, targetParentId); + + if ("PARSED".equals(copied.getParseStatus()) || MEMORY_DIRECT_TYPES.contains(copied.getFileType())) { + triggerMemoryIndex(copied); + } + + return toResponse(copied); + } + + public FileResponse moveFile(String fileId, FileTransferRequest request, String userId) { + WorkspaceFile source = findActiveByFileId(fileId); + checkPermission(source, userId); + validateSpaceType(request.getTargetSpaceType(), request.getTargetWorkstationId()); + String targetSpaceType = request.getTargetSpaceType().toUpperCase(Locale.ROOT); + String targetParentId = request.getTargetParentId(); + fileNodeService.validateParentId(targetParentId, userId, targetSpaceType, request.getTargetWorkstationId()); + + ConflictPolicy policy = request.resolveConflictPolicy(); + + com.linkwork.model.entity.FileNodeEntity sourceNode = fileNodeService.findByFileId( + fileId, userId, source.getSpaceType(), source.getWorkstationId()); + com.linkwork.model.entity.FileNodeEntity conflictNode = fileNodeService.findSameNameNode( + userId, targetSpaceType, request.getTargetWorkstationId(), targetParentId, source.getFileName()); + // move 排除自身节点 + if (conflictNode != null && sourceNode != null && 
conflictNode.getNodeId().equals(sourceNode.getNodeId())) { + conflictNode = null; + } + + // ── 阶段 1:校验(可能抛 409,不占 dedup,不做写操作) ── + if (conflictNode != null) { + switch (policy) { + case REJECT -> { + WorkspaceFile conflictFile = "FILE".equals(conflictNode.getEntryType()) && conflictNode.getFileId() != null + ? findActiveByFileId(conflictNode.getFileId()) : null; + throw new FileConflictException( + "目标目录已存在同名" + ("DIR".equals(conflictNode.getEntryType()) ? "目录" : "文件"), + conflictNode.getFileId() != null ? conflictNode.getFileId() : conflictNode.getNodeId(), + conflictNode.getName(), conflictNode.getEntryType(), + conflictFile != null ? conflictFile.getFileSize() : null, + conflictNode.getUpdatedAt()); + } + case RENAME -> { + if (StringUtils.hasText(request.getNewName())) { + com.linkwork.model.entity.FileNodeEntity newNameConflict = fileNodeService.findSameNameNode( + userId, targetSpaceType, request.getTargetWorkstationId(), targetParentId, request.getNewName()); + if (newNameConflict != null && (sourceNode == null || !newNameConflict.getNodeId().equals(sourceNode.getNodeId()))) { + WorkspaceFile cf = "FILE".equals(newNameConflict.getEntryType()) && newNameConflict.getFileId() != null + ? findActiveByFileId(newNameConflict.getFileId()) : null; + throw new FileConflictException( + "目标目录已存在同名" + ("DIR".equals(newNameConflict.getEntryType()) ? "目录" : "文件"), + newNameConflict.getFileId() != null ? newNameConflict.getFileId() : newNameConflict.getNodeId(), + newNameConflict.getName(), newNameConflict.getEntryType(), + cf != null ? 
cf.getFileSize() : null, + newNameConflict.getUpdatedAt()); + } + } + } + default -> { } + } + } + + // ── 阶段 2:dedup → 执行写操作 ── + acquireTransferDedup(source.getFileId(), userId, "move", targetSpaceType, + request.getTargetWorkstationId(), policy, targetParentId); + + if (conflictNode != null) { + switch (policy) { + case OVERWRITE -> { + if ("DIR".equals(conflictNode.getEntryType())) { + throw new IllegalArgumentException("无法用文件覆盖目录"); + } + WorkspaceFile conflictFile = findActiveByFileId(conflictNode.getFileId()); + conflictFile.setDeletedAt(LocalDateTime.now()); + conflictFile.setUpdatedAt(LocalDateTime.now()); + workspaceFileMapper.updateById(conflictFile); + fileNodeService.deleteNode(conflictNode.getNodeId(), userId); + try { + nfsStorageService.deleteFile(conflictFile.getOssPath()); + if (StringUtils.hasText(conflictFile.getParsedOssPath())) { + nfsStorageService.deleteFile(conflictFile.getParsedOssPath()); + } + } catch (Exception e) { + log.warn("清理被覆盖文件失败: fileId={}, err={}", conflictFile.getFileId(), e.getMessage()); + } + } + case RENAME -> { + if (StringUtils.hasText(request.getNewName())) { + source.setFileName(request.getNewName()); + } else { + String newName = generateUniqueNodeName(userId, targetSpaceType, + request.getTargetWorkstationId(), targetParentId, source.getFileName()); + source.setFileName(newName); + } + } + default -> { } + } + } + + String oldWorkstationId = source.getWorkstationId(); + String oldSpaceType = source.getSpaceType(); + String oldOssPath = source.getOssPath(); + String oldParsedPath = source.getParsedOssPath(); + + String targetOssPath = buildOssPath(targetSpaceType, request.getTargetWorkstationId(), userId, source.getFileId(), source.getFileType()); + boolean storagePathChanged = !Objects.equals(oldOssPath, targetOssPath); + String targetParsedPath = StringUtils.hasText(oldParsedPath) ? 
buildParsedPath(targetOssPath) : null; + + if (storagePathChanged) { + nfsStorageService.copyObject(oldOssPath, targetOssPath); + if (StringUtils.hasText(oldParsedPath)) { + nfsStorageService.copyObject(oldParsedPath, targetParsedPath); + } + } else { + // Same path (e.g. move within same space/directory tree), no copy/delete required. + targetParsedPath = oldParsedPath; + } + + source.setSpaceType(targetSpaceType); + source.setWorkstationId("WORKSTATION".equals(targetSpaceType) ? request.getTargetWorkstationId() : null); + source.setOssPath(targetOssPath); + source.setParsedOssPath(targetParsedPath); + source.setUpdatedAt(LocalDateTime.now()); + source.setMemoryIndexStatus("NONE"); + workspaceFileMapper.updateById(source); + + // 更新 file node 归属 + if (sourceNode != null) { + sourceNode.setParentId(targetParentId); + sourceNode.setSpaceType(targetSpaceType); + sourceNode.setWorkstationId("WORKSTATION".equals(targetSpaceType) ? request.getTargetWorkstationId() : null); + sourceNode.setName(source.getFileName()); + sourceNode.setUpdatedAt(LocalDateTime.now()); + fileNodeService.updateNode(sourceNode); + } + + if (storagePathChanged) { + try { + nfsStorageService.deleteFile(oldOssPath); + } catch (Exception e) { + log.warn("删除旧原文件失败: fileId={}, path={}, err={}", fileId, oldOssPath, e.getMessage()); + } + if (StringUtils.hasText(oldParsedPath) && !Objects.equals(oldParsedPath, targetParsedPath)) { + try { + nfsStorageService.deleteFile(oldParsedPath); + } catch (Exception e) { + log.warn("删除旧解析文件失败: fileId={}, path={}, err={}", fileId, oldParsedPath, e.getMessage()); + } + } + } + + if (memoryService != null) { + try { + if ("WORKSTATION".equals(oldSpaceType)) { + memoryService.deleteSource(oldWorkstationId, source.getUserId(), oldOssPath); + } else { + memoryService.deleteSource(null, source.getUserId(), oldOssPath); + } + } catch (Exception e) { + log.warn("清理旧Memory索引失败: {}", e.getMessage()); + } + } + + if ("PARSED".equals(source.getParseStatus()) || 
MEMORY_DIRECT_TYPES.contains(source.getFileType())) { + triggerMemoryIndex(source); + } + + return toResponse(source); + } + + public List mentionFiles(String workstationId, String keyword, String userId) { + List wsFiles = listBySpaceForMention(userId, "WORKSTATION", workstationId, keyword); + List userFiles = listBySpaceForMention(userId, "USER", null, keyword); + return Stream.concat(wsFiles.stream(), userFiles.stream()) + .limit(50) + .map(this::toMentionResponse) + .toList(); + } + + public void triggerMemoryIndex(WorkspaceFile file) { + if (memoryService == null || file == null) { + return; + } + + String fileType = file.getFileType(); + String objectName = null; + if (MEMORY_DIRECT_TYPES.contains(fileType)) { + objectName = file.getOssPath(); + } else if (PARSE_REQUIRED_TYPES.contains(fileType) && "PARSED".equals(file.getParseStatus())) { + objectName = file.getParsedOssPath(); + } else if (MEMORY_SKIP_TYPES.contains(fileType)) { + file.setMemoryIndexStatus("SKIP"); + file.setUpdatedAt(LocalDateTime.now()); + workspaceFileMapper.updateById(file); + return; + } else { + return; + } + + if (!StringUtils.hasText(objectName)) { + return; + } + + if ("WORKSTATION".equals(file.getSpaceType())) { + RoleEntity role = null; + if (StringUtils.hasText(file.getWorkstationId())) { + role = roleService.getOne(new LambdaQueryWrapper() + .eq(RoleEntity::getRoleNo, file.getWorkstationId()) + .last("limit 1")); + if (role == null) { + try { + role = roleService.getById(Long.valueOf(file.getWorkstationId())); + } catch (Exception ignored) { + // ignore non-numeric workstation id + } + } + } + Boolean enabled = role != null && role.getConfigJson() != null ? role.getConfigJson().getMemoryEnabled() : null; + if (Boolean.FALSE.equals(enabled)) { + file.setMemoryIndexStatus("SKIP"); + file.setUpdatedAt(LocalDateTime.now()); + workspaceFileMapper.updateById(file); + return; + } + } + + String collectionName = "USER".equals(file.getSpaceType()) + ? 
memoryConfig.userCollectionName(file.getUserId()) + : memoryConfig.collectionName(file.getWorkstationId(), file.getUserId()); + + MemoryIndexJob job = MemoryIndexJob.builder() + .jobId(UUID.randomUUID().toString()) + .workstationId(file.getWorkstationId()) + .userId(file.getUserId()) + .jobType(MemoryIndexJob.JobType.FILE_UPLOAD) + .fileType(fileType) + .source(objectName) + .storageType("NFS") + .objectName(objectName) + .collectionName(collectionName) + .build(); + + try { + String payload = objectMapper.writeValueAsString(job); + redisTemplate.opsForList().leftPush(memoryConfig.getIndex().getQueueKey(), payload); + file.setMemoryIndexStatus("INDEXING"); + file.setUpdatedAt(LocalDateTime.now()); + workspaceFileMapper.updateById(file); + } catch (Exception e) { + throw new IllegalStateException("触发 Memory 索引失败", e); + } + } + + private List listBySpaceForMention(String userId, String spaceType, String workstationId, String keyword) { + LambdaQueryWrapper wrapper = new LambdaQueryWrapper() + .eq(WorkspaceFile::getUserId, userId) + .eq(WorkspaceFile::getSpaceType, spaceType) + .isNull(WorkspaceFile::getDeletedAt) + .orderByDesc(WorkspaceFile::getCreatedAt) + .last("limit 50"); + if (StringUtils.hasText(workstationId)) { + wrapper.eq(WorkspaceFile::getWorkstationId, workstationId); + } + if (StringUtils.hasText(keyword)) { + wrapper.like(WorkspaceFile::getFileName, keyword.trim()); + } + return workspaceFileMapper.selectList(wrapper); + } + + private FileMentionResponse toMentionResponse(WorkspaceFile file) { + FileMentionResponse response = new FileMentionResponse(); + response.setFileId(file.getFileId()); + response.setFileName(file.getFileName()); + response.setFileType(file.getFileType()); + response.setFileSize(file.getFileSize()); + response.setSpaceType(file.getSpaceType()); + response.setWorkstationId(file.getWorkstationId()); + response.setCreatedAt(file.getCreatedAt()); + return response; + } + + private void acquireTransferDedup(String fileId, String 
userId, String operation, + String targetSpaceType, String targetWorkstationId, + ConflictPolicy policy, String targetParentId) { + String key = buildTransferDedupKey(fileId, userId, operation, targetSpaceType, targetWorkstationId, policy, targetParentId); + Boolean acquired = redisTemplate.opsForValue().setIfAbsent( + key, "1", FILE_TRANSFER_DEDUP_SECONDS, TimeUnit.SECONDS); + if (!Boolean.TRUE.equals(acquired)) { + throw new IllegalStateException("重复提交,请稍后重试"); + } + } + + private WorkspaceFile findSameNameFile(String userId, String spaceType, String workstationId, + String fileName, String excludeFileId) { + LambdaQueryWrapper wrapper = new LambdaQueryWrapper() + .eq(WorkspaceFile::getUserId, userId) + .eq(WorkspaceFile::getSpaceType, spaceType) + .eq(WorkspaceFile::getFileName, fileName) + .isNull(WorkspaceFile::getDeletedAt) + .last("limit 1"); + if ("WORKSTATION".equals(spaceType)) { + wrapper.eq(WorkspaceFile::getWorkstationId, workstationId); + } else { + wrapper.isNull(WorkspaceFile::getWorkstationId); + } + if (StringUtils.hasText(excludeFileId)) { + wrapper.ne(WorkspaceFile::getFileId, excludeFileId); + } + return workspaceFileMapper.selectOne(wrapper); + } + + private String generateUniqueNodeName(String userId, String spaceType, String workstationId, + String parentId, String originalName) { + String baseName; + String extension; + int dotIdx = originalName.lastIndexOf('.'); + if (dotIdx > 0) { + baseName = originalName.substring(0, dotIdx); + extension = originalName.substring(dotIdx); + } else { + baseName = originalName; + extension = ""; + } + + for (int i = 1; i <= 100; i++) { + String candidate = baseName + " (" + i + ")" + extension; + com.linkwork.model.entity.FileNodeEntity existing = fileNodeService.findSameNameNode( + userId, spaceType, workstationId, parentId, candidate); + if (existing == null) { + return candidate; + } + } + return baseName + " (" + UUID.randomUUID().toString().substring(0, 8) + ")" + extension; + } + + private String 
generateUniqueName(String userId, String spaceType, String workstationId, String originalName) { + String baseName; + String extension; + int dotIdx = originalName.lastIndexOf('.'); + if (dotIdx > 0) { + baseName = originalName.substring(0, dotIdx); + extension = originalName.substring(dotIdx); + } else { + baseName = originalName; + extension = ""; + } + + for (int i = 1; i <= 100; i++) { + String candidate = baseName + " (" + i + ")" + extension; + WorkspaceFile existing = findSameNameFile(userId, spaceType, workstationId, candidate, null); + if (existing == null) { + return candidate; + } + } + return baseName + " (" + UUID.randomUUID().toString().substring(0, 8) + ")" + extension; + } + + private WorkspaceFile overwriteTargetFile(WorkspaceFile source, WorkspaceFile target) { + nfsStorageService.copyObject(source.getOssPath(), target.getOssPath()); + + String targetParsedPath = target.getParsedOssPath(); + if (StringUtils.hasText(source.getParsedOssPath())) { + if (!StringUtils.hasText(targetParsedPath)) { + targetParsedPath = buildParsedPath(target.getOssPath()); + } + nfsStorageService.copyObject(source.getParsedOssPath(), targetParsedPath); + } else if (StringUtils.hasText(targetParsedPath)) { + try { + nfsStorageService.deleteFile(targetParsedPath); + } catch (Exception e) { + log.warn("清理覆盖前解析文件失败: fileId={}, path={}, err={}", + target.getFileId(), targetParsedPath, e.getMessage()); + } + targetParsedPath = null; + } + + target.setFileSize(source.getFileSize()); + target.setFileType(source.getFileType()); + target.setContentType(source.getContentType()); + target.setFileHash(source.getFileHash()); + target.setParseStatus(source.getParseStatus()); + target.setParsedOssPath(targetParsedPath); + target.setMemoryIndexStatus("NONE"); + target.setUpdatedAt(LocalDateTime.now()); + workspaceFileMapper.updateById(target); + + if ("PARSED".equals(target.getParseStatus()) || MEMORY_DIRECT_TYPES.contains(target.getFileType())) { + triggerMemoryIndex(target); + } + + 
return target; + } + + private String buildTransferDedupKey(String fileId, String userId, String operation, + String targetSpaceType, String targetWorkstationId, + ConflictPolicy policy, String targetParentId) { + String safeTargetWorkstationId = StringUtils.hasText(targetWorkstationId) + ? sanitizePathSegment(targetWorkstationId) + : "-"; + String safeTargetParentId = StringUtils.hasText(targetParentId) + ? sanitizePathSegment(targetParentId) + : "root"; + return String.format("%s:%s:%s:%s:%s:%s:%s:%s", + FILE_TRANSFER_DEDUP_KEY_PREFIX, + sanitizePathSegment(userId), + sanitizePathSegment(fileId), + sanitizePathSegment(operation), + sanitizePathSegment(targetSpaceType), + safeTargetWorkstationId, + policy.name(), + safeTargetParentId); + } + + private void validateUpload(MultipartFile file, String spaceType, String workstationId, String userId) { + if (file == null || file.isEmpty()) { + throw new IllegalArgumentException("文件不能为空"); + } + if (file.getSize() > MAX_FILE_SIZE) { + throw new IllegalArgumentException("文件大小不能超过 100MB"); + } + validateSpaceType(spaceType, workstationId); + String ext = getExtension(file.getOriginalFilename()); + if (!ALLOWED_TYPES.contains(ext)) { + throw new IllegalArgumentException("不支持的文件类型: " + ext); + } + if (!StringUtils.hasText(userId)) { + throw new IllegalArgumentException("用户信息缺失"); + } + } + + private void validateSpaceType(String spaceType, String workstationId) { + if (!StringUtils.hasText(spaceType)) { + throw new IllegalArgumentException("spaceType 不能为空"); + } + String normalized = spaceType.toUpperCase(Locale.ROOT); + if (!"USER".equals(normalized) && !"WORKSTATION".equals(normalized)) { + throw new IllegalArgumentException("spaceType 仅支持 USER 或 WORKSTATION"); + } + if ("WORKSTATION".equals(normalized) && !StringUtils.hasText(workstationId)) { + throw new IllegalArgumentException("WORKSTATION 空间必须提供 workstationId"); + } + } + + private WorkspaceFile findActiveByFileId(String fileId) { + WorkspaceFile file = 
workspaceFileMapper.selectOne(new LambdaQueryWrapper() + .eq(WorkspaceFile::getFileId, fileId) + .isNull(WorkspaceFile::getDeletedAt) + .last("limit 1")); + if (file == null) { + throw new ResourceNotFoundException("文件不存在: " + fileId); + } + return file; + } + + private void checkPermission(WorkspaceFile file, String userId) { + if (!Objects.equals(file.getUserId(), userId)) { + throw new ForbiddenOperationException("无权限访问该文件"); + } + } + + public String buildParsedPath(String ossPath) { + String parsed = ossPath.replace("/original/", "/parsed/"); + int dotIndex = parsed.lastIndexOf('.'); + if (dotIndex > 0) { + return parsed.substring(0, dotIndex) + ".md"; + } + return parsed + ".md"; + } + + private String getExtension(String filename) { + if (!StringUtils.hasText(filename) || !filename.contains(".")) { + throw new IllegalArgumentException("文件名缺少扩展名"); + } + return filename.substring(filename.lastIndexOf('.') + 1).toLowerCase(Locale.ROOT); + } + + private String buildOssPath(String spaceType, String workstationId, String userId, String fileId, String ext) { + String normalized = spaceType.toUpperCase(Locale.ROOT); + String safeUserId = sanitizePathSegment(userId); + if ("USER".equals(normalized)) { + return String.format("user-files/%s/original/%s.%s", safeUserId, fileId, ext); + } + String safeWorkstationId = sanitizePathSegment(workstationId); + return String.format("workstation/%s/%s/original/%s.%s", safeWorkstationId, safeUserId, fileId, ext); + } + + private String sanitizePathSegment(String segment) { + if (segment == null) { + return ""; + } + return segment.replaceAll("[^a-zA-Z0-9_.-]", "_"); + } + + private String computeSha256(MultipartFile file) { + try (InputStream inputStream = file.getInputStream()) { + MessageDigest digest = MessageDigest.getInstance("SHA-256"); + byte[] buffer = new byte[8192]; + int len; + while ((len = inputStream.read(buffer)) > 0) { + digest.update(buffer, 0, len); + } + byte[] hash = digest.digest(); + StringBuilder sb = new 
StringBuilder(hash.length * 2); + for (byte b : hash) { + sb.append(String.format("%02x", b)); + } + return sb.toString(); + } catch (Exception e) { + throw new IllegalStateException("计算文件哈希失败", e); + } + } + + public FileResponse toResponse(WorkspaceFile file) { + FileResponse response = new FileResponse(); + response.setFileId(file.getFileId()); + response.setFileName(file.getFileName()); + response.setFileSize(file.getFileSize()); + response.setFileType(file.getFileType()); + response.setContentType(file.getContentType()); + response.setSpaceType(file.getSpaceType()); + response.setWorkstationId(file.getWorkstationId()); + response.setParseStatus(file.getParseStatus()); + response.setMemoryIndexStatus(file.getMemoryIndexStatus()); + response.setCreatedAt(file.getCreatedAt()); + return response; + } +} diff --git a/back/src/main/java/com/linkwork/service/FileSpaceSyncService.java b/back/src/main/java/com/linkwork/service/FileSpaceSyncService.java new file mode 100644 index 0000000..8fcfa3e --- /dev/null +++ b/back/src/main/java/com/linkwork/service/FileSpaceSyncService.java @@ -0,0 +1,167 @@ +package com.linkwork.service; + +import com.linkwork.model.dto.FileSpaceSyncRequest; +import com.linkwork.model.dto.FileSpaceSyncResponse; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Service; +import org.springframework.util.StringUtils; + +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +@Slf4j +@Service +@RequiredArgsConstructor +public class FileSpaceSyncService { + + private static final String SPACE_TYPE_USER = "USER"; + private static final String SPACE_TYPE_WORKSTATION = "WORKSTATION"; + + private final NfsStorageService nfsStorageService; + private final TaskOutputWorkspaceSyncService taskOutputWorkspaceSyncService; + + public FileSpaceSyncResponse 
syncSpace(String userId, FileSpaceSyncRequest request) { + if (!StringUtils.hasText(userId)) { + throw new IllegalArgumentException("userId is required"); + } + if (request == null) { + throw new IllegalArgumentException("request is required"); + } + if (!nfsStorageService.isConfigured()) { + throw new IllegalStateException("NFS storage is not configured"); + } + + String spaceType = normalizeSpaceType(request.getSpaceType()); + String workstationId = normalizeWorkstationId(spaceType, request.getWorkstationId()); + List scopes = resolveScopes(spaceType, workstationId, userId); + int scannedCount = 0; + int skippedCount = 0; + Map> artifactsByObjectName = new LinkedHashMap<>(); + + for (SyncScope scope : scopes) { + List objectNames = nfsStorageService.listObjects(scope.prefix()); + scannedCount += objectNames.size(); + for (String objectName : objectNames) { + String normalizedObjectName = normalizePath(objectName); + String relativePath = resolveRelativePath(scope, normalizedObjectName); + if (!StringUtils.hasText(relativePath) || shouldSkipRelativePath(relativePath)) { + skippedCount++; + continue; + } + if (artifactsByObjectName.containsKey(normalizedObjectName)) { + skippedCount++; + continue; + } + Map artifact = new LinkedHashMap<>(); + artifact.put("relative_path", relativePath); + artifact.put("object_name", normalizedObjectName); + artifact.put("action", "upsert"); + artifact.put("size", resolveFileSize(normalizedObjectName)); + artifactsByObjectName.put(normalizedObjectName, artifact); + } + } + + List> artifacts = new ArrayList<>(artifactsByObjectName.values()); + if (!artifacts.isEmpty()) { + taskOutputWorkspaceSyncService.syncTaskPathListArtifacts( + "MANUAL_SYNC", + userId, + workstationId, + artifacts + ); + } + + log.info("manual file space sync done: userId={}, spaceType={}, workstationId={}, scanned={}, synced={}, skipped={}", + userId, spaceType, workstationId, scannedCount, artifacts.size(), skippedCount); + return new FileSpaceSyncResponse( + 
spaceType, + workstationId, + scannedCount, + artifacts.size(), + skippedCount + ); + } + + private String normalizeSpaceType(String spaceType) { + if (!StringUtils.hasText(spaceType)) { + throw new IllegalArgumentException("spaceType is required"); + } + String normalized = spaceType.trim().toUpperCase(Locale.ROOT); + if (!SPACE_TYPE_USER.equals(normalized) && !SPACE_TYPE_WORKSTATION.equals(normalized)) { + throw new IllegalArgumentException("spaceType must be USER or WORKSTATION"); + } + return normalized; + } + + private String normalizeWorkstationId(String spaceType, String workstationId) { + if (!SPACE_TYPE_WORKSTATION.equals(spaceType)) { + return null; + } + if (!StringUtils.hasText(workstationId)) { + throw new IllegalArgumentException("workstationId is required when spaceType=WORKSTATION"); + } + return workstationId.trim(); + } + + private List resolveScopes(String spaceType, String workstationId, String userId) { + if (SPACE_TYPE_USER.equals(spaceType)) { + return List.of(new SyncScope("user-files/" + userId, "user/", true)); + } + // WORKSTATION 手动同步补扫任务日志(system/{ws}/logs/{userId}/**), + // 并以 workstation/logs/** 形态入库展示在记忆空间。 + return List.of( + new SyncScope("workstation/" + workstationId + "/" + userId, "workstation/", true), + new SyncScope("system/" + workstationId + "/logs/" + userId, "workstation/logs/", false) + ); + } + + private String resolveRelativePath(SyncScope scope, String objectName) { + String normalized = normalizePath(objectName); + String prefix = scope.prefix(); + String prefixWithSlash = prefix.endsWith("/") ? 
prefix : prefix + "/"; + if (!normalized.startsWith(prefixWithSlash)) { + return null; + } + String subPath = normalized.substring(prefixWithSlash.length()); + if (!StringUtils.hasText(subPath)) { + return null; + } + if (scope.skipDerivedArtifacts() && (subPath.startsWith("original/") || subPath.startsWith("parsed/"))) { + return null; + } + return scope.relativePrefix() + subPath; + } + + private boolean shouldSkipRelativePath(String relativePath) { + String normalized = normalizePath(relativePath); + return "user/MEMORY.md".equalsIgnoreCase(normalized) + || "workstation/MEMORY.md".equalsIgnoreCase(normalized); + } + + private long resolveFileSize(String objectName) { + try { + Path path = nfsStorageService.getAbsolutePath(objectName); + return Files.exists(path) ? Files.size(path) : 0L; + } catch (Exception e) { + log.debug("resolve nfs file size failed: objectName={}, err={}", objectName, e.getMessage()); + return 0L; + } + } + + private String normalizePath(String path) { + String normalized = path == null ? 
"" : path.trim().replace('\\', '/'); + while (normalized.startsWith("/")) { + normalized = normalized.substring(1); + } + return normalized; + } + + private record SyncScope(String prefix, String relativePrefix, boolean skipDerivedArtifacts) { + } +} diff --git a/back/src/main/java/com/linkwork/service/GitLabAuthService.java b/back/src/main/java/com/linkwork/service/GitLabAuthService.java new file mode 100644 index 0000000..b825b6b --- /dev/null +++ b/back/src/main/java/com/linkwork/service/GitLabAuthService.java @@ -0,0 +1,435 @@ +package com.linkwork.service; + +import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper; +import com.linkwork.mapper.GitLabAuthMapper; +import com.linkwork.model.entity.GitLabAuthEntity; +import lombok.Data; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.http.HttpEntity; +import org.springframework.http.HttpHeaders; +import org.springframework.http.HttpMethod; +import org.springframework.http.MediaType; +import org.springframework.http.ResponseEntity; +import org.springframework.stereotype.Service; +import org.springframework.util.LinkedMultiValueMap; +import org.springframework.util.MultiValueMap; +import org.springframework.util.StringUtils; +import org.springframework.web.client.RestTemplate; + +import java.time.Duration; +import java.time.LocalDateTime; +import java.util.List; +import java.util.Map; + +@Slf4j +@Service +@RequiredArgsConstructor +public class GitLabAuthService { + + private static final String DEFAULT_GITLAB_BASE_URL = "https://git.example.com"; + + private final GitLabAuthMapper gitLabAuthMapper; + private final RestTemplate restTemplate = new RestTemplate(); + + @Value("${robot.gitlab.base-url}") + private String gitlabBaseUrl; + + @Value("${robot.gitlab.auth-base-url:}") + private String gitlabAuthBaseUrl; + + @Value("${robot.gitlab.client-id}") + private String clientId; + + 
@Value("${robot.gitlab.client-secret}") + private String clientSecret; + + @Value("${robot.gitlab.redirect-uri}") + private String defaultRedirectUri; + + public String getAuthUrl(String redirectUri, String scopeType) { + String normalizedScopeType = normalizeScopeType(scopeType); + String uri = resolveRedirectUri(redirectUri); + String scope = resolveScope(normalizedScopeType); + return String.format("%s/oauth/authorize?client_id=%s&redirect_uri=%s&response_type=code&state=%s&scope=%s", + resolveOauthBaseUrl(), clientId, uri, normalizedScopeType, scope); + } + + public void callback(String userId, String code, String redirectUri, String scopeType) { + String normalizedScopeType = normalizeScopeType(scopeType); + String uri = resolveRedirectUri(redirectUri); + + // 1. Exchange Token + String tokenUrl = resolveOauthBaseUrl() + "/oauth/token"; + MultiValueMap params = new LinkedMultiValueMap<>(); + params.add("client_id", clientId); + params.add("client_secret", clientSecret); + params.add("code", code); + params.add("grant_type", "authorization_code"); + params.add("redirect_uri", uri); + + HttpHeaders headers = new HttpHeaders(); + headers.setContentType(MediaType.APPLICATION_FORM_URLENCODED); + + ResponseEntity response = restTemplate.postForEntity(tokenUrl, new HttpEntity<>(params, headers), Map.class); + Map body = response.getBody(); + + if (body == null || body.get("access_token") == null) { + throw new RuntimeException("Failed to get access token from GitLab"); + } + + String accessToken = (String) body.get("access_token"); + String refreshToken = (String) body.get("refresh_token"); + Integer expiresIn = (Integer) body.get("expires_in"); + if (expiresIn == null) { + expiresIn = 7200; + } + + // 2. 
Get User Info + String userUrl = resolveApiBaseUrl() + "/api/v4/user"; + HttpHeaders authHeaders = new HttpHeaders(); + authHeaders.setBearerAuth(accessToken); + ResponseEntity userResp = restTemplate.exchange(userUrl, HttpMethod.GET, new HttpEntity<>(authHeaders), Map.class); + Map userBody = userResp.getBody(); + + if (userBody == null) { + throw new RuntimeException("Failed to get user info from GitLab"); + } + + Long gitlabId = ((Number) userBody.get("id")).longValue(); + String username = (String) userBody.get("username"); + String name = (String) userBody.get("name"); + String avatarUrl = (String) userBody.get("avatar_url"); + + String scope = resolveScope(normalizedScopeType); + + // 3. Save to DB - use custom query that includes soft-deleted records + GitLabAuthEntity entity = gitLabAuthMapper.selectIncludingDeleted(userId, gitlabId, scope); + + if (entity == null) { + entity = new GitLabAuthEntity(); + entity.setUserId(userId); + entity.setGitlabId(gitlabId); + entity.setCreatedAt(LocalDateTime.now()); + } + + entity.setUsername(username); + entity.setName(name); + entity.setAvatarUrl(avatarUrl); + entity.setAccessToken(encrypt(accessToken)); + entity.setRefreshToken(encrypt(refreshToken)); + entity.setExpiresAt(LocalDateTime.now().plusSeconds(expiresIn)); + entity.setTokenAlias(maskToken(accessToken)); + entity.setScope(scope); + entity.setUpdatedAt(LocalDateTime.now()); + entity.setIsDeleted(false); + + if (entity.getId() == null) { + gitLabAuthMapper.insert(entity); + } else { + gitLabAuthMapper.updateIncludingDeleted(entity); + } + } + + public List listUsers(String userId) { + List authList = gitLabAuthMapper.selectList(new LambdaQueryWrapper() + .eq(GitLabAuthEntity::getUserId, userId) + .eq(GitLabAuthEntity::getIsDeleted, false) + .orderByDesc(GitLabAuthEntity::getCreatedAt)); + + // 自动刷新过期的 token,让前端看到最新的过期时间 + LocalDateTime now = LocalDateTime.now(); + for (GitLabAuthEntity auth : authList) { + if (auth.getExpiresAt() != null && 
auth.getExpiresAt().isBefore(now)) { + tryRefreshToken(auth); + } + } + + return authList; + } + + public void deleteUser(String userId, String authId) { + gitLabAuthMapper.delete(new LambdaQueryWrapper() + .eq(GitLabAuthEntity::getId, authId) + .eq(GitLabAuthEntity::getUserId, userId)); + } + + /** + * 获取用户最新的 GitLab 授权。 + */ + public GitLabAuthEntity getLatestAuth(String userId) { + if (!StringUtils.hasText(userId)) { + return null; + } + + List authList = gitLabAuthMapper.selectList( + new LambdaQueryWrapper() + .eq(GitLabAuthEntity::getUserId, userId) + .eq(GitLabAuthEntity::getIsDeleted, false) + .orderByDesc(GitLabAuthEntity::getUpdatedAt) + .last("LIMIT 1") + ); + + if (authList == null || authList.isEmpty()) { + return null; + } + return authList.get(0); + } + + /** + * 根据 authId 获取有效 token,若已过期或将过期则先刷新。 + */ + public ValidToken getValidTokenByAuthId(Long authId, Duration refreshAheadWindow) { + if (authId == null) { + return null; + } + + GitLabAuthEntity auth = gitLabAuthMapper.selectById(authId); + if (auth == null || Boolean.TRUE.equals(auth.getIsDeleted())) { + return null; + } + + LocalDateTime now = LocalDateTime.now(); + Duration window = refreshAheadWindow != null ? 
refreshAheadWindow : Duration.ofMinutes(5); + LocalDateTime refreshThreshold = now.plus(window); + + if (auth.getExpiresAt() == null || !auth.getExpiresAt().isAfter(refreshThreshold)) { + String refreshedToken = tryRefreshToken(auth); + if (StringUtils.hasText(refreshedToken)) { + auth = gitLabAuthMapper.selectById(authId); + if (auth == null || Boolean.TRUE.equals(auth.getIsDeleted())) { + return null; + } + } + } + + if (auth.getExpiresAt() == null || !auth.getExpiresAt().isAfter(now)) { + log.warn("GitLab token expired after refresh attempt: authId={}, tokenAlias={}", + authId, auth.getTokenAlias()); + throw new IllegalStateException("GitLab token expired and refresh failed: authId=" + authId); + } + + String token = decrypt(auth.getAccessToken()); + if (!StringUtils.hasText(token)) { + throw new IllegalStateException("GitLab token is blank: authId=" + authId); + } + + ValidToken validToken = new ValidToken(); + validToken.setToken(token); + validToken.setTokenAlias(auth.getTokenAlias()); + validToken.setExpiresAt(auth.getExpiresAt()); + validToken.setScope(auth.getScope()); + validToken.setTokenType(resolveTokenType(auth, token)); + return validToken; + } + + /** + * 获取用户有效的 GitLab access token + * 优先返回未过期的 token;如果过期则自动使用 refresh_token 续期 + */ + public String getAccessToken(String userId) { + GitLabAuthEntity latest = getLatestAuth(userId); + if (latest == null) { + log.debug("No GitLab auth found for userId: {}", userId); + return null; + } + + ValidToken validToken = getValidTokenByAuthId(latest.getId(), Duration.ofMinutes(5)); + if (validToken == null) { + log.warn("No valid GitLab token available for userId: {}", userId); + return null; + } + return validToken.getToken(); + } + + /** + * 通过当前有效 token 查询 GitLab 用户身份,用于 git commit 身份注入。 + */ + public CommitIdentity resolveCommitIdentity(String accessToken) { + if (!StringUtils.hasText(accessToken)) { + throw new IllegalArgumentException("accessToken is required"); + } + + String userUrl = resolveApiBaseUrl() 
+ "/api/v4/user"; + HttpHeaders headers = new HttpHeaders(); + headers.setBearerAuth(accessToken); + + ResponseEntity response = restTemplate.exchange( + userUrl, + HttpMethod.GET, + new HttpEntity<>(headers), + Map.class + ); + Map body = response.getBody(); + if (body == null) { + throw new IllegalStateException("GitLab user info response is empty"); + } + + String username = body.get("username") == null ? "" : String.valueOf(body.get("username")).trim(); + String email = body.get("email") == null ? "" : String.valueOf(body.get("email")).trim(); + if (!StringUtils.hasText(email)) { + email = body.get("public_email") == null ? "" : String.valueOf(body.get("public_email")).trim(); + } + if (!StringUtils.hasText(username)) { + throw new IllegalStateException("GitLab user info missing username"); + } + if (!StringUtils.hasText(email)) { + throw new IllegalStateException("GitLab user info missing email/public_email"); + } + + CommitIdentity identity = new CommitIdentity(); + identity.setUsername(username); + identity.setEmail(email); + return identity; + } + + /** + * 根据 scopeType 解析实际的 GitLab scope 字符串 + */ + private String resolveScope(String scopeType) { + if ("read".equals(scopeType)) { + return "read_user read_repository"; + } + // Keep write flow compatible with existing GitLab OAuth app configuration. + return "read_user api"; + } + + /** + * 多因子判定 tokenType(oauth / pat): + * 1. token 以 glpat- 开头 → PAT(GitLab Personal Access Token 固定前缀) + * 2. scope 含 api / read_repository 等 OAuth 授权特征 → oauth + * 3. 存在 refresh_token → oauth + * 4. 
兜底 → oauth(当前 OAuth 授权流是主要入口) + */ + private String resolveTokenType(GitLabAuthEntity auth, String decryptedToken) { + if (StringUtils.hasText(decryptedToken) && decryptedToken.startsWith("glpat-")) { + return "pat"; + } + String scope = auth.getScope(); + if (StringUtils.hasText(scope) && (scope.contains("api") || scope.contains("read_repository"))) { + return "oauth"; + } + if (StringUtils.hasText(decrypt(auth.getRefreshToken()))) { + return "oauth"; + } + return "oauth"; + } + + /** + * 使用 refresh_token 向 GitLab 换取新的 access_token + */ + protected String tryRefreshToken(GitLabAuthEntity auth) { + String refreshToken = decrypt(auth.getRefreshToken()); + if (!StringUtils.hasText(refreshToken)) { + log.warn("No refresh token available for GitLab auth id: {}", auth.getId()); + return null; + } + + try { + String tokenUrl = resolveOauthBaseUrl() + "/oauth/token"; + MultiValueMap params = new LinkedMultiValueMap<>(); + params.add("client_id", clientId); + params.add("client_secret", clientSecret); + params.add("refresh_token", refreshToken); + params.add("grant_type", "refresh_token"); + + HttpHeaders headers = new HttpHeaders(); + headers.setContentType(MediaType.APPLICATION_FORM_URLENCODED); + + ResponseEntity response = restTemplate.postForEntity(tokenUrl, new HttpEntity<>(params, headers), Map.class); + Map body = response.getBody(); + + if (body == null || body.get("access_token") == null) { + log.warn("GitLab refresh response missing access_token for auth id: {}", auth.getId()); + return null; + } + + String newAccessToken = (String) body.get("access_token"); + String newRefreshToken = (String) body.get("refresh_token"); + Integer expiresIn = (Integer) body.get("expires_in"); + if (expiresIn == null) { + expiresIn = 7200; + } + + auth.setAccessToken(encrypt(newAccessToken)); + if (newRefreshToken != null) { + auth.setRefreshToken(encrypt(newRefreshToken)); + } + auth.setExpiresAt(LocalDateTime.now().plusSeconds(expiresIn)); + 
auth.setTokenAlias(maskToken(newAccessToken));
auth.setUpdatedAt(LocalDateTime.now());
gitLabAuthMapper.updateById(auth);

log.info("GitLab token refreshed: userId={}, username={}, tokenAlias={}, expiresAt={}",
        auth.getUserId(), auth.getUsername(), auth.getTokenAlias(), auth.getExpiresAt());
return newAccessToken;
} catch (Exception e) {
    // Refresh is best-effort: callers treat null as "no usable token".
    log.warn("Failed to refresh GitLab token for auth id {}: {}", auth.getId(), e.getMessage());
    return null;
}
}

// OAuth endpoint base URL: dedicated auth base first, then API base, then default.
private String resolveOauthBaseUrl() {
    if (StringUtils.hasText(gitlabAuthBaseUrl)) {
        return gitlabAuthBaseUrl;
    }
    if (StringUtils.hasText(gitlabBaseUrl)) {
        return gitlabBaseUrl;
    }
    return DEFAULT_GITLAB_BASE_URL;
}

// REST API base URL: configured base or the built-in default host.
private String resolveApiBaseUrl() {
    if (StringUtils.hasText(gitlabBaseUrl)) {
        return gitlabBaseUrl;
    }
    return DEFAULT_GITLAB_BASE_URL;
}

// A caller-supplied redirect URI wins; otherwise the configured default is used.
private String resolveRedirectUri(String redirectUri) {
    if (StringUtils.hasText(redirectUri)) {
        return redirectUri;
    }
    return defaultRedirectUri;
}

// Any value other than "read" (including null) is normalized to "write".
private String normalizeScopeType(String scopeType) {
    if ("read".equals(scopeType)) {
        return "read";
    }
    return "write";
}

private String encrypt(String token) {
    // TODO: Implement reversible encryption
    // SECURITY NOTE(review): identity implementation — tokens are stored in plaintext.
    return token;
}

private String decrypt(String encryptedToken) {
    // TODO: Implement reversible encryption (matches encrypt)
    return encryptedToken;
}

// Display alias: first and last 4 characters; short/null tokens fully masked.
private String maskToken(String token) {
    if (token == null || token.length() < 10) {
        return "******";
    }
    return token.substring(0, 4) + "****" + token.substring(token.length() - 4);
}

// Snapshot of a usable (decrypted) token handed to callers.
@Data
public static class ValidToken {
    private String token;
    private String tokenAlias;
    private LocalDateTime expiresAt;
    private String scope;
    private String tokenType;
}

// git author identity resolved from the GitLab profile (username + email).
@Data
public static class CommitIdentity {
    private String username;
    private String email;
}
}
diff --git a/back/src/main/java/com/linkwork/service/ImageBuildService.java
b/back/src/main/java/com/linkwork/service/ImageBuildService.java new file mode 100644 index 0000000..38afd60 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/ImageBuildService.java @@ -0,0 +1,1292 @@ +package com.linkwork.service; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.github.dockerjava.api.DockerClient; +import com.github.dockerjava.api.async.ResultCallback; +import com.github.dockerjava.api.command.BuildImageResultCallback; +import com.github.dockerjava.api.command.ExecCreateCmdResponse; +import com.github.dockerjava.api.command.InspectExecResponse; +import com.github.dockerjava.api.command.InspectImageResponse; +import com.github.dockerjava.api.model.AuthConfig; +import com.github.dockerjava.api.model.BuildResponseItem; +import com.github.dockerjava.api.model.Container; +import com.github.dockerjava.api.model.Frame; +import com.github.dockerjava.api.model.Image; +import com.github.dockerjava.api.model.PullResponseItem; +import com.github.dockerjava.api.model.PushResponseItem; +import com.github.dockerjava.api.exception.NotFoundException; +import com.github.dockerjava.core.DefaultDockerClientConfig; +import com.github.dockerjava.core.DockerClientConfig; +import com.github.dockerjava.core.DockerClientImpl; +import com.github.dockerjava.zerodep.ZerodepDockerHttpClient; +import com.github.dockerjava.transport.DockerHttpClient; +import com.linkwork.config.BuildQueueConfig; +import com.linkwork.config.ImageBuildConfig; +import com.linkwork.model.dto.ImageBuildResult; +import com.linkwork.model.dto.ServiceBuildRequest; +import com.linkwork.model.enums.DeployMode; +import jakarta.annotation.PostConstruct; +import jakarta.annotation.PreDestroy; +import lombok.extern.slf4j.Slf4j; +import org.springframework.core.io.ClassPathResource; +import org.springframework.scheduling.annotation.Scheduled; +import org.springframework.stereotype.Service; +import 
org.springframework.util.StringUtils; + +import java.io.ByteArrayOutputStream; +import java.io.BufferedReader; +import java.io.File; +import java.nio.file.DirectoryStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.time.Duration; +import java.time.Instant; +import java.time.OffsetDateTime; +import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; +import java.util.ArrayList; +import java.util.Base64; +import java.util.Collections; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.regex.Pattern; + +/** + * 镜像构建服务 + * + * 职责: + * 1. 生成 Dockerfile(基础镜像 + ENV 声明 + 执行 build.sh) + * 2. 调用 Docker API 构建 Agent 镜像 + * 3. 推送镜像到仓库(K8s 模式) + * + * 设计说明: + * - 仅构建 Agent 镜像,Runner 由运行时 agent 启动 + * - token 放入 buildEnvVars,在 build.sh 执行前 export + */ +@Service +@Slf4j +public class ImageBuildService { + + private static final DateTimeFormatter TIMESTAMP_FORMATTER = + DateTimeFormatter.ofPattern("yyyyMMddHHmmss").withZone(ZoneOffset.UTC); + private static final Pattern LOCAL_BUILD_REPO_PATTERN = Pattern.compile("^service-.*-agent$"); + + private final ImageBuildConfig config; + private final BuildQueueConfig buildQueueConfig; + private final ObjectMapper objectMapper; + private final ScheduleEventPublisher eventPublisher; + private final BuildLogBuffer logBuffer; + private final NfsStorageService nfsStorageService; + private final BuildRecordService buildRecordService; + private DockerClient dockerClient; + + public ImageBuildService(ImageBuildConfig config, + BuildQueueConfig buildQueueConfig, + ObjectMapper objectMapper, + ScheduleEventPublisher eventPublisher, + BuildLogBuffer logBuffer, + NfsStorageService nfsStorageService, + 
BuildRecordService buildRecordService) { + this.config = config; + this.buildQueueConfig = buildQueueConfig; + this.objectMapper = objectMapper; + this.eventPublisher = eventPublisher; + this.logBuffer = logBuffer; + this.nfsStorageService = nfsStorageService; + this.buildRecordService = buildRecordService; + } + + @PostConstruct + public void init() { + log.info("Initializing Docker client with host: {}", config.getDockerHost()); + + DefaultDockerClientConfig.Builder configBuilder = DefaultDockerClientConfig.createDefaultConfigBuilder() + .withDockerHost(config.getDockerHost()); + + // 注册 Registry 凭证到 DockerClientConfig,确保 Docker Daemon 能正确完成 Harbor Token 鉴权 + String registry = config.getRegistry(); + String registryHost = ""; + if (StringUtils.hasText(registry) && StringUtils.hasText(config.getRegistryUsername())) { + registryHost = registry; + if (registryHost.contains("/")) { + registryHost = registryHost.substring(0, registryHost.indexOf("/")); + } + configBuilder + .withRegistryUrl("http://" + registryHost) + .withRegistryUsername(config.getRegistryUsername()) + .withRegistryPassword(config.getRegistryPassword()); + log.info("Registry credentials configured for: {}", registryHost); + } + + // ★ 写入 ~/.docker/config.json,确保 Docker Daemon 能从凭据文件读取认证信息 + if (StringUtils.hasText(registryHost) && StringUtils.hasText(config.getRegistryUsername())) { + writeDockerConfigJson(registryHost, config.getRegistryUsername(), config.getRegistryPassword()); + } + + DockerClientConfig clientConfig = configBuilder.build(); + + DockerHttpClient httpClient = new ZerodepDockerHttpClient.Builder() + .dockerHost(clientConfig.getDockerHost()) + .sslConfig(clientConfig.getSSLConfig()) + .maxConnections(100) + .connectionTimeout(Duration.ofSeconds(30)) + .responseTimeout(Duration.ofSeconds(config.getBuildTimeout())) + .build(); + + this.dockerClient = DockerClientImpl.getInstance(clientConfig, httpClient); + + // 确保构建上下文根目录存在 + try { + Path buildContextBase = 
Path.of(config.getBuildContextDir()); + if (!Files.exists(buildContextBase)) { + Files.createDirectories(buildContextBase); + log.info("Created build context directory: {}", buildContextBase); + } + } catch (IOException e) { + log.warn("Failed to create build context directory: {}", e.getMessage()); + } + + log.info("Docker client initialized successfully"); + } + + @PreDestroy + public void cleanup() { + if (dockerClient != null) { + try { + dockerClient.close(); + } catch (IOException e) { + log.warn("Failed to close Docker client: {}", e.getMessage()); + } + } + } + + /** + * 写入 Docker 配置文件 ~/.docker/config.json + * 确保 Docker daemon 能通过凭据文件完成 Harbor Token 鉴权 + */ + private void writeDockerConfigJson(String registryHost, String username, String password) { + try { + Path dockerConfigDir = Path.of(System.getProperty("user.home"), ".docker"); + Files.createDirectories(dockerConfigDir); + Path configFile = dockerConfigDir.resolve("config.json"); + + String auth = Base64.getEncoder().encodeToString( + (username + ":" + password).getBytes(java.nio.charset.StandardCharsets.UTF_8) + ); + + // 同时写入 带http://前缀 和 不带前缀 的两种格式,确保匹配 + String configJson = "{\n" + + " \"auths\": {\n" + + " \"" + registryHost + "\": {\n" + + " \"auth\": \"" + auth + "\"\n" + + " },\n" + + " \"http://" + registryHost + "\": {\n" + + " \"auth\": \"" + auth + "\"\n" + + " }\n" + + " }\n" + + "}"; + + Files.writeString(configFile, configJson); + log.info("Docker config.json written to: {}", configFile); + } catch (Exception e) { + log.warn("Failed to write Docker config.json: {}", e.getMessage()); + } + } + + /** + * 构建 Agent 镜像 + * + * @param request 服务构建请求 + * @return 构建结果 + */ + public ImageBuildResult buildImages(ServiceBuildRequest request) { + String serviceId = request.getServiceId(); + String buildId = request.getBuildId(); + long startTime = System.currentTimeMillis(); + + log.info("Starting image build for service: {}, buildId: {}", serviceId, buildId); + publishLog(buildId, "info", "=== 
开始构建镜像 ==="); + publishLog(buildId, "info", "服务ID: " + serviceId); + + try { + // 生成时间戳 tag + String timestamp = TIMESTAMP_FORMATTER.format(Instant.now()); + + // 解析镜像仓库 + String registry = resolveRegistry(request); + boolean shouldPush = shouldPushImage(request, registry); + publishLog(buildId, "info", "镜像仓库: " + (StringUtils.hasText(registry) ? registry : "本地")); + + // 构建 Agent 镜像 + String agentBaseImage = resolveAgentBaseImage(request); + publishLog(buildId, "info", "基础镜像: " + agentBaseImage); + publishLog(buildId, "info", ""); + publishLog(buildId, "info", "=== 开始 Docker 构建 ==="); + + String agentImageTag = buildAgentImage(serviceId, timestamp, registry, + agentBaseImage, request.getBuildEnvVars(), buildId); + + publishLog(buildId, "info", ""); + publishLog(buildId, "info", "镜像构建成功: " + agentImageTag); + + // K8s 模式推送镜像(仅请求配置了镜像仓库时推送) + boolean pushed = false; + if (shouldPush) { + publishLog(buildId, "info", ""); + publishLog(buildId, "info", "=== 开始推送镜像 ==="); + pushImage(agentImageTag, buildId, registry); + pushed = true; + publishLog(buildId, "info", "镜像推送成功"); + + // 推送成功后删除本地镜像(K8s 会从仓库拉取,不需要保留本地副本) + removeLocalImage(agentImageTag, buildId); + } else { + publishLog(buildId, "warn", "跳过镜像推送(未配置镜像仓库或非 K8S 模式)"); + log.info("Image push skipped (deployMode={}, registry={}) for image: {}", + request.getDeployMode(), registry, agentImageTag); + syncLocalImageToKindIfNeeded(request, agentImageTag, buildId); + } + + long duration = System.currentTimeMillis() - startTime; + publishLog(buildId, "info", ""); + publishLog(buildId, "info", "=== 构建完成 ==="); + publishLog(buildId, "info", String.format("总耗时: %.1f 秒", duration / 1000.0)); + markBuildCompleted(buildId, true); + log.info("Image build completed for service: {}, duration: {}ms", serviceId, duration); + + return ImageBuildResult.success(agentImageTag, null, duration, pushed); + + } catch (Exception e) { + long duration = System.currentTimeMillis() - startTime; + publishLog(buildId, "error", ""); + 
publishLog(buildId, "error", "=== 构建失败 ==="); + publishLog(buildId, "error", "错误: " + e.getMessage()); + markBuildCompleted(buildId, false); + log.error("Image build failed for service: {}, duration: {}ms, error: {}", + serviceId, duration, e.getMessage(), e); + return ImageBuildResult.failed(e.getMessage()); + } + } + + /** + * 发布构建日志 + * 优先使用 BuildLogBuffer(直接 SSE 推送),同时保留 Redis Stream 兼容 + */ + private void publishLog(String buildId, String level, String message) { + if (!StringUtils.hasText(buildId)) return; + + // 写入日志缓冲区(SSE 直接推送) + if (logBuffer != null) { + logBuffer.addLog(buildId, level, message); + } + } + + /** + * 标记构建完成并上传日志到 OSS + * @param buildId 构建 ID + * @param success 是否成功 + */ + private void markBuildCompleted(String buildId, boolean success) { + if (logBuffer != null && StringUtils.hasText(buildId)) { + logBuffer.markCompleted(buildId, success); + + // 上传日志到 OSS + uploadBuildLogToOss(buildId); + + // 10 分钟后清理日志缓冲区 + logBuffer.scheduleCleanup(buildId, 10); + } + } + + /** + * 上传构建日志到 NFS 存储 + */ + private void uploadBuildLogToOss(String buildId) { + try { + if (nfsStorageService == null || !nfsStorageService.isConfigured()) { + log.warn("NFS 存储未配置,跳过日志上传: {}", buildId); + return; + } + + String logContent = logBuffer.exportAsText(buildId); + if (logContent == null || logContent.isEmpty()) { + log.warn("构建日志为空,跳过上传: {}", buildId); + return; + } + + String filename = buildId + ".txt"; + + String logPath = nfsStorageService.uploadText(logContent, "build-logs", filename); + log.info("构建日志已上传到 NFS: {} -> {}", buildId, logPath); + + if (buildRecordService != null) { + buildRecordService.updateLogUrl(buildId, logPath); + } + + } catch (Exception e) { + log.error("上传构建日志失败: {}, error: {}", buildId, e.getMessage(), e); + } + } + + /** + * 解析镜像仓库地址 + */ + private String resolveRegistry(ServiceBuildRequest request) { + if (!StringUtils.hasText(request.getImageRegistry())) { + return ""; + } + String registry = request.getImageRegistry().trim(); + while 
(registry.endsWith("/")) { + registry = registry.substring(0, registry.length() - 1); + } + return registry; + } + + /** + * 仅当 K8s 且请求显式配置了仓库时才推送 + */ + private boolean shouldPushImage(ServiceBuildRequest request, String registry) { + return request.getDeployMode() == DeployMode.K8S && StringUtils.hasText(registry); + } + + private void syncLocalImageToKindIfNeeded(ServiceBuildRequest request, String imageTag, String buildId) { + if (request.getDeployMode() != DeployMode.K8S) { + return; + } + if (!config.isAutoLoadToKind()) { + log.info("Auto kind image load disabled, skip local image sync: {}", imageTag); + return; + } + if (hasRegistryHost(imageTag)) { + return; + } + + List kindNodes = findKindNodeContainers(); + if (kindNodes.isEmpty()) { + throw new IllegalStateException("本地镜像模式下未发现 Kind 节点,无法自动分发镜像。" + + "请配置 imageRegistry 推送远程仓库,或检查 Kind 集群/节点标签配置。"); + } + + publishLog(buildId, "info", "检测到本地镜像,开始同步到 Kind 节点"); + for (Container node : kindNodes) { + String nodeName = resolveContainerName(node); + if (!StringUtils.hasText(nodeName)) { + continue; + } + publishLog(buildId, "debug", "同步镜像到节点: " + nodeName); + importImageIntoKindNode(imageTag, node, nodeName); + } + publishLog(buildId, "info", "Kind 节点镜像同步完成"); + } + + private List findKindNodeContainers() { + List all = dockerClient.listContainersCmd().withShowAll(false).exec(); + List nodes = new ArrayList<>(); + String expectedCluster = normalize(config.getKindClusterName()); + + for (Container c : all) { + Map labels = c.getLabels(); + if (labels == null) { + continue; + } + String cluster = normalize(labels.get("io.x-k8s.kind.cluster")); + String role = normalize(labels.get("io.x-k8s.kind.role")); + if (!StringUtils.hasText(cluster) || !StringUtils.hasText(role)) { + continue; + } + if (StringUtils.hasText(expectedCluster) && !expectedCluster.equals(cluster)) { + continue; + } + if (!"control-plane".equals(role) && !"worker".equals(role)) { + continue; + } + nodes.add(c); + } + return nodes; + } + + 
private void importImageIntoKindNode(String imageTag, Container node, String nodeName) { + ExecCreateCmdResponse exec = dockerClient.execCreateCmd(node.getId()) + .withAttachStdin(true) + .withAttachStdout(true) + .withAttachStderr(true) + .withCmd("ctr", "-n", "k8s.io", "images", "import", "-") + .exec(); + + ByteArrayOutputStream output = new ByteArrayOutputStream(); + try (InputStream tarStream = dockerClient.saveImageCmd(imageTag).exec()) { + ResultCallback.Adapter callback = new ResultCallback.Adapter<>() { + @Override + public void onNext(Frame item) { + try { + if (item != null && item.getPayload() != null) { + output.write(item.getPayload()); + } + } catch (IOException ignored) { + // 输出只用于排障,写失败不影响主流程 + } + super.onNext(item); + } + }; + + dockerClient.execStartCmd(exec.getId()) + .withStdIn(tarStream) + .exec(callback) + .awaitCompletion(config.getKindLoadTimeout(), TimeUnit.SECONDS); + + InspectExecResponse inspect = dockerClient.inspectExecCmd(exec.getId()).exec(); + Long exitCode = inspect != null ? 
inspect.getExitCodeLong() : null; + if (exitCode == null || exitCode != 0L) { + String details = output.toString(); + throw new IllegalStateException("Kind 节点导入失败: node=" + nodeName + + ", exitCode=" + exitCode + ", output=" + details); + } + log.info("Image imported to kind node successfully: node={}, image={}", nodeName, imageTag); + } catch (Exception e) { + throw new RuntimeException("同步镜像到 Kind 节点失败: node=" + nodeName + ", image=" + imageTag, e); + } + } + + private boolean hasRegistryHost(String image) { + if (!StringUtils.hasText(image)) { + return false; + } + String value = image.trim(); + int slash = value.indexOf('/'); + if (slash <= 0) { + return false; + } + String first = value.substring(0, slash); + return first.contains(".") || first.contains(":") || "localhost".equals(first); + } + + private String resolveContainerName(Container container) { + if (container == null || container.getNames() == null || container.getNames().length == 0) { + return ""; + } + String name = container.getNames()[0]; + if (name == null) { + return ""; + } + return name.startsWith("/") ? name.substring(1) : name; + } + + private String normalize(String value) { + return value == null ? 
"" : value.trim(); + } + + /** + * 解析 Agent 基础镜像(使用系统默认配置) + */ + private String resolveAgentBaseImage(ServiceBuildRequest request) { + // 直接使用系统默认配置,不再从请求中获取 + return config.getDefaultAgentBaseImage(); + } + + /** + * 构建 Agent 镜像 + */ + private String buildAgentImage(String serviceId, String timestamp, String registry, + String baseImage, Map envVars, + String buildId) throws Exception { + String imageName = generateImageName(registry, serviceId, "agent", timestamp); + + log.info("Building Agent image: {}, baseImage: {}", imageName, baseImage); + publishLog(buildId, "info", "目标镜像: " + imageName); + + // 确保基础镜像已缓存到本地(避免每次 docker build 都从 registry 拉取) + ensureBaseImageCached(baseImage, buildId); + + // 创建构建上下文目录 + publishLog(buildId, "debug", "创建构建上下文..."); + Path buildContextPath = createBuildContext(serviceId, baseImage, envVars); + publishLog(buildId, "debug", "构建上下文路径: " + buildContextPath); + + try { + // 执行构建 + buildImage(buildContextPath, imageName, buildId); + return imageName; + } finally { + // 清理构建上下文 + publishLog(buildId, "debug", "清理构建上下文..."); + cleanupBuildContext(buildContextPath); + // 清理残留的历史构建上下文 + cleanupStaleContexts(); + } + } + + /** + * 确保基础镜像已缓存到本地 + * + * 先通过 inspectImageCmd 检查本地是否已存在该镜像: + * - 已存在 → 直接使用,跳过网络拉取(毫秒级) + * - 不存在 → 执行 pull 并输出进度日志 + * + * 这样 docker build 的 FROM 指令不再需要联网验证/拉取,大幅加快构建速度。 + */ + private void ensureBaseImageCached(String baseImage, String buildId) { + try { + dockerClient.inspectImageCmd(baseImage).exec(); + publishLog(buildId, "info", "基础镜像已缓存在本地,跳过拉取"); + log.info("Base image already cached locally: {}", baseImage); + } catch (NotFoundException e) { + // 本地不存在,需要拉取 + publishLog(buildId, "info", "本地未找到基础镜像,开始拉取: " + baseImage); + log.info("Base image not found locally, pulling: {}", baseImage); + try { + final String finalBuildId = buildId; + dockerClient.pullImageCmd(baseImage) + .exec(new ResultCallback.Adapter() { + @Override + public void onNext(PullResponseItem item) { + if (item.getStatus() != null) { + 
String status = item.getStatus(); + // 只记录关键进度,避免日志刷屏 + if (status.contains("Pulling") || status.contains("Pull complete") || + status.contains("Downloaded") || status.contains("digest") || + status.contains("Status")) { + publishLog(finalBuildId, "info", "[PULL] " + status); + } + } + } + }) + .awaitCompletion(config.getBuildTimeout(), TimeUnit.SECONDS); + publishLog(buildId, "info", "基础镜像拉取完成"); + log.info("Base image pulled successfully: {}", baseImage); + } catch (Exception pullEx) { + publishLog(buildId, "warn", "基础镜像拉取失败(将由 docker build 重试): " + pullEx.getMessage()); + log.warn("Failed to pre-pull base image: {}, will retry during build", baseImage, pullEx); + } + } catch (Exception e) { + publishLog(buildId, "warn", "检查本地镜像异常: " + e.getMessage()); + log.warn("Failed to inspect local image: {}", baseImage, e); + } + } + + /** + * 生成镜像名称 + * 格式:{registry}/service-{serviceId}-agent:{serviceId}-{timestamp} + */ + private String generateImageName(String registry, String serviceId, String type, String timestamp) { + String tag = serviceId + "-" + timestamp; + String imageName = String.format("service-%s-%s:%s", serviceId, type, tag); + if (StringUtils.hasText(registry)) { + return registry + "/" + imageName; + } + return imageName; + } + + /** + * 创建构建上下文目录 + */ + private Path createBuildContext(String serviceId, String baseImage, + Map envVars) throws IOException { + // 创建临时目录 + Path contextDir = Files.createTempDirectory( + Path.of(config.getBuildContextDir()), + String.format("build-%s-", serviceId) + ); + + // 生成 Dockerfile + String dockerfile = generateDockerfile(baseImage, envVars); + Files.writeString(contextDir.resolve("Dockerfile"), dockerfile); + + // 复制 build.sh(优先从 classpath 读取,其次从文件系统) + boolean buildScriptCopied = false; + + // 1. 
尝试从 classpath 读取(打包后的资源) + try { + ClassPathResource resource = new ClassPathResource("scripts/build.sh"); + if (resource.exists()) { + try (var inputStream = resource.getInputStream()) { + Files.copy(inputStream, contextDir.resolve("build.sh")); + buildScriptCopied = true; + log.debug("Build script loaded from classpath"); + } + } + } catch (Exception e) { + log.warn("Failed to load build script from classpath: {}", e.getMessage()); + } + + // 2. 如果 classpath 中没有,尝试从文件系统读取 + if (!buildScriptCopied) { + Path buildScript = Path.of(config.getBuildScriptPath()); + if (Files.exists(buildScript)) { + Files.copy(buildScript, contextDir.resolve("build.sh")); + buildScriptCopied = true; + log.debug("Build script loaded from file system: {}", buildScript); + } + } + + // 3. 如果都没有,创建空的 build.sh 占位 + if (!buildScriptCopied) { + Files.writeString(contextDir.resolve("build.sh"), "#!/bin/bash\n# Placeholder build script\necho 'Build script executed'\n"); + log.warn("Build script not found, using placeholder"); + } + + // 复制 Cedar 策略文件到构建上下文(zzd fail-closed 需要至少一个 .cedar 文件) + Path cedarDir = contextDir.resolve("cedar-policies"); + Files.createDirectories(cedarDir); + try { + ClassPathResource cedarResource = new ClassPathResource("scripts/00-platform.cedar"); + if (cedarResource.exists()) { + try (var inputStream = cedarResource.getInputStream()) { + Files.copy(inputStream, cedarDir.resolve("00-platform.cedar")); + log.debug("Cedar policy loaded from classpath"); + } + } else { + log.warn("00-platform.cedar not found in classpath"); + } + } catch (Exception e) { + log.warn("Failed to load cedar policy: {}", e.getMessage()); + } + + // 复制默认 config.json 到构建上下文(镜像内兜底,运行时由 ConfigMap 覆盖挂载) + try { + ClassPathResource configResource = new ClassPathResource("scripts/config.json"); + if (configResource.exists()) { + try (var inputStream = configResource.getInputStream()) { + Files.copy(inputStream, contextDir.resolve("config.json")); + log.debug("Default config.json loaded from 
classpath"); + } + } + } catch (Exception e) { + log.warn("Failed to load default config.json: {}", e.getMessage()); + } + + int copied = copyBundledBuildAssets(contextDir); + if (copied <= 0) { + throw new IOException("Bundled build-assets are required but missing or empty"); + } + log.debug("Bundled build assets copied into context: {} files", copied); + + log.debug("Build context created at: {}", contextDir); + return contextDir; + } + + private boolean hasBundledBuildAssets() { + return new ClassPathResource("build-assets/manifest.txt").exists(); + } + + private int copyBundledBuildAssets(Path contextDir) throws IOException { + ClassPathResource manifestResource = new ClassPathResource("build-assets/manifest.txt"); + if (!manifestResource.exists()) { + return 0; + } + + Path assetsRoot = contextDir.resolve("build-assets").toAbsolutePath().normalize(); + int copied = 0; + try (BufferedReader reader = new BufferedReader( + new InputStreamReader(manifestResource.getInputStream(), StandardCharsets.UTF_8))) { + String line; + while ((line = reader.readLine()) != null) { + String relative = line.trim(); + if (relative.isEmpty() || relative.startsWith("#")) { + continue; + } + ClassPathResource resource = new ClassPathResource("build-assets/" + relative); + if (!resource.exists()) { + log.warn("Bundled build asset listed but missing: {}", relative); + continue; + } + + Path target = assetsRoot.resolve(relative).normalize(); + if (!target.startsWith(assetsRoot)) { + throw new IOException("Illegal bundled build asset path: " + relative); + } + if (target.getParent() != null) { + Files.createDirectories(target.getParent()); + } + try (InputStream inputStream = resource.getInputStream()) { + Files.copy(inputStream, target, StandardCopyOption.REPLACE_EXISTING); + } + copied++; + } + } + return copied; + } + + /** + * 生成 Dockerfile 内容 + * Package-private:供 Compose 模式复用 + */ + String generateDockerfile(String baseImage, Map envVars) { + StringBuilder sb = new 
StringBuilder(); + + String sdkSourcePath = StringUtils.hasText(config.getSdkSourcePath()) + ? config.getSdkSourcePath().trim() + : "/opt/linkwork-agent-build/sdk-source"; + String zzdBinariesPath = StringUtils.hasText(config.getZzdBinariesPath()) + ? config.getZzdBinariesPath().trim() + : "/opt/linkwork-agent-build/zzd-binaries"; + String buildAssetsRoot = sdkSourcePath.contains("/") + ? sdkSourcePath.substring(0, sdkSourcePath.lastIndexOf('/')) + : "/opt/linkwork-agent-build"; + String startScriptsPath = buildAssetsRoot + "/start-scripts"; + + // FROM 指令 + sb.append("# Auto-generated Dockerfile\n"); + sb.append("FROM ").append(baseImage).append("\n\n"); + + // ENV 指令(注入环境变量) + if (envVars != null && !envVars.isEmpty()) { + sb.append("# Environment variables from build request\n"); + for (Map.Entry entry : envVars.entrySet()) { + String key = entry.getKey(); + Object valueObj = entry.getValue(); + String valueStr; + + if (valueObj instanceof String) { + valueStr = (String) valueObj; + } else { + try { + // 对象/数组序列化为 JSON 字符串 + valueStr = objectMapper.writeValueAsString(valueObj); + } catch (JsonProcessingException e) { + log.warn("Failed to serialize env var {} to JSON", key, e); + valueStr = String.valueOf(valueObj); + } + } + + // 转义特殊字符 + String value = escapeEnvValue(valueStr); + sb.append("ENV ").append(key).append("=").append(value).append("\n"); + } + sb.append("\n"); + } + + // Bundled assets from project resources + sb.append("# Bundled build assets from project resources\n"); + sb.append("RUN mkdir -p ").append(zzdBinariesPath).append(" \\\n"); + sb.append(" && mkdir -p ").append(sdkSourcePath).append(" \\\n"); + sb.append(" && mkdir -p ").append(startScriptsPath).append("\n"); + sb.append("COPY build-assets/ ").append(buildAssetsRoot).append("/\n\n"); + + // Cedar 策略文件 → /tmp/cedar-policies/ (build.sh download_cedar_policies 会从这里读取) + sb.append("# Cedar policy files for build.sh to deploy\n"); + sb.append("COPY cedar-policies/ 
/tmp/cedar-policies/\n\n"); + + // 默认 config.json → /opt/agent/config.json(build.sh finalize_permissions 会设置权限) + sb.append("# Default agent config (overridden at runtime by ConfigMap mount)\n"); + sb.append("RUN mkdir -p /opt/agent\n"); + sb.append("COPY config.json /opt/agent/config.json\n\n"); + + // 复制并执行 build.sh(生产版 v2 — 完整部署: 依赖检查、zzd、SDK、agent用户、权限) + sb.append("# Copy and execute build script (production v2)\n"); + sb.append("COPY build.sh /build.sh\n"); + sb.append("RUN chmod +x /build.sh && /build.sh\n\n"); + + // ENTRYPOINT + sb.append("# Set entrypoint\n"); + sb.append("ENTRYPOINT [\"").append(config.getEntrypointScript()).append("\"]\n"); + + return sb.toString(); + } + + /** + * 转义环境变量值中的特殊字符 + */ + String escapeEnvValue(String value) { + if (value == null) { + return "\"\""; + } + // 如果包含空格或特殊字符,使用双引号包裹 + if (value.contains(" ") || value.contains("\"") || value.contains("$") || + value.contains("\\") || value.contains("\n")) { + return "\"" + value.replace("\\", "\\\\") + .replace("\"", "\\\"") + .replace("$", "\\$") + .replace("\n", "\\n") + "\""; + } + return value; + } + + /** + * 执行 Docker build + */ + private void buildImage(Path contextPath, String imageTag, String buildId) throws Exception { + log.info("Executing docker build: tag={}, context={}", imageTag, contextPath); + publishLog(buildId, "info", ""); + publishLog(buildId, "info", "--- Docker Build 日志 ---"); + + File contextDir = contextPath.toFile(); + File dockerfile = contextPath.resolve("Dockerfile").toFile(); + + // 用于在回调中引用 buildId(必须是 effectively final) + final String finalBuildId = buildId; + + BuildImageResultCallback callback = new BuildImageResultCallback() { + @Override + public void onNext(BuildResponseItem item) { + // 发布 Docker 构建的实时日志 + if (item.getStream() != null) { + String logLine = item.getStream().trim(); + if (!logLine.isEmpty()) { + log.debug("Build output: {}", logLine); + // 将 Docker 日志发布到 Redis Stream + publishLog(finalBuildId, "info", logLine); + } + } + if 
(item.getErrorDetail() != null) { + String errorMsg = item.getErrorDetail().getMessage(); + log.error("Build error: {}", errorMsg); + publishLog(finalBuildId, "error", "[ERROR] " + errorMsg); + } + // 发布构建进度信息 + if (item.getStatus() != null) { + String status = item.getStatus(); + log.debug("Build status: {}", status); + publishLog(finalBuildId, "debug", "[STATUS] " + status); + } + super.onNext(item); + } + }; + + String imageId = dockerClient.buildImageCmd() + .withDockerfile(dockerfile) + .withBaseDirectory(contextDir) + .withTags(Collections.singleton(imageTag)) + .withNoCache(false) + .withPull(false) // 使用本地镜像,避免网络问题 + .exec(callback) + .awaitImageId(config.getBuildTimeout(), TimeUnit.SECONDS); + + publishLog(buildId, "info", ""); + publishLog(buildId, "info", "--- Docker Build 完成 ---"); + publishLog(buildId, "info", "镜像ID: " + imageId); + log.info("Image built successfully: tag={}, imageId={}", imageTag, imageId); + } + + /** + * 推送镜像到仓库(带重试机制) + */ + private void pushImage(String imageTag, String buildId, String registry) throws Exception { + log.info("Pushing image: {}", imageTag); + publishLog(buildId, "info", "正在推送镜像: " + imageTag); + + final int maxRetries = 3; + Exception lastException = null; + + for (int attempt = 1; attempt <= maxRetries; attempt++) { + try { + doPushImage(imageTag, buildId, registry, attempt); + // 推送成功 + publishLog(buildId, "info", "镜像推送完成: " + imageTag); + log.info("Image pushed successfully: {}", imageTag); + return; + } catch (Exception e) { + lastException = e; + String errorMsg = e.getMessage() != null ? 
e.getMessage() : e.toString(); + boolean isAuthError = errorMsg.contains("401") || errorMsg.contains("Unauthorized") + || errorMsg.contains("authentication") || errorMsg.contains("unauthorized"); + + if (isAuthError && attempt < maxRetries) { + publishLog(buildId, "warn", String.format( + "推送失败 (第%d次, 认证错误), %d秒后重试...", attempt, attempt * 5)); + log.warn("Push attempt {} failed with auth error, retrying in {}s: {}", + attempt, attempt * 5, errorMsg); + + // 重新写入凭据文件并刷新认证 + String registryHost = extractRegistryHost(registry); + if (StringUtils.hasText(registryHost) && StringUtils.hasText(config.getRegistryUsername())) { + writeDockerConfigJson(registryHost, config.getRegistryUsername(), config.getRegistryPassword()); + } + + Thread.sleep(attempt * 5000L); + } else if (!isAuthError && attempt < maxRetries) { + publishLog(buildId, "warn", String.format( + "推送失败 (第%d次), %d秒后重试...", attempt, attempt * 3)); + Thread.sleep(attempt * 3000L); + } else { + // 最后一次重试也失败了 + publishLog(buildId, "error", String.format( + "推送失败 (第%d次/%d次): %s", attempt, maxRetries, errorMsg)); + } + } + } + + throw new RuntimeException("镜像推送失败 (重试" + maxRetries + "次后): " + + (lastException != null ? 
lastException.getMessage() : "unknown error")); + } + + /** + * 执行单次镜像推送 + */ + private void doPushImage(String imageTag, String buildId, String registry, int attempt) throws Exception { + // 构建认证配置 + AuthConfig authConfig = null; + if (StringUtils.hasText(config.getRegistryUsername()) && + StringUtils.hasText(config.getRegistryPassword())) { + String registryAddress = extractRegistryHost(registry); + // 使用 http:// 前缀明确标识为 HTTP 仓库,避免 daemon 默认走 HTTPS token 交换 + authConfig = new AuthConfig() + .withRegistryAddress("http://" + registryAddress) + .withUsername(config.getRegistryUsername()) + .withPassword(config.getRegistryPassword()); + publishLog(buildId, "debug", String.format( + "使用认证推送 (registry: %s, attempt: %d)...", registryAddress, attempt)); + } + + final String finalBuildId = buildId; + // 用于捕获回调中的推送错误 + final StringBuilder pushError = new StringBuilder(); + + ResultCallback.Adapter callback = new ResultCallback.Adapter() { + @Override + public void onNext(PushResponseItem item) { + if (item.getStatus() != null) { + String status = item.getStatus(); + log.debug("Push status: {}", status); + // 过滤一些噪音日志,只发布关键状态 + if (status.contains("Pushing") || status.contains("Pushed") || + status.contains("Layer") || status.contains("digest")) { + publishLog(finalBuildId, "info", status); + } + } + if (item.getProgressDetail() != null && item.getProgressDetail().getCurrent() != null) { + // 推送进度,可选择性发布 + Long current = item.getProgressDetail().getCurrent(); + Long total = item.getProgressDetail().getTotal(); + if (total != null && total > 0) { + int percent = (int) (current * 100 / total); + if (percent % 20 == 0) { // 每 20% 发布一次 + publishLog(finalBuildId, "debug", String.format("推送进度: %d%%", percent)); + } + } + } + if (item.getErrorDetail() != null) { + String errorMsg = item.getErrorDetail().getMessage(); + log.error("Push error: {}", errorMsg); + publishLog(finalBuildId, "error", "[ERROR] " + errorMsg); + pushError.append(errorMsg); + } + } + }; + + // ★ push 前先 
docker login(刷新 Docker Daemon 的 Bearer Token) + if (authConfig != null) { + try { + // 同时尝试带 http:// 和不带前缀两种格式 + dockerClient.authCmd().withAuthConfig(authConfig).exec(); + publishLog(buildId, "debug", "Registry 认证刷新成功 (http://)"); + log.info("Registry auth refreshed for push: {}", authConfig.getRegistryAddress()); + } catch (Exception e) { + log.warn("Auth with http:// prefix failed, trying without: {}", e.getMessage()); + try { + // 回退:不带 http:// 前缀 + String plainAddress = authConfig.getRegistryAddress().replace("http://", ""); + AuthConfig fallbackAuth = new AuthConfig() + .withRegistryAddress(plainAddress) + .withUsername(config.getRegistryUsername()) + .withPassword(config.getRegistryPassword()); + dockerClient.authCmd().withAuthConfig(fallbackAuth).exec(); + // 如果不带前缀的成功了,使用这个 authConfig 进行推送 + authConfig = fallbackAuth; + publishLog(buildId, "debug", "Registry 认证刷新成功 (plain)"); + log.info("Registry auth refreshed (plain) for push: {}", plainAddress); + } catch (Exception e2) { + publishLog(buildId, "warn", "Registry 认证刷新失败: " + e2.getMessage()); + log.warn("Failed to refresh registry auth: {}", e2.getMessage()); + } + } + } + + if (authConfig != null) { + dockerClient.pushImageCmd(imageTag) + .withAuthConfig(authConfig) + .exec(callback) + .awaitCompletion(config.getBuildTimeout(), TimeUnit.SECONDS); + } else { + dockerClient.pushImageCmd(imageTag) + .exec(callback) + .awaitCompletion(config.getBuildTimeout(), TimeUnit.SECONDS); + } + + // 检查回调中是否捕获到错误 + if (pushError.length() > 0) { + throw new RuntimeException("镜像推送失败: " + pushError); + } + + publishLog(buildId, "info", "镜像推送完成: " + imageTag); + log.info("Image pushed successfully: {}", imageTag); + } + + private String extractRegistryHost(String registry) { + if (!StringUtils.hasText(registry)) { + return ""; + } + String value = registry.trim(); + if (value.startsWith("http://")) { + value = value.substring("http://".length()); + } else if (value.startsWith("https://")) { + value = 
value.substring("https://".length()); + } + int slash = value.indexOf("/"); + if (slash > 0) { + return value.substring(0, slash); + } + return value; + } + + /** + * 删除本地镜像(推送成功后清理,避免磁盘堆积) + */ + private void removeLocalImage(String imageTag, String buildId) { + try { + dockerClient.removeImageCmd(imageTag).withForce(false).withNoPrune(true).exec(); + publishLog(buildId, "info", "本地镜像已清理: " + imageTag); + log.info("Local image removed: {}", imageTag); + } catch (Exception e) { + // 清理失败不影响构建结果,仅告警 + publishLog(buildId, "warn", "本地镜像清理失败(不影响部署): " + e.getMessage()); + log.warn("Failed to remove local image {}: {}", imageTag, e.getMessage()); + } + } + + /** + * 周期清理本地构建镜像,并在 Kind 节点触发未使用镜像清理。 + */ + @Scheduled(cron = "${image-build.local-cleanup-cron:0 40 * * * *}") + public void periodicLocalImageCleanup() { + try { + Map result = runLocalImageMaintenance("scheduled"); + log.info("Local image cleanup finished: {}", result); + } catch (Exception e) { + log.warn("Periodic local image cleanup failed: {}", e.getMessage(), e); + } + } + + /** + * 立即执行一次本地镜像维护(供运维手动触发)。 + */ + public synchronized Map runLocalImageMaintenance(String triggerSource) { + Map result = new LinkedHashMap<>(); + result.put("triggerSource", StringUtils.hasText(triggerSource) ? 
triggerSource : "unknown"); + result.put("cleanupEnabled", config.isLocalCleanupEnabled()); + result.put("kindPruneEnabled", config.isKindPruneEnabled()); + result.put("retentionHours", Math.max(config.getLocalImageRetentionHours(), 1)); + result.put("kindClusterName", normalize(config.getKindClusterName())); + + if (!config.isLocalCleanupEnabled()) { + result.put("removedLocalImages", 0); + result.put("prunedKindNodes", 0); + result.put("skipped", "image-build.local-cleanup-enabled=false"); + return result; + } + + int removed = cleanupExpiredLocalBuildImages(); + result.put("removedLocalImages", removed); + + if (config.isKindPruneEnabled()) { + int prunedNodes = pruneKindNodeImages(); + result.put("prunedKindNodes", prunedNodes); + } else { + result.put("prunedKindNodes", 0); + } + return result; + } + + private int cleanupExpiredLocalBuildImages() { + long nowMs = System.currentTimeMillis(); + long retentionMs = TimeUnit.HOURS.toMillis(Math.max(config.getLocalImageRetentionHours(), 1)); + Set activeImageIds = collectActiveContainerImageIds(); + int removed = 0; + + List images = dockerClient.listImagesCmd().withShowAll(true).exec(); + for (Image image : images) { + String imageId = image.getId(); + if (StringUtils.hasText(imageId) && activeImageIds.contains(imageId)) { + continue; + } + long createdMs = resolveImageCreatedMillis(image); + if (createdMs <= 0 || nowMs - createdMs < retentionMs) { + continue; + } + String[] repoTags = image.getRepoTags(); + if (repoTags == null || repoTags.length == 0) { + continue; + } + for (String tag : repoTags) { + if (!shouldCleanupLocalTag(tag)) { + continue; + } + try { + dockerClient.removeImageCmd(tag).withForce(false).withNoPrune(true).exec(); + removed++; + log.info("Removed expired local build image: {}", tag); + } catch (Exception e) { + log.debug("Skip removing image {}: {}", tag, e.getMessage()); + } + } + } + return removed; + } + + private Set collectActiveContainerImageIds() { + Set result = new HashSet<>(); + 
List running = dockerClient.listContainersCmd().withShowAll(false).exec(); + for (Container c : running) { + if (StringUtils.hasText(c.getImageId())) { + result.add(c.getImageId()); + } + } + return result; + } + + private long resolveImageCreatedMillis(Image image) { + try { + if (image.getCreated() != null && image.getCreated() > 0) { + return image.getCreated() * 1000; + } + if (!StringUtils.hasText(image.getId())) { + return -1; + } + InspectImageResponse inspect = dockerClient.inspectImageCmd(image.getId()).exec(); + if (inspect != null && StringUtils.hasText(inspect.getCreated())) { + return OffsetDateTime.parse(inspect.getCreated()).toInstant().toEpochMilli(); + } + } catch (Exception e) { + log.debug("Resolve image created time failed: image={}, err={}", image.getId(), e.getMessage()); + } + return -1; + } + + private boolean shouldCleanupLocalTag(String tag) { + if (!StringUtils.hasText(tag) || ":".equals(tag)) { + return false; + } + int idx = tag.lastIndexOf(':'); + String repo = idx > 0 ? tag.substring(0, idx) : tag; + if (repo.startsWith("docker.io/library/")) { + repo = repo.substring("docker.io/library/".length()); + } + return LOCAL_BUILD_REPO_PATTERN.matcher(repo).matches(); + } + + private int pruneKindNodeImages() { + List kindNodes = findKindNodeContainers(); + int prunedNodes = 0; + for (Container node : kindNodes) { + String nodeName = resolveContainerName(node); + if (!StringUtils.hasText(nodeName)) { + continue; + } + try { + execInNode(node, "crictl", "rmi", "--prune"); + prunedNodes++; + log.info("Pruned unused images on kind node: {}", nodeName); + } catch (Exception e) { + log.warn("Kind node image prune failed on {}: {}", nodeName, e.getMessage()); + } + } + return prunedNodes; + } + + private void execInNode(Container node, String... 
cmd) throws InterruptedException { + ExecCreateCmdResponse exec = dockerClient.execCreateCmd(node.getId()) + .withAttachStdout(true) + .withAttachStderr(true) + .withCmd(cmd) + .exec(); + ByteArrayOutputStream output = new ByteArrayOutputStream(); + ResultCallback.Adapter callback = new ResultCallback.Adapter<>() { + @Override + public void onNext(Frame item) { + try { + if (item != null && item.getPayload() != null) { + output.write(item.getPayload()); + } + } catch (IOException ignored) { + // 仅用于日志 + } + super.onNext(item); + } + }; + dockerClient.execStartCmd(exec.getId()) + .exec(callback) + .awaitCompletion(Math.max(config.getKindLoadTimeout(), 60), TimeUnit.SECONDS); + InspectExecResponse inspect = dockerClient.inspectExecCmd(exec.getId()).exec(); + Long exit = inspect != null ? inspect.getExitCodeLong() : null; + if (exit == null || exit != 0L) { + throw new IllegalStateException("exec failed: cmd=" + String.join(" ", cmd) + + ", exitCode=" + exit + ", output=" + output); + } + } + + /** + * 清理构建上下文目录 + */ + private void cleanupBuildContext(Path contextPath) { + deleteDirectory(contextPath); + } + + /** + * 清理残留的历史构建上下文 + * 扫描 BUILD_CONTEXT_DIR 下超过指定时间的目录 + */ + private void cleanupStaleContexts() { + Path baseDir = Path.of(config.getBuildContextDir()); + if (!Files.exists(baseDir)) { + return; + } + + int staleHours = buildQueueConfig.getStaleContextHours(); + long staleThreshold = System.currentTimeMillis() - TimeUnit.HOURS.toMillis(staleHours); + + try (DirectoryStream stream = Files.newDirectoryStream(baseDir, "build-*")) { + for (Path dir : stream) { + try { + if (!Files.isDirectory(dir)) { + continue; + } + long lastModified = Files.getLastModifiedTime(dir).toMillis(); + if (lastModified < staleThreshold) { + log.info("清理残留构建上下文: {} (超过 {} 小时)", dir, staleHours); + deleteDirectory(dir); + } + } catch (IOException e) { + log.warn("检查目录时间失败: {}", dir, e); + } + } + } catch (IOException e) { + log.warn("扫描构建目录失败: {}", baseDir, e); + } + } + + /** + * 
删除目录及其内容 + */ + private void deleteDirectory(Path path) { + try { + Files.walk(path) + .sorted((a, b) -> b.compareTo(a)) // 逆序,先删除文件再删除目录 + .forEach(p -> { + try { + Files.deleteIfExists(p); + } catch (IOException e) { + log.warn("Failed to delete: {}", p); + } + }); + log.debug("Build context cleaned: {}", path); + } catch (IOException e) { + log.warn("Failed to cleanup build context: {}", path, e); + } + } + + /** + * 获取 Dockerfile 预览(用于调试) + */ + public String previewDockerfile(String baseImage, Map envVars) { + return generateDockerfile(baseImage, envVars); + } +} diff --git a/back/src/main/java/com/linkwork/service/K8sClusterService.java b/back/src/main/java/com/linkwork/service/K8sClusterService.java new file mode 100644 index 0000000..4d1310b --- /dev/null +++ b/back/src/main/java/com/linkwork/service/K8sClusterService.java @@ -0,0 +1,450 @@ +package com.linkwork.service; + +import com.linkwork.model.dto.*; +import io.fabric8.kubernetes.api.model.*; +import io.fabric8.kubernetes.api.model.metrics.v1beta1.ContainerMetrics; +import io.fabric8.kubernetes.api.model.metrics.v1beta1.NodeMetrics; +import io.fabric8.kubernetes.api.model.metrics.v1beta1.PodMetrics; +import io.fabric8.kubernetes.client.KubernetesClient; +import io.fabric8.kubernetes.client.KubernetesClientException; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Service; + +import java.io.BufferedReader; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.time.Instant; +import java.util.*; +import java.util.stream.Collectors; + +/** + * K8s 集群监控 Service — 多命名空间、全量资源查看 + * 复用现有 KubernetesClient Bean + */ +@Service +@Slf4j +@RequiredArgsConstructor +public class K8sClusterService { + + private final KubernetesClient kubernetesClient; + + public List listNamespaces() { + return kubernetesClient.namespaces().list().getItems().stream() + .map(ns 
-> ns.getMetadata().getName()) + .sorted() + .collect(Collectors.toList()); + } + + public ClusterOverviewDTO getOverview(String namespace) { + var podList = kubernetesClient.pods().inNamespace(namespace).list().getItems(); + + int running = 0, pending = 0, failed = 0, succeeded = 0; + for (Pod pod : podList) { + String phase = pod.getStatus() != null ? pod.getStatus().getPhase() : "Unknown"; + switch (phase) { + case "Running" -> running++; + case "Pending" -> pending++; + case "Failed" -> failed++; + case "Succeeded" -> succeeded++; + } + } + + long usedCpu = 0, usedMem = 0; + try { + var metrics = kubernetesClient.top().pods().inNamespace(namespace).metrics().getItems(); + for (PodMetrics pm : metrics) { + for (ContainerMetrics cm : pm.getContainers()) { + Quantity cpu = cm.getUsage().get("cpu"); + Quantity mem = cm.getUsage().get("memory"); + if (cpu != null) usedCpu += parseMillicores(cpu); + if (mem != null) usedMem += parseBytes(mem); + } + } + } catch (Exception e) { + log.warn("Metrics not available for namespace {}: {}", namespace, e.getMessage()); + } + + long totalCpu = 0, totalMem = 0; + int nodeCount = 0; + try { + var nodes = kubernetesClient.nodes().list().getItems(); + nodeCount = nodes.size(); + for (Node node : nodes) { + Quantity cpuCap = node.getStatus().getAllocatable().get("cpu"); + Quantity memCap = node.getStatus().getAllocatable().get("memory"); + if (cpuCap != null) totalCpu += parseMillicores(cpuCap); + if (memCap != null) totalMem += parseBytes(memCap); + } + } catch (Exception e) { + log.warn("Failed to get node info: {}", e.getMessage()); + } + + int pgCount = 0; + try { + pgCount = kubernetesClient + .genericKubernetesResources("scheduling.volcano.sh/v1beta1", "PodGroup") + .inNamespace(namespace).list().getItems().size(); + } catch (Exception ignored) {} + + return ClusterOverviewDTO.builder() + .namespace(namespace) + .totalPods(podList.size()) + .runningPods(running) + .pendingPods(pending) + .failedPods(failed) + 
.succeededPods(succeeded) + .totalCpuMillicores(totalCpu) + .usedCpuMillicores(usedCpu) + .cpuUsagePercent(totalCpu > 0 ? Math.round(usedCpu * 10000.0 / totalCpu) / 100.0 : null) + .totalMemoryBytes(totalMem) + .usedMemoryBytes(usedMem) + .memoryUsagePercent(totalMem > 0 ? Math.round(usedMem * 10000.0 / totalMem) / 100.0 : null) + .podGroupCount(pgCount) + .nodeCount(nodeCount) + .build(); + } + + public List listNodes() { + var nodes = kubernetesClient.nodes().list().getItems(); + + Map metricsMap = new HashMap<>(); + try { + kubernetesClient.top().nodes().metrics().getItems() + .forEach(nm -> metricsMap.put(nm.getMetadata().getName(), nm)); + } catch (Exception e) { + log.warn("Node metrics not available: {}", e.getMessage()); + } + + Map podCounts = new HashMap<>(); + try { + kubernetesClient.pods().inAnyNamespace().list().getItems() + .forEach(p -> { + String node = p.getSpec().getNodeName(); + if (node != null) podCounts.merge(node, 1, Integer::sum); + }); + } catch (Exception e) { + log.warn("Failed to count pods per node: {}", e.getMessage()); + } + + return nodes.stream().map(node -> { + String name = node.getMetadata().getName(); + String status = node.getStatus().getConditions().stream() + .filter(c -> "Ready".equals(c.getType())) + .findFirst() + .map(c -> "True".equals(c.getStatus()) ? 
"Ready" : "NotReady") + .orElse("Unknown"); + + List roles = node.getMetadata().getLabels().entrySet().stream() + .filter(e -> e.getKey().startsWith("node-role.kubernetes.io/")) + .map(e -> e.getKey().substring("node-role.kubernetes.io/".length())) + .collect(Collectors.toList()); + + long cpuCap = parseMillicores(node.getStatus().getCapacity().get("cpu")); + long cpuAlloc = parseMillicores(node.getStatus().getAllocatable().get("cpu")); + long memCap = parseBytes(node.getStatus().getCapacity().get("memory")); + long memAlloc = parseBytes(node.getStatus().getAllocatable().get("memory")); + int podCap = parseIntQuantity(node.getStatus().getCapacity().get("pods")); + + long cpuUsage = 0, memUsage = 0; + NodeMetrics nm = metricsMap.get(name); + if (nm != null) { + Quantity cpu = nm.getUsage().get("cpu"); + Quantity mem = nm.getUsage().get("memory"); + if (cpu != null) cpuUsage = parseMillicores(cpu); + if (mem != null) memUsage = parseBytes(mem); + } + + return ClusterNodeInfo.builder() + .name(name) + .status(status) + .roles(roles) + .kubeletVersion(node.getStatus().getNodeInfo().getKubeletVersion()) + .cpuCapacity(cpuCap) + .cpuAllocatable(cpuAlloc) + .cpuUsage(cpuUsage) + .cpuUsagePercent(cpuAlloc > 0 ? Math.round(cpuUsage * 10000.0 / cpuAlloc) / 100.0 : null) + .memCapacity(memCap) + .memAllocatable(memAlloc) + .memUsage(memUsage) + .memUsagePercent(memAlloc > 0 ? 
Math.round(memUsage * 10000.0 / memAlloc) / 100.0 : null) + .podCount(podCounts.getOrDefault(name, 0)) + .podCapacity(podCap) + .build(); + }).collect(Collectors.toList()); + } + + public List listPods(String namespace, String statusFilter, String nodeFilter, String podGroupFilter) { + var podList = kubernetesClient.pods().inNamespace(namespace).list().getItems(); + + Map metricsMap = new HashMap<>(); + try { + kubernetesClient.top().pods().inNamespace(namespace).metrics().getItems() + .forEach(pm -> metricsMap.put(pm.getMetadata().getName(), pm)); + } catch (Exception e) { + log.warn("Pod metrics not available for {}: {}", namespace, e.getMessage()); + } + + Map podGroupPhases = new HashMap<>(); + try { + kubernetesClient.genericKubernetesResources("scheduling.volcano.sh/v1beta1", "PodGroup") + .inNamespace(namespace).list().getItems().forEach(pg -> { + @SuppressWarnings("unchecked") + Map st = (Map) pg.getAdditionalProperties().get("status"); + String phase = st != null ? String.valueOf(st.getOrDefault("phase", "Unknown")) : "Unknown"; + podGroupPhases.put(pg.getMetadata().getName(), phase); + }); + } catch (Exception ignored) {} + + return podList.stream() + .map(pod -> buildClusterPodInfo(pod, metricsMap, podGroupPhases)) + .filter(p -> statusFilter == null || statusFilter.isEmpty() || p.getPhase().equalsIgnoreCase(statusFilter)) + .filter(p -> nodeFilter == null || nodeFilter.isEmpty() || nodeFilter.equals(p.getNodeName())) + .filter(p -> podGroupFilter == null || podGroupFilter.isEmpty() || podGroupFilter.equals(p.getPodGroupName())) + .collect(Collectors.toList()); + } + + public List listPodGroups(String namespace) { + List result = new ArrayList<>(); + try { + var pgList = kubernetesClient.genericKubernetesResources("scheduling.volcano.sh/v1beta1", "PodGroup") + .inNamespace(namespace).list().getItems(); + for (var pg : pgList) { + @SuppressWarnings("unchecked") + Map status = (Map) pg.getAdditionalProperties().get("status"); + 
@SuppressWarnings("unchecked") + Map spec = (Map) pg.getAdditionalProperties().get("spec"); + + result.add(PodGroupStatusInfo.builder() + .name(pg.getMetadata().getName()) + .phase(status != null ? String.valueOf(status.getOrDefault("phase", "Unknown")) : "Unknown") + .minMember(spec != null && spec.get("minMember") != null ? ((Number) spec.get("minMember")).intValue() : null) + .running(status != null && status.get("running") != null ? ((Number) status.get("running")).intValue() : 0) + .succeeded(status != null && status.get("succeeded") != null ? ((Number) status.get("succeeded")).intValue() : 0) + .failed(status != null && status.get("failed") != null ? ((Number) status.get("failed")).intValue() : 0) + .pending(status != null && status.get("pending") != null ? ((Number) status.get("pending")).intValue() : 0) + .build()); + } + } catch (Exception e) { + log.warn("Failed to list PodGroups in {}: {}", namespace, e.getMessage()); + } + return result; + } + + public PodLogResponseDTO getPodLogs(String namespace, String podName, String container, int tailLines) { + try { + var pod = kubernetesClient.pods().inNamespace(namespace).withName(podName).get(); + if (pod == null) { + return PodLogResponseDTO.builder().podName(podName).namespace(namespace).logs("Pod not found").tailLines(tailLines).build(); + } + + String targetContainer = container; + if (targetContainer == null || targetContainer.isEmpty()) { + var cs = pod.getSpec().getContainers(); + if (!cs.isEmpty()) targetContainer = cs.get(0).getName(); + } + + String logContent; + try (InputStream is = kubernetesClient.pods().inNamespace(namespace).withName(podName) + .inContainer(targetContainer).tailingLines(tailLines).getLogInputStream()) { + logContent = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8)) + .lines().collect(Collectors.joining("\n")); + } + + return PodLogResponseDTO.builder() + .podName(podName).namespace(namespace).containerName(targetContainer) + 
.logs(logContent).tailLines(tailLines).build(); + } catch (Exception e) { + log.error("Failed to get logs for {}/{}: {}", namespace, podName, e.getMessage()); + return PodLogResponseDTO.builder().podName(podName).namespace(namespace) + .logs("Error: " + e.getMessage()).tailLines(tailLines).build(); + } + } + + public List listEvents(String namespace, int limit) { + var events = kubernetesClient.v1().events().inNamespace(namespace).list().getItems(); + events.sort((a, b) -> { + String ta = a.getLastTimestamp() != null ? a.getLastTimestamp() : ""; + String tb = b.getLastTimestamp() != null ? b.getLastTimestamp() : ""; + return tb.compareTo(ta); + }); + + return events.stream().limit(limit).map(e -> ClusterEventDTO.builder() + .type(e.getType()) + .reason(e.getReason()) + .message(e.getMessage()) + .objectKind(e.getInvolvedObject() != null ? e.getInvolvedObject().getKind() : null) + .objectName(e.getInvolvedObject() != null ? e.getInvolvedObject().getName() : null) + .namespace(e.getMetadata().getNamespace()) + .firstTimestamp(e.getFirstTimestamp()) + .lastTimestamp(e.getLastTimestamp()) + .count(e.getCount()) + .build() + ).collect(Collectors.toList()); + } + + public List listPodEvents(String namespace, String podName) { + return kubernetesClient.v1().events().inNamespace(namespace).list().getItems().stream() + .filter(e -> e.getInvolvedObject() != null && podName.equals(e.getInvolvedObject().getName())) + .map(e -> ClusterEventDTO.builder() + .type(e.getType()) + .reason(e.getReason()) + .message(e.getMessage()) + .objectKind(e.getInvolvedObject().getKind()) + .objectName(e.getInvolvedObject().getName()) + .namespace(namespace) + .firstTimestamp(e.getFirstTimestamp()) + .lastTimestamp(e.getLastTimestamp()) + .count(e.getCount()) + .build() + ).collect(Collectors.toList()); + } + + public void deletePod(String namespace, String podName) { + kubernetesClient.pods().inNamespace(namespace).withName(podName).delete(); + log.info("Deleted pod {}/{}", namespace, podName); 
+ } + + // ─── private helpers ───────────────────────────────────────────── + + private ClusterPodInfo buildClusterPodInfo(Pod pod, Map metricsMap, Map podGroupPhases) { + String name = pod.getMetadata().getName(); + Map annotations = pod.getMetadata().getAnnotations(); + Map labels = pod.getMetadata().getLabels(); + + String pgName = annotations != null ? annotations.get("scheduling.volcano.sh/group-name") : null; + String pgPhase = pgName != null ? podGroupPhases.getOrDefault(pgName, "Unknown") : null; + String serviceId = labels != null ? labels.get("service-id") : null; + String userId = labels != null ? labels.get("user-id") : null; + + List containers = new ArrayList<>(); + int totalRestarts = 0; + + if (pod.getStatus() != null && pod.getStatus().getContainerStatuses() != null) { + for (var cs : pod.getStatus().getContainerStatuses()) { + String state = "waiting"; + String reason = null; + Integer exitCode = null; + if (cs.getState().getRunning() != null) state = "running"; + else if (cs.getState().getTerminated() != null) { + state = "terminated"; + reason = cs.getState().getTerminated().getReason(); + exitCode = cs.getState().getTerminated().getExitCode(); + } else if (cs.getState().getWaiting() != null) { + reason = cs.getState().getWaiting().getReason(); + } + + totalRestarts += cs.getRestartCount(); + containers.add(ContainerStatusInfo.builder() + .name(cs.getName()).ready(cs.getReady()).state(state) + .reason(reason).exitCode(exitCode).restartCount(cs.getRestartCount()) + .build()); + } + } + + List images = pod.getSpec().getContainers().stream() + .map(Container::getImage).collect(Collectors.toList()); + + String startTime = null; + String age = ""; + if (pod.getStatus() != null && pod.getStatus().getStartTime() != null) { + startTime = pod.getStatus().getStartTime(); + try { + Duration d = Duration.between(Instant.parse(startTime), Instant.now()); + if (d.toDays() > 0) age = d.toDays() + "d"; + else if (d.toHours() > 0) age = d.toHours() + "h"; + 
else age = d.toMinutes() + "m"; + } catch (Exception ignored) { age = "N/A"; } + } + + ResourceUsageInfo resUsage = null; + PodMetrics pm = metricsMap.get(name); + if (pm != null) { + long cpuUsage = 0, memUsage = 0; + for (ContainerMetrics cm : pm.getContainers()) { + Quantity cpu = cm.getUsage().get("cpu"); + Quantity mem = cm.getUsage().get("memory"); + if (cpu != null) cpuUsage += parseMillicores(cpu); + if (mem != null) memUsage += parseBytes(mem); + } + resUsage = ResourceUsageInfo.builder() + .cpuMillicores(cpuUsage).cpuUsage(formatMillicores(cpuUsage)) + .memoryBytes(memUsage).memoryUsage(formatBytes(memUsage)) + .build(); + } + + return ClusterPodInfo.builder() + .name(name) + .namespace(pod.getMetadata().getNamespace()) + .phase(pod.getStatus() != null ? pod.getStatus().getPhase() : "Unknown") + .nodeName(pod.getSpec().getNodeName()) + .podGroupName(pgName) + .podGroupPhase(pgPhase) + .serviceId(serviceId) + .userId(userId) + .containers(containers) + .restartCount(totalRestarts) + .startTime(startTime) + .age(age) + .images(images) + .resourceUsage(resUsage) + .build(); + } + + private long parseMillicores(Quantity q) { + if (q == null) return 0; + try { + double value = Double.parseDouble(q.getAmount()); + String fmt = q.getFormat(); + if (fmt != null) { + return switch (fmt) { + case "n" -> (long) (value / 1_000_000); + case "u" -> (long) (value / 1_000); + case "m" -> (long) value; + default -> (long) (value * 1000); + }; + } + return (long) (value * 1000); + } catch (NumberFormatException e) { return 0; } + } + + private long parseBytes(Quantity q) { + if (q == null) return 0; + try { + double value = Double.parseDouble(q.getAmount()); + String fmt = q.getFormat(); + if (fmt != null) { + return switch (fmt) { + case "Ki" -> (long) (value * 1024); + case "Mi" -> (long) (value * 1024 * 1024); + case "Gi" -> (long) (value * 1024 * 1024 * 1024); + case "K" -> (long) (value * 1000); + case "M" -> (long) (value * 1000_000); + case "G" -> (long) (value * 
1000_000_000); + default -> (long) value; + }; + } + return (long) value; + } catch (NumberFormatException e) { return 0; } + } + + private int parseIntQuantity(Quantity q) { + if (q == null) return 0; + try { return Integer.parseInt(q.getAmount()); } + catch (Exception e) { return 0; } + } + + private String formatMillicores(long millicores) { + if (millicores >= 1000) return String.format("%.2f", millicores / 1000.0); + return millicores + "m"; + } + + private String formatBytes(long bytes) { + if (bytes >= 1024L * 1024 * 1024) return String.format("%.2fGi", bytes / (1024.0 * 1024 * 1024)); + if (bytes >= 1024L * 1024) return String.format("%.2fMi", bytes / (1024.0 * 1024)); + if (bytes >= 1024) return String.format("%.2fKi", bytes / 1024.0); + return bytes + "B"; + } +} diff --git a/back/src/main/java/com/linkwork/service/K8sOrchestrator.java b/back/src/main/java/com/linkwork/service/K8sOrchestrator.java new file mode 100644 index 0000000..2001655 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/K8sOrchestrator.java @@ -0,0 +1,78 @@ +package com.linkwork.service; + +import com.linkwork.model.dto.*; + +import java.util.List; + +/** + * K8s 编排器接口 + */ +public interface K8sOrchestrator { + + /** + * 构建服务(核心方法) + * 执行顺序:Secret → PodGroup → Pod ×N + * @param config 融合后的配置 + * @return 构建结果 + */ + ServiceBuildResult buildService(MergedConfig config); + + /** + * 获取服务状态 + * @param serviceId 服务 ID + */ + ServiceStatusResponse getServiceStatus(String serviceId); + + /** + * 停止服务 + * @param serviceId 服务 ID + * @param graceful true=优雅停止 + */ + StopResult stopService(String serviceId, boolean graceful); + + /** + * 清理服务资源 + * @param serviceId 服务 ID + */ + void cleanupService(String serviceId); + + /** + * 预览生成的 Spec(不实际创建) + * @param config 融合后的配置 + * @return 生成的 Spec + */ + GeneratedSpec previewSpec(MergedConfig config); + + // ==================== 动态伸缩接口 ==================== + + /** + * 缩容:删除指定的 Pod + * @param serviceId 服务 ID + * @param podName 要删除的 Pod 
名称(必填,不指定则返回错误) + * @return 伸缩结果 + */ + ScaleResult scaleDown(String serviceId, String podName); + + /** + * 扩容:创建新的 Pod 到指定数量 + * @param serviceId 服务 ID + * @param targetPodCount 目标 Pod 数量 + * @param config 融合后的配置(用于创建新 Pod) + * @return 伸缩结果 + */ + ScaleResult scaleUp(String serviceId, int targetPodCount, MergedConfig config); + + /** + * 获取当前运行的 Pod 列表 + * @param serviceId 服务 ID + * @return Pod 名称列表 + */ + List getRunningPods(String serviceId); + + /** + * 扫描 K8s namespace,获取所有带 service-id label 的运行中服务 ID 列表 + * 用于后端重启后反向发现 K8s 中仍在运行的服务 + * @return 去重后的 serviceId 列表 + */ + List listAllServiceIds(); +} diff --git a/back/src/main/java/com/linkwork/service/K8sOrchestratorImpl.java b/back/src/main/java/com/linkwork/service/K8sOrchestratorImpl.java new file mode 100644 index 0000000..2bcd18b --- /dev/null +++ b/back/src/main/java/com/linkwork/service/K8sOrchestratorImpl.java @@ -0,0 +1,1672 @@ +package com.linkwork.service; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.linkwork.config.EnvConfig; +import com.linkwork.config.ImageBuildConfig; +import com.linkwork.model.dto.*; +import io.fabric8.kubernetes.api.model.*; +import io.fabric8.kubernetes.api.model.GenericKubernetesResource; +import io.fabric8.kubernetes.api.model.metrics.v1beta1.ContainerMetrics; +import io.fabric8.kubernetes.api.model.metrics.v1beta1.PodMetrics; +import io.fabric8.kubernetes.client.KubernetesClient; +import io.fabric8.kubernetes.client.KubernetesClientException; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Service; +import org.springframework.util.StringUtils; + +import java.io.BufferedReader; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.time.Instant; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * K8s 编排器实现 + */ +@Service +@Slf4j 
+public class K8sOrchestratorImpl implements K8sOrchestrator { + + private static final ResourceSpec FALLBACK_AGENT_RESOURCES = ResourceSpec.builder() + .cpuRequest("1") + .cpuLimit("2") + .memoryRequest("2Gi") + .memoryLimit("4Gi") + .build(); + + private static final ResourceSpec FALLBACK_RUNNER_RESOURCES = ResourceSpec.builder() + .cpuRequest("1") + .cpuLimit("4") + .memoryRequest("2Gi") + .memoryLimit("8Gi") + .build(); + + private static final ResourceSpec PERMISSION_INIT_RESOURCES = ResourceSpec.builder() + .cpuRequest("100m") + .cpuLimit("500m") + .memoryRequest("128Mi") + .memoryLimit("512Mi") + .build(); + + private final KubernetesClient kubernetesClient; + private final PodGroupSpecGenerator podGroupSpecGenerator; + private final PodSpecGenerator podSpecGenerator; + private final EnvConfig envConfig; + private final ObjectMapper objectMapper; + private final ScheduleEventPublisher eventPublisher; + private final ImageBuildConfig imageBuildConfig; + private final DistributedLockService lockService; + + /** 存储目录级分布式锁键前缀(保留 oss: 前缀以兼容已有锁) */ + private static final String OSS_LOCK_PREFIX = "oss:lock:"; + + public K8sOrchestratorImpl(KubernetesClient kubernetesClient, + PodGroupSpecGenerator podGroupSpecGenerator, + PodSpecGenerator podSpecGenerator, + EnvConfig envConfig, + ObjectMapper objectMapper, + ScheduleEventPublisher eventPublisher, + ImageBuildConfig imageBuildConfig, + DistributedLockService lockService) { + this.kubernetesClient = kubernetesClient; + this.podGroupSpecGenerator = podGroupSpecGenerator; + this.podSpecGenerator = podSpecGenerator; + this.envConfig = envConfig; + this.objectMapper = objectMapper; + this.eventPublisher = eventPublisher; + this.imageBuildConfig = imageBuildConfig; + this.lockService = lockService; + } + + @Override + public ServiceBuildResult buildService(MergedConfig config) { + String serviceId = config.getServiceId(); + String podGroupName = "svc-" + serviceId + "-pg"; + + // 如果启用了 OSS 挂载,获取 OSS 目录级分布式锁 + String 
ossLockKey = buildOssLockKey(config); + String ossLock = null; + if (ossLockKey != null) { + ossLock = lockService.tryAcquireLockByKey(ossLockKey); + if (ossLock == null) { + log.warn("Failed to acquire OSS lock for service {}, key={}", serviceId, ossLockKey); + return ServiceBuildResult.failed(serviceId, "OSS_LOCK_FAILED", + "Another operation is modifying OSS directory for this service"); + } + log.info("Acquired OSS lock for service {}, key={}", serviceId, ossLockKey); + } + + try { + return doBuildService(config); + } finally { + if (ossLockKey != null && ossLock != null) { + lockService.releaseLockByKey(ossLockKey, ossLock); + log.info("Released OSS lock for service {}, key={}", serviceId, ossLockKey); + } + } + } + + private ServiceBuildResult doBuildService(MergedConfig config) { + String serviceId = config.getServiceId(); + String podGroupName = "svc-" + serviceId + "-pg"; + String namespace = config.getNamespace(); + List podNames = new ArrayList<>(); + String scheduledNode = null; + + try { + // ── 重复构建保护:先清理同 serviceId 的旧资源,再重新创建 ── + List existingPods = kubernetesClient.pods() + .inNamespace(namespace) + .withLabel("service-id", serviceId) + .list() + .getItems(); + if (!existingPods.isEmpty()) { + List oldPodNames = existingPods.stream() + .map(p -> p.getMetadata().getName()) + .collect(Collectors.toList()); + log.info("Service {} already has {} pod(s): {}, cleaning up before rebuild", + serviceId, existingPods.size(), oldPodNames); + doCleanupService(serviceId, namespace); + // 等待旧 Pod 完全删除,防止后续创建时出现 409 冲突 + waitForPodsDeleted(namespace, serviceId, 60); + } + + log.info("Building service {}, podCount={}, podMode={}, namespace={}, preferredNode={}", + serviceId, config.getPodCount(), config.getPodMode(), namespace, config.getPreferredNode()); + + createImagePullSecret(config); + createTokenSecret(config); + createAgentConfigMap(config); + ensureRunnerScriptsConfigMap(config.getNamespace()); + createPodGroup(config); + waitForPodGroupReady(namespace, 
podGroupName, 30); + + for (int i = 0; i < config.getPodCount(); i++) { + Pod pod = ensurePodResources(podSpecGenerator.generate(config, i), config); + Pod createdPod = createPodWithRetry(namespace, pod, 3, 2000); + String podName = createdPod.getMetadata().getName(); + podNames.add(podName); + log.info("Created Pod: {}", podName); + + eventPublisher.publishPodScheduling(serviceId, podName, i, config.getQueueName()); + } + + log.info("Service {} created successfully, pods: {}", serviceId, podNames); + + scheduledNode = waitForScheduledNodeWithEvents(namespace, serviceId, podNames, 10); + log.info("Service {} scheduled to node: {}", serviceId, scheduledNode); + + eventPublisher.publishInitComplete(serviceId, podNames.get(0), podGroupName, + podNames.size(), config.getPodCount()); + + return ServiceBuildResult.success(serviceId, podGroupName, podNames, + config.getQueueName(), scheduledNode); + + } catch (KubernetesClientException e) { + log.error("Failed to build service {}: {}", serviceId, e.getMessage(), e); + eventPublisher.publishInitFailed(serviceId, null, podGroupName, "K8S_ERROR", e.getMessage()); + cleanupService(serviceId); + return ServiceBuildResult.failed(serviceId, "K8S_ERROR", e.getMessage()); + } catch (Exception e) { + log.error("Unexpected error building service {}: {}", serviceId, e.getMessage(), e); + eventPublisher.publishInitFailed(serviceId, null, podGroupName, "INTERNAL_ERROR", e.getMessage()); + cleanupService(serviceId); + return ServiceBuildResult.failed(serviceId, "INTERNAL_ERROR", e.getMessage()); + } + } + + private String waitForScheduledNodeWithEvents(String namespace, String serviceId, + List podNames, int timeoutSeconds) { + long startTime = System.currentTimeMillis(); + long timeoutMillis = timeoutSeconds * 1000L; + + java.util.Set scheduledPods = new java.util.HashSet<>(); + String firstNodeName = null; + + while (System.currentTimeMillis() - startTime < timeoutMillis) { + try { + for (int i = 0; i < podNames.size(); i++) { + 
String podName = podNames.get(i); + + if (scheduledPods.contains(podName)) { + continue; + } + + Pod pod = kubernetesClient.pods() + .inNamespace(namespace) + .withName(podName) + .get(); + + if (pod != null && pod.getSpec() != null && pod.getSpec().getNodeName() != null) { + String nodeName = pod.getSpec().getNodeName(); + + eventPublisher.publishPodScheduled(serviceId, podName, i, nodeName); + scheduledPods.add(podName); + + if (firstNodeName == null) { + firstNodeName = nodeName; + } + + log.info("Pod {} scheduled to node {}", podName, nodeName); + } + } + + if (scheduledPods.size() == podNames.size()) { + break; + } + + Thread.sleep(500); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + break; + } catch (Exception e) { + log.warn("Error waiting for pods to be scheduled: {}", e.getMessage()); + } + } + + if (scheduledPods.size() < podNames.size()) { + log.warn("Timeout waiting for pods to be scheduled, scheduled: {}/{}", + scheduledPods.size(), podNames.size()); + } + + return firstNodeName; + } + + /** + * 创建镜像拉取 Secret(如果配置了仓库凭证) + */ + private void createImagePullSecret(MergedConfig config) { + String secretName = config.getImagePullSecret(); + if (secretName == null || secretName.isEmpty()) { + return; + } + + String registryHost = resolveRegistryHostForPull(config); + if (!StringUtils.hasText(registryHost)) { + log.info("Agent image appears local/no-registry, skipping imagePullSecret creation"); + return; + } + String username = imageBuildConfig.getRegistryUsername(); + String password = imageBuildConfig.getRegistryPassword(); + + if (!StringUtils.hasText(username) || !StringUtils.hasText(password)) { + log.info("Registry credentials not configured, skipping imagePullSecret creation"); + return; + } + + String namespace = config.getNamespace(); + + // 检查 Secret 是否已存在(存在则覆盖,确保 registry host 与凭证保持最新) + Secret existing = kubernetesClient.secrets() + .inNamespace(namespace) + .withName(secretName) + .get(); + + if (existing != null) 
{ + kubernetesClient.secrets() + .inNamespace(namespace) + .withName(secretName) + .delete(); + log.info("ImagePullSecret {} already exists in namespace {}, recreating", secretName, namespace); + } + + // 构建 dockerconfigjson 格式的凭证 + // 格式: {"auths":{"registry-host":{"username":"user","password":"pass","auth":"base64(user:pass)"}}} + String auth = java.util.Base64.getEncoder().encodeToString((username + ":" + password).getBytes()); + String dockerConfigJson = String.format( + "{\"auths\":{\"%s\":{\"username\":\"%s\",\"password\":\"%s\",\"auth\":\"%s\"}}}", + registryHost, username, password, auth + ); + + Secret secret = new SecretBuilder() + .withNewMetadata() + .withName(secretName) + .withNamespace(namespace) + .endMetadata() + .withType("kubernetes.io/dockerconfigjson") + .addToData(".dockerconfigjson", + java.util.Base64.getEncoder().encodeToString(dockerConfigJson.getBytes())) + .build(); + + kubernetesClient.secrets() + .inNamespace(namespace) + .resource(secret) + .create(); + + log.info("Created ImagePullSecret {} in namespace {}", secretName, namespace); + } + + private String resolveRegistryHostForPull(MergedConfig config) { + String registryHostFromImage = extractRegistryHostFromImage(config.getAgentImage()); + if (StringUtils.hasText(registryHostFromImage)) { + return registryHostFromImage; + } + + if (StringUtils.hasText(config.getImageRegistry())) { + return extractRegistryHost(config.getImageRegistry()); + } + + if (StringUtils.hasText(imageBuildConfig.getRegistry())) { + return extractRegistryHost(imageBuildConfig.getRegistry()); + } + + return ""; + } + + /** + * 从镜像地址提取 registry host: + * 仅当首段符合 registry 形式(含 '.'、':' 或 localhost)时认为包含 registry。 + * 例如: + * - 10.30.107.146/robot/a:b -> 10.30.107.146 + * - docker.io/library/nginx:latest -> docker.io + * - service-123-agent:tag -> "" + */ + private String extractRegistryHostFromImage(String image) { + if (!StringUtils.hasText(image)) { + return ""; + } + String value = image.trim(); + int slash = 
value.indexOf('/'); + if (slash <= 0) { + return ""; + } + String first = value.substring(0, slash); + if (first.contains(".") || first.contains(":") || "localhost".equals(first)) { + return first; + } + return ""; + } + + private String extractRegistryHost(String registry) { + if (!StringUtils.hasText(registry)) { + return ""; + } + String value = registry.trim(); + if (value.startsWith("http://")) { + value = value.substring("http://".length()); + } else if (value.startsWith("https://")) { + value = value.substring("https://".length()); + } + int slash = value.indexOf("/"); + if (slash > 0) { + return value.substring(0, slash); + } + return value; + } + + private void createTokenSecret(MergedConfig config) { + String secretName = "svc-" + config.getServiceId() + "-token"; + String namespace = config.getNamespace(); + + Secret existing = kubernetesClient.secrets() + .inNamespace(namespace) + .withName(secretName) + .get(); + + if (existing != null) { + log.info("Token Secret {} already exists, updating...", secretName); + kubernetesClient.secrets() + .inNamespace(namespace) + .withName(secretName) + .delete(); + } + + Secret secret = new SecretBuilder() + .withNewMetadata() + .withName(secretName) + .withNamespace(namespace) + .addToLabels("app", "ai-worker-service") + .addToLabels("service-id", config.getServiceId()) + .endMetadata() + .withType("Opaque") + .addToStringData("token", config.getToken()) + .build(); + + kubernetesClient.secrets() + .inNamespace(namespace) + .resource(secret) + .create(); + + log.info("Created Token Secret {} in namespace {}", secretName, namespace); + } + + /** + * 创建 Agent 配置文件 ConfigMap (per-service) + * + * ConfigMap 名称: svc-{serviceId}-agent-config + * 数据: config.json → 挂载到 /opt/agent/config.json + */ + private void createAgentConfigMap(MergedConfig config) { + String configMapName = PodSpecGenerator.agentConfigMapName(config.getServiceId()); + String namespace = config.getNamespace(); + + // 删除旧的(如果存在) + ConfigMap existing = 
kubernetesClient.configMaps() + .inNamespace(namespace) + .withName(configMapName) + .get(); + if (existing != null) { + kubernetesClient.configMaps() + .inNamespace(namespace) + .withName(configMapName) + .delete(); + log.info("Deleted existing Agent ConfigMap: {}", configMapName); + } + + // config.json 内容:优先使用 MergedConfig.configJson,否则从 classpath 加载默认配置 + String configJsonContent = config.getConfigJson(); + if (configJsonContent == null || configJsonContent.isBlank()) { + configJsonContent = loadDefaultAgentConfig(); + log.info("Agent configJson is empty, loaded default from classpath for service {}", config.getServiceId()); + } + + ConfigMap configMap = new ConfigMapBuilder() + .withNewMetadata() + .withName(configMapName) + .withNamespace(namespace) + .addToLabels("app", "ai-worker-service") + .addToLabels("service-id", config.getServiceId()) + .endMetadata() + .addToData("config.json", configJsonContent) + .build(); + + kubernetesClient.configMaps() + .inNamespace(namespace) + .resource(configMap) + .create(); + + log.info("Created Agent ConfigMap {} in namespace {}", configMapName, namespace); + } + + /** + * 从 classpath 加载默认 Agent config.json + * 与镜像构建时 COPY 进 /opt/agent/config.json 的内容一致 + */ + private String loadDefaultAgentConfig() { + try (InputStream is = getClass().getClassLoader().getResourceAsStream("scripts/config.json")) { + if (is != null) { + return new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8)) + .lines().collect(Collectors.joining("\n")); + } + } catch (Exception e) { + log.warn("Failed to load default agent config from classpath: {}", e.getMessage()); + } + log.warn("Default agent config not found in classpath, falling back to '{}'"); + return "{}"; + } + + /** + * 确保 Runner 启动脚本 ConfigMap 最新 (集群级共享,所有 Pod 复用) + * + * ConfigMap 名称: runner-start-script + * 数据: start-runner.sh → 挂载到 /opt/runner/start-runner.sh + * + * 每次调用都 createOrReplace,避免旧脚本长期滞留导致权限模型不一致。 + */ + private void ensureRunnerScriptsConfigMap(String 
namespace) { + String configMapName = PodSpecGenerator.RUNNER_SCRIPT_CONFIGMAP; + + // 从 classpath 加载 start-runner.sh 内容 + String scriptContent = loadRunnerStartScript(); + + ConfigMap configMap = new ConfigMapBuilder() + .withNewMetadata() + .withName(configMapName) + .withNamespace(namespace) + .addToLabels("app", "ai-worker-service") + .addToLabels("component", "runner") + .endMetadata() + .addToData(PodSpecGenerator.RUNNER_SCRIPT_KEY, scriptContent) + .build(); + + kubernetesClient.configMaps() + .inNamespace(namespace) + .resource(configMap) + .createOrReplace(); + + log.info("Created/Updated Runner scripts ConfigMap {} in namespace {}", configMapName, namespace); + } + + /** + * 从 classpath 加载 start-runner.sh 脚本内容 + * 路径: classpath:scripts/start-runner.sh + */ + private String loadRunnerStartScript() { + try (InputStream is = getClass().getResourceAsStream("/scripts/start-runner.sh")) { + if (is != null) { + try (BufferedReader reader = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8))) { + return reader.lines().collect(Collectors.joining("\n")); + } + } + } catch (Exception e) { + log.warn("Failed to load start-runner.sh from classpath: {}", e.getMessage()); + } + + // 兜底:内联最小化的 start-runner.sh + log.warn("Using fallback inline start-runner.sh"); + return """ + #!/bin/bash + set -e + SHARED_KEY_DIR="/shared-keys" + PUBKEY_FILE="${SHARED_KEY_DIR}/zzd_pubkey.pub" + PUBKEY_TIMEOUT="${PUBKEY_TIMEOUT:-120}" + + echo "[Runner] Starting..." + + # 1. sshd 环境 + if [ ! -x /usr/sbin/sshd ]; then + dnf install -y openssh-server openssh-clients sudo && dnf clean all + fi + [ ! 
-f /etc/ssh/ssh_host_rsa_key ] && ssh-keygen -A + sed -i 's/^#*PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config + sed -i 's/^#*PubkeyAuthentication.*/PubkeyAuthentication yes/' /etc/ssh/sshd_config + sed -i 's/^#*PasswordAuthentication.*/PasswordAuthentication no/' /etc/ssh/sshd_config + grep -q '^AuthorizedKeysFile' /etc/ssh/sshd_config || echo 'AuthorizedKeysFile .ssh/authorized_keys' >> /etc/ssh/sshd_config + + # 2. momobot 用户 + id momobot &>/dev/null || { groupadd -g 1000 momobot; useradd -u 1000 -g momobot -m -s /bin/bash momobot; echo 'momobot ALL=(ALL) NOPASSWD:ALL' > /etc/sudoers.d/momobot; chmod 0440 /etc/sudoers.d/momobot; } + + # 3. 等待公钥 + WAIT=0 + while [ ! -f "$PUBKEY_FILE" ] && [ $WAIT -lt $PUBKEY_TIMEOUT ]; do sleep 1; WAIT=$((WAIT+1)); done + [ ! -f "$PUBKEY_FILE" ] && { echo "[Runner] ERROR: pubkey timeout"; exit 1; } + + # 4. authorized_keys + mkdir -p /home/momobot/.ssh /root/.ssh + cp "$PUBKEY_FILE" /home/momobot/.ssh/authorized_keys + chown -R momobot:momobot /home/momobot/.ssh; chmod 700 /home/momobot/.ssh; chmod 600 /home/momobot/.ssh/authorized_keys + cp "$PUBKEY_FILE" /root/.ssh/authorized_keys; chmod 700 /root/.ssh; chmod 600 /root/.ssh/authorized_keys + + # 5. workspace 权限(共享组模型) + WORKSPACE_GROUP="${WORKSPACE_GROUP:-workspace}" + WORKSPACE_GID="${WORKSPACE_GID:-2000}" + if getent group "${WORKSPACE_GID}" >/dev/null 2>&1; then + resolved_group=$(getent group "${WORKSPACE_GID}" | cut -d: -f1) + elif getent group "${WORKSPACE_GROUP}" >/dev/null 2>&1; then + resolved_group="${WORKSPACE_GROUP}" + else + groupadd -g "${WORKSPACE_GID}" "${WORKSPACE_GROUP}" + resolved_group="${WORKSPACE_GROUP}" + fi + usermod -aG "${resolved_group}" momobot + for dir in /workspace /workspace/logs /workspace/user /workspace/workstation /workspace/task-logs /workspace/worker-logs; do + mkdir -p "$dir"; chgrp -R "${resolved_group}" "$dir"; chmod -R g+rwX "$dir"; find "$dir" -type d -exec chmod g+s {} +; chmod 2770 "$dir" + done + + # 6. 
sshd + exec /usr/sbin/sshd -D -e + """; + } + + private void createPodGroup(MergedConfig config) { + Map podGroupSpec = podGroupSpecGenerator.generate(config); + String namespace = config.getNamespace(); + + GenericKubernetesResource podGroup = new GenericKubernetesResource(); + podGroup.setApiVersion("scheduling.volcano.sh/v1beta1"); + podGroup.setKind("PodGroup"); + podGroup.setMetadata(new ObjectMetaBuilder() + .withName("svc-" + config.getServiceId() + "-pg") + .withNamespace(namespace) + .addToLabels("app", "ai-worker-service") + .addToLabels("service-id", config.getServiceId()) + .build()); + podGroup.setAdditionalProperties(Map.of( + "spec", podGroupSpec.get("spec") + )); + + kubernetesClient.genericKubernetesResources("scheduling.volcano.sh/v1beta1", "PodGroup") + .inNamespace(namespace) + .resource(podGroup) + .createOrReplace(); + + log.info("Created/Updated PodGroup svc-{}-pg in namespace {}", config.getServiceId(), namespace); + } + + /** + * 等待 PodGroup phase 离开 Pending 状态。 + * Volcano scheduler 处理 PodGroup 需要时间,如果在 phase=Pending 时就创建 Pod, + * admission webhook (validatepod.volcano.sh) 会拒绝请求。 + * + * @param namespace 命名空间 + * @param podGroupName PodGroup 名称 + * @param timeoutSeconds 最大等待秒数 + */ + private void waitForPodGroupReady(String namespace, String podGroupName, int timeoutSeconds) { + long deadline = System.currentTimeMillis() + timeoutSeconds * 1000L; + long pollIntervalMs = 500; + String phase = null; + + log.info("Waiting for PodGroup {} phase to leave Pending (timeout={}s)...", podGroupName, timeoutSeconds); + + while (System.currentTimeMillis() < deadline) { + try { + GenericKubernetesResource pg = kubernetesClient + .genericKubernetesResources("scheduling.volcano.sh/v1beta1", "PodGroup") + .inNamespace(namespace) + .withName(podGroupName) + .get(); + + if (pg != null) { + @SuppressWarnings("unchecked") + Map status = (Map) pg.getAdditionalProperties().get("status"); + phase = (status != null) ? 
(String) status.get("phase") : null; + + if (phase != null && !"Pending".equals(phase)) { + log.info("PodGroup {} phase is now '{}', proceeding to create Pods", podGroupName, phase); + return; + } + } + } catch (Exception e) { + log.warn("Error checking PodGroup {} status: {}", podGroupName, e.getMessage()); + } + + try { + Thread.sleep(pollIntervalMs); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + log.warn("Interrupted while waiting for PodGroup {}", podGroupName); + break; + } + } + + log.warn("PodGroup {} still in phase '{}' after {}s timeout, will attempt Pod creation anyway", + podGroupName, phase, timeoutSeconds); + } + + /** + * 等待指定 serviceId 的所有 Pod 完全删除。 + * 防止旧 Pod 处于 Terminating 状态时创建同名新 Pod 导致 409 冲突。 + * + * @param namespace 命名空间 + * @param serviceId 服务 ID + * @param timeoutSeconds 最大等待秒数 + */ + private void waitForPodsDeleted(String namespace, String serviceId, int timeoutSeconds) { + long deadline = System.currentTimeMillis() + timeoutSeconds * 1000L; + long pollIntervalMs = 1000; + + log.info("Waiting for all pods of service {} to be fully deleted (timeout={}s)...", serviceId, timeoutSeconds); + + while (System.currentTimeMillis() < deadline) { + try { + List remaining = kubernetesClient.pods() + .inNamespace(namespace) + .withLabel("service-id", serviceId) + .list() + .getItems(); + + if (remaining.isEmpty()) { + log.info("All pods of service {} have been fully deleted", serviceId); + return; + } + + List remainingNames = remaining.stream() + .map(p -> p.getMetadata().getName()) + .collect(Collectors.toList()); + log.debug("Still waiting for {} pod(s) to be deleted: {}", remaining.size(), remainingNames); + + } catch (Exception e) { + log.warn("Error checking pod deletion status for service {}: {}", serviceId, e.getMessage()); + } + + try { + Thread.sleep(pollIntervalMs); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + log.warn("Interrupted while waiting for pods of service {} to be 
deleted", serviceId);
                break;
            }
        }

        // Fell through the polling loop: some pods may still be Terminating.
        log.warn("Timeout ({}s) waiting for pods of service {} to be fully deleted, proceeding anyway",
                timeoutSeconds, serviceId);
    }

    /**
     * Creates a Pod with a bounded retry loop.
     * Guards against two known transient races: the Volcano admission webhook
     * (validatepod.volcano.sh) rejecting the Pod because its PodGroup is still
     * Pending, and a same-named Pod still being deleted (409 Conflict).
     *
     * @param namespace    target namespace
     * @param pod          the Pod to create
     * @param maxRetries   maximum number of creation attempts
     * @param retryDelayMs delay between attempts in milliseconds
     * @return the created Pod
     */
    private Pod createPodWithRetry(String namespace, Pod pod, int maxRetries, long retryDelayMs) {
        String podName = pod.getMetadata().getName();
        Exception lastException = null;
        // Serialized once up front so every attempt logs the exact spec submitted.
        String podDebugJson = toDebugJson(pod);

        for (int attempt = 1; attempt <= maxRetries; attempt++) {
            try {
                log.info("Creating Pod {} with spec: {}", podName, podDebugJson);
                return kubernetesClient.pods()
                        .inNamespace(namespace)
                        .resource(pod)
                        .create();
            } catch (Exception e) {
                lastException = e;
                String msg = e.getMessage();
                log.error("Create Pod {} failed on attempt {}/{} with spec: {}", podName, attempt, maxRetries, podDebugJson, e);
                // Only these two message patterns are treated as retryable races;
                // everything else fails fast below.
                boolean isAdmissionReject = msg != null && msg.contains("podgroup phase is Pending");
                boolean isDeleteConflict = msg != null && msg.contains("object is being deleted");

                if ((isAdmissionReject || isDeleteConflict) && attempt < maxRetries) {
                    log.warn("Pod {} creation failed (attempt {}/{}): {}, retrying in {}ms...",
                            podName, attempt, maxRetries,
                            isDeleteConflict ? "object is being deleted (409 Conflict)" : "admission webhook rejected",
                            retryDelayMs);
                    try {
                        Thread.sleep(retryDelayMs);
                    } catch (InterruptedException ie) {
                        // Restore the interrupt flag before surfacing the failure.
                        Thread.currentThread().interrupt();
                        throw new RuntimeException("Interrupted while retrying Pod creation", ie);
                    }
                } else {
                    // Non-retryable error, or retry budget exhausted: surface immediately.
                    throw new RuntimeException("Failed to create Pod " + podName + " after " + attempt + " attempt(s)", e);
                }
            }
        }

        // Unreachable in practice (the loop always returns or throws); kept as a safety net.
        throw new RuntimeException("Failed to create Pod " + podName + " after " + maxRetries + " retries", lastException);
    }

    // Rebuilds every (init) container's resource requirements so each one carries
    // CPU/memory requests and limits, filling gaps from per-role defaults.
    private Pod ensurePodResources(Pod pod, MergedConfig config) {
        if (pod == null || pod.getSpec() == null) {
            return pod;
        }

        List normalizedContainers = new ArrayList<>();
        for (Container container : pod.getSpec().getContainers()) {
            // The "runner" container uses runner defaults; every other main
            // container falls back to agent defaults.
            ResourceSpec defaults = "runner".equals(container.getName())
                    ? normalizeDefaults(config.getRunnerResources(), FALLBACK_RUNNER_RESOURCES)
                    : normalizeDefaults(config.getAgentResources(), FALLBACK_AGENT_RESOURCES);
            normalizedContainers.add(rebuildContainerResources(container, defaults));
        }

        List initContainers = pod.getSpec().getInitContainers();
        List normalizedInitContainers = new ArrayList<>();
        if (initContainers != null) {
            for (Container initContainer : initContainers) {
                // "permission-init" has its own fixed sizing; other init containers
                // reuse the agent defaults.
                ResourceSpec defaults = "permission-init".equals(initContainer.getName())
                        ?
PERMISSION_INIT_RESOURCES + : normalizeDefaults(config.getAgentResources(), FALLBACK_AGENT_RESOURCES); + normalizedInitContainers.add(rebuildContainerResources(initContainer, defaults)); + } + } + + Pod normalizedPod = new PodBuilder(pod) + .editSpec() + .withContainers(normalizedContainers) + .withInitContainers(normalizedInitContainers) + .endSpec() + .build(); + log.info("Pod {} resource summary before create: {}", pod.getMetadata().getName(), summarizePodResources(normalizedPod)); + return normalizedPod; + } + + private Container rebuildContainerResources(Container container, ResourceSpec defaults) { + if (container == null || defaults == null) { + return container; + } + + ResourceRequirements current = container.getResources(); + Map requests = current != null && current.getRequests() != null + ? new HashMap<>(current.getRequests()) + : new HashMap<>(); + Map limits = current != null && current.getLimits() != null + ? new HashMap<>(current.getLimits()) + : new HashMap<>(); + + putIfMissing(requests, "cpu", defaults.getCpuRequest()); + putIfMissing(limits, "cpu", defaults.getCpuLimit()); + putIfMissing(requests, "memory", defaults.getMemoryRequest()); + putIfMissing(limits, "memory", defaults.getMemoryLimit()); + + ResourceRequirements normalized = new ResourceRequirementsBuilder() + .withRequests(requests) + .withLimits(limits) + .build(); + return new ContainerBuilder(container) + .withResources(normalized) + .build(); + } + + private boolean putIfMissing(Map values, String key, String value) { + if (value == null || value.isBlank()) { + return false; + } + Quantity existing = values.get(key); + if (existing != null && existing.getAmount() != null && !existing.getAmount().isBlank()) { + return false; + } + values.put(key, new Quantity(value)); + return true; + } + + private ResourceSpec normalizeDefaults(ResourceSpec actual, ResourceSpec fallback) { + if (fallback == null) { + return actual; + } + if (actual == null) { + return fallback; + } + return 
ResourceSpec.builder() + .cpuRequest(isBlank(actual.getCpuRequest()) ? fallback.getCpuRequest() : actual.getCpuRequest()) + .cpuLimit(isBlank(actual.getCpuLimit()) ? fallback.getCpuLimit() : actual.getCpuLimit()) + .memoryRequest(isBlank(actual.getMemoryRequest()) ? fallback.getMemoryRequest() : actual.getMemoryRequest()) + .memoryLimit(isBlank(actual.getMemoryLimit()) ? fallback.getMemoryLimit() : actual.getMemoryLimit()) + .build(); + } + + private boolean isBlank(String value) { + return value == null || value.isBlank(); + } + + private String summarizePodResources(Pod pod) { + List parts = new ArrayList<>(); + if (pod.getSpec().getInitContainers() != null) { + for (Container container : pod.getSpec().getInitContainers()) { + parts.add("init:" + summarizeContainerResources(container)); + } + } + if (pod.getSpec().getContainers() != null) { + for (Container container : pod.getSpec().getContainers()) { + parts.add("main:" + summarizeContainerResources(container)); + } + } + return String.join("; ", parts); + } + + private String summarizeContainerResources(Container container) { + ResourceRequirements resources = container.getResources(); + Map requests = resources != null ? resources.getRequests() : null; + Map limits = resources != null ? resources.getLimits() : null; + return container.getName() + + "[req.cpu=" + quantityToString(requests, "cpu") + + ",req.mem=" + quantityToString(requests, "memory") + + ",lim.cpu=" + quantityToString(limits, "cpu") + + ",lim.mem=" + quantityToString(limits, "memory") + "]"; + } + + private String quantityToString(Map values, String key) { + if (values == null) { + return "null"; + } + Quantity quantity = values.get(key); + return quantity == null ? 
"null" : quantity.toString(); + } + + private String toDebugJson(Pod pod) { + try { + return objectMapper.writeValueAsString(pod); + } catch (Exception e) { + return "{\"error\":\"failed to serialize pod\",\"message\":\"" + e.getMessage() + "\"}"; + } + } + + @Override + public ServiceStatusResponse getServiceStatus(String serviceId) { + String namespace = envConfig.getCluster().getNamespace(); + + PodGroupStatusInfo podGroupStatus = getPodGroupStatus(serviceId, namespace); + List podStatuses = getPodStatuses(serviceId, namespace); + enrichWithMetrics(podStatuses, serviceId, namespace); + + return ServiceStatusResponse.builder() + .serviceId(serviceId) + .podGroupStatus(podGroupStatus) + .pods(podStatuses) + .updatedAt(Instant.now()) + .build(); + } + + private void enrichWithMetrics(List podStatuses, String serviceId, String namespace) { + try { + Map metricsMap = getPodMetrics(serviceId, namespace); + log.info("Found {} metrics for service {}", metricsMap.size(), serviceId); + + for (PodStatusInfo podStatus : podStatuses) { + PodMetrics metrics = metricsMap.get(podStatus.getName()); + if (metrics != null) { + log.info("Enriching metrics for pod {}", podStatus.getName()); + podStatus.setResourceUsage(aggregatePodMetrics(metrics, podStatus)); + enrichContainerMetrics(podStatus, metrics); + } + } + } catch (Exception e) { + log.warn("Failed to get metrics for service {}: {}", serviceId, e.getMessage(), e); + } + } + + private Map getPodMetrics(String serviceId, String namespace) { + Map metricsMap = new HashMap<>(); + + try { + List allMetrics = kubernetesClient.top() + .pods() + .inNamespace(namespace) + .metrics() + .getItems(); + + for (PodMetrics pm : allMetrics) { + Map labels = pm.getMetadata().getLabels(); + if (labels != null && serviceId.equals(labels.get("service-id"))) { + metricsMap.put(pm.getMetadata().getName(), pm); + } + } + } catch (Exception e) { + log.warn("Metrics not available: {}", e.getMessage()); + } + + return metricsMap; + } + + private 
ResourceUsageInfo aggregatePodMetrics(PodMetrics metrics, PodStatusInfo podStatus) { + long totalCpuMillicores = 0; + long totalMemoryBytes = 0; + + for (ContainerMetrics cm : metrics.getContainers()) { + Quantity cpuQuantity = cm.getUsage().get("cpu"); + Quantity memQuantity = cm.getUsage().get("memory"); + + if (cpuQuantity != null) { + totalCpuMillicores += parseMillicores(cpuQuantity.getAmount(), cpuQuantity.getFormat()); + } + if (memQuantity != null) { + totalMemoryBytes += parseBytes(memQuantity.getAmount(), memQuantity.getFormat()); + } + } + + String cpuLimit = null; + String memoryLimit = null; + String cpuRequest = null; + String memoryRequest = null; + + if (podStatus.getContainers() != null && !podStatus.getContainers().isEmpty()) { + ContainerStatusInfo firstContainer = podStatus.getContainers().get(0); + if (firstContainer.getResourceUsage() != null) { + cpuLimit = firstContainer.getResourceUsage().getCpuLimit(); + memoryLimit = firstContainer.getResourceUsage().getMemoryLimit(); + cpuRequest = firstContainer.getResourceUsage().getCpuRequest(); + memoryRequest = firstContainer.getResourceUsage().getMemoryRequest(); + } + } + + return ResourceUsageInfo.builder() + .cpuUsage(formatMillicores(totalCpuMillicores)) + .memoryUsage(formatBytes(totalMemoryBytes)) + .cpuMillicores(totalCpuMillicores) + .memoryBytes(totalMemoryBytes) + .cpuLimit(cpuLimit) + .memoryLimit(memoryLimit) + .cpuRequest(cpuRequest) + .memoryRequest(memoryRequest) + .cpuUsagePercent(calculateUsagePercent(totalCpuMillicores, cpuLimit)) + .memoryUsagePercent(calculateUsagePercent(totalMemoryBytes, memoryLimit)) + .build(); + } + + private void enrichContainerMetrics(PodStatusInfo podStatus, PodMetrics metrics) { + if (podStatus.getContainers() == null) return; + + Map containerMetricsMap = new HashMap<>(); + for (ContainerMetrics cm : metrics.getContainers()) { + containerMetricsMap.put(cm.getName(), cm); + } + + for (ContainerStatusInfo containerStatus : podStatus.getContainers()) { + 
ContainerMetrics cm = containerMetricsMap.get(containerStatus.getName()); + if (cm != null) { + Quantity cpuQuantity = cm.getUsage().get("cpu"); + Quantity memQuantity = cm.getUsage().get("memory"); + + long cpuMillicores = cpuQuantity != null + ? parseMillicores(cpuQuantity.getAmount(), cpuQuantity.getFormat()) : 0; + long memoryBytes = memQuantity != null + ? parseBytes(memQuantity.getAmount(), memQuantity.getFormat()) : 0; + + ResourceUsageInfo existingUsage = containerStatus.getResourceUsage(); + String cpuLimit = existingUsage != null ? existingUsage.getCpuLimit() : null; + String memoryLimit = existingUsage != null ? existingUsage.getMemoryLimit() : null; + + containerStatus.setResourceUsage(ResourceUsageInfo.builder() + .cpuUsage(formatMillicores(cpuMillicores)) + .memoryUsage(formatBytes(memoryBytes)) + .cpuMillicores(cpuMillicores) + .memoryBytes(memoryBytes) + .cpuLimit(cpuLimit) + .memoryLimit(memoryLimit) + .cpuRequest(existingUsage != null ? existingUsage.getCpuRequest() : null) + .memoryRequest(existingUsage != null ? 
existingUsage.getMemoryRequest() : null) + .cpuUsagePercent(calculateUsagePercent(cpuMillicores, cpuLimit)) + .memoryUsagePercent(calculateUsagePercent(memoryBytes, memoryLimit)) + .build()); + } + } + } + + private long parseMillicores(String amount, String format) { + try { + if (amount == null) return 0; + double value = Double.parseDouble(amount); + + if (format != null) { + if (format.equals("n")) { + return (long) (value / 1_000_000); + } else if (format.equals("u")) { + return (long) (value / 1_000); + } else if (format.equals("m")) { + return (long) value; + } + } + + return (long) (value * 1000); + } catch (NumberFormatException e) { + return 0; + } + } + + private long parseBytes(String amount, String format) { + try { + if (amount == null) return 0; + double value = Double.parseDouble(amount); + + if (format != null) { + switch (format) { + case "Ki": return (long) (value * 1024); + case "Mi": return (long) (value * 1024 * 1024); + case "Gi": return (long) (value * 1024 * 1024 * 1024); + case "K": return (long) (value * 1000); + case "M": return (long) (value * 1000 * 1000); + case "G": return (long) (value * 1000 * 1000 * 1000); + } + } + + return (long) value; + } catch (NumberFormatException e) { + return 0; + } + } + + private String formatMillicores(long millicores) { + if (millicores >= 1000) { + return String.format("%.2f", millicores / 1000.0); + } + return millicores + "m"; + } + + private String formatBytes(long bytes) { + if (bytes >= 1024L * 1024 * 1024) { + return String.format("%.2fGi", bytes / (1024.0 * 1024 * 1024)); + } else if (bytes >= 1024L * 1024) { + return String.format("%.2fMi", bytes / (1024.0 * 1024)); + } else if (bytes >= 1024) { + return String.format("%.2fKi", bytes / 1024.0); + } + return bytes + "B"; + } + + private Double calculateUsagePercent(long usage, String limit) { + if (limit == null || limit.isEmpty()) return null; + + try { + long limitValue; + if (limit.endsWith("m")) { + limitValue = 
Long.parseLong(limit.substring(0, limit.length() - 1)); + } else if (limit.endsWith("Gi")) { + limitValue = (long) (Double.parseDouble(limit.substring(0, limit.length() - 2)) * 1024 * 1024 * 1024); + } else if (limit.endsWith("Mi")) { + limitValue = (long) (Double.parseDouble(limit.substring(0, limit.length() - 2)) * 1024 * 1024); + } else { + limitValue = Long.parseLong(limit) * 1000; + } + + if (limitValue == 0) return null; + return Math.round(usage * 10000.0 / limitValue) / 100.0; + } catch (Exception e) { + return null; + } + } + + private PodGroupStatusInfo getPodGroupStatus(String serviceId, String namespace) { + String podGroupName = "svc-" + serviceId + "-pg"; + + try { + GenericKubernetesResource podGroup = kubernetesClient + .genericKubernetesResources("scheduling.volcano.sh/v1beta1", "PodGroup") + .inNamespace(namespace) + .withName(podGroupName) + .get(); + + if (podGroup == null) { + return null; + } + + @SuppressWarnings("unchecked") + Map status = (Map) podGroup.getAdditionalProperties().get("status"); + @SuppressWarnings("unchecked") + Map spec = (Map) podGroup.getAdditionalProperties().get("spec"); + + return PodGroupStatusInfo.builder() + .name(podGroupName) + .phase(status != null ? (String) status.get("phase") : "Unknown") + .minMember(spec != null ? (Integer) spec.get("minMember") : 1) + .running(status != null ? toInt(status.get("running")) : 0) + .succeeded(status != null ? toInt(status.get("succeeded")) : 0) + .failed(status != null ? toInt(status.get("failed")) : 0) + .pending(status != null ? 
toInt(status.get("pending")) : 0) + .build(); + } catch (Exception e) { + log.warn("Failed to get PodGroup status: {}", e.getMessage()); + return null; + } + } + + private List getPodStatuses(String serviceId, String namespace) { + List pods = kubernetesClient.pods() + .inNamespace(namespace) + .withLabel("service-id", serviceId) + .list() + .getItems(); + + return pods.stream() + .map(this::toPodStatusInfo) + .collect(Collectors.toList()); + } + + private PodStatusInfo toPodStatusInfo(Pod pod) { + PodStatus status = pod.getStatus(); + PodSpec spec = pod.getSpec(); + + Map containerSpecMap = new HashMap<>(); + if (spec != null && spec.getContainers() != null) { + for (Container c : spec.getContainers()) { + containerSpecMap.put(c.getName(), c); + } + } + + List containerStatuses = new ArrayList<>(); + if (status != null && status.getContainerStatuses() != null) { + for (ContainerStatus cs : status.getContainerStatuses()) { + Container containerSpec = containerSpecMap.get(cs.getName()); + ResourceUsageInfo resourceUsage = getContainerResourceLimits(containerSpec); + + containerStatuses.add(ContainerStatusInfo.builder() + .name(cs.getName()) + .ready(cs.getReady() != null && cs.getReady()) + .state(getContainerState(cs)) + .reason(getContainerReason(cs)) + .exitCode(getContainerExitCode(cs)) + .restartCount(cs.getRestartCount()) + .resourceUsage(resourceUsage) + .build()); + } + } + + String nodeHostname = spec != null ? spec.getNodeName() : null; + + return PodStatusInfo.builder() + .name(pod.getMetadata().getName()) + .phase(status != null ? status.getPhase() : "Unknown") + .nodeName(status != null ? status.getHostIP() : null) + .nodeHostname(nodeHostname) + .startTime(status != null && status.getStartTime() != null + ? 
Instant.parse(status.getStartTime()) : null) + .containers(containerStatuses) + .build(); + } + + private ResourceUsageInfo getContainerResourceLimits(Container container) { + if (container == null || container.getResources() == null) { + return null; + } + + ResourceRequirements resources = container.getResources(); + Map limits = resources.getLimits(); + Map requests = resources.getRequests(); + + return ResourceUsageInfo.builder() + .cpuLimit(limits != null && limits.get("cpu") != null + ? limits.get("cpu").getAmount() + (limits.get("cpu").getFormat() != null ? limits.get("cpu").getFormat() : "") : null) + .memoryLimit(limits != null && limits.get("memory") != null + ? limits.get("memory").getAmount() + (limits.get("memory").getFormat() != null ? limits.get("memory").getFormat() : "") : null) + .cpuRequest(requests != null && requests.get("cpu") != null + ? requests.get("cpu").getAmount() + (requests.get("cpu").getFormat() != null ? requests.get("cpu").getFormat() : "") : null) + .memoryRequest(requests != null && requests.get("memory") != null + ? requests.get("memory").getAmount() + (requests.get("memory").getFormat() != null ? 
requests.get("memory").getFormat() : "") : null)
                .build();
    }

    /** Maps a container status to a coarse state string: running/waiting/terminated/unknown. */
    private String getContainerState(ContainerStatus cs) {
        if (cs.getState() == null) return "unknown";
        if (cs.getState().getRunning() != null) return "running";
        if (cs.getState().getWaiting() != null) return "waiting";
        if (cs.getState().getTerminated() != null) return "terminated";
        return "unknown";
    }

    /** Returns the waiting/terminated reason, or null when the container is running. */
    private String getContainerReason(ContainerStatus cs) {
        if (cs.getState() == null) return null;
        if (cs.getState().getWaiting() != null) return cs.getState().getWaiting().getReason();
        if (cs.getState().getTerminated() != null) return cs.getState().getTerminated().getReason();
        return null;
    }

    /** Exit code of a terminated container, or null if it has not terminated. */
    private Integer getContainerExitCode(ContainerStatus cs) {
        if (cs.getState() == null) return null;
        if (cs.getState().getTerminated() != null) return cs.getState().getTerminated().getExitCode();
        return null;
    }

    @Override
    public StopResult stopService(String serviceId, boolean graceful) {
        String namespace = envConfig.getCluster().getNamespace();

        // When OSS mounting is enabled, resolve the lock key from the Pod labels and try to lock.
        String ossLockKey = buildOssLockKeyFromPods(serviceId, namespace);
        String ossLock = null;
        if (ossLockKey != null) {
            ossLock = lockService.tryAcquireLockByKey(ossLockKey);
            if (ossLock == null) {
                log.warn("Failed to acquire OSS lock for stop service {}, key={}", serviceId, ossLockKey);
                // A stop must not be fully blocked by lock failure; log a warning and continue.
                log.warn("Proceeding with stop without OSS lock for service {}", serviceId);
            } else {
                log.info("Acquired OSS lock for stop service {}, key={}", serviceId, ossLockKey);
            }
        }

        try {
            return doStopService(serviceId, graceful, namespace);
        } finally {
            // Release only a lock this call actually acquired.
            if (ossLockKey != null && ossLock != null) {
                lockService.releaseLockByKey(ossLockKey, ossLock);
                log.info("Released OSS lock for stop service {}, key={}", serviceId, ossLockKey);
            }
        }
    }

    // Deletes the service's Pods (grace period 30s when graceful, 0 otherwise),
    // then best-effort deletes its Volcano PodGroup and token Secret, and finally
    // publishes a session-end event with the pre-deletion pod count.
    private StopResult doStopService(String serviceId, boolean graceful, String namespace) {
        String podGroupName = "svc-" + serviceId + "-pg";

        try {
            log.info("Stopping service {}, graceful={}", serviceId, graceful);

            // Counted before deletion so the session-end event reports how many pods existed.
            List pods = kubernetesClient.pods()
                    .inNamespace(namespace)
                    .withLabel("service-id", serviceId)
                    .list()
                    .getItems();
            int podCount = pods.size();

            long gracePeriod = graceful ? 30L : 0L;

            kubernetesClient.pods()
                    .inNamespace(namespace)
                    .withLabel("service-id", serviceId)
                    .withGracePeriod(gracePeriod)
                    .delete();

            // PodGroup deletion is best-effort: a failure is logged, not propagated.
            try {
                kubernetesClient.genericKubernetesResources("scheduling.volcano.sh/v1beta1", "PodGroup")
                        .inNamespace(namespace)
                        .withName(podGroupName)
                        .delete();
            } catch (Exception e) {
                log.warn("Failed to delete PodGroup: {}", e.getMessage());
            }

            // Token Secret deletion is likewise best-effort.
            try {
                String secretName = "svc-" + serviceId + "-token";
                kubernetesClient.secrets()
                        .inNamespace(namespace)
                        .withName(secretName)
                        .delete();
                log.info("Deleted Token Secret: {}", secretName);
            } catch (Exception e) {
                log.warn("Failed to delete Token Secret: {}", e.getMessage());
            }

            eventPublisher.publishSessionEnd(serviceId, podGroupName, podCount, graceful);

            return StopResult.success(serviceId);

        } catch (Exception e) {
            log.error("Failed to stop service {}: {}", serviceId, e.getMessage());
            return StopResult.failed(serviceId, e.getMessage());
        }
    }

    @Override
    public void cleanupService(String serviceId) {
        String namespace = envConfig.getCluster().getNamespace();

        // When OSS mounting is enabled, try to take the lock; cleanup never blocks on it.
        String ossLockKey = buildOssLockKeyFromPods(serviceId, namespace);
        String ossLock = null;
        if (ossLockKey != null) {
            ossLock = lockService.tryAcquireLockByKey(ossLockKey);
            if (ossLock != null) {
                log.info("Acquired OSS lock for cleanup service {}, key={}", serviceId, ossLockKey);
            }
        }

        try {
            doCleanupService(serviceId, namespace);
        } finally {
            if (ossLockKey != null && ossLock != null) {
                lockService.releaseLockByKey(ossLockKey, ossLock);
                log.info("Released OSS lock for cleanup service {}, key={}", serviceId,
ossLockKey); + } + } + } + + private void doCleanupService(String serviceId, String namespace) { + log.info("Cleaning up resources for service {} in namespace {}", serviceId, namespace); + + try { + kubernetesClient.genericKubernetesResources("scheduling.volcano.sh/v1beta1", "PodGroup") + .inNamespace(namespace) + .withName("svc-" + serviceId + "-pg") + .delete(); + } catch (Exception e) { + log.warn("Failed to delete PodGroup: {}", e.getMessage()); + } + + kubernetesClient.pods() + .inNamespace(namespace) + .withLabel("service-id", serviceId) + .delete(); + + kubernetesClient.secrets() + .inNamespace(namespace) + .withLabel("service-id", serviceId) + .delete(); + + kubernetesClient.configMaps() + .inNamespace(namespace) + .withLabel("service-id", serviceId) + .delete(); + + log.info("Cleaned up all resources for service {}", serviceId); + } + + // ==================== OSS 锁辅助方法 ==================== + + /** + * 从 MergedConfig 构建 OSS 锁键 + * 格式: oss:lock:{serviceId} (精确到 workstationId 级别) + * + * @return 锁键,如果 OSS 未启用返回 null + */ + private String buildOssLockKey(MergedConfig config) { + if (config.getOssMount() == null || !config.getOssMount().isEnabled()) { + return null; + } + return OSS_LOCK_PREFIX + config.getServiceId(); + } + + /** + * 从 serviceId 构建 OSS 锁键 + * 用于 stopService/cleanupService 等不传 MergedConfig 的场景 + * + * @return 锁键,如果 OSS 未启用返回 null + */ + private String buildOssLockKeyFromPods(String serviceId, String namespace) { + if (!envConfig.getOssMount().isEnabled()) { + return null; + } + return OSS_LOCK_PREFIX + serviceId; + } + + @Override + public GeneratedSpec previewSpec(MergedConfig config) { + Map podGroupSpec = podGroupSpecGenerator.generate(config); + + List> podSpecs = new ArrayList<>(); + for (int i = 0; i < config.getPodCount(); i++) { + Pod pod = podSpecGenerator.generate(config, i); + Map podMap = convertPodToMap(pod); + podSpecs.add(podMap); + } + + return GeneratedSpec.builder() + .serviceId(config.getServiceId()) + 
.podGroupSpec(podGroupSpec)
                .podSpecs(podSpecs)
                .build();
    }

    // Round-trips the Pod through its JSON form to obtain an order-preserving
    // plain-Map representation; on failure returns a single-entry error map.
    @SuppressWarnings("unchecked")
    private Map convertPodToMap(Pod pod) {
        try {
            String json = objectMapper.writeValueAsString(pod);
            return objectMapper.readValue(json, LinkedHashMap.class);
        } catch (Exception e) {
            log.error("Failed to convert Pod to Map: {}", e.getMessage());
            return Map.of("error", e.getMessage());
        }
    }

    // Lenient numeric coercion for untyped CRD status fields: null or
    // non-numeric values become 0.
    private int toInt(Object value) {
        if (value == null) return 0;
        if (value instanceof Integer) return (Integer) value;
        if (value instanceof Number) return ((Number) value).intValue();
        return 0;
    }

    /**
     * Returns the sorted names of this service's pods that are Running and Ready
     * (see {@code isReadyPod}); pods being deleted are excluded.
     */
    @Override
    public List getRunningPods(String serviceId) {
        String namespace = envConfig.getCluster().getNamespace();

        List pods = kubernetesClient.pods()
                .inNamespace(namespace)
                .withLabel("service-id", serviceId)
                .list()
                .getItems();

        return pods.stream()
                .filter(this::isReadyPod)
                .map(pod -> pod.getMetadata().getName())
                .sorted()
                .collect(Collectors.toList());
    }

    /**
     * Lists the distinct service-ids of all ready "ai-worker-service" pods in the
     * configured namespace, sorted; returns an empty list on any API failure.
     */
    @Override
    public List listAllServiceIds() {
        String namespace = envConfig.getCluster().getNamespace();

        try {
            List pods = kubernetesClient.pods()
                    .inNamespace(namespace)
                    .withLabel("app", "ai-worker-service")
                    .list()
                    .getItems();

            return pods.stream()
                    .filter(this::isReadyPod)
                    .map(pod -> pod.getMetadata().getLabels().get("service-id"))
                    .filter(id -> id != null && !id.isEmpty())
                    .distinct()
                    .sorted()
                    .collect(Collectors.toList());

        } catch (Exception e) {
            log.error("Failed to list all service IDs from K8s: {}", e.getMessage(), e);
            return List.of();
        }
    }

    // A pod counts as ready only when it is not being deleted (no
    // deletionTimestamp), its phase is Running, and it carries a
    // Ready=True condition.
    private boolean isReadyPod(Pod pod) {
        if (pod == null || pod.getMetadata() == null || pod.getMetadata().getDeletionTimestamp() != null) {
            return false;
        }
        if (pod.getStatus() == null || pod.getStatus().getPhase() == null) {
            return false;
        }

        String phase = pod.getStatus().getPhase();
        if (!"Running".equals(phase)) {
            return false;
        }

        List conditions =
pod.getStatus().getConditions(); + if (conditions == null) { + return false; + } + + return conditions.stream() + .anyMatch(condition -> "Ready".equals(condition.getType()) && "True".equals(condition.getStatus())); + } + + @Override + public ScaleResult scaleDown(String serviceId, String podName) { + String namespace = envConfig.getCluster().getNamespace(); + + try { + List runningPods = getRunningPods(serviceId); + int previousCount = runningPods.size(); + + if (previousCount == 0) { + log.warn("No running pods found for service {}", serviceId); + return ScaleResult.failed(serviceId, "No running pods to scale down"); + } + + if (podName == null || podName.isEmpty()) { + return ScaleResult.failed(serviceId, "podName is required for scale-down"); + } + + String targetPod = podName; + if (!runningPods.contains(targetPod)) { + return ScaleResult.failed(serviceId, "Pod not found: " + targetPod); + } + + log.info("Scaling down service {}: deleting pod {}", serviceId, targetPod); + + kubernetesClient.pods() + .inNamespace(namespace) + .withName(targetPod) + .withGracePeriod(0L) + .delete(); + + runningPods.remove(targetPod); + + log.info("Service {} scaled down: {} -> {} pods", + serviceId, previousCount, runningPods.size()); + + return ScaleResult.success( + serviceId, + "SCALE_DOWN", + previousCount, + runningPods.size(), + previousCount, + runningPods, + null, + List.of(targetPod) + ); + + } catch (Exception e) { + log.error("Failed to scale down service {}: {}", serviceId, e.getMessage(), e); + return ScaleResult.failed(serviceId, e.getMessage()); + } + } + + @Override + public ScaleResult scaleUp(String serviceId, int targetPodCount, MergedConfig config) { + String namespace = envConfig.getCluster().getNamespace(); + + try { + cleanupTerminatedPods(serviceId, namespace); + + List runningPods = getRunningPods(serviceId); + int previousCount = runningPods.size(); + + // 当服务当前无存活 Pod 时,先刷新依赖资源,避免使用到过期 ConfigMap/Secret。 + if (previousCount == 0) { + 
createImagePullSecret(config); + createTokenSecret(config); + createAgentConfigMap(config); + ensureRunnerScriptsConfigMap(config.getNamespace()); + createPodGroup(config); + } + + if (targetPodCount <= previousCount) { + log.info("Service {} already has {} pods, target is {}, no scaling needed", + serviceId, previousCount, targetPodCount); + return ScaleResult.noChange(serviceId, previousCount, targetPodCount, runningPods); + } + + int podsToCreate = targetPodCount - previousCount; + + log.info("Scaling up service {}: {} -> {} pods (creating {} new pods)", + serviceId, previousCount, targetPodCount, podsToCreate); + + List existingIndices = runningPods.stream() + .map(name -> { + String[] parts = name.split("-"); + try { + return Integer.parseInt(parts[parts.length - 1]); + } catch (NumberFormatException e) { + return -1; + } + }) + .filter(i -> i >= 0) + .collect(Collectors.toList()); + + List addedPods = new ArrayList<>(); + int nextIndex = existingIndices.isEmpty() ? 0 : existingIndices.stream().max(Integer::compare).orElse(0) + 1; + + for (int i = 0; i < podsToCreate; i++) { + while (existingIndices.contains(nextIndex)) { + nextIndex++; + } + + Pod pod = podSpecGenerator.generate(config, nextIndex); + Pod createdPod = kubernetesClient.pods() + .inNamespace(namespace) + .resource(pod) + .create(); + + String podName = createdPod.getMetadata().getName(); + addedPods.add(podName); + runningPods.add(podName); + existingIndices.add(nextIndex); + + eventPublisher.publishPodScheduling(serviceId, podName, nextIndex, config.getQueueName()); + + log.info("Created Pod: {} (index={})", podName, nextIndex); + nextIndex++; + } + + waitForScheduledNodeWithEvents(namespace, serviceId, addedPods, 10); + + log.info("Service {} scaled up: {} -> {} pods, added: {}", + serviceId, previousCount, runningPods.size(), addedPods); + + return ScaleResult.success( + serviceId, + "SCALE_UP", + previousCount, + runningPods.size(), + targetPodCount, + runningPods, + addedPods, + null + ); + 
+ } catch (Exception e) { + log.error("Failed to scale up service {}: {}", serviceId, e.getMessage(), e); + return ScaleResult.failed(serviceId, e.getMessage()); + } + } + + /** + * 清理已终止的 Pod(Succeeded/Failed),防止 scaleUp 创建同名 Pod 时 409 冲突。 + */ + private void cleanupTerminatedPods(String serviceId, String namespace) { + try { + List allPods = kubernetesClient.pods() + .inNamespace(namespace) + .withLabel("service-id", serviceId) + .list() + .getItems(); + + List terminatedPodNames = allPods.stream() + .filter(pod -> { + String phase = pod.getStatus() != null ? pod.getStatus().getPhase() : null; + return "Succeeded".equals(phase) || "Failed".equals(phase); + }) + .map(pod -> pod.getMetadata().getName()) + .collect(Collectors.toList()); + + if (terminatedPodNames.isEmpty()) { + return; + } + + log.info("Cleaning up {} terminated pod(s) for service {}: {}", + terminatedPodNames.size(), serviceId, terminatedPodNames); + + for (String podName : terminatedPodNames) { + kubernetesClient.pods() + .inNamespace(namespace) + .withName(podName) + .withGracePeriod(0L) + .delete(); + } + + waitForPodsDeleted(namespace, serviceId, 30); + } catch (Exception e) { + log.warn("Failed to cleanup terminated pods for service {}: {}", serviceId, e.getMessage()); + } + } +} diff --git a/back/src/main/java/com/linkwork/service/McpCryptoService.java b/back/src/main/java/com/linkwork/service/McpCryptoService.java new file mode 100644 index 0000000..a9ab20e --- /dev/null +++ b/back/src/main/java/com/linkwork/service/McpCryptoService.java @@ -0,0 +1,185 @@ +package com.linkwork.service; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import jakarta.annotation.PostConstruct; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.stereotype.Service; +import 
org.springframework.util.StringUtils; + +import javax.crypto.Cipher; +import javax.crypto.SecretKey; +import javax.crypto.spec.GCMParameterSpec; +import javax.crypto.spec.SecretKeySpec; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.security.SecureRandom; +import java.util.Base64; +import java.util.Map; + +/** + * AES-256-GCM 加解密服务。 + *

+ * 存储格式: base64(nonce[12] + ciphertext + tag[16]) + *

+ * 密钥来源优先级: + * 1. 环境变量 MCP_ENCRYPTION_KEY + * 2. 配置 robot.mcp.encryption-key + *

+ * 密钥必须为 32 字节(hex 64 字符 或 base64 44 字符)。 + * 当密钥未配置时,加解密方法退化为明文透传(兼容开发环境)。 + */ +@Slf4j +@Service +@RequiredArgsConstructor +public class McpCryptoService { + + private static final String ALGORITHM = "AES/GCM/NoPadding"; + private static final int GCM_NONCE_LENGTH = 12; + private static final int GCM_TAG_LENGTH = 128; + + private final ObjectMapper objectMapper; + + @Value("${robot.mcp.encryption-key:}") + private String configKey; + + private SecretKey secretKey; + private final SecureRandom secureRandom = new SecureRandom(); + + @PostConstruct + void init() { + String envKey = System.getenv("MCP_ENCRYPTION_KEY"); + String rawKey = StringUtils.hasText(envKey) ? envKey : configKey; + + if (!StringUtils.hasText(rawKey)) { + log.warn("MCP_ENCRYPTION_KEY not configured — encryption is DISABLED, data stored as plaintext"); + return; + } + + byte[] keyBytes = decodeKey(rawKey.trim()); + if (keyBytes.length != 32) { + throw new IllegalArgumentException( + "MCP_ENCRYPTION_KEY must be 32 bytes (got " + keyBytes.length + "). 
" + + "Use 64 hex chars or 44 base64 chars."); + } + secretKey = new SecretKeySpec(keyBytes, "AES"); + log.info("MCP encryption initialized (AES-256-GCM)"); + } + + public boolean isEnabled() { + return secretKey != null; + } + + /** + * 加密字符串,返回 base64 编码的 nonce+ciphertext+tag + */ + public String encrypt(String plaintext) { + if (!isEnabled() || !StringUtils.hasText(plaintext)) { + return plaintext; + } + try { + byte[] nonce = new byte[GCM_NONCE_LENGTH]; + secureRandom.nextBytes(nonce); + + Cipher cipher = Cipher.getInstance(ALGORITHM); + cipher.init(Cipher.ENCRYPT_MODE, secretKey, new GCMParameterSpec(GCM_TAG_LENGTH, nonce)); + + byte[] ciphertext = cipher.doFinal(plaintext.getBytes(StandardCharsets.UTF_8)); + + ByteBuffer buffer = ByteBuffer.allocate(nonce.length + ciphertext.length); + buffer.put(nonce); + buffer.put(ciphertext); + + return Base64.getEncoder().encodeToString(buffer.array()); + } catch (Exception e) { + throw new RuntimeException("AES-GCM encryption failed", e); + } + } + + /** + * 解密 base64 编码的密文 + */ + public String decrypt(String cipherBase64) { + if (!isEnabled() || !StringUtils.hasText(cipherBase64)) { + return cipherBase64; + } + try { + byte[] decoded = Base64.getDecoder().decode(cipherBase64); + if (decoded.length < GCM_NONCE_LENGTH) { + log.warn("Ciphertext too short, returning as-is (possibly plaintext)"); + return cipherBase64; + } + + ByteBuffer buffer = ByteBuffer.wrap(decoded); + byte[] nonce = new byte[GCM_NONCE_LENGTH]; + buffer.get(nonce); + byte[] ciphertext = new byte[buffer.remaining()]; + buffer.get(ciphertext); + + Cipher cipher = Cipher.getInstance(ALGORITHM); + cipher.init(Cipher.DECRYPT_MODE, secretKey, new GCMParameterSpec(GCM_TAG_LENGTH, nonce)); + + byte[] plaintext = cipher.doFinal(ciphertext); + return new String(plaintext, StandardCharsets.UTF_8); + } catch (IllegalArgumentException e) { + log.debug("Not base64, treating as plaintext: {}", e.getMessage()); + return cipherBase64; + } catch (Exception e) { + 
log.warn("AES-GCM decryption failed, returning as-is (possibly plaintext data): {}", e.getMessage()); + return cipherBase64; + } + } + + /** + * 加密 Map 为加密后的 JSON 字符串 + */ + public String encryptMap(Map map) { + if (map == null || map.isEmpty()) { + return null; + } + try { + String json = objectMapper.writeValueAsString(map); + return encrypt(json); + } catch (JsonProcessingException e) { + throw new RuntimeException("Failed to serialize map for encryption", e); + } + } + + /** + * 解密 JSON 字符串为 Map + */ + public Map decryptMap(String encrypted) { + if (!StringUtils.hasText(encrypted)) { + return null; + } + String json = decrypt(encrypted); + try { + return objectMapper.readValue(json, new TypeReference<>() {}); + } catch (JsonProcessingException e) { + log.warn("Failed to parse decrypted map, returning null: {}", e.getMessage()); + return null; + } + } + + private byte[] decodeKey(String raw) { + if (raw.matches("[0-9a-fA-F]+") && raw.length() == 64) { + return hexToBytes(raw); + } + try { + return Base64.getDecoder().decode(raw); + } catch (IllegalArgumentException e) { + return raw.getBytes(StandardCharsets.UTF_8); + } + } + + private byte[] hexToBytes(String hex) { + byte[] bytes = new byte[hex.length() / 2]; + for (int i = 0; i < bytes.length; i++) { + bytes[i] = (byte) Integer.parseInt(hex.substring(2 * i, 2 * i + 2), 16); + } + return bytes; + } +} diff --git a/back/src/main/java/com/linkwork/service/McpDiscoveryService.java b/back/src/main/java/com/linkwork/service/McpDiscoveryService.java new file mode 100644 index 0000000..30ccc19 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/McpDiscoveryService.java @@ -0,0 +1,201 @@ +package com.linkwork.service; + +import com.linkwork.agent.mcp.core.McpClient; +import com.linkwork.agent.mcp.core.model.McpDiscoverResponse; +import com.linkwork.agent.mcp.core.model.McpEndpoint; +import com.linkwork.model.dto.McpDiscoverResult; +import com.linkwork.model.entity.McpServerEntity; +import 
com.linkwork.model.entity.McpUserConfigEntity; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Service; +import org.springframework.util.StringUtils; +import org.springframework.web.util.UriComponentsBuilder; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +/** + * MCP 工具发现服务(基于 linkwork-mcp-starter)。 + */ +@Service +@Slf4j +public class McpDiscoveryService { + + private final McpClient mcpClient; + private final McpUserConfigService mcpUserConfigService; + + public McpDiscoveryService(McpClient mcpClient, McpUserConfigService mcpUserConfigService) { + this.mcpClient = mcpClient; + this.mcpUserConfigService = mcpUserConfigService; + } + + public McpDiscoverResult discover(McpServerEntity server) { + return discover(server, null); + } + + public McpDiscoverResult discover(McpServerEntity server, String userId) { + DiscoveryTarget target = resolveDiscoveryTarget(server, userId); + if (!StringUtils.hasText(target.getUrl())) { + return McpDiscoverResult.builder() + .success(false) + .error("No URL configured for MCP server") + .build(); + } + + try { + McpEndpoint endpoint = toEndpoint(server, target); + McpDiscoverResponse response = mcpClient.discover(endpoint); + if (response == null || !response.isSuccess()) { + return McpDiscoverResult.builder() + .success(false) + .error(response == null ? 
"discover response is null" : response.getMessage()) + .build(); + } + + List tools = new ArrayList<>(); + if (response.getTools() != null) { + response.getTools().forEach(tool -> tools.add(McpDiscoverResult.McpTool.builder() + .name(tool.getName()) + .description(tool.getDescription()) + .inputSchema(tool.getInputSchema()) + .build())); + } + + return McpDiscoverResult.builder() + .success(true) + .error(null) + .serverName(response.getServerName()) + .serverVersion(response.getServerVersion()) + .protocolVersion(response.getProtocolVersion()) + .tools(tools) + .build(); + } catch (Exception e) { + String error = e.getClass().getSimpleName() + ": " + e.getMessage(); + if (error.length() > 500) { + error = error.substring(0, 500); + } + log.error("MCP discover failed for {}: {}", server.getName(), error); + return McpDiscoverResult.builder() + .success(false) + .error(error) + .build(); + } + } + + DiscoveryTarget resolveDiscoveryTarget(McpServerEntity server, String userId) { + String serverUrl = resolveUrl(server); + Map mergedHeaders = new LinkedHashMap<>(); + if (server.getHeaders() != null) { + mergedHeaders.putAll(server.getHeaders()); + } + + if (StringUtils.hasText(userId) && server.getId() != null) { + McpUserConfigEntity userConfig = mcpUserConfigService.getByUserAndServer(userId, server.getId()); + if (userConfig != null) { + mergePreferredValues(mergedHeaders, userConfig.getHeaders()); + serverUrl = applyUrlParams(serverUrl, userConfig.getUrlParams()); + } + } + + return new DiscoveryTarget(serverUrl, mergedHeaders); + } + + private McpEndpoint toEndpoint(McpServerEntity server, DiscoveryTarget target) { + McpEndpoint endpoint = new McpEndpoint(); + endpoint.setType(StringUtils.hasText(server.getType()) ? 
server.getType() : "http"); + endpoint.setUrl(target.getUrl()); + endpoint.setHeaders(target.getHeaders()); + + Map configJson = server.getConfigJson(); + if (configJson != null) { + endpoint.setCommand(parseCommand(configJson.get("command"))); + endpoint.setEnv(parseEnv(configJson.get("env"))); + } + return endpoint; + } + + private List parseCommand(Object raw) { + if (raw instanceof List list) { + List cmd = new ArrayList<>(); + for (Object item : list) { + if (item != null && StringUtils.hasText(String.valueOf(item))) { + cmd.add(String.valueOf(item)); + } + } + return cmd; + } + if (raw instanceof String text && StringUtils.hasText(text)) { + return List.of(text); + } + return null; + } + + @SuppressWarnings("unchecked") + private Map parseEnv(Object raw) { + if (!(raw instanceof Map map)) { + return Collections.emptyMap(); + } + Map env = new LinkedHashMap<>(); + ((Map) map).forEach((k, v) -> { + if (k != null && v != null && StringUtils.hasText(String.valueOf(k))) { + env.put(String.valueOf(k), String.valueOf(v)); + } + }); + return env; + } + + private String resolveUrl(McpServerEntity server) { + if (StringUtils.hasText(server.getUrl())) { + return server.getUrl(); + } + return server.getEndpoint(); + } + + private void mergePreferredValues(Map base, Map preferred) { + if (preferred == null || preferred.isEmpty()) { + return; + } + preferred.forEach((key, value) -> { + if (StringUtils.hasText(key) && value != null) { + base.put(key, value); + } + }); + } + + private String applyUrlParams(String baseUrl, Map urlParams) { + if (!StringUtils.hasText(baseUrl) || urlParams == null || urlParams.isEmpty()) { + return baseUrl; + } + + UriComponentsBuilder builder = UriComponentsBuilder.fromUriString(baseUrl); + urlParams.forEach((key, value) -> { + if (StringUtils.hasText(key) && value != null) { + builder.replaceQueryParam(key, value); + } + }); + return builder.build().toUriString(); + } + + static final class DiscoveryTarget { + private final String url; + 
private final Map headers; + + DiscoveryTarget(String url, Map headers) { + this.url = url; + this.headers = headers != null + ? Collections.unmodifiableMap(new LinkedHashMap<>(headers)) + : Collections.emptyMap(); + } + + String getUrl() { + return url; + } + + Map getHeaders() { + return headers; + } + } +} diff --git a/back/src/main/java/com/linkwork/service/McpHealthChecker.java b/back/src/main/java/com/linkwork/service/McpHealthChecker.java new file mode 100644 index 0000000..f4ca0e8 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/McpHealthChecker.java @@ -0,0 +1,242 @@ +package com.linkwork.service; + +import com.linkwork.agent.mcp.core.McpClient; +import com.linkwork.agent.mcp.core.model.McpEndpoint; +import com.linkwork.agent.mcp.core.model.McpProbeResponse; +import com.linkwork.model.dto.McpProbeResult; +import com.linkwork.model.entity.McpServerEntity; +import lombok.extern.slf4j.Slf4j; +import org.springframework.http.HttpEntity; +import org.springframework.http.HttpMethod; +import org.springframework.http.ResponseEntity; +import org.springframework.http.client.SimpleClientHttpRequestFactory; +import org.springframework.scheduling.annotation.Scheduled; +import org.springframework.stereotype.Component; +import org.springframework.util.StringUtils; +import org.springframework.web.client.RestTemplate; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +/** + * MCP Server 健康检查组件(基于 linkwork-mcp-starter)。 + */ +@Component +@Slf4j +public class McpHealthChecker { + + private final McpServerService mcpServerService; + private final McpClient mcpClient; + private final RestTemplate healthRestTemplate; + + public McpHealthChecker(McpServerService mcpServerService, McpClient mcpClient) { + this.mcpServerService = mcpServerService; + this.mcpClient = mcpClient; + + SimpleClientHttpRequestFactory factory = new SimpleClientHttpRequestFactory(); + 
factory.setConnectTimeout(5000); + factory.setReadTimeout(5000); + this.healthRestTemplate = new RestTemplate(factory); + } + + @Scheduled(fixedRate = 30_000) + public void healthCheckAll() { + try { + List servers = mcpServerService.listByTypes(List.of("http", "sse")); + if (servers.isEmpty()) { + return; + } + for (McpServerEntity server : servers) { + checkSingle(server); + } + } catch (Exception e) { + log.error("MCP health check cycle failed unexpectedly", e); + } + } + + public McpProbeResult probeSingle(McpServerEntity server) { + String probeUrl = resolveProbeUrl(server); + if (!StringUtils.hasText(probeUrl)) { + return McpProbeResult.builder() + .status("offline") + .latencyMs(0) + .message("No probe URL configured") + .probeUrl(null) + .build(); + } + + if (hasDedicatedHealthCheckUrl(server)) { + return probeByHttp(server, probeUrl); + } + return probeByMcpClient(server, probeUrl); + } + + private void checkSingle(McpServerEntity server) { + McpProbeResult result = probeSingle(server); + + if ("online".equals(result.getStatus()) || "degraded".equals(result.getStatus())) { + int consecutiveFailures = "online".equals(result.getStatus()) + ? 0 + : (server.getConsecutiveFailures() != null ? server.getConsecutiveFailures() + 1 : 1); + mcpServerService.updateHealth(server.getId(), result.getStatus(), result.getLatencyMs(), + result.getMessage(), consecutiveFailures); + return; + } + + handleFailure(server, result.getMessage(), result.getLatencyMs()); + } + + private McpProbeResult probeByMcpClient(McpServerEntity server, String probeUrl) { + long start = System.currentTimeMillis(); + try { + McpEndpoint endpoint = toEndpoint(server, probeUrl); + McpProbeResponse response = mcpClient.probe(endpoint); + int latencyMs = response != null && response.getLatencyMs() > 0 + ? response.getLatencyMs() + : (int) Math.max(1, System.currentTimeMillis() - start); + + if (response != null && response.isSuccess()) { + String status = latencyMs < 2000 ? 
"online" : "degraded"; + return McpProbeResult.builder() + .status(status) + .latencyMs(latencyMs) + .message("MCP OK (" + latencyMs + "ms)") + .probeUrl(probeUrl) + .build(); + } + + return McpProbeResult.builder() + .status("offline") + .latencyMs(latencyMs) + .message(response == null ? "mcp probe failed" : response.getMessage()) + .probeUrl(probeUrl) + .build(); + } catch (Exception e) { + int latencyMs = (int) Math.max(1, System.currentTimeMillis() - start); + String error = e.getClass().getSimpleName() + ": " + e.getMessage(); + if (error.length() > 250) { + error = error.substring(0, 250); + } + return McpProbeResult.builder() + .status("offline") + .latencyMs(latencyMs) + .message(error) + .probeUrl(probeUrl) + .build(); + } + } + + /** + * 兼容 dedicated healthCheckUrl(普通 HTTP 健康检查)。 + */ + private McpProbeResult probeByHttp(McpServerEntity server, String probeUrl) { + long start = System.currentTimeMillis(); + try { + HttpMethod method = "sse".equalsIgnoreCase(server.getType()) ? HttpMethod.HEAD : HttpMethod.GET; + ResponseEntity resp = healthRestTemplate.exchange(probeUrl, method, HttpEntity.EMPTY, String.class); + int latency = (int) Math.max(1, System.currentTimeMillis() - start); + int statusCode = resp.getStatusCode().value(); + if (statusCode >= 200 && statusCode < 400) { + return McpProbeResult.builder() + .status(latency < 2000 ? 
"online" : "degraded") + .latencyMs(latency) + .message("HTTP " + statusCode + " (" + latency + "ms)") + .probeUrl(probeUrl) + .build(); + } + return McpProbeResult.builder() + .status("offline") + .latencyMs(latency) + .message("HTTP " + statusCode) + .probeUrl(probeUrl) + .build(); + } catch (Exception e) { + int latency = (int) Math.max(1, System.currentTimeMillis() - start); + String error = e.getClass().getSimpleName() + ": " + e.getMessage(); + if (error.length() > 250) { + error = error.substring(0, 250); + } + return McpProbeResult.builder() + .status("offline") + .latencyMs(latency) + .message(error) + .probeUrl(probeUrl) + .build(); + } + } + + private McpEndpoint toEndpoint(McpServerEntity server, String probeUrl) { + McpEndpoint endpoint = new McpEndpoint(); + endpoint.setType(StringUtils.hasText(server.getType()) ? server.getType() : "http"); + endpoint.setUrl(probeUrl); + endpoint.setHeaders(server.getHeaders()); + + Map configJson = server.getConfigJson(); + if (configJson != null) { + endpoint.setCommand(parseCommand(configJson.get("command"))); + endpoint.setEnv(parseEnv(configJson.get("env"))); + } + return endpoint; + } + + private List parseCommand(Object raw) { + if (raw instanceof List list) { + List cmd = new ArrayList<>(); + for (Object item : list) { + if (item != null && StringUtils.hasText(String.valueOf(item))) { + cmd.add(String.valueOf(item)); + } + } + return cmd; + } + if (raw instanceof String text && StringUtils.hasText(text)) { + return List.of(text); + } + return null; + } + + @SuppressWarnings("unchecked") + private Map parseEnv(Object raw) { + if (!(raw instanceof Map map)) { + return Collections.emptyMap(); + } + Map env = new LinkedHashMap<>(); + ((Map) map).forEach((k, v) -> { + if (k != null && v != null && StringUtils.hasText(String.valueOf(k))) { + env.put(String.valueOf(k), String.valueOf(v)); + } + }); + return env; + } + + private void handleFailure(McpServerEntity server, String errorMessage, int latencyMs) { + int 
currentFailures = server.getConsecutiveFailures() != null ? server.getConsecutiveFailures() : 0; + int newFailures = currentFailures + 1; + String status = newFailures >= 3 ? "offline" : "degraded"; + + if (errorMessage != null && errorMessage.length() > 250) { + errorMessage = errorMessage.substring(0, 250); + } + + mcpServerService.updateHealth(server.getId(), status, latencyMs, errorMessage, newFailures); + log.warn("MCP server {} health check failed (attempt {}): {} -> {}", + server.getName(), newFailures, errorMessage, status); + } + + private String resolveProbeUrl(McpServerEntity server) { + if (StringUtils.hasText(server.getHealthCheckUrl())) { + return server.getHealthCheckUrl(); + } + if (StringUtils.hasText(server.getUrl())) { + return server.getUrl(); + } + return server.getEndpoint(); + } + + private boolean hasDedicatedHealthCheckUrl(McpServerEntity server) { + return StringUtils.hasText(server.getHealthCheckUrl()); + } +} diff --git a/back/src/main/java/com/linkwork/service/McpServerService.java b/back/src/main/java/com/linkwork/service/McpServerService.java new file mode 100644 index 0000000..c0a1235 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/McpServerService.java @@ -0,0 +1,647 @@ +package com.linkwork.service; + +import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper; +import com.baomidou.mybatisplus.core.conditions.update.LambdaUpdateWrapper; +import com.baomidou.mybatisplus.extension.plugins.pagination.Page; +import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl; +import com.linkwork.common.ForbiddenOperationException; +import com.linkwork.common.ResourceNotFoundException; +import com.linkwork.mapper.McpServerMapper; +import com.linkwork.model.entity.McpServerEntity; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.stereotype.Service; +import 
org.springframework.util.StringUtils; + +import java.net.URI; +import java.net.URISyntaxException; +import java.time.LocalDateTime; +import java.util.*; +import java.util.stream.Collectors; + +/** + * MCP 服务 Service + */ +@Slf4j +@Service +public class McpServerService extends ServiceImpl { + + @Value("${robot.mcp-gateway.agent-base-url:}") + private String mcpGatewayAgentBaseUrl; + + @Autowired + private McpCryptoService cryptoService; + + private static final Set SUPPORTED_TYPES = Set.of("http", "sse"); + private static final Set SUPPORTED_VISIBILITIES = Set.of("public", "private"); + private static final Set SUPPORTED_STATUSES = Set.of("online", "degraded", "offline", "unknown"); + private static final Set SENSITIVE_HEADER_KEYS = Set.of( + "authorization", "proxy-authorization", + "cookie", "set-cookie", + "x-api-key", "apikey", "api-key", + "token", "access-token", "refresh-token", + "secret", "client-secret", "app-secret", + "password", "passwd" + ); + private static final Set SUPPORTED_NETWORK_ZONES = Set.of("internal", "office", "external"); + @Autowired + private AdminAccessService adminAccessService; + + /** + * 创建 MCP 服务 + */ + @SuppressWarnings("unchecked") + public McpServerEntity createMcpServer(Map request, String userId, String userName) { + McpServerEntity entity = new McpServerEntity(); + entity.setMcpNo("MCP-" + System.currentTimeMillis()); + entity.setName(normalizeRequiredText(request.get("name"), "MCP 名称不能为空")); + entity.setEndpoint(normalizeOptionalText(request.get("endpoint"))); + entity.setDescription(normalizeOptionalText(request.get("description"))); + entity.setVisibility(normalizeVisibility(request.getOrDefault("visibility", "private"))); + entity.setStatus("unknown"); + entity.setCreatorId(userId); + entity.setCreatorName(userName); + + entity.setType(normalizeType(request.getOrDefault("type", "http"))); + entity.setUrl(normalizeOptionalText(request.get("url"))); + 
entity.setHealthCheckUrl(normalizeOptionalText(request.get("healthCheckUrl"))); + entity.setVersion(normalizeOptionalText(request.get("version"))); + entity.setNetworkZone(normalizeNetworkZone(request.getOrDefault("networkZone", "external"))); + entity.setConsecutiveFailures(0); + + if (request.containsKey("headers")) { + entity.setHeaders((Map) request.get("headers")); + } + if (request.containsKey("tags")) { + entity.setTags((List) request.get("tags")); + } + if (request.containsKey("configJson")) { + entity.setConfigJson((Map) request.get("configJson")); + } + validateConnectivityFields(entity); + encryptSensitiveFields(entity); + + this.save(entity); + log.info("Created MCP server: {} (type={}) by user {}", entity.getMcpNo(), entity.getType(), userId); + decryptSensitiveFields(entity); + return entity; + } + + /** + * 更新 MCP 服务 + */ + @SuppressWarnings("unchecked") + public McpServerEntity updateMcpServer(Long id, Map request, String userId, String userName) { + McpServerEntity entity = requireOwnedMcpServer(id, userId); + + if (request.containsKey("name")) { + entity.setName(normalizeRequiredText(request.get("name"), "MCP 名称不能为空")); + } + if (request.containsKey("endpoint")) { + entity.setEndpoint(normalizeOptionalText(request.get("endpoint"))); + } + if (request.containsKey("description")) { + entity.setDescription(normalizeOptionalText(request.get("description"))); + } + if (request.containsKey("visibility")) { + entity.setVisibility(normalizeVisibility(request.get("visibility"))); + } + if (request.containsKey("status")) { + entity.setStatus(normalizeStatus(request.get("status"))); + } + if (request.containsKey("configJson")) { + entity.setConfigJson((Map) request.get("configJson")); + } + + if (request.containsKey("type")) { + entity.setType(normalizeType(request.get("type"))); + } + if (request.containsKey("url")) { + entity.setUrl(normalizeOptionalText(request.get("url"))); + } + if (request.containsKey("headers")) { + entity.setHeaders((Map) 
request.get("headers")); + } + if (request.containsKey("healthCheckUrl")) { + entity.setHealthCheckUrl(normalizeOptionalText(request.get("healthCheckUrl"))); + } + if (request.containsKey("version")) { + entity.setVersion(normalizeOptionalText(request.get("version"))); + } + if (request.containsKey("tags")) { + entity.setTags((List) request.get("tags")); + } + if (request.containsKey("networkZone")) { + entity.setNetworkZone(normalizeNetworkZone(request.get("networkZone"))); + } + validateConnectivityFields(entity); + encryptSensitiveFields(entity); + + entity.setUpdaterId(userId); + entity.setUpdaterName(userName); + + this.updateById(entity); + log.info("Updated MCP server: {} by user {}", entity.getMcpNo(), userId); + decryptSensitiveFields(entity); + return entity; + } + + /** + * 按类型查询 MCP Server 列表(内部 API / Gateway 使用,需解密) + */ + public List listByTypes(List types) { + if (types == null || types.isEmpty()) { + return Collections.emptyList(); + } + LambdaQueryWrapper wrapper = new LambdaQueryWrapper<>(); + wrapper.in(McpServerEntity::getType, types); + List result = this.list(wrapper); + result.forEach(this::decryptSensitiveFields); + return result; + } + + /** + * 更新 MCP Server 健康状态 + */ + public void updateHealth(Long id, String status, Integer latencyMs, String message, int consecutiveFailures) { + LambdaUpdateWrapper wrapper = new LambdaUpdateWrapper<>(); + wrapper.eq(McpServerEntity::getId, id) + .set(McpServerEntity::getStatus, status) + .set(McpServerEntity::getHealthLatencyMs, latencyMs) + .set(McpServerEntity::getHealthMessage, message) + .set(McpServerEntity::getConsecutiveFailures, consecutiveFailures) + .set(McpServerEntity::getLastHealthAt, LocalDateTime.now()); + this.update(wrapper); + } + + /** + * 根据 MCP ID 列表生成 SDK 兼容的 mcp.json 格式 + *

+ * 当 mcpGatewayAgentBaseUrl 配置非空时,URL 指向 Gateway 代理地址,不暴露原始 URL 和 Headers; + * 未配置时回退为直连模式(向下兼容)。 + * + * @return { "mcpServers": { "name": { "type": "http", "url": "...", "headers": {...} } }, "globalHeaders": {...} } + */ + public Map generateMcpConfig(List mcpIds) { + if (mcpIds == null || mcpIds.isEmpty()) { + return Map.of("mcpServers", Collections.emptyMap()); + } + + List servers = this.listByIds(mcpIds); + servers.forEach(this::decryptSensitiveFields); + Map mcpServers = new LinkedHashMap<>(); + + boolean useGateway = StringUtils.hasText(mcpGatewayAgentBaseUrl); + + for (McpServerEntity server : servers) { + Map serverConfig = new LinkedHashMap<>(); + serverConfig.put("type", server.getType() != null ? server.getType() : "http"); + + if (useGateway) { + String gatewayUrl = mcpGatewayAgentBaseUrl.replaceAll("/+$", "") + + "/proxy/" + server.getName() + "/mcp"; + serverConfig.put("url", gatewayUrl); + } else { + String serverUrl = server.getUrl(); + if (!StringUtils.hasText(serverUrl)) { + serverUrl = server.getEndpoint(); + } + if (StringUtils.hasText(serverUrl)) { + serverConfig.put("url", serverUrl); + } + if (server.getHeaders() != null && !server.getHeaders().isEmpty()) { + serverConfig.put("headers", server.getHeaders()); + } + } + + mcpServers.put(server.getName(), serverConfig); + } + + Map result = new LinkedHashMap<>(); + result.put("mcpServers", mcpServers); + + if (useGateway) { + Map globalHeaders = new LinkedHashMap<>(); + globalHeaders.put("X-Task-Id", "{taskid}"); + globalHeaders.put("X-User-Id", "{userid}"); + result.put("globalHeaders", globalHeaders); + } + + return result; + } + + /** + * 返回所有 MCP Server 的健康状态列表 + */ + public Map getHealthStatus(String userId) { + if (!StringUtils.hasText(userId)) { + return Map.of("items", List.of(), "checkedAt", LocalDateTime.now().toString()); + } + LambdaQueryWrapper wrapper = new LambdaQueryWrapper<>(); + if (!adminAccessService.isAdmin(userId)) { + wrapper.eq(McpServerEntity::getCreatorId, userId); 
+ } + List allServers = this.list(wrapper); + List> items = allServers.stream() + .map(this::toHealthMap) + .collect(Collectors.toList()); + + Map result = new LinkedHashMap<>(); + result.put("items", items); + result.put("checkedAt", LocalDateTime.now().toString()); + return result; + } + + /** + * 获取 MCP 服务列表(分页) + */ + public Map listMcpServers(int page, int pageSize, String status, String keyword, String userId) { + Page pageObj = new Page<>(page, pageSize); + + LambdaQueryWrapper wrapper = new LambdaQueryWrapper<>(); + applyVisibilityScope(wrapper, userId); + String normalizedStatus = normalizeStatusForQuery(status); + if (StringUtils.hasText(normalizedStatus)) { + wrapper.eq(McpServerEntity::getStatus, normalizedStatus); + } + if (StringUtils.hasText(keyword)) { + wrapper.and(w -> w.like(McpServerEntity::getName, keyword) + .or().like(McpServerEntity::getDescription, keyword)); + } + wrapper.orderByDesc(McpServerEntity::getCreatedAt); + + Page result = this.page(pageObj, wrapper); + result.getRecords().forEach(this::decryptSensitiveFields); + + List> items = result.getRecords().stream() + .map(entity -> toResponseMap(entity, userId)) + .collect(Collectors.toList()); + + Map response = new HashMap<>(); + response.put("items", items); + response.put("pagination", Map.of( + "page", result.getCurrent(), + "pageSize", result.getSize(), + "total", result.getTotal(), + "totalPages", result.getPages() + )); + return response; + } + + /** + * 获取所有可用的 MCP 服务(用于下拉选择) + */ + public List> listAllAvailable(String userId) { + LambdaQueryWrapper wrapper = new LambdaQueryWrapper<>(); + applyVisibilityScope(wrapper, userId); + wrapper.orderByDesc(McpServerEntity::getCreatedAt); + + return this.list(wrapper).stream() + .map(entity -> toSimpleMap(entity, userId)) + .collect(Collectors.toList()); + } + + /** + * 获取当前用户可访问的 MCP 服务详情 + */ + public Map getMcpServerForRead(Long id, String userId) { + McpServerEntity entity = requireReadableMcpServer(id, userId); + return 
toResponseMap(entity, userId); + } + + /** + * 获取当前用户可操作(写/管理)的 MCP 服务详情 + */ + public McpServerEntity getMcpServerForManage(Long id, String userId) { + McpServerEntity entity = requireOwnedMcpServer(id, userId); + decryptSensitiveFields(entity); + return entity; + } + + /** + * 删除 MCP 服务(仅创建者) + */ + public void deleteMcpServer(Long id, String userId) { + McpServerEntity entity = requireOwnedMcpServer(id, userId); + this.removeById(entity.getId()); + } + + private McpServerEntity requireOwnedMcpServer(Long id, String userId) { + McpServerEntity entity = this.getById(id); + if (entity == null) { + throw new ResourceNotFoundException("MCP server not found: " + id); + } + if (!canManage(entity, userId)) { + throw new ForbiddenOperationException("仅 MCP 创建者或管理员可访问或修改"); + } + return entity; + } + + private McpServerEntity requireReadableMcpServer(Long id, String userId) { + McpServerEntity entity = this.getById(id); + if (entity == null) { + throw new ResourceNotFoundException("MCP server not found: " + id); + } + if (adminAccessService.isAdmin(userId)) { + return entity; + } + boolean isOwner = StringUtils.hasText(userId) && userId.equals(entity.getCreatorId()); + boolean isPublic = "public".equals(coerceVisibility(entity.getVisibility())); + if (!isOwner && !isPublic) { + throw new ForbiddenOperationException("无权限访问该 MCP 服务"); + } + return entity; + } + + private String normalizeOptionalText(Object raw) { + if (raw == null) { + return null; + } + String value = String.valueOf(raw).trim(); + return value.isEmpty() ? 
null : value; + } + + private String normalizeRequiredText(Object raw, String message) { + String value = normalizeOptionalText(raw); + if (!StringUtils.hasText(value)) { + throw new IllegalArgumentException(message); + } + return value; + } + + private String normalizeType(Object rawType) { + String value = normalizeOptionalText(rawType); + if (!StringUtils.hasText(value)) { + return "http"; + } + String normalized = value.toLowerCase(Locale.ROOT); + if (!SUPPORTED_TYPES.contains(normalized)) { + throw new IllegalArgumentException("非法 MCP 类型: " + value + ",仅支持 http/sse"); + } + return normalized; + } + + private String normalizeVisibility(Object rawVisibility) { + String value = normalizeOptionalText(rawVisibility); + if (!StringUtils.hasText(value)) { + return "private"; + } + String normalized = value.toLowerCase(Locale.ROOT); + if (!SUPPORTED_VISIBILITIES.contains(normalized)) { + throw new IllegalArgumentException("非法 MCP 可见性: " + value + ",仅支持 public/private"); + } + return normalized; + } + + private String normalizeStatus(Object rawStatus) { + String value = normalizeOptionalText(rawStatus); + if (!StringUtils.hasText(value)) { + return "unknown"; + } + String normalized = value.toLowerCase(Locale.ROOT); + if (!SUPPORTED_STATUSES.contains(normalized)) { + throw new IllegalArgumentException("非法 MCP 状态: " + value + ",仅支持 online/degraded/offline/unknown"); + } + return normalized; + } + + private String normalizeStatusForQuery(String rawStatus) { + String value = normalizeOptionalText(rawStatus); + if (!StringUtils.hasText(value)) { + return null; + } + return normalizeStatus(value); + } + + private String coerceVisibility(String rawVisibility) { + if (!StringUtils.hasText(rawVisibility)) { + return "private"; + } + String normalized = rawVisibility.trim().toLowerCase(Locale.ROOT); + return SUPPORTED_VISIBILITIES.contains(normalized) ? 
normalized : "private"; + } + + private String coerceStatus(String rawStatus) { + if (!StringUtils.hasText(rawStatus)) { + return "unknown"; + } + String normalized = rawStatus.trim().toLowerCase(Locale.ROOT); + return SUPPORTED_STATUSES.contains(normalized) ? normalized : "unknown"; + } + + private String normalizeNetworkZone(Object rawZone) { + String value = normalizeOptionalText(rawZone); + if (!StringUtils.hasText(value)) { + return "external"; + } + String normalized = value.toLowerCase(Locale.ROOT); + if (!SUPPORTED_NETWORK_ZONES.contains(normalized)) { + throw new IllegalArgumentException("非法网段标记: " + value + ",仅支持 internal/office/external"); + } + return normalized; + } + + private void validateConnectivityFields(McpServerEntity entity) { + if (!StringUtils.hasText(entity.getUrl()) && !StringUtils.hasText(entity.getEndpoint())) { + throw new IllegalArgumentException("MCP url/endpoint 不能为空"); + } + } + + /** + * 保存前加密 URL 和 Headers(仅当加密服务启用时) + */ + private void encryptSensitiveFields(McpServerEntity entity) { + if (!cryptoService.isEnabled()) return; + if (StringUtils.hasText(entity.getUrl())) { + entity.setUrl(cryptoService.encrypt(entity.getUrl())); + } + } + + /** + * 读取后解密 URL 和 Headers(兼容明文和密文混存) + */ + private void decryptSensitiveFields(McpServerEntity entity) { + if (!cryptoService.isEnabled() || entity == null) return; + if (StringUtils.hasText(entity.getUrl())) { + entity.setUrl(cryptoService.decrypt(entity.getUrl())); + } + } + + private void applyVisibilityScope(LambdaQueryWrapper wrapper, String userId) { + if (StringUtils.hasText(userId)) { + if (adminAccessService.isAdmin(userId)) { + return; + } + wrapper.and(w -> w.apply("creator_id = {0} OR visibility = 'public'", userId)); + return; + } + wrapper.apply("visibility = 'public'"); + } + + private Map toResponseMap(McpServerEntity entity, String userId) { + boolean canManage = canManage(entity, userId); + boolean masked = shouldMaskSensitiveFields(entity, userId); + String urlForDisplay = 
firstNonBlank(entity.getUrl(), entity.getEndpoint()); + String healthUrlForDisplay = firstNonBlank(entity.getHealthCheckUrl(), entity.getUrl(), entity.getEndpoint()); + + Map map = new HashMap<>(); + map.put("id", entity.getId().toString()); + map.put("mcpNo", entity.getMcpNo()); + map.put("name", entity.getName()); + map.put("endpoint", masked ? null : entity.getEndpoint()); + map.put("description", entity.getDescription()); + map.put("visibility", coerceVisibility(entity.getVisibility())); + map.put("status", coerceStatus(entity.getStatus())); + map.put("type", entity.getType()); + map.put("url", masked ? null : entity.getUrl()); + map.put("headers", masked ? null : entity.getHeaders()); + map.put("networkZone", entity.getNetworkZone() != null ? entity.getNetworkZone() : "external"); + map.put("healthCheckUrl", masked ? null : entity.getHealthCheckUrl()); + map.put("displayUrl", maskUrlForDisplay(urlForDisplay)); + map.put("displayHeaders", maskHeadersForDisplay(entity.getHeaders())); + map.put("displayHealthCheckUrl", maskUrlForDisplay(healthUrlForDisplay)); + map.put("canManage", canManage); + map.put("masked", masked); + map.put("healthLatencyMs", entity.getHealthLatencyMs()); + map.put("healthMessage", entity.getHealthMessage()); + map.put("consecutiveFailures", entity.getConsecutiveFailures()); + map.put("version", entity.getVersion()); + map.put("tags", entity.getTags()); + map.put("lastHealthAt", formatDateTime(entity.getLastHealthAt())); + map.put("configJson", entity.getConfigJson()); + map.put("creatorId", entity.getCreatorId()); + map.put("creatorName", entity.getCreatorName()); + map.put("createdAt", formatDateTime(entity.getCreatedAt())); + map.put("updatedAt", formatDateTime(entity.getUpdatedAt())); + + return map; + } + + private boolean isOwner(McpServerEntity entity, String userId) { + return StringUtils.hasText(userId) && userId.equals(entity.getCreatorId()); + } + + private boolean canManage(McpServerEntity entity, String userId) { + return 
adminAccessService.isAdmin(userId) || isOwner(entity, userId); + } + + private boolean shouldMaskSensitiveFields(McpServerEntity entity, String userId) { + return !canManage(entity, userId) && "public".equals(coerceVisibility(entity.getVisibility())); + } + + private String firstNonBlank(String... values) { + if (values == null || values.length == 0) { + return null; + } + for (String value : values) { + if (StringUtils.hasText(value)) { + return value; + } + } + return null; + } + + private String maskUrlForDisplay(String rawUrl) { + if (!StringUtils.hasText(rawUrl)) { + return null; + } + String value = rawUrl.trim(); + try { + URI uri = new URI(value); + if (StringUtils.hasText(uri.getScheme()) && StringUtils.hasText(uri.getHost())) { + StringBuilder builder = new StringBuilder(); + builder.append(uri.getScheme()).append("://").append(uri.getHost()); + if (uri.getPort() > 0) { + builder.append(":").append(uri.getPort()); + } + builder.append("/***"); + return builder.toString(); + } + } catch (URISyntaxException ignored) { + // ignore and fallback to generic mask + } + return maskGenericValue(value); + } + + private Map maskHeadersForDisplay(Map headers) { + if (headers == null || headers.isEmpty()) { + return Collections.emptyMap(); + } + Map masked = new LinkedHashMap<>(); + for (Map.Entry entry : headers.entrySet()) { + String headerName = entry.getKey() == null ? 
"" : entry.getKey().trim(); + String lowerKey = headerName.toLowerCase(Locale.ROOT); + String headerValue = entry.getValue(); + if (isSensitiveHeaderKey(lowerKey)) { + masked.put(headerName, "***"); + } else { + masked.put(headerName, maskGenericValue(headerValue)); + } + } + return masked; + } + + private boolean isSensitiveHeaderKey(String lowerKey) { + if (!StringUtils.hasText(lowerKey)) { + return false; + } + if (SENSITIVE_HEADER_KEYS.contains(lowerKey)) { + return true; + } + return lowerKey.contains("token") + || lowerKey.contains("secret") + || lowerKey.contains("password") + || lowerKey.contains("cookie") + || lowerKey.contains("auth"); + } + + private String maskGenericValue(String raw) { + if (!StringUtils.hasText(raw)) { + return "***"; + } + String value = raw.trim(); + if (value.length() <= 8) { + return "***"; + } + return value.substring(0, 3) + "***" + value.substring(value.length() - 2); + } + + private Map toSimpleMap(McpServerEntity entity, String userId) { + boolean canManage = canManage(entity, userId); + boolean masked = shouldMaskSensitiveFields(entity, userId); + String urlForDisplay = firstNonBlank(entity.getUrl(), entity.getEndpoint()); + + Map map = new HashMap<>(); + map.put("id", entity.getId().toString()); + map.put("name", entity.getName()); + map.put("description", entity.getDescription()); + map.put("endpoint", masked ? null : entity.getEndpoint()); + map.put("url", masked ? null : entity.getUrl()); + map.put("displayUrl", maskUrlForDisplay(urlForDisplay)); + map.put("visibility", coerceVisibility(entity.getVisibility())); + map.put("status", coerceStatus(entity.getStatus())); + map.put("type", entity.getType()); + map.put("networkZone", entity.getNetworkZone() != null ? 
entity.getNetworkZone() : "external"); + map.put("canManage", canManage); + map.put("masked", masked); + return map; + } + + private Map toHealthMap(McpServerEntity entity) { + Map map = new LinkedHashMap<>(); + map.put("id", entity.getId().toString()); + map.put("name", entity.getName()); + map.put("type", entity.getType()); + map.put("status", coerceStatus(entity.getStatus())); + map.put("latencyMs", entity.getHealthLatencyMs()); + map.put("lastHealthAt", formatDateTime(entity.getLastHealthAt())); + map.put("consecutiveFailures", entity.getConsecutiveFailures()); + map.put("healthMessage", entity.getHealthMessage()); + return map; + } + + private String formatDateTime(LocalDateTime value) { + return value == null ? null : value.toString(); + } +} diff --git a/back/src/main/java/com/linkwork/service/McpUsageAggregator.java b/back/src/main/java/com/linkwork/service/McpUsageAggregator.java new file mode 100644 index 0000000..60775a8 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/McpUsageAggregator.java @@ -0,0 +1,115 @@ +package com.linkwork.service; + +import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper; +import com.linkwork.mapper.McpUsageDailyMapper; +import com.linkwork.model.entity.McpUsageDailyEntity; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.data.redis.core.StringRedisTemplate; +import org.springframework.scheduling.annotation.Scheduled; +import org.springframework.stereotype.Component; +import org.springframework.util.StringUtils; + +import java.time.LocalDate; +import java.time.format.DateTimeFormatter; +import java.util.Map; + +/** + * 每小时将 Redis 中的 MCP 使用量数据聚合到 linkwork_mcp_usage_daily 表。 + *

+ * Redis key 格式: + * - mcp:usage:user:{YYYYMMDD} field = "{userId}:{mcpName}" value = count + * - mcp:usage:bytes:{YYYYMMDD} field = "{taskId}:{mcpName}:req" value = bytes + * (bytes 按 user 维度聚合困难, 这里仅聚合 user 维度的 count) + */ +@Slf4j +@Component +@RequiredArgsConstructor +public class McpUsageAggregator { + + private final StringRedisTemplate redisTemplate; + private final McpUsageDailyMapper usageDailyMapper; + + private static final DateTimeFormatter DATE_FMT = DateTimeFormatter.ofPattern("yyyyMMdd"); + + @Scheduled(cron = "0 5 * * * *") + public void aggregateCurrentDay() { + String dateStr = LocalDate.now().format(DATE_FMT); + aggregateDate(dateStr); + } + + @Scheduled(cron = "0 10 0 * * *") + public void aggregateYesterday() { + String dateStr = LocalDate.now().minusDays(1).format(DATE_FMT); + aggregateDate(dateStr); + } + + void aggregateDate(String dateStr) { + String userKey = "mcp:usage:user:" + dateStr; + Map entries; + + try { + entries = redisTemplate.opsForHash().entries(userKey); + } catch (Exception e) { + log.warn("Failed to read Redis usage data for {}: {}", dateStr, e.getMessage()); + return; + } + + if (entries == null || entries.isEmpty()) { + return; + } + + LocalDate date = LocalDate.parse(dateStr, DATE_FMT); + int upserted = 0; + + for (Map.Entry entry : entries.entrySet()) { + String field = entry.getKey().toString(); + String value = entry.getValue().toString(); + + String[] parts = field.split(":", 2); + if (parts.length < 2) continue; + String userId = parts[0]; + String mcpName = parts[1]; + if (!StringUtils.hasText(userId) || !StringUtils.hasText(mcpName)) continue; + + int count; + try { + count = Integer.parseInt(value); + } catch (NumberFormatException e) { + continue; + } + + try { + upsertUsageDaily(date, userId, mcpName, count); + upserted++; + } catch (Exception e) { + log.warn("Failed to upsert usage for date={}, user={}, mcp={}: {}", + dateStr, userId, mcpName, e.getMessage()); + } + } + + log.info("MCP usage aggregation 
completed: date={}, records={}", dateStr, upserted); + } + + private void upsertUsageDaily(LocalDate date, String userId, String mcpName, int callCount) { + LambdaQueryWrapper wrapper = new LambdaQueryWrapper<>(); + wrapper.eq(McpUsageDailyEntity::getDate, date) + .eq(McpUsageDailyEntity::getUserId, userId) + .eq(McpUsageDailyEntity::getMcpName, mcpName); + + McpUsageDailyEntity existing = usageDailyMapper.selectOne(wrapper); + if (existing != null) { + existing.setCallCount(callCount); + usageDailyMapper.updateById(existing); + } else { + McpUsageDailyEntity entity = new McpUsageDailyEntity(); + entity.setDate(date); + entity.setUserId(userId); + entity.setMcpName(mcpName); + entity.setCallCount(callCount); + entity.setReqBytes(0L); + entity.setRespBytes(0L); + usageDailyMapper.insert(entity); + } + } +} diff --git a/back/src/main/java/com/linkwork/service/McpUserConfigService.java b/back/src/main/java/com/linkwork/service/McpUserConfigService.java new file mode 100644 index 0000000..3cf72a3 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/McpUserConfigService.java @@ -0,0 +1,79 @@ +package com.linkwork.service; + +import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper; +import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl; +import com.linkwork.mapper.McpUserConfigMapper; +import com.linkwork.model.entity.McpServerEntity; +import com.linkwork.model.entity.McpUserConfigEntity; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Service; +import org.springframework.util.StringUtils; + +import java.util.List; +import java.util.Map; + +@Slf4j +@Service +@RequiredArgsConstructor +public class McpUserConfigService extends ServiceImpl { + + private final McpServerService mcpServerService; + + public McpUserConfigEntity getByUserAndMcpName(String userId, String mcpName) { + if (!StringUtils.hasText(userId) || !StringUtils.hasText(mcpName)) { + return null; + } + + 
LambdaQueryWrapper serverWrapper = new LambdaQueryWrapper<>(); + serverWrapper.eq(McpServerEntity::getName, mcpName); + McpServerEntity server = mcpServerService.getOne(serverWrapper, false); + if (server == null) { + return null; + } + + return getByUserAndServer(userId, server.getId()); + } + + public McpUserConfigEntity getByUserAndServer(String userId, Long mcpServerId) { + LambdaQueryWrapper wrapper = new LambdaQueryWrapper<>(); + wrapper.eq(McpUserConfigEntity::getUserId, userId) + .eq(McpUserConfigEntity::getMcpServerId, mcpServerId); + return this.getOne(wrapper, false); + } + + public List listByUser(String userId) { + LambdaQueryWrapper wrapper = new LambdaQueryWrapper<>(); + wrapper.eq(McpUserConfigEntity::getUserId, userId); + return this.list(wrapper); + } + + @SuppressWarnings("unchecked") + public McpUserConfigEntity saveOrUpdate(String userId, Long mcpServerId, Map request) { + McpUserConfigEntity entity = getByUserAndServer(userId, mcpServerId); + if (entity == null) { + entity = new McpUserConfigEntity(); + entity.setUserId(userId); + entity.setMcpServerId(mcpServerId); + } + + if (request.containsKey("headers")) { + entity.setHeaders((Map) request.get("headers")); + } + if (request.containsKey("urlParams")) { + entity.setUrlParams((Map) request.get("urlParams")); + } + + this.saveOrUpdate(entity); + log.info("Saved MCP user config for userId={}, mcpServerId={}", userId, mcpServerId); + return entity; + } + + public void deleteConfig(String userId, Long mcpServerId) { + LambdaQueryWrapper wrapper = new LambdaQueryWrapper<>(); + wrapper.eq(McpUserConfigEntity::getUserId, userId) + .eq(McpUserConfigEntity::getMcpServerId, mcpServerId); + this.remove(wrapper); + log.info("Deleted MCP user config for userId={}, mcpServerId={}", userId, mcpServerId); + } +} diff --git a/back/src/main/java/com/linkwork/service/ModelRegistryService.java b/back/src/main/java/com/linkwork/service/ModelRegistryService.java new file mode 100644 index 0000000..8d5a74a --- 
/dev/null +++ b/back/src/main/java/com/linkwork/service/ModelRegistryService.java @@ -0,0 +1,83 @@ +package com.linkwork.service; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.boot.web.client.RestTemplateBuilder; +import org.springframework.http.HttpEntity; +import org.springframework.http.HttpHeaders; +import org.springframework.http.HttpMethod; +import org.springframework.http.ResponseEntity; +import org.springframework.stereotype.Service; +import org.springframework.util.StringUtils; +import org.springframework.web.client.RestTemplate; + +import java.time.Duration; +import java.util.Map; + +/** + * 模型注册表读取服务(后端代理模型网关,避免前端跨域直连) + */ +@Slf4j +@Service +@RequiredArgsConstructor +public class ModelRegistryService { + + @Value("${robot.model-registry.gateway-url:http://172.18.228.32:4000/v1/models}") + private String gatewayUrl; + + @Value("${robot.model-registry.timeout-ms:5000}") + private long timeoutMs; + + @Value("${robot.model-registry.auth-token:}") + private String authToken; + + @Value("${robot.model-registry.x-litellm-api-key:}") + private String xLitellmApiKey; + + private final RestTemplateBuilder restTemplateBuilder; + private final ObjectMapper objectMapper; + + public Map fetchModels() { + RestTemplate restTemplate = restTemplateBuilder + .setConnectTimeout(Duration.ofMillis(timeoutMs)) + .setReadTimeout(Duration.ofMillis(timeoutMs)) + .build(); + + try { + HttpHeaders headers = new HttpHeaders(); + String resolvedAuthToken = StringUtils.hasText(authToken) ? authToken.trim() : ""; + if (StringUtils.hasText(resolvedAuthToken)) { + headers.setBearerAuth(resolvedAuthToken); + } + String resolvedXKey = StringUtils.hasText(xLitellmApiKey) ? 
xLitellmApiKey.trim() : resolvedAuthToken; + if (StringUtils.hasText(resolvedXKey)) { + headers.set("x-litellm-api-key", resolvedXKey); + } + HttpEntity requestEntity = new HttpEntity<>(headers); + + ResponseEntity response = restTemplate.exchange( + gatewayUrl, + HttpMethod.GET, + requestEntity, + String.class + ); + if (!response.getStatusCode().is2xxSuccessful()) { + throw new IllegalStateException("模型网关返回非 2xx: " + response.getStatusCode().value()); + } + + String body = response.getBody(); + if (!StringUtils.hasText(body)) { + throw new IllegalStateException("模型网关返回空响应"); + } + + return objectMapper.readValue(body, new TypeReference>() { + }); + } catch (Exception e) { + log.error("读取模型列表失败: gatewayUrl={}", gatewayUrl, e); + throw new IllegalStateException("模型列表加载失败: " + e.getMessage(), e); + } + } +} diff --git a/back/src/main/java/com/linkwork/service/NfsStorageService.java b/back/src/main/java/com/linkwork/service/NfsStorageService.java new file mode 100644 index 0000000..110e483 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/NfsStorageService.java @@ -0,0 +1,163 @@ +package com.linkwork.service; + +import com.linkwork.agent.storage.core.StorageClient; +import com.linkwork.config.NfsStorageConfig; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Service; +import org.springframework.web.multipart.MultipartFile; + +import java.io.IOException; +import java.io.InputStream; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; +import java.time.LocalDate; +import java.time.format.DateTimeFormatter; +import java.util.List; +import java.util.UUID; + +/** + * NFS 文件存储服务 + * + * 通过 linkwork-storage-starter 的 StorageClient 覆盖替换原有文件系统实现, + * 对上层保持原方法签名不变。 + */ +@Slf4j +@Service +public class NfsStorageService { + + private final NfsStorageConfig config; + private final StorageClient storageClient; + + public NfsStorageService(NfsStorageConfig config, StorageClient storageClient) { + 
this.config = config; + this.storageClient = storageClient; + } + + public boolean isConfigured() { + return storageClient.supportsFileStorageOps() && storageClient.isConfigured(); + } + + /** + * 获取 objectName 对应的绝对路径 + */ + public Path getAbsolutePath(String objectName) { + if (storageClient.supportsFileStorageOps()) { + return storageClient.resolvePath(objectName); + } + return config.resolve(objectName); + } + + /** + * 上传 MultipartFile 到指定相对路径 + */ + public String uploadFileToPath(MultipartFile file, String objectName) throws IOException { + ensureConfigured(); + try (InputStream in = file.getInputStream()) { + String stored = storageClient.uploadToPath(in, objectName, file.getSize()); + log.info("文件上传成功: objectName={}, filename={}, size={}", stored, file.getOriginalFilename(), file.getSize()); + return stored; + } + } + + /** + * 上传文本内容到指定相对路径 + */ + public String uploadTextToPath(String content, String objectName) { + ensureConfigured(); + String stored = storageClient.uploadText(content, objectName); + log.info("文本上传成功: objectName={}, size={}", stored, content != null ? 
content.length() : 0); + return stored; + } + + /** + * 下载文件到临时文件 + */ + public Path downloadToTempFile(String objectName) throws IOException { + ensureConfigured(); + Path tmp = storageClient.downloadToTempFile(objectName); + log.info("下载 NFS 文件到临时文件: objectName={}", objectName); + return tmp; + } + + /** + * 复制文件 + */ + public void copyObject(String sourceObjectName, String destObjectName) { + ensureConfigured(); + storageClient.copyObject(sourceObjectName, destObjectName); + log.info("复制 NFS 文件: source={}, dest={}", sourceObjectName, destObjectName); + } + + /** + * 检查文件是否存在 + */ + public boolean doesObjectExist(String objectName) { + ensureConfigured(); + return storageClient.objectExists(objectName); + } + + /** + * 列出指定前缀(目录)下的所有文件 + */ + public List listObjects(String prefix) { + if (!isConfigured()) { + return List.of(); + } + return storageClient.listObjects(prefix); + } + + /** + * 删除文件 + */ + public void deleteFile(String objectName) { + if (!isConfigured()) { + return; + } + storageClient.deleteObject(objectName); + } + + /** + * 上传文本内容到按日期分目录的路径 + */ + public String uploadText(String content, String directory, String filename) { + String datePath = LocalDate.now().format(DateTimeFormatter.ofPattern("yyyy/MM/dd")); + String objectName = directory + "/" + datePath + "/" + filename; + return uploadTextToPath(content, objectName); + } + + /** + * 上传 MultipartFile 到按日期分目录的路径 + */ + public String uploadFile(MultipartFile file, String directory) throws IOException { + String originalFilename = file.getOriginalFilename(); + String extension = ""; + if (originalFilename != null && originalFilename.contains(".")) { + extension = originalFilename.substring(originalFilename.lastIndexOf(".")); + } + String datePath = LocalDate.now().format(DateTimeFormatter.ofPattern("yyyy/MM/dd")); + String objectName = directory + "/" + datePath + "/" + UUID.randomUUID() + extension; + return uploadFileToPath(file, objectName); + } + + /** + * 生成后端代理下载 URL + */ + public String 
buildDownloadUrl(String fileId) { + return config.getDownloadBaseUrl() + "/" + fileId + "/download"; + } + + /** + * 生成任务产出物下载 URL(按 objectName 直连后端代理)。 + */ + public String buildTaskOutputDownloadUrl(String objectName) { + String encodedObject = URLEncoder.encode(objectName, StandardCharsets.UTF_8).replace("+", "%20"); + return config.getTaskOutputBaseUrl() + "/file?object=" + encodedObject; + } + + private void ensureConfigured() { + if (!isConfigured()) { + throw new IllegalStateException("NFS 存储未配置"); + } + } +} diff --git a/back/src/main/java/com/linkwork/service/PodGroupSpecGenerator.java b/back/src/main/java/com/linkwork/service/PodGroupSpecGenerator.java new file mode 100644 index 0000000..4922add --- /dev/null +++ b/back/src/main/java/com/linkwork/service/PodGroupSpecGenerator.java @@ -0,0 +1,91 @@ +package com.linkwork.service; + +import com.linkwork.model.dto.MergedConfig; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Component; + +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.Map; + +/** + * PodGroup Spec 生成器 + */ +@Component("legacyPodGroupSpecGenerator") +@Slf4j +public class PodGroupSpecGenerator { + + /** + * 生成 PodGroup Spec(显式创建) + */ + public Map generate(MergedConfig config) { + String podGroupName = "svc-" + config.getServiceId() + "-pg"; + + log.info("Generating PodGroup spec: {}, minMember: {}", + podGroupName, config.getPodCount()); + + Map result = new LinkedHashMap<>(); + result.put("apiVersion", "scheduling.volcano.sh/v1beta1"); + result.put("kind", "PodGroup"); + + // metadata + Map metadata = new LinkedHashMap<>(); + metadata.put("name", podGroupName); + metadata.put("namespace", config.getNamespace()); + metadata.put("labels", Map.of( + "app", "ai-worker-service", + "service-id", config.getServiceId() + )); + result.put("metadata", metadata); + + // spec + Map spec = new LinkedHashMap<>(); + spec.put("minMember", config.getPodCount()); + spec.put("queue", 
config.getQueueName()); + spec.put("priorityClassName", config.getPriorityClassName()); + spec.put("minResources", buildMinResources(config)); + result.put("spec", spec); + + return result; + } + + /** + * 计算总资源需求 + */ + private Map buildMinResources(MergedConfig config) { + Map resources = new HashMap<>(); + + // 计算总 CPU(Pod 数量 × 单 Pod CPU) + double totalCpu = parseCpu(config.getAgentResources().getCpuRequest()) * config.getPodCount(); + resources.put("cpu", String.valueOf((int) Math.ceil(totalCpu))); + + // 计算总内存 + long totalMemory = parseMemory(config.getAgentResources().getMemoryRequest()) * config.getPodCount(); + resources.put("memory", formatMemory(totalMemory)); + + return resources; + } + + private double parseCpu(String cpu) { + if (cpu == null) return 1.0; + if (cpu.endsWith("m")) { + return Double.parseDouble(cpu.replace("m", "")) / 1000; + } + return Double.parseDouble(cpu); + } + + private long parseMemory(String memory) { + if (memory == null) return 2L * 1024 * 1024 * 1024; // 默认 2Gi + if (memory.endsWith("Gi")) { + return Long.parseLong(memory.replace("Gi", "")) * 1024 * 1024 * 1024; + } + if (memory.endsWith("Mi")) { + return Long.parseLong(memory.replace("Mi", "")) * 1024 * 1024; + } + return Long.parseLong(memory); + } + + private String formatMemory(long bytes) { + return (bytes / (1024 * 1024 * 1024)) + "Gi"; + } +} diff --git a/back/src/main/java/com/linkwork/service/PodSpecGenerator.java b/back/src/main/java/com/linkwork/service/PodSpecGenerator.java new file mode 100644 index 0000000..dc1c18d --- /dev/null +++ b/back/src/main/java/com/linkwork/service/PodSpecGenerator.java @@ -0,0 +1,833 @@ +package com.linkwork.service; + +import com.linkwork.config.EnvConfig.OssMountConfig; +import com.linkwork.model.dto.MergedConfig; +import com.linkwork.model.enums.PodMode; +import io.fabric8.kubernetes.api.model.*; +import io.fabric8.kubernetes.api.model.EnvVarBuilder; +import io.fabric8.kubernetes.api.model.VolumeMountBuilder; +import 
lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.stereotype.Component; +import org.springframework.util.StringUtils; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** + * Pod Spec 生成器 + * + * 双容器模式 (Sidecar): + * Agent 容器: /opt/agent/start-dual.sh → 密钥管理 → 等 Runner SSH → 启动 zzd + worker + * Runner 容器: /opt/runner/start-runner.sh → 等公钥 → 配置 authorized_keys → sshd -D + * + * 单容器模式 (Alone): + * Agent 容器: /opt/agent/start-single.sh → 本地模式启动 + * + * 共享卷: + * - shared-keys (emptyDir/Memory): Agent 写公钥 → Runner 读取 + * - workspace (emptyDir/PVC): 工作目录 + * + * ConfigMap: + * - svc-{serviceId}-agent-config: config.json → /opt/agent/config.json + * - runner-start-script: start-runner.sh → /opt/runner/start-runner.sh + */ +@Component("legacyPodSpecGenerator") +@Slf4j +public class PodSpecGenerator { + + private static final String PERMISSION_INIT_CPU_REQUEST = "100m"; + private static final String PERMISSION_INIT_CPU_LIMIT = "500m"; + private static final String PERMISSION_INIT_MEMORY_REQUEST = "128Mi"; + private static final String PERMISSION_INIT_MEMORY_LIMIT = "512Mi"; + + /** Runner 启动脚本 ConfigMap 名称(集群级共享) */ + public static final String RUNNER_SCRIPT_CONFIGMAP = "runner-start-script"; + + /** Runner 启动脚本在 ConfigMap 中的 key */ + public static final String RUNNER_SCRIPT_KEY = "start-runner.sh"; + + /** zzd 调用 /api/v1/tasks/{taskId}/git-token 的服务身份 token(可选) */ + @Value("${robot.zzd.api-server-token:}") + private String zzdApiServerToken; + + /** Claude/LiteLLM 运行时网关配置 */ + @Value("${ANTHROPIC_BASE_URL:${robot.litellm.base-url:http://localhost:4000}}") + private String anthropicBaseUrl; + + @Value("${ANTHROPIC_AUTH_TOKEN:${robot.litellm.api-key:}}") + private String anthropicAuthToken; + + @Value("${ANTHROPIC_API_KEY:${robot.litellm.api-key:}}") + private String anthropicApiKey; + + @Value("${LITELLM_BASE_URL:${robot.litellm.base-url:http://localhost:4000}}") + 
private String litellmBaseUrl; + + @Value("${LITELLM_API_KEY:${robot.litellm.api-key:}}") + private String litellmApiKey; + + @Value("${OPENAI_API_KEY:${robot.litellm.api-key:}}") + private String openaiApiKey; + + @Value("${ANTHROPIC_MODEL:${robot.litellm.default-chat-model:openrouter/anthropic/claude-sonnet-4.5}}") + private String anthropicModel; + + /** + * 生成 Pod Spec(根据模式选择) + */ + public Pod generate(MergedConfig config, int podIndex) { + return config.getPodMode() == PodMode.SIDECAR + ? generateSidecarPod(config, podIndex) + : generateAlonePod(config, podIndex); + } + + /** + * 获取 Agent ConfigMap 名称 + */ + public static String agentConfigMapName(String serviceId) { + return "svc-" + serviceId + "-agent-config"; + } + + /** + * 生成 Sidecar 模式 Pod + * - Agent 容器:root 启动, /opt/agent/start-dual.sh + * - Runner 容器:root 启动, /opt/runner/start-runner.sh (via ConfigMap) + * - 共享卷: /shared-keys (emptyDir/Memory), /workspace + */ + private Pod generateSidecarPod(MergedConfig config, int podIndex) { + String podName = "svc-" + config.getServiceId() + "-" + podIndex; + String podGroupName = "svc-" + config.getServiceId() + "-pg"; + + log.info("Generating Sidecar Pod: {}, preferredNode: {}", podName, config.getPreferredNode()); + + PodBuilder builder = new PodBuilder() + .withNewMetadata() + .withName(podName) + .withNamespace(config.getNamespace()) + .addToLabels("app", "ai-worker-service") + .addToLabels("service-id", config.getServiceId()) + .addToLabels("user-id", config.getUserId()) + .addToLabels("pod-index", String.valueOf(podIndex)) + .addToLabels("pod-mode", "sidecar") + // Volcano 注解(关联 PodGroup) + .addToAnnotations("scheduling.k8s.io/group-name", podGroupName) + .addToAnnotations("scheduling.volcano.sh/group-name", podGroupName) + .addToAnnotations("volcano.sh/queue-name", config.getQueueName()) + .endMetadata() + .withNewSpec() + .withSchedulerName("volcano") + .withPriorityClassName(config.getPriorityClassName()) + .withRestartPolicy("Never") + 
.withTerminationGracePeriodSeconds(30L) + // 私有镜像拉取凭证 + .withImagePullSecrets(buildImagePullSecrets(config)) + // 主容器 + .addToContainers(buildSidecarAgentContainer(config)) + .addToContainers(buildSidecarRunnerContainer(config)) + // Volumes + .addAllToVolumes(buildSidecarVolumes(config)) + .endSpec(); + + // 权限初始化 InitContainer:user-files / workstation 挂载根设为仅 x + Container permInit = buildPermissionInitContainer(config); + if (permInit != null) { + builder.editSpec() + .addToInitContainers(permInit) + .endSpec(); + } + + // 添加节点亲和配置(用于快速重启) + if (StringUtils.hasText(config.getPreferredNode())) { + builder.editSpec() + .withAffinity(buildPreferredNodeAffinity(config.getPreferredNode())) + .endSpec(); + } + + return builder.build(); + } + + /** + * 生成 Alone 模式 Pod + * - 单容器:/opt/agent/start-single.sh (ZZD_MODE=local) + */ + private Pod generateAlonePod(MergedConfig config, int podIndex) { + String podName = "svc-" + config.getServiceId() + "-" + podIndex; + String podGroupName = "svc-" + config.getServiceId() + "-pg"; + + log.info("Generating Alone Pod: {}, preferredNode: {}", podName, config.getPreferredNode()); + + PodBuilder builder = new PodBuilder() + .withNewMetadata() + .withName(podName) + .withNamespace(config.getNamespace()) + .addToLabels("app", "ai-worker-service") + .addToLabels("service-id", config.getServiceId()) + .addToLabels("user-id", config.getUserId()) + .addToLabels("pod-index", String.valueOf(podIndex)) + .addToLabels("pod-mode", "alone") + // Volcano 注解 + .addToAnnotations("scheduling.k8s.io/group-name", podGroupName) + .addToAnnotations("scheduling.volcano.sh/group-name", podGroupName) + .addToAnnotations("volcano.sh/queue-name", config.getQueueName()) + .endMetadata() + .withNewSpec() + .withSchedulerName("volcano") + .withPriorityClassName(config.getPriorityClassName()) + .withRestartPolicy("Never") + .withTerminationGracePeriodSeconds(30L) + // 私有镜像拉取凭证 + .withImagePullSecrets(buildImagePullSecrets(config)) + // 单容器 + 
.addToContainers(buildAloneContainer(config)) + // Volumes + .addAllToVolumes(buildAloneVolumes(config)) + .endSpec(); + + // 权限初始化 InitContainer:user-files / workstation 挂载根设为仅 x + Container permInit = buildPermissionInitContainer(config); + if (permInit != null) { + builder.editSpec() + .addToInitContainers(permInit) + .endSpec(); + } + + // 添加节点亲和配置(用于快速重启) + if (StringUtils.hasText(config.getPreferredNode())) { + builder.editSpec() + .withAffinity(buildPreferredNodeAffinity(config.getPreferredNode())) + .endSpec(); + } + + return builder.build(); + } + + // ==================== Sidecar 模式容器构建 ==================== + + /** + * Sidecar 模式 Agent 容器 + * + * 启动命令: /opt/agent/start-dual.sh + * 流程: SSH 密钥管理 → 写公钥到 /shared-keys → 等 Runner SSH → 启动 zzd + worker + * + * 必须以 root 启动 (写 /etc/zzd、起 zzd、再 sudo -u agent 拉 worker) + */ + private Container buildSidecarAgentContainer(MergedConfig config) { + List envVars = new ArrayList<>(); + + // ---- 必填环境变量 ---- + envVars.add(new EnvVar("WORKSTATION_ID", + config.getWorkstationId() != null ? 
config.getWorkstationId() : config.getServiceId(), null)); + if (config.getRedisUrl() != null) { + envVars.add(new EnvVar("REDIS_URL", config.getRedisUrl(), null)); + } + envVars.add(new EnvVar("CONFIG_FILE", "/opt/agent/config.json", null)); + envVars.add(new EnvVar("IDLE_TIMEOUT", "86400", null)); + + // ---- 双容器模式配置 ---- + // 同 Pod 内 Runner,通过 localhost 连接 + envVars.add(new EnvVar("ZZD_RUNNER_HOST", "localhost", null)); + appendRuntimeGitTokenEnv(envVars, config, true); + + // ---- 服务标识 ---- + envVars.add(new EnvVar("SERVICE_ID", config.getServiceId(), null)); + envVars.add(new EnvVar("USER_ID", config.getUserId(), null)); + // 注意: 不注入 ZZ_MODE / ZZD_MODE / SSH_PORT + // - ZZD_MODE 由 start-dual.sh 内部设置为 sandbox(见 zzd.md §运行模式) + // - SSH 端口由 zzd 配置模型内部管理(runner_port),非 Pod env var + + // ---- Downward API: Pod 名称 ---- + envVars.add(new EnvVarBuilder() + .withName("POD_NAME") + .withNewValueFrom() + .withNewFieldRef() + .withFieldPath("metadata.name") + .endFieldRef() + .endValueFrom() + .build()); + + // ---- 可选环境变量 ---- + if (config.getMainPyUrl() != null) { + envVars.add(new EnvVar("MAIN_PY_URL", config.getMainPyUrl(), null)); + } + if (config.getApiBaseUrl() != null) { + envVars.add(new EnvVar("API_BASE_URL", config.getApiBaseUrl(), null)); + envVars.add(new EnvVar("RUNTIME_GIT_API_FALLBACK_URL", config.getApiBaseUrl(), null)); + } + if (config.getWsBaseUrl() != null) { + envVars.add(new EnvVar("WS_BASE_URL", config.getWsBaseUrl(), null)); + } + if (config.getLlmGatewayUrl() != null) { + envVars.add(new EnvVar("LLM_GATEWAY_URL", config.getLlmGatewayUrl(), null)); + } + if (config.getRoleId() != null) { + envVars.add(new EnvVar("ROLE_ID", String.valueOf(config.getRoleId()), null)); + } + appendClaudeRuntimeEnv(envVars); + + String agentConfigMap = agentConfigMapName(config.getServiceId()); + + ContainerBuilder agentBuilder = new ContainerBuilder() + .withName("agent") + .withImage(config.getAgentImage()) + .withImagePullPolicy(config.getImagePullPolicy() != 
null ? config.getImagePullPolicy() : "IfNotPresent") + // ★ 启动命令: start-dual.sh(不要跑 build.sh) + .withCommand("/opt/agent/start-dual.sh") + .withEnv(envVars) + // ★ 以 root 启动(脚本里要写 /etc/zzd、起 zzd、再 sudo -u agent 拉 worker) + // NET_ADMIN: ENABLE_NETWORK_FIREWALL=true 时 start.sh 需要 iptables 配置 agent 用户网络白名单 + .withNewSecurityContext() + .withRunAsUser(0L) + .withNewCapabilities() + .addToAdd("NET_ADMIN") + .endCapabilities() + .endSecurityContext() + // ---- Volume Mounts ---- + .addToVolumeMounts(new VolumeMountBuilder() + .withName("workspace") + .withMountPath("/workspace") + .withReadOnly(false) + .build()) + .addToVolumeMounts(new VolumeMountBuilder() + .withName("shared-keys") + .withMountPath("/shared-keys") + .withReadOnly(false) + .build()) + // ★ 挂载配置文件: ConfigMap subPath → /opt/agent/config.json(只读) + .addToVolumeMounts(new VolumeMountBuilder() + .withName("agent-config") + .withMountPath("/opt/agent/config.json") + .withSubPath("config.json") + .withReadOnly(true) + .build()) + // ---- Resources ---- + .withNewResources() + .addToRequests("cpu", new Quantity(config.getAgentResources().getCpuRequest())) + .addToRequests("memory", new Quantity(config.getAgentResources().getMemoryRequest())) + .addToLimits("cpu", new Quantity(config.getAgentResources().getCpuLimit())) + .addToLimits("memory", new Quantity(config.getAgentResources().getMemoryLimit())) + .endResources(); + + // OSS 挂载 + addOssVolumeMount(agentBuilder, config); + + return agentBuilder.build(); + } + + /** + * Sidecar 模式 Runner 容器 + * + * 使用 Rocky 基础镜像(非独立 Runner 镜像) + * 启动脚本通过 ConfigMap 挂载: runner-start-script → /opt/runner/start-runner.sh + * + * 流程: 校验 sshd 环境 → 创建 momobot → 等公钥 → 写 authorized_keys → sshd -D -e + */ + private Container buildSidecarRunnerContainer(MergedConfig config) { + List envVars = new ArrayList<>(); + envVars.add(new EnvVar("SERVICE_ID", config.getServiceId(), null)); + envVars.add(new EnvVar("USER_ID", config.getUserId(), null)); + // 公钥等待超时(秒) + envVars.add(new 
EnvVar("PUBKEY_TIMEOUT", "120", null)); + // Downward API + envVars.add(new EnvVarBuilder() + .withName("POD_NAME") + .withNewValueFrom() + .withNewFieldRef() + .withFieldPath("metadata.name") + .endFieldRef() + .endValueFrom() + .build()); + if (config.getApiBaseUrl() != null) { + envVars.add(new EnvVar("API_BASE_URL", config.getApiBaseUrl(), null)); + } + + ContainerBuilder runnerBuilder = new ContainerBuilder() + .withName("runner") + .withImage(config.getRunnerImage()) // Rocky 基础镜像 + .withImagePullPolicy(config.getImagePullPolicy() != null ? config.getImagePullPolicy() : "IfNotPresent") + // ★ 启动命令: start-runner.sh(通过 ConfigMap 挂载) + .withCommand("/opt/runner/start-runner.sh") + .withEnv(envVars) + // Runner 也以 root 启动(需要 ssh-keygen -A、创建用户、启动 sshd) + .withNewSecurityContext() + .withRunAsUser(0L) + .endSecurityContext() + // ★ SSH 端口 22(sshd 默认端口,Agent 通过 localhost:22 连接) + .addToPorts(new ContainerPort(22, null, null, "ssh", "TCP")) + // ---- Volume Mounts ---- + .addToVolumeMounts(new VolumeMountBuilder() + .withName("workspace") + .withMountPath("/workspace") + .withReadOnly(false) + .build()) + .addToVolumeMounts(new VolumeMountBuilder() + .withName("shared-keys") + .withMountPath("/shared-keys") + .withReadOnly(false) + .build()) + // ★ 挂载启动脚本: ConfigMap → /opt/runner/start-runner.sh + .addToVolumeMounts(new VolumeMountBuilder() + .withName("runner-scripts") + .withMountPath("/opt/runner/start-runner.sh") + .withSubPath(RUNNER_SCRIPT_KEY) + .withReadOnly(true) + .build()) + // ---- Resources ---- + .withNewResources() + .addToRequests("cpu", new Quantity(config.getRunnerResources().getCpuRequest())) + .addToRequests("memory", new Quantity(config.getRunnerResources().getMemoryRequest())) + .addToLimits("cpu", new Quantity(config.getRunnerResources().getCpuLimit())) + .addToLimits("memory", new Quantity(config.getRunnerResources().getMemoryLimit())) + .endResources() + // ★ 就绪探针:SSH 22 端口 TCP 检查 + // Runner 需要: dnf install sshd + 等待 Agent 公钥 + 启动 
sshd,首次可能需要较长时间 + .withNewReadinessProbe() + .withNewTcpSocket() + .withNewPort(22) + .endTcpSocket() + .withInitialDelaySeconds(30) + .withPeriodSeconds(10) + .withFailureThreshold(30) + .endReadinessProbe(); + + // OSS 挂载 + addOssVolumeMount(runnerBuilder, config); + + return runnerBuilder.build(); + } + + // ==================== Alone 模式容器构建 ==================== + + /** + * Alone 模式容器 + * 启动命令: /opt/agent/start-single.sh (ZZD_MODE=local,不需要 Runner) + */ + private Container buildAloneContainer(MergedConfig config) { + List envVars = new ArrayList<>(); + + // 必填环境变量 + envVars.add(new EnvVar("WORKSTATION_ID", + config.getWorkstationId() != null ? config.getWorkstationId() : config.getServiceId(), null)); + if (config.getRedisUrl() != null) { + envVars.add(new EnvVar("REDIS_URL", config.getRedisUrl(), null)); + } + envVars.add(new EnvVar("CONFIG_FILE", "/opt/agent/config.json", null)); + envVars.add(new EnvVar("IDLE_TIMEOUT", "86400", null)); + + // 服务标识 + envVars.add(new EnvVar("SERVICE_ID", config.getServiceId(), null)); + envVars.add(new EnvVar("USER_ID", config.getUserId(), null)); + appendRuntimeGitTokenEnv(envVars, config, false); + // 注意: 不注入 ZZD_MODE,start-single.sh 内部设置为 local + // Downward API + envVars.add(new EnvVarBuilder() + .withName("POD_NAME") + .withNewValueFrom() + .withNewFieldRef() + .withFieldPath("metadata.name") + .endFieldRef() + .endValueFrom() + .build()); + // 可选环境变量 + if (config.getMainPyUrl() != null) { + envVars.add(new EnvVar("MAIN_PY_URL", config.getMainPyUrl(), null)); + } + if (config.getApiBaseUrl() != null) { + envVars.add(new EnvVar("API_BASE_URL", config.getApiBaseUrl(), null)); + envVars.add(new EnvVar("RUNTIME_GIT_API_FALLBACK_URL", config.getApiBaseUrl(), null)); + } + if (config.getWsBaseUrl() != null) { + envVars.add(new EnvVar("WS_BASE_URL", config.getWsBaseUrl(), null)); + } + if (config.getLlmGatewayUrl() != null) { + envVars.add(new EnvVar("LLM_GATEWAY_URL", config.getLlmGatewayUrl(), null)); + } + if 
(config.getRoleId() != null) { + envVars.add(new EnvVar("ROLE_ID", String.valueOf(config.getRoleId()), null)); + } + appendClaudeRuntimeEnv(envVars); + + String agentConfigMap = agentConfigMapName(config.getServiceId()); + + + ContainerBuilder aloneBuilder = new ContainerBuilder() + .withName("agent") + .withImage(config.getAgentImage()) + .withImagePullPolicy(config.getImagePullPolicy() != null ? config.getImagePullPolicy() : "IfNotPresent") + // ★ 启动命令: start-single.sh + .withCommand("/opt/agent/start-single.sh") + .withEnv(envVars) + // root 启动; NET_ADMIN: ENABLE_NETWORK_FIREWALL=true 时 start.sh 需要 iptables 配置 agent 用户网络白名单 + .withNewSecurityContext() + .withRunAsUser(0L) + .withNewCapabilities() + .addToAdd("NET_ADMIN") + .endCapabilities() + .endSecurityContext() + .addToVolumeMounts(new VolumeMountBuilder() + .withName("workspace") + .withMountPath("/workspace") + .withReadOnly(false) + .build()) + // 挂载配置文件 + .addToVolumeMounts(new VolumeMountBuilder() + .withName("agent-config") + .withMountPath("/opt/agent/config.json") + .withSubPath("config.json") + .withReadOnly(true) + .build()) + .withNewResources() + .addToRequests("cpu", new Quantity(config.getAgentResources().getCpuRequest())) + .addToRequests("memory", new Quantity(config.getAgentResources().getMemoryRequest())) + .addToLimits("cpu", new Quantity(config.getAgentResources().getCpuLimit())) + .addToLimits("memory", new Quantity(config.getAgentResources().getMemoryLimit())) + .endResources(); + + // OSS 挂载 + addOssVolumeMount(aloneBuilder, config); + + return aloneBuilder.build(); + } + + /** + * 注入运行时 Git Token 相关环境变量。 + * - sidecar 模式默认强制 git 路由到 local,避免命中 runner 丢失 token 注入。 + * - API_BASE_URL 与 ZZD_API_SERVER_URL 同时下发:脚本可兜底回退。 + */ + private void appendRuntimeGitTokenEnv(List envVars, MergedConfig config, boolean sidecarMode) { + envVars.add(new EnvVar("ZZD_ENABLE_GIT_TOKEN", "true", null)); + if (sidecarMode) { + envVars.add(new EnvVar("ZZD_FORCE_GIT_LOCAL_ROUTE", "true", null)); + } + if 
(StringUtils.hasText(config.getApiBaseUrl())) { + envVars.add(new EnvVar("ZZD_API_SERVER_URL", config.getApiBaseUrl(), null)); + } + if (StringUtils.hasText(zzdApiServerToken)) { + envVars.add(new EnvVar("ZZD_API_SERVER_TOKEN", zzdApiServerToken, null)); + } + } + + /** + * 注入 Claude/LiteLLM 运行时所需环境变量。 + * 避免 agent 子进程缺失 base url / key 后回退到 localhost:4000。 + */ + private void appendClaudeRuntimeEnv(List envVars) { + if (StringUtils.hasText(anthropicBaseUrl)) { + envVars.add(new EnvVar("ANTHROPIC_BASE_URL", anthropicBaseUrl, null)); + } + if (StringUtils.hasText(anthropicAuthToken)) { + envVars.add(new EnvVar("ANTHROPIC_AUTH_TOKEN", anthropicAuthToken, null)); + } + if (StringUtils.hasText(anthropicApiKey)) { + envVars.add(new EnvVar("ANTHROPIC_API_KEY", anthropicApiKey, null)); + } + if (StringUtils.hasText(litellmBaseUrl)) { + envVars.add(new EnvVar("LITELLM_BASE_URL", litellmBaseUrl, null)); + } + if (StringUtils.hasText(litellmApiKey)) { + envVars.add(new EnvVar("LITELLM_API_KEY", litellmApiKey, null)); + } + if (StringUtils.hasText(openaiApiKey)) { + envVars.add(new EnvVar("OPENAI_API_KEY", openaiApiKey, null)); + } + if (StringUtils.hasText(anthropicModel)) { + envVars.add(new EnvVar("ANTHROPIC_MODEL", anthropicModel, null)); + } + } + + // ==================== Volume 构建 ==================== + + /** + * Sidecar 模式 Volumes + * - workspace: 共享工作目录 (emptyDir) + * - shared-keys: Agent/Runner 传递公钥 (emptyDir/Memory, 每次 Pod 重建清空) + * - agent-config: ConfigMap, 挂载 config.json 到 /opt/agent/config.json + * - runner-scripts: ConfigMap, 挂载 start-runner.sh 到 /opt/runner/start-runner.sh + */ + private List buildSidecarVolumes(MergedConfig config) { + List volumes = new ArrayList<>(); + + // 共享工作目录 + volumes.add(new VolumeBuilder() + .withName("workspace") + .withNewEmptyDir() + .withSizeLimit(new Quantity(config.getWorkspaceSizeLimit() + "Gi")) + .endEmptyDir() + .build()); + + // ★ 共享密钥卷(emptyDir/Memory,Agent 写公钥 → Runner 读取) + volumes.add(new VolumeBuilder() + 
.withName("shared-keys") + .withNewEmptyDir() + .withMedium("Memory") + .endEmptyDir() + .build()); + + // ★ Agent 配置文件(ConfigMap → /opt/agent/config.json) + String agentConfigMap = agentConfigMapName(config.getServiceId()); + volumes.add(new VolumeBuilder() + .withName("agent-config") + .withNewConfigMap() + .withName(agentConfigMap) + .addNewItem() + .withKey("config.json") + .withPath("config.json") + .endItem() + .endConfigMap() + .build()); + + // ★ Runner 启动脚本(ConfigMap → /opt/runner/start-runner.sh, 0755 可执行) + volumes.add(new VolumeBuilder() + .withName("runner-scripts") + .withNewConfigMap() + .withName(RUNNER_SCRIPT_CONFIGMAP) + .withDefaultMode(0755) + .addNewItem() + .withKey(RUNNER_SCRIPT_KEY) + .withPath(RUNNER_SCRIPT_KEY) + .endItem() + .endConfigMap() + .build()); + + // OSS 挂载 + addOssVolume(volumes, config); + + return volumes; + } + + /** + * Alone 模式 Volumes + */ + private List buildAloneVolumes(MergedConfig config) { + List volumes = new ArrayList<>(); + + // 工作目录 + volumes.add(new VolumeBuilder() + .withName("workspace") + .withNewEmptyDir() + .withSizeLimit(new Quantity(config.getWorkspaceSizeLimit() + "Gi")) + .endEmptyDir() + .build()); + + // Agent 配置文件(ConfigMap) + String agentConfigMap = agentConfigMapName(config.getServiceId()); + volumes.add(new VolumeBuilder() + .withName("agent-config") + .withNewConfigMap() + .withName(agentConfigMap) + .addNewItem() + .withKey("config.json") + .withPath("config.json") + .endItem() + .endConfigMap() + .build()); + + // OSS 挂载 + addOssVolume(volumes, config); + + return volumes; + } + + // ==================== 权限初始化 ==================== + + /** + * 构建权限初始化 InitContainer + * + * 容器启动前设置 NFS 挂载目录权限: + * - user-files、workstation → 0711(owner rwx / others x-only), + * 非 root 用户无法 ls 挂载根,只能通过 zzd 创建的 symlink 访问被授权的子目录。 + * - oss-data(产出物 /data/oss/robot)→ 0777,agent worker 启动时需要写入探测, + * NFS 上新建目录默认 755,非 root 用户无法写入,需要显式放开。 + */ + private Container buildPermissionInitContainer(MergedConfig config) { + 
OssMountConfig ossMount = config.getOssMount(); + if (ossMount == null || !ossMount.isEnabled()) { + return null; + } + + String cmd = String.format("chmod 0777 %s && chmod 0711 %s %s", + ossMount.getMountPath(), + ossMount.getUserFilesMountPath(), + ossMount.getWorkstationMountPath()); + + return new ContainerBuilder() + .withName("permission-init") + .withImage(config.getAgentImage()) + .withCommand("sh", "-c", cmd) + .withNewSecurityContext() + .withRunAsUser(0L) + .endSecurityContext() + .withNewResources() + .addToRequests("cpu", new Quantity(PERMISSION_INIT_CPU_REQUEST)) + .addToRequests("memory", new Quantity(PERMISSION_INIT_MEMORY_REQUEST)) + .addToLimits("cpu", new Quantity(PERMISSION_INIT_CPU_LIMIT)) + .addToLimits("memory", new Quantity(PERMISSION_INIT_MEMORY_LIMIT)) + .endResources() + .addToVolumeMounts(new VolumeMountBuilder() + .withName("oss-data") + .withMountPath(ossMount.getMountPath()) + .withMountPropagation("HostToContainer") + .build()) + .addToVolumeMounts(new VolumeMountBuilder() + .withName("oss-user-files") + .withMountPath(ossMount.getUserFilesMountPath()) + .withMountPropagation("HostToContainer") + .build()) + .addToVolumeMounts(new VolumeMountBuilder() + .withName("oss-workstation") + .withMountPath(ossMount.getWorkstationMountPath()) + .withMountPropagation("HostToContainer") + .build()) + .build(); + } + + // ==================== OSS 挂载 ==================== + + /** + * 添加 OSS hostPath Volumes (3 个挂载点) + * + * 节点上 ossfs 分三路挂载: + * oss://robot-agent-files/system/ → {hostPath}/system + * oss://robot-agent-files/user-files/ → {hostPath}/user-files + * oss://robot-agent-files/workstation/ → {hostPath}/workstation + * + * 容器级: + * 1. oss-data: {hostPath}/system/{wsId} → /data/oss/robot (产出物,读写) + * 2. oss-user-files: {hostPath}/user-files → /mnt/user-files (个人空间,读写) + * 3. 
oss-workstation: {hostPath}/workstation/{wsId} → /mnt/workstation (岗位空间,读写) + */ + private void addOssVolume(List volumes, MergedConfig config) { + OssMountConfig ossMount = config.getOssMount(); + if (ossMount == null || !ossMount.isEnabled()) { + return; + } + + String workstationId = config.getWorkstationId() != null + ? config.getWorkstationId() : config.getServiceId(); + String hostBase = ossMount.getHostPath(); + + // 1. oss-data: {hostPath}/system/{workstationId} → /data/oss/robot + String systemHostPath = hostBase + "/system/" + workstationId; + volumes.add(new VolumeBuilder() + .withName("oss-data") + .withNewHostPath() + .withPath(systemHostPath) + .withType("DirectoryOrCreate") + .endHostPath() + .build()); + log.info("Added OSS volume [oss-data]: hostPath={}, mountPath={}", systemHostPath, ossMount.getMountPath()); + + // 2. oss-user-files: {hostPath}/user-files → /mnt/user-files + String userFilesHostPath = hostBase + "/user-files"; + volumes.add(new VolumeBuilder() + .withName("oss-user-files") + .withNewHostPath() + .withPath(userFilesHostPath) + .withType("DirectoryOrCreate") + .endHostPath() + .build()); + log.info("Added OSS volume [oss-user-files]: hostPath={}, mountPath={}", + userFilesHostPath, ossMount.getUserFilesMountPath()); + + // 3. 
oss-workstation: {hostPath}/workstation/{workstationId} → /mnt/workstation + String workstationHostPath = hostBase + "/workstation/" + workstationId; + volumes.add(new VolumeBuilder() + .withName("oss-workstation") + .withNewHostPath() + .withPath(workstationHostPath) + .withType("DirectoryOrCreate") + .endHostPath() + .build()); + log.info("Added OSS volume [oss-workstation]: hostPath={}, mountPath={}", + workstationHostPath, ossMount.getWorkstationMountPath()); + } + + /** + * 为容器添加 OSS VolumeMounts (全部读写,权限由 zzd 在任务生命周期内通过 symlink 控制) + * + * oss-data: /data/oss/robot → 产出物挂载根 (读写) + * oss-user-files: /mnt/user-files → 个人空间 (读写) + * oss-workstation: /mnt/workstation → 岗位空间 (读写) + */ + private void addOssVolumeMount(ContainerBuilder builder, MergedConfig config) { + OssMountConfig ossMount = config.getOssMount(); + if (ossMount == null || !ossMount.isEnabled()) { + return; + } + + // 1. oss-data (产出物,读写) + builder.addToVolumeMounts(new VolumeMountBuilder() + .withName("oss-data") + .withMountPath(ossMount.getMountPath()) + .withReadOnly(ossMount.isReadOnly()) + .withMountPropagation("HostToContainer") + .build()); + + // 2. oss-user-files (个人空间,读写) + builder.addToVolumeMounts(new VolumeMountBuilder() + .withName("oss-user-files") + .withMountPath(ossMount.getUserFilesMountPath()) + .withReadOnly(false) + .withMountPropagation("HostToContainer") + .build()); + + // 3. 
oss-workstation (岗位空间,读写) + builder.addToVolumeMounts(new VolumeMountBuilder() + .withName("oss-workstation") + .withMountPath(ossMount.getWorkstationMountPath()) + .withReadOnly(false) + .withMountPropagation("HostToContainer") + .build()); + } + + // ==================== 镜像拉取凭证 ==================== + + private List buildImagePullSecrets(MergedConfig config) { + if (StringUtils.hasText(config.getImagePullSecret()) && requiresImagePullSecret(config)) { + return Collections.singletonList( + new LocalObjectReference(config.getImagePullSecret()) + ); + } + return Collections.emptyList(); + } + + private boolean requiresImagePullSecret(MergedConfig config) { + if (hasRegistryHost(config.getAgentImage())) { + return true; + } + return hasRegistryHost(config.getRunnerImage()); + } + + private boolean hasRegistryHost(String image) { + if (!StringUtils.hasText(image)) { + return false; + } + String value = image.trim(); + int slash = value.indexOf('/'); + if (slash <= 0) { + return false; + } + String first = value.substring(0, slash); + return first.contains(".") || first.contains(":") || "localhost".equals(first); + } + + // ==================== 节点亲和配置 ==================== + + private Affinity buildPreferredNodeAffinity(String preferredNode) { + return new AffinityBuilder() + .withNewNodeAffinity() + .addNewPreferredDuringSchedulingIgnoredDuringExecution() + .withWeight(100) + .withNewPreference() + .addNewMatchExpression() + .withKey("kubernetes.io/hostname") + .withOperator("In") + .withValues(preferredNode) + .endMatchExpression() + .endPreference() + .endPreferredDuringSchedulingIgnoredDuringExecution() + .endNodeAffinity() + .build(); + } +} diff --git a/back/src/main/java/com/linkwork/service/QueueSelector.java b/back/src/main/java/com/linkwork/service/QueueSelector.java new file mode 100644 index 0000000..a8e8d28 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/QueueSelector.java @@ -0,0 +1,36 @@ +package com.linkwork.service; + +import 
lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.stereotype.Service; + +import java.util.Optional; + +/** + * 队列选择器 + */ +@Service +@Slf4j +public class QueueSelector { + + @Value("${schedule.queue.prefix:ai-worker}") + private String queuePrefix; + + /** + * 根据优先级选择调度队列 + * @param priority 优先级 0-100 + * @param gpuRequired 是否需要 GPU(K8s 不支持 GPU) + */ + public String selectQueue(Integer priority, Boolean gpuRequired) { + int p = Optional.ofNullable(priority).orElse(50); + + if (Boolean.TRUE.equals(gpuRequired)) { + log.warn("GPU tasks should use Compose mode, K8s does not support GPU"); + } + + if (p >= 90) return queuePrefix + "-critical"; + if (p >= 70) return queuePrefix + "-high"; + if (p >= 30) return queuePrefix + "-normal"; + return queuePrefix + "-low"; + } +} diff --git a/back/src/main/java/com/linkwork/service/ReportExportService.java b/back/src/main/java/com/linkwork/service/ReportExportService.java new file mode 100644 index 0000000..d6e1cf6 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/ReportExportService.java @@ -0,0 +1,475 @@ +package com.linkwork.service; + +import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper; +import com.baomidou.mybatisplus.extension.plugins.pagination.Page; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.linkwork.config.DispatchConfig; +import com.linkwork.mapper.RoleMapper; +import com.linkwork.mapper.TaskMapper; +import com.linkwork.model.dto.ReportExportFieldOption; +import com.linkwork.model.dto.ReportExportFieldResponse; +import com.linkwork.model.dto.ReportExportRequest; +import com.linkwork.model.entity.RoleEntity; +import com.linkwork.model.entity.Task; +import com.linkwork.model.enums.TaskStatus; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.data.redis.connection.stream.MapRecord; +import 
org.springframework.data.redis.connection.stream.StreamOffset; +import org.springframework.data.redis.core.StringRedisTemplate; +import org.springframework.stereotype.Service; +import org.springframework.util.StringUtils; + +import java.io.BufferedWriter; +import java.io.IOException; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.lang.reflect.Field; +import java.lang.reflect.Modifier; +import java.math.BigDecimal; +import java.nio.charset.StandardCharsets; +import java.time.Instant; +import java.time.LocalDateTime; +import java.time.OffsetDateTime; +import java.time.ZoneId; +import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeParseException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +/** + * 运维报表导出服务 + */ +@Slf4j +@Service +@RequiredArgsConstructor +public class ReportExportService { + + private static final int PAGE_SIZE = 500; + private static final String TYPE_TASK = "task"; + private static final String TYPE_ROLE = "role"; + private static final String FIELD_EVENT_STREAM = "eventStream"; + private static final DateTimeFormatter DATE_TIME_OUT = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"); + + private static final List LOCAL_DATE_TIME_FORMATTERS = List.of( + DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss"), + DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm"), + DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"), + DateTimeFormatter.ISO_LOCAL_DATE_TIME + ); + + private static final Map TASK_FIELD_LABELS = Map.ofEntries( + Map.entry("id", "主键ID"), + Map.entry("taskNo", "任务编号"), + Map.entry("roleId", "岗位ID"), + Map.entry("roleName", "岗位名称"), + Map.entry("prompt", "任务目标"), + Map.entry("status", "任务状态"), + Map.entry("image", "镜像"), + Map.entry("selectedModel", "模型"), + Map.entry("assemblyId", "装配ID"), + 
Map.entry("configJson", "任务配置"), + Map.entry("source", "任务来源"), + Map.entry("cronJobId", "定时任务ID"), + Map.entry("creatorId", "创建人ID"), + Map.entry("creatorName", "创建人"), + Map.entry("creatorIp", "创建IP"), + Map.entry("updaterId", "更新人ID"), + Map.entry("updaterName", "更新人"), + Map.entry("tokensUsed", "总Tokens"), + Map.entry("inputTokens", "输入Tokens"), + Map.entry("outputTokens", "输出Tokens"), + Map.entry("requestCount", "请求次数"), + Map.entry("tokenLimit", "Token上限"), + Map.entry("usagePercent", "使用率"), + Map.entry("durationMs", "执行耗时(ms)"), + Map.entry("reportJson", "任务报告"), + Map.entry("createdAt", "创建时间"), + Map.entry("updatedAt", "更新时间"), + Map.entry("isDeleted", "逻辑删除标记") + ); + + private static final Map ROLE_FIELD_LABELS = Map.ofEntries( + Map.entry("id", "主键ID"), + Map.entry("roleNo", "岗位编号"), + Map.entry("name", "岗位名称"), + Map.entry("description", "岗位描述"), + Map.entry("category", "岗位分类"), + Map.entry("icon", "图标"), + Map.entry("image", "镜像"), + Map.entry("prompt", "岗位Prompt"), + Map.entry("status", "岗位状态"), + Map.entry("configJson", "岗位配置"), + Map.entry("isPublic", "是否公开"), + Map.entry("maxEmployees", "最大员工数"), + Map.entry("creatorId", "创建人ID"), + Map.entry("creatorName", "创建人"), + Map.entry("updaterId", "更新人ID"), + Map.entry("updaterName", "更新人"), + Map.entry("createdAt", "创建时间"), + Map.entry("updatedAt", "更新时间"), + Map.entry("isDeleted", "逻辑删除标记") + ); + + private final TaskMapper taskMapper; + private final RoleMapper roleMapper; + private final StringRedisTemplate redisTemplate; + private final DispatchConfig dispatchConfig; + private final ObjectMapper objectMapper; + private final Map, Map> fieldCache = new ConcurrentHashMap<>(); + + public ReportExportFieldResponse listFields(String type) { + String normalizedType = normalizeType(type); + List fields = TYPE_TASK.equals(normalizedType) + ? 
buildFieldOptions(Task.class, TASK_FIELD_LABELS) + : buildFieldOptions(RoleEntity.class, ROLE_FIELD_LABELS); + return new ReportExportFieldResponse(normalizedType, fields); + } + + public void exportCsv(ReportExportRequest request, OutputStream outputStream) throws IOException { + String normalizedType = normalizeType(request.getType()); + LocalDateTime startTime = parseDateTime(request.getStartTime(), "开始时间"); + LocalDateTime endTime = parseDateTime(request.getEndTime(), "结束时间"); + if (endTime.isBefore(startTime)) { + throw new IllegalArgumentException("结束时间必须大于或等于开始时间"); + } + + try (BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(outputStream, StandardCharsets.UTF_8))) { + // Excel 友好:UTF-8 BOM + writer.write('\uFEFF'); + + if (TYPE_TASK.equals(normalizedType)) { + exportTaskCsv(request, startTime, endTime, writer); + } else { + exportRoleCsv(request, startTime, endTime, writer); + } + writer.flush(); + } + } + + private void exportTaskCsv(ReportExportRequest request, + LocalDateTime startTime, + LocalDateTime endTime, + BufferedWriter writer) throws IOException { + List taskFields = buildFieldOptions(Task.class, TASK_FIELD_LABELS); + List selectedFields = resolveSelectedFields(request.getFields(), taskFields); + boolean includeEventStream = Boolean.TRUE.equals(request.getIncludeEventStream()); + if (includeEventStream) { + selectedFields = new ArrayList<>(selectedFields); + selectedFields.add(new ReportExportFieldOption(FIELD_EVENT_STREAM, "event_stream", "消息流", "String")); + } + + writeCsvRow(writer, selectedFields.stream().map(ReportExportFieldOption::getLabel).toList()); + + LambdaQueryWrapper wrapper = new LambdaQueryWrapper() + .ge(Task::getCreatedAt, startTime) + .le(Task::getCreatedAt, endTime) + .orderByAsc(Task::getCreatedAt) + .orderByAsc(Task::getId); + + long pageNo = 1L; + while (true) { + Page page = taskMapper.selectPage(new Page<>(pageNo, PAGE_SIZE), wrapper); + for (Task task : page.getRecords()) { + List row = new 
ArrayList<>(selectedFields.size()); + for (ReportExportFieldOption option : selectedFields) { + if (FIELD_EVENT_STREAM.equals(option.getField())) { + row.add(loadTaskEventStream(task)); + } else { + row.add(toCellValue(readField(task, option.getField()))); + } + } + writeCsvRow(writer, row); + } + if (pageNo >= page.getPages()) { + break; + } + pageNo++; + } + } + + private void exportRoleCsv(ReportExportRequest request, + LocalDateTime startTime, + LocalDateTime endTime, + BufferedWriter writer) throws IOException { + List roleFields = buildFieldOptions(RoleEntity.class, ROLE_FIELD_LABELS); + List selectedFields = resolveSelectedFields(request.getFields(), roleFields); + + writeCsvRow(writer, selectedFields.stream().map(ReportExportFieldOption::getLabel).toList()); + + LambdaQueryWrapper wrapper = new LambdaQueryWrapper() + .ge(RoleEntity::getCreatedAt, startTime) + .le(RoleEntity::getCreatedAt, endTime) + .orderByAsc(RoleEntity::getCreatedAt) + .orderByAsc(RoleEntity::getId); + + long pageNo = 1L; + while (true) { + Page page = roleMapper.selectPage(new Page<>(pageNo, PAGE_SIZE), wrapper); + for (RoleEntity role : page.getRecords()) { + List row = new ArrayList<>(selectedFields.size()); + for (ReportExportFieldOption option : selectedFields) { + row.add(toCellValue(readField(role, option.getField()))); + } + writeCsvRow(writer, row); + } + if (pageNo >= page.getPages()) { + break; + } + pageNo++; + } + } + + private List buildFieldOptions(Class entityClass, Map labelMap) { + List fields = new ArrayList<>(); + for (Field field : entityClass.getDeclaredFields()) { + if (Modifier.isStatic(field.getModifiers())) { + continue; + } + String fieldName = field.getName(); + fields.add(new ReportExportFieldOption( + fieldName, + toSnakeCase(fieldName), + labelMap.getOrDefault(fieldName, fieldName), + field.getType().getSimpleName() + )); + } + return fields; + } + + private List resolveSelectedFields(List fields, List allowedFields) { + if (fields == null || 
fields.isEmpty()) { + return new ArrayList<>(allowedFields); + } + + Map allowed = new LinkedHashMap<>(); + for (ReportExportFieldOption option : allowedFields) { + allowed.put(option.getField(), option); + } + + LinkedHashSet deduplicated = new LinkedHashSet<>(); + for (String field : fields) { + if (!StringUtils.hasText(field)) { + continue; + } + String normalizedField = field.trim(); + if (!allowed.containsKey(normalizedField)) { + throw new IllegalArgumentException("存在不支持的导出字段: " + normalizedField); + } + deduplicated.add(normalizedField); + } + + if (deduplicated.isEmpty()) { + throw new IllegalArgumentException("导出字段不能为空"); + } + + List selected = new ArrayList<>(deduplicated.size()); + for (String field : deduplicated) { + selected.add(allowed.get(field)); + } + return selected; + } + + private String loadTaskEventStream(Task task) { + if (!StringUtils.hasText(task.getTaskNo())) { + return ""; + } + + List streamKeys = Arrays.asList( + dispatchConfig.getLogStreamKey(task.getRoleId(), task.getTaskNo()), + "stream:task:" + task.getTaskNo() + ":events", + "stream:task:" + task.getTaskNo() + ); + + for (String streamKey : streamKeys) { + List> records; + try { + records = redisTemplate.opsForStream().read(StreamOffset.fromStart(streamKey)); + } catch (Exception e) { + log.warn("读取消息流失败: taskNo={}, key={}, error={}", task.getTaskNo(), streamKey, e.getMessage()); + continue; + } + if (records == null || records.isEmpty()) { + continue; + } + + List lines = new ArrayList<>(records.size()); + for (MapRecord record : records) { + lines.add(serializeStreamRecord(streamKey, record)); + } + return String.join("\n", lines); + } + + return ""; + } + + private String serializeStreamRecord(String streamKey, MapRecord record) { + Map valueMap = new LinkedHashMap<>(); + valueMap.put("streamKey", streamKey); + valueMap.put("recordId", record.getId().getValue()); + + record.getValue().forEach((k, v) -> valueMap.put(String.valueOf(k), normalizeStreamValue(v))); + + try { + 
return objectMapper.writeValueAsString(valueMap); + } catch (JsonProcessingException e) { + throw new IllegalStateException("序列化消息流记录失败: streamKey=" + streamKey, e); + } + } + + private Object normalizeStreamValue(Object value) { + if (!(value instanceof String text)) { + return value; + } + String trimmed = text.trim(); + if (!StringUtils.hasText(trimmed)) { + return text; + } + if ((trimmed.startsWith("{") && trimmed.endsWith("}")) || (trimmed.startsWith("[") && trimmed.endsWith("]"))) { + try { + return objectMapper.readValue(trimmed, Object.class); + } catch (Exception e) { + log.debug("消息流 JSON 解析失败,保留原文: {}", e.getMessage()); + } + } + return text; + } + + private Object readField(Object entity, String fieldName) { + Map classFields = fieldCache.computeIfAbsent(entity.getClass(), this::buildFieldMap); + Field field = classFields.get(fieldName); + if (field == null) { + throw new IllegalArgumentException("未知字段: " + fieldName); + } + try { + return field.get(entity); + } catch (IllegalAccessException e) { + throw new IllegalStateException("读取字段失败: " + fieldName, e); + } + } + + private Map buildFieldMap(Class clazz) { + Map map = new LinkedHashMap<>(); + for (Field field : clazz.getDeclaredFields()) { + if (Modifier.isStatic(field.getModifiers())) { + continue; + } + field.setAccessible(true); + map.put(field.getName(), field); + } + return map; + } + + private String toCellValue(Object value) { + if (value == null) { + return ""; + } + if (value instanceof TaskStatus taskStatus) { + return taskStatus.getCode(); + } + if (value instanceof LocalDateTime localDateTime) { + return localDateTime.format(DATE_TIME_OUT); + } + if (value instanceof Instant instant) { + return DATE_TIME_OUT.format(instant.atZone(ZoneId.systemDefault()).toLocalDateTime()); + } + if (value instanceof OffsetDateTime offsetDateTime) { + return DATE_TIME_OUT.format(offsetDateTime.atZoneSameInstant(ZoneId.systemDefault()).toLocalDateTime()); + } + if (value instanceof CharSequence || value 
instanceof Number || value instanceof Boolean || value instanceof BigDecimal) { + return String.valueOf(value); + } + if (value.getClass().isEnum()) { + return ((Enum) value).name(); + } + try { + return objectMapper.writeValueAsString(value); + } catch (JsonProcessingException e) { + throw new IllegalStateException("序列化字段值失败: " + value.getClass().getSimpleName(), e); + } + } + + private void writeCsvRow(BufferedWriter writer, List columns) throws IOException { + for (int i = 0; i < columns.size(); i++) { + writer.write(escapeCsv(columns.get(i))); + if (i < columns.size() - 1) { + writer.write(','); + } + } + writer.write('\n'); + } + + private String escapeCsv(String value) { + if (value == null) { + return ""; + } + boolean needQuotes = value.contains(",") || value.contains("\"") || value.contains("\n") || value.contains("\r"); + if (!needQuotes) { + return value; + } + return "\"" + value.replace("\"", "\"\"") + "\""; + } + + private LocalDateTime parseDateTime(String value, String fieldName) { + String text = value == null ? 
"" : value.trim(); + if (!StringUtils.hasText(text)) { + throw new IllegalArgumentException(fieldName + "不能为空"); + } + for (DateTimeFormatter formatter : LOCAL_DATE_TIME_FORMATTERS) { + try { + return LocalDateTime.parse(text, formatter); + } catch (DateTimeParseException ignored) { + // 尝试下一个格式 + } + } + + try { + return OffsetDateTime.parse(text).atZoneSameInstant(ZoneId.systemDefault()).toLocalDateTime(); + } catch (DateTimeParseException ignored) { + // 尝试下一个格式 + } + + try { + return Instant.parse(text).atZone(ZoneId.systemDefault()).toLocalDateTime(); + } catch (DateTimeParseException ignored) { + throw new IllegalArgumentException(fieldName + "格式不正确: " + text); + } + } + + private String normalizeType(String type) { + if (!StringUtils.hasText(type)) { + throw new IllegalArgumentException("导出类型不能为空"); + } + String normalized = type.trim().toLowerCase(Locale.ROOT); + if (!TYPE_TASK.equals(normalized) && !TYPE_ROLE.equals(normalized)) { + throw new IllegalArgumentException("不支持的导出类型: " + type); + } + return normalized; + } + + private String toSnakeCase(String camelCase) { + if (!StringUtils.hasText(camelCase)) { + return camelCase; + } + StringBuilder builder = new StringBuilder(camelCase.length() + 8); + for (int i = 0; i < camelCase.length(); i++) { + char c = camelCase.charAt(i); + if (Character.isUpperCase(c)) { + if (i > 0) { + builder.append('_'); + } + builder.append(Character.toLowerCase(c)); + } else { + builder.append(c); + } + } + return builder.toString(); + } +} diff --git a/back/src/main/java/com/linkwork/service/RoleHotRankService.java b/back/src/main/java/com/linkwork/service/RoleHotRankService.java new file mode 100644 index 0000000..efa3438 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/RoleHotRankService.java @@ -0,0 +1,83 @@ +package com.linkwork.service; + +import lombok.extern.slf4j.Slf4j; +import org.springframework.lang.Nullable; +import org.springframework.data.redis.core.StringRedisTemplate; +import 
org.springframework.stereotype.Service; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +/** + * 热门岗位排行榜(Redis Sorted Set) + */ +@Slf4j +@Service +public class RoleHotRankService { + + public static final String HOT_ROLE_RANK_KEY = "rank:roles:favorite"; + + private final StringRedisTemplate redisTemplate; + + public RoleHotRankService(@Nullable StringRedisTemplate redisTemplate) { + this.redisTemplate = redisTemplate; + } + + public void incrementFavoriteScore(Long roleId, double delta) { + if (roleId == null || delta == 0D || redisTemplate == null) { + return; + } + String member = String.valueOf(roleId); + try { + Double score = redisTemplate.opsForZSet().incrementScore(HOT_ROLE_RANK_KEY, member, delta); + if (score != null && score <= 0D) { + redisTemplate.opsForZSet().remove(HOT_ROLE_RANK_KEY, member); + } + } catch (Exception e) { + log.warn("update role hot rank failed, roleId={}, delta={}, reason={}", roleId, delta, e.getMessage()); + } + } + + public List listTopRoleIds(int limit) { + if (limit <= 0 || redisTemplate == null) { + return List.of(); + } + try { + var members = redisTemplate.opsForZSet().reverseRange(HOT_ROLE_RANK_KEY, 0, limit - 1L); + if (members == null || members.isEmpty()) { + return List.of(); + } + List result = new ArrayList<>(members.size()); + for (String member : members) { + try { + result.add(Long.parseLong(member)); + } catch (NumberFormatException ignored) { + // 非法 member 直接跳过,避免影响榜单读取 + } + } + return result; + } catch (Exception e) { + log.warn("query role hot rank failed, reason={}", e.getMessage()); + return List.of(); + } + } + + public void rebuildRank(Map favoriteCountMap) { + if (redisTemplate == null || favoriteCountMap == null) { + return; + } + try { + redisTemplate.delete(HOT_ROLE_RANK_KEY); + favoriteCountMap.forEach((roleId, favoriteCount) -> { + if (roleId == null || favoriteCount == null || favoriteCount <= 0L) { + return; + } + redisTemplate.opsForZSet().add(HOT_ROLE_RANK_KEY, 
String.valueOf(roleId), favoriteCount.doubleValue()); + }); + } catch (Exception e) { + log.warn("rebuild role hot rank failed, reason={}", e.getMessage()); + } + } +} + diff --git a/back/src/main/java/com/linkwork/service/RoleService.java b/back/src/main/java/com/linkwork/service/RoleService.java new file mode 100644 index 0000000..fe85409 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/RoleService.java @@ -0,0 +1,491 @@ +package com.linkwork.service; + +import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper; +import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper; +import com.baomidou.mybatisplus.extension.plugins.pagination.Page; +import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl; +import com.linkwork.common.ForbiddenOperationException; +import com.linkwork.common.ResourceNotFoundException; +import com.linkwork.mapper.RoleMapper; +import com.linkwork.mapper.UserFavoriteRoleMapper; +import com.linkwork.model.entity.RoleEntity; +import com.linkwork.model.entity.UserFavoriteRoleEntity; +import com.linkwork.model.enums.DeployMode; +import com.linkwork.model.enums.PodMode; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Service; +import org.springframework.transaction.annotation.Transactional; +import org.springframework.util.StringUtils; + +import java.util.Comparator; +import java.util.LinkedHashSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + +@Service +public class RoleService extends ServiceImpl { + + @Autowired + private UserFavoriteRoleMapper userFavoriteRoleMapper; + @Autowired + private CronJobService cronJobService; + @Autowired + private RoleHotRankService roleHotRankService; + @Autowired + private AdminAccessService adminAccessService; + + private static final Set SUPPORTED_RUNTIME_MODES = Set.of(PodMode.SIDECAR.name(), 
PodMode.ALONE.name()); + private static final Set SUPPORTED_DEPLOY_MODES = Set.of(DeployMode.K8S.name(), DeployMode.COMPOSE.name()); + private static final Set SUPPORTED_ROLE_STATUSES = Set.of("active", "maintenance", "disabled"); + + /** + * 分页查询岗位 + * @param page 页码 + * @param pageSize 每页条数 + * @param query 搜索关键词 + * @param category 分类 + * @param scope 范围: all, mine, favorite + * @param status 状态筛选: active, disabled, maintenance + * @param currentUserId 当前用户ID + * @return 分页结果 + */ + public Page listRoles(int page, int pageSize, String query, String category, String scope, String status, String currentUserId) { + Page pageParam = new Page<>(page, pageSize); + LambdaQueryWrapper wrapper = buildRoleQueryWrapper(query, category, scope, status, currentUserId); + + // 3. 排序 (默认按时间倒序) + wrapper.orderByDesc(RoleEntity::getCreatedAt); + + return this.page(pageParam, wrapper); + } + + /** + * 查询热门岗位(按收藏数倒序,最多 limit 条) + */ + public List listHotRoles(int limit, String currentUserId) { + int safeLimit = Math.max(1, limit); + LambdaQueryWrapper wrapper = buildRoleQueryWrapper(null, null, "all", null, currentUserId); + List roles = this.list(wrapper).stream() + .filter(Objects::nonNull) + .filter(role -> role.getId() != null) + .collect(Collectors.toList()); + if (roles.isEmpty()) { + return List.of(); + } + + Map roleMap = roles.stream() + .collect(Collectors.toMap(RoleEntity::getId, role -> role)); + + int rankFetchSize = Math.max(20, safeLimit * 5); + List rankedRoleIds = roleHotRankService.listTopRoleIds(rankFetchSize); + if (rankedRoleIds.isEmpty()) { + roleHotRankService.rebuildRank(queryAllFavoriteCountMap()); + rankedRoleIds = roleHotRankService.listTopRoleIds(rankFetchSize); + } + + LinkedHashSet orderedRoleIds = new LinkedHashSet<>(); + for (Long roleId : rankedRoleIds) { + if (roleMap.containsKey(roleId)) { + orderedRoleIds.add(roleId); + } + if (orderedRoleIds.size() >= safeLimit) { + break; + } + } + + if (orderedRoleIds.size() < safeLimit) { + Map 
favoriteCountMap = queryFavoriteCountMap( + roles.stream().map(RoleEntity::getId).collect(Collectors.toList()) + ); + List fallbackSorted = roles.stream() + .sorted( + Comparator.comparingLong((RoleEntity role) -> favoriteCountMap.getOrDefault(role.getId(), 0L)) + .reversed() + .thenComparing(RoleEntity::getCreatedAt, Comparator.nullsLast(Comparator.reverseOrder())) + ) + .collect(Collectors.toList()); + for (RoleEntity role : fallbackSorted) { + orderedRoleIds.add(role.getId()); + if (orderedRoleIds.size() >= safeLimit) { + break; + } + } + } + + List orderedRoles = new java.util.ArrayList<>(orderedRoleIds.size()); + for (Long roleId : orderedRoleIds) { + RoleEntity role = roleMap.get(roleId); + if (role != null) { + orderedRoles.add(role); + } + } + return orderedRoles; + } + + /** + * 批量查询岗位收藏数 + */ + public Map queryFavoriteCountMap(List roleIds) { + if (roleIds == null || roleIds.isEmpty()) { + return Map.of(); + } + + QueryWrapper wrapper = new QueryWrapper<>(); + wrapper.select("role_id AS roleId", "COUNT(1) AS favoriteCount") + .in("role_id", roleIds) + .groupBy("role_id"); + + List> rows = userFavoriteRoleMapper.selectMaps(wrapper); + Map favoriteCountMap = new HashMap<>(); + for (Map row : rows) { + Object roleIdValue = row.getOrDefault("roleId", row.get("role_id")); + if (!(roleIdValue instanceof Number roleIdNumber)) { + continue; + } + Long roleId = roleIdNumber.longValue(); + Object favoriteCountValue = row.getOrDefault("favoriteCount", row.get("favorite_count")); + long favoriteCount = favoriteCountValue instanceof Number countNumber ? 
countNumber.longValue() : 0L; + favoriteCountMap.put(roleId, favoriteCount); + } + return favoriteCountMap; + } + + public Map queryAllFavoriteCountMap() { + QueryWrapper wrapper = new QueryWrapper<>(); + wrapper.select("role_id AS roleId", "COUNT(1) AS favoriteCount") + .groupBy("role_id"); + + List> rows = userFavoriteRoleMapper.selectMaps(wrapper); + Map favoriteCountMap = new HashMap<>(); + for (Map row : rows) { + Object roleIdValue = row.getOrDefault("roleId", row.get("role_id")); + if (!(roleIdValue instanceof Number roleIdNumber)) { + continue; + } + Long roleId = roleIdNumber.longValue(); + Object favoriteCountValue = row.getOrDefault("favoriteCount", row.get("favorite_count")); + long favoriteCount = favoriteCountValue instanceof Number countNumber ? countNumber.longValue() : 0L; + favoriteCountMap.put(roleId, favoriteCount); + } + return favoriteCountMap; + } + + private LambdaQueryWrapper buildRoleQueryWrapper( + String query, + String category, + String scope, + String status, + String currentUserId + ) { + LambdaQueryWrapper wrapper = new LambdaQueryWrapper<>(); + boolean adminUser = adminAccessService.isAdmin(currentUserId); + + // 1. 基础筛选 + if (StringUtils.hasText(category)) { + wrapper.eq(RoleEntity::getCategory, category); + } + if (StringUtils.hasText(query)) { + wrapper.and(w -> w.like(RoleEntity::getName, query) + .or().like(RoleEntity::getDescription, query)); + } + if (StringUtils.hasText(status)) { + wrapper.eq(RoleEntity::getStatus, normalizeRoleStatus(status)); + } + + // 2. 
Scope 筛选 + if ("mine".equalsIgnoreCase(scope)) { + wrapper.eq(RoleEntity::getCreatorId, currentUserId); + } else if ("favorite".equalsIgnoreCase(scope)) { + if (!StringUtils.hasText(currentUserId)) { + wrapper.eq(RoleEntity::getId, -1L); + return wrapper; + } + // 查询用户收藏的 Role IDs + List favoriteRoleIds = userFavoriteRoleMapper.selectList( + new LambdaQueryWrapper() + .eq(UserFavoriteRoleEntity::getUserId, currentUserId) + ).stream().map(UserFavoriteRoleEntity::getRoleId).collect(Collectors.toList()); + + if (favoriteRoleIds.isEmpty()) { + wrapper.eq(RoleEntity::getId, -1L); + return wrapper; + } + wrapper.in(RoleEntity::getId, favoriteRoleIds); + } else { + // all: 管理员可见全部;非管理员可见公开的 OR 自己的 + if (adminUser) { + return wrapper; + } + if (StringUtils.hasText(currentUserId)) { + wrapper.and(w -> w.eq(RoleEntity::getIsPublic, true) + .or().eq(RoleEntity::getCreatorId, currentUserId)); + } else { + wrapper.eq(RoleEntity::getIsPublic, true); + } + } + + return wrapper; + } + + /** + * 创建岗位 + */ + public RoleEntity createRole(RoleEntity role, String userId, String username) { + assertRoleNameUnique(role.getName(), null); + role.setRoleNo("ROLE-" + System.currentTimeMillis()); + role.setCreatorId(userId); + role.setCreatorName(username); + role.setStatus(normalizeRoleStatusOrDefault(role.getStatus())); + role.setPrompt(normalizeRequiredRolePrompt(role.getPrompt())); + // 默认不公开 + if (role.getIsPublic() == null) { + role.setIsPublic(false); + } + if (role.getMaxEmployees() == null) { + role.setMaxEmployees(1); + } + + role.setConfigJson(normalizeRoleConfig(role.getConfigJson())); + + this.save(role); + return role; + } + + /** + * 更新岗位 + */ + public RoleEntity updateRole(Long id, RoleEntity updateInfo, String userId) { + RoleEntity exists = getRoleForWrite(id, userId); + String previousStatus = exists.getStatus(); + + // 更新字段 + if (StringUtils.hasText(updateInfo.getName())) { + String normalizedName = updateInfo.getName().trim(); + if 
(!normalizedName.equals(exists.getName())) { + assertRoleNameUnique(normalizedName, exists.getId()); + } + exists.setName(normalizedName); + } + if (StringUtils.hasText(updateInfo.getDescription())) exists.setDescription(updateInfo.getDescription()); + if (StringUtils.hasText(updateInfo.getCategory())) exists.setCategory(updateInfo.getCategory()); + if (StringUtils.hasText(updateInfo.getIcon())) exists.setIcon(updateInfo.getIcon()); + if (StringUtils.hasText(updateInfo.getImage())) exists.setImage(updateInfo.getImage()); + if (updateInfo.getPrompt() != null) exists.setPrompt(normalizeRequiredRolePrompt(updateInfo.getPrompt())); + if (updateInfo.getConfigJson() != null) exists.setConfigJson(normalizeRoleConfig(updateInfo.getConfigJson())); + if (updateInfo.getIsPublic() != null) exists.setIsPublic(updateInfo.getIsPublic()); + if (updateInfo.getMaxEmployees() != null) exists.setMaxEmployees(updateInfo.getMaxEmployees()); + if (StringUtils.hasText(updateInfo.getStatus())) { + exists.setStatus(normalizeRoleStatus(updateInfo.getStatus())); + } + exists.setPrompt(normalizeRequiredRolePrompt(exists.getPrompt())); + + exists.setUpdaterId(userId); + // exists.setUpdaterName(username); // Need username passed in + + this.updateById(exists); + + boolean previousActive = "active".equalsIgnoreCase(previousStatus); + boolean currentInactive = !StringUtils.hasText(exists.getStatus()) + || !"active".equalsIgnoreCase(exists.getStatus()); + boolean transitionedToInactive = previousActive && currentInactive; + if (transitionedToInactive) { + cronJobService.disableByRoleId(exists.getId(), "岗位状态变更为 " + exists.getStatus()); + } + return exists; + } + + private RoleEntity.RoleConfig normalizeRoleConfig(RoleEntity.RoleConfig config) { + if (config == null) { + return null; + } + + String deployMode = normalizeDeployMode(config.getDeployMode()); + if (!StringUtils.hasText(deployMode)) { + deployMode = DeployMode.K8S.name(); + } + config.setDeployMode(deployMode); + + String runtimeMode = 
normalizeRuntimeMode(config.getRuntimeMode()); + if (DeployMode.COMPOSE.name().equals(deployMode)) { + runtimeMode = PodMode.ALONE.name(); + } + config.setRuntimeMode(runtimeMode); + + String runnerImage = normalizeText(config.getRunnerImage()); + if (DeployMode.COMPOSE.name().equals(deployMode) || PodMode.ALONE.name().equals(runtimeMode)) { + runnerImage = null; + } + config.setRunnerImage(runnerImage); + config.setMemoryEnabled(Boolean.TRUE.equals(config.getMemoryEnabled())); + + return config; + } + + private String normalizeDeployMode(String deployModeRaw) { + String deployMode = normalizeText(deployModeRaw); + if (!StringUtils.hasText(deployMode)) { + return null; + } + + String normalizedMode = deployMode.toUpperCase(); + if (!SUPPORTED_DEPLOY_MODES.contains(normalizedMode)) { + throw new IllegalArgumentException("非法部署模式: " + deployMode + ",仅支持 K8S/COMPOSE"); + } + return normalizedMode; + } + + private String normalizeRuntimeMode(String runtimeModeRaw) { + String runtimeMode = normalizeText(runtimeModeRaw); + if (!StringUtils.hasText(runtimeMode)) { + return null; + } + + String normalizedMode = runtimeMode.toUpperCase(); + if (!SUPPORTED_RUNTIME_MODES.contains(normalizedMode)) { + throw new IllegalArgumentException("非法运行模式: " + runtimeMode + ",仅支持 SIDECAR/ALONE"); + } + return normalizedMode; + } + + private String normalizeText(String raw) { + if (!StringUtils.hasText(raw)) { + return null; + } + return raw.trim(); + } + + private String normalizeRoleStatusOrDefault(String rawStatus) { + String normalized = normalizeRoleStatus(rawStatus); + return StringUtils.hasText(normalized) ? 
normalized : "active"; + } + + private String normalizeRequiredRolePrompt(String rawPrompt) { + String normalized = normalizeText(rawPrompt); + if (!StringUtils.hasText(normalized)) { + return "你是一个智能岗位助手,请根据用户需求高质量完成任务。"; + } + return normalized; + } + + private String normalizeRoleStatus(String rawStatus) { + if (!StringUtils.hasText(rawStatus)) { + return null; + } + String normalized = rawStatus.trim().toLowerCase(); + if (!SUPPORTED_ROLE_STATUSES.contains(normalized)) { + throw new IllegalArgumentException("非法岗位状态: " + rawStatus + ",仅支持 active/maintenance/disabled"); + } + return normalized; + } + + private void assertRoleNameUnique(String roleName, Long excludeRoleId) { + if (!StringUtils.hasText(roleName)) { + throw new IllegalArgumentException("岗位名称不能为空"); + } + + LambdaQueryWrapper wrapper = new LambdaQueryWrapper() + .eq(RoleEntity::getName, roleName.trim()); + if (excludeRoleId != null) { + wrapper.ne(RoleEntity::getId, excludeRoleId); + } + + long count = this.count(wrapper); + if (count > 0) { + throw new IllegalArgumentException("岗位名称已存在: " + roleName.trim()); + } + } + + /** + * 删除岗位 + */ + public void deleteRole(Long id, String userId) { + RoleEntity exists = getRoleForWrite(id, userId); + + cronJobService.disableByRoleId(exists.getId(), "岗位已删除"); + this.removeById(id); + } + + /** + * 检查是否收藏 + */ + public boolean isFavorite(Long roleId, String userId) { + if (roleId == null || !StringUtils.hasText(userId)) { + return false; + } + return userFavoriteRoleMapper.exists( + new LambdaQueryWrapper() + .eq(UserFavoriteRoleEntity::getRoleId, roleId) + .eq(UserFavoriteRoleEntity::getUserId, userId) + ); + } + + public RoleEntity getRoleForRead(Long id, String userId) { + RoleEntity role = this.getById(id); + if (role == null) { + throw new ResourceNotFoundException("岗位不存在: " + id); + } + if (!canRead(role, userId)) { + throw new ForbiddenOperationException("无权限访问该岗位"); + } + return role; + } + + public RoleEntity getRoleForWrite(Long id, String userId) { + 
RoleEntity role = this.getById(id); + if (role == null) { + throw new ResourceNotFoundException("岗位不存在: " + id); + } + if (!canWrite(role, userId)) { + throw new ForbiddenOperationException("仅岗位创建者或管理员可修改"); + } + return role; + } + + private boolean canRead(RoleEntity role, String userId) { + return adminAccessService.isAdmin(userId) + || Boolean.TRUE.equals(role.getIsPublic()) + || isOwner(role, userId); + } + + private boolean canWrite(RoleEntity role, String userId) { + return adminAccessService.isAdmin(userId) || isOwner(role, userId); + } + + private boolean isOwner(RoleEntity role, String userId) { + return StringUtils.hasText(userId) && userId.equals(role.getCreatorId()); + } + + /** + * 切换收藏状态 + */ + @Transactional + public boolean toggleFavorite(Long roleId, String userId, boolean isFavorite) { + RoleEntity role = getRoleForRead(roleId, userId); + if (isFavorite) { + // 添加收藏 + if (!isFavorite(roleId, userId)) { + UserFavoriteRoleEntity entity = new UserFavoriteRoleEntity(); + entity.setUserId(userId); + entity.setRoleId(roleId); + userFavoriteRoleMapper.insert(entity); + roleHotRankService.incrementFavoriteScore(role.getId(), 1D); + } + return true; + } else { + // 取消收藏 + int deletedCount = userFavoriteRoleMapper.delete( + new LambdaQueryWrapper() + .eq(UserFavoriteRoleEntity::getRoleId, roleId) + .eq(UserFavoriteRoleEntity::getUserId, userId) + ); + if (deletedCount > 0) { + roleHotRankService.incrementFavoriteScore(role.getId(), -1D); + } + return false; + } + } +} diff --git a/back/src/main/java/com/linkwork/service/RuntimeModeService.java b/back/src/main/java/com/linkwork/service/RuntimeModeService.java new file mode 100644 index 0000000..743ec52 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/RuntimeModeService.java @@ -0,0 +1,187 @@ +package com.linkwork.service; + +import com.linkwork.config.EnvConfig; +import com.linkwork.model.entity.BuildRecordEntity; +import com.linkwork.model.entity.RoleEntity; +import 
com.linkwork.model.enums.PodMode; +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.RequiredArgsConstructor; +import org.springframework.stereotype.Service; +import org.springframework.util.StringUtils; + +import java.util.Map; + +/** + * 运行模式解析服务 + */ +@Service +@RequiredArgsConstructor +public class RuntimeModeService { + + private final BuildRecordService buildRecordService; + private final RoleService roleService; + private final EnvConfig envConfig; + + /** + * 按岗位 ID 解析运行模式视图。 + */ + public RuntimeSnapshot resolveForRole(Long roleId) { + if (roleId == null) { + return resolveDefault(); + } + + RoleEntity role = roleService.getById(roleId); + if (role == null) { + return resolveDefault(); + } + return resolveForRole(role); + } + + /** + * 按岗位实体解析运行模式视图。 + */ + public RuntimeSnapshot resolveForRole(RoleEntity role) { + if (role == null || role.getId() == null) { + throw new IllegalArgumentException("岗位信息不完整,无法解析运行模式"); + } + + String context = "roleId=" + role.getId(); + RoleEntity.RoleConfig roleConfig = role.getConfigJson(); + + PodMode configPodMode = null; + String configRunnerImage = null; + if (roleConfig != null) { + configPodMode = parsePodMode(roleConfig.getRuntimeMode(), context + ", configJson.runtimeMode"); + configRunnerImage = asText(roleConfig.getRunnerImage()); + } + + BuildRecordEntity latestRecord = buildRecordService.getLatestByRoleId(role.getId()); + PodMode snapshotPodMode = null; + String snapshotRunnerImage = null; + if (latestRecord != null && latestRecord.getConfigSnapshot() != null) { + Map snapshot = latestRecord.getConfigSnapshot(); + Object runtimeRaw = snapshot.get("runtimeMode") != null + ? 
snapshot.get("runtimeMode") + : snapshot.get("podMode"); + snapshotPodMode = parsePodMode(runtimeRaw, context + ", buildNo=" + latestRecord.getBuildNo()); + + snapshotRunnerImage = asText(snapshot.get("runnerImage")); + if (!StringUtils.hasText(snapshotRunnerImage)) { + snapshotRunnerImage = asText(snapshot.get("runnerBaseImage")); + } + } + + PodMode podMode = configPodMode != null ? configPodMode : snapshotPodMode; + if (podMode == null) { + podMode = resolveDefaultPodMode(context); + } + + String runnerImage = StringUtils.hasText(configRunnerImage) ? configRunnerImage : snapshotRunnerImage; + if (podMode == PodMode.SIDECAR && !StringUtils.hasText(runnerImage)) { + runnerImage = resolveDefaultRunnerImage(role); + } + + return resolveFromPodMode(podMode, runnerImage, context); + } + + /** + * 使用系统默认配置解析运行模式(用于历史数据兼容)。 + */ + public RuntimeSnapshot resolveDefault() { + PodMode podMode = resolveDefaultPodMode("default"); + String runnerImage = podMode == PodMode.SIDECAR ? resolveDefaultRunnerImage() : null; + return resolveFromPodMode(podMode, runnerImage, "default"); + } + + /** + * 按原始模式值解析运行模式视图。 + */ + public RuntimeSnapshot resolveFromRaw(String runtimeMode, String runnerImage, String context) { + PodMode podMode = parsePodMode(runtimeMode, context); + if (podMode == null) { + throw new IllegalArgumentException("运行模式缺失: " + context); + } + return resolveFromPodMode(podMode, runnerImage, context); + } + + /** + * 按 PodMode 解析运行模式视图。 + */ + public RuntimeSnapshot resolveFromPodMode(PodMode podMode, String runnerImage, String context) { + if (podMode == null) { + throw new IllegalArgumentException("运行模式缺失: " + context); + } + + if (podMode == PodMode.SIDECAR) { + if (!StringUtils.hasText(runnerImage)) { + throw new IllegalStateException("SIDECAR 模式缺少 runnerImage: " + context); + } + return new RuntimeSnapshot(PodMode.SIDECAR.name(), "ssh", runnerImage); + } + + return new RuntimeSnapshot(PodMode.ALONE.name(), "local", null); + } + + private PodMode 
resolveDefaultPodMode(String context) { + if (envConfig.getPodModeRules() == null || envConfig.getPodModeRules().getDefaultMode() == null) { + throw new IllegalStateException("未配置默认运行模式: " + context); + } + return envConfig.getPodModeRules().getDefaultMode(); + } + + private String resolveDefaultRunnerImage(RoleEntity role) { + String defaultRunnerImage = resolveDefaultRunnerImage(); + if (StringUtils.hasText(defaultRunnerImage)) { + return defaultRunnerImage; + } + if (role != null && StringUtils.hasText(role.getImage())) { + return role.getImage(); + } + return null; + } + + private String resolveDefaultRunnerImage() { + if (envConfig.getImages() != null && StringUtils.hasText(envConfig.getImages().getRunner())) { + return envConfig.getImages().getRunner(); + } + return null; + } + + private PodMode parsePodMode(Object rawMode, String context) { + if (rawMode == null) { + return null; + } + + if (rawMode instanceof PodMode podMode) { + return podMode; + } + + String mode = String.valueOf(rawMode).trim(); + if (!StringUtils.hasText(mode)) { + return null; + } + + try { + return PodMode.valueOf(mode.toUpperCase()); + } catch (IllegalArgumentException ex) { + throw new IllegalArgumentException("非法运行模式: " + mode + ", " + context); + } + } + + private String asText(Object value) { + if (value == null) { + return null; + } + String text = String.valueOf(value).trim(); + return StringUtils.hasText(text) ? 
text : null; + } + + @Data + @AllArgsConstructor + public static class RuntimeSnapshot { + private String runtimeMode; + private String zzMode; + private String runnerImage; + } +} diff --git a/back/src/main/java/com/linkwork/service/ScheduleEventPublisher.java b/back/src/main/java/com/linkwork/service/ScheduleEventPublisher.java new file mode 100644 index 0000000..d94cd98 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/ScheduleEventPublisher.java @@ -0,0 +1,377 @@ +package com.linkwork.service; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.linkwork.model.dto.ScheduleEvent; +import com.linkwork.model.dto.event.*; +import com.linkwork.model.enums.ContainerEventType; +import com.linkwork.model.dto.event.BuildEventData; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.data.redis.connection.stream.RecordId; +import org.springframework.data.redis.connection.stream.StreamRecords; +import org.springframework.data.redis.core.StringRedisTemplate; +import org.springframework.stereotype.Service; + +import java.time.Instant; +import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; +import java.util.Map; + +/** + * 调度事件发布服务 + * 遵循 data-format.md 规范,发布容器日志事件到 Redis Stream + * + * Stream Key: stream:task:{taskId} + */ +@Service +@Slf4j +@RequiredArgsConstructor +public class ScheduleEventPublisher { + + private final StringRedisTemplate redisTemplate; + private final ObjectMapper objectMapper; + + /** + * ISO 8601 时间戳格式化器,精确到微秒 + */ + private static final DateTimeFormatter TIMESTAMP_FORMATTER = + DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSSSSXXX") + .withZone(ZoneOffset.UTC); + + /** + * Stream Key 前缀 + */ + private static final String STREAM_KEY_PREFIX = "stream:task:"; + + // ==================== 通用发布方法 ==================== + + /** + * 发布容器日志事件到 Redis Stream + * + * @param taskId 任务 ID(对应 serviceId) + * 
@param eventType 事件类型枚举 + * @param data 业务负载 + */ + public void publish(String taskId, ContainerEventType eventType, Object data) { + ScheduleEvent event = ScheduleEvent.builder() + .eventType(eventType.name()) + .timestamp(formatTimestamp(Instant.now())) + .taskId(taskId) + .data(data) + .build(); + + try { + String eventJson = objectMapper.writeValueAsString(event); + String streamKey = STREAM_KEY_PREFIX + taskId; + + // 写入 Stream(持久化,支持回溯) + // XADD stream:task:{taskId} * payload '{...}' + RecordId recordId = redisTemplate.opsForStream().add( + StreamRecords.string(Map.of("payload", eventJson)).withStreamKey(streamKey) + ); + + log.debug("Published event to {}: type={}, recordId={}", + streamKey, eventType, recordId); + + } catch (JsonProcessingException e) { + log.error("Failed to serialize event for task {}: {}", taskId, e.getMessage()); + } catch (Exception e) { + log.error("Failed to publish event for task {}: {}", taskId, e.getMessage()); + } + } + + // ==================== 便捷方法:调度阶段 ==================== + + /** + * 发布 POD_SCHEDULING 事件 + */ + public void publishPodScheduling(String taskId, String podName, int podIndex, String queueName) { + PodSchedulingData data = PodSchedulingData.builder() + .podName(podName) + .podIndex(podIndex) + .queueName(queueName) + .build(); + publish(taskId, ContainerEventType.POD_SCHEDULING, data); + } + + /** + * 发布 POD_SCHEDULED 事件 + */ + public void publishPodScheduled(String taskId, String podName, int podIndex, String nodeName) { + PodSchedulingData data = PodSchedulingData.builder() + .podName(podName) + .podIndex(podIndex) + .nodeName(nodeName) + .build(); + publish(taskId, ContainerEventType.POD_SCHEDULED, data); + } + + // ==================== 便捷方法:镜像阶段 ==================== + + /** + * 发布 IMAGE_PULLING 事件 + */ + public void publishImagePulling(String taskId, String podName, String containerName, String image) { + ImageEventData data = ImageEventData.builder() + .podName(podName) + .containerName(containerName) + 
.image(image) + .build(); + publish(taskId, ContainerEventType.IMAGE_PULLING, data); + } + + /** + * 发布 IMAGE_PULLED 事件 + */ + public void publishImagePulled(String taskId, String podName, String containerName, String image) { + ImageEventData data = ImageEventData.builder() + .podName(podName) + .containerName(containerName) + .image(image) + .build(); + publish(taskId, ContainerEventType.IMAGE_PULLED, data); + } + + // ==================== 便捷方法:容器阶段 ==================== + + /** + * 发布 CONTAINER_STARTING 事件 + */ + public void publishContainerStarting(String taskId, String podName, String containerName) { + ContainerEventData data = ContainerEventData.builder() + .podName(podName) + .containerName(containerName) + .build(); + publish(taskId, ContainerEventType.CONTAINER_STARTING, data); + } + + /** + * 发布 CONTAINER_READY 事件 + */ + public void publishContainerReady(String taskId, String podName, String containerName) { + ContainerEventData data = ContainerEventData.builder() + .podName(podName) + .containerName(containerName) + .ready(true) + .build(); + publish(taskId, ContainerEventType.CONTAINER_READY, data); + } + + // ==================== 便捷方法:环境阶段 ==================== + + /** + * 发布 ENV_SETUP 事件 + */ + public void publishEnvSetup(String taskId, String podName, String step, String message) { + EnvSetupData data = EnvSetupData.builder() + .podName(podName) + .step(step) + .message(message) + .build(); + publish(taskId, ContainerEventType.ENV_SETUP, data); + } + + /** + * 发布 WORKSPACE_INIT 事件 + */ + public void publishWorkspaceInit(String taskId, String podName, String step, String message) { + EnvSetupData data = EnvSetupData.builder() + .podName(podName) + .step(step) + .message(message) + .build(); + publish(taskId, ContainerEventType.WORKSPACE_INIT, data); + } + + // ==================== 便捷方法:完成阶段 ==================== + + /** + * 发布 INIT_COMPLETE 事件 + */ + public void publishInitComplete(String taskId, String podName, String podGroupName, + int readyPods, int 
totalPods) { + InitCompleteData data = InitCompleteData.builder() + .podName(podName) + .podGroupName(podGroupName) + .readyPods(readyPods) + .totalPods(totalPods) + .build(); + publish(taskId, ContainerEventType.INIT_COMPLETE, data); + } + + /** + * 发布 INIT_FAILED 事件 + */ + public void publishInitFailed(String taskId, String podName, String podGroupName, + String errorCode, String errorMessage) { + InitCompleteData data = InitCompleteData.builder() + .podName(podName) + .podGroupName(podGroupName) + .errorCode(errorCode) + .errorMessage(errorMessage) + .build(); + publish(taskId, ContainerEventType.INIT_FAILED, data); + } + + // ==================== 便捷方法:生命周期事件 ==================== + + /** + * 发布 SESSION_START 事件 + */ + public void publishSessionStart(String taskId, String podGroupName, int podCount, + String queueName, String nodeName) { + SessionEventData data = SessionEventData.builder() + .podGroupName(podGroupName) + .podCount(podCount) + .queueName(queueName) + .nodeName(nodeName) + .build(); + publish(taskId, ContainerEventType.SESSION_START, data); + } + + /** + * 发布 SESSION_END 事件 + */ + public void publishSessionEnd(String taskId, String podGroupName, int podCount, boolean graceful) { + SessionEventData data = SessionEventData.builder() + .podGroupName(podGroupName) + .podCount(podCount) + .graceful(graceful) + .build(); + publish(taskId, ContainerEventType.SESSION_END, data); + } + + // ==================== 便捷方法:构建阶段 ==================== + + /** + * 构建事件使用独立的 Stream Key + */ + private static final String BUILD_STREAM_KEY_PREFIX = "stream:build:"; + + /** + * 发布构建事件到 Redis Stream(使用 buildId 作为 key) + */ + public void publishBuildEvent(String buildId, ContainerEventType eventType, Object data) { + ScheduleEvent event = ScheduleEvent.builder() + .eventType(eventType.name()) + .timestamp(formatTimestamp(Instant.now())) + .taskId(buildId) + .data(data) + .build(); + + try { + String eventJson = objectMapper.writeValueAsString(event); + String streamKey = 
BUILD_STREAM_KEY_PREFIX + buildId; + + RecordId recordId = redisTemplate.opsForStream().add( + StreamRecords.string(Map.of("payload", eventJson)).withStreamKey(streamKey) + ); + + log.debug("Published build event to {}: type={}, recordId={}", + streamKey, eventType, recordId); + + } catch (JsonProcessingException e) { + log.error("Failed to serialize build event for {}: {}", buildId, e.getMessage()); + } catch (Exception e) { + log.error("Failed to publish build event for {}: {}", buildId, e.getMessage()); + } + } + + /** + * 发布 BUILD_STARTED 事件 + */ + public void publishBuildStarted(String buildId, String buildNo, Long roleId, + String roleName, String baseImage) { + BuildEventData data = BuildEventData.builder() + .buildNo(buildNo) + .roleId(roleId) + .roleName(roleName) + .baseImage(baseImage) + .build(); + publishBuildEvent(buildId, ContainerEventType.BUILD_STARTED, data); + } + + /** + * 发布 BUILD_PROGRESS 事件 + */ + public void publishBuildProgress(String buildId, String buildNo, String step, String message) { + BuildEventData data = BuildEventData.builder() + .buildNo(buildNo) + .step(step) + .message(message) + .build(); + publishBuildEvent(buildId, ContainerEventType.BUILD_PROGRESS, data); + } + + /** + * 发布 BUILD_LOG 事件(Docker 实时日志行) + */ + public void publishBuildLog(String buildId, String buildNo, String logLine, String logLevel) { + BuildEventData data = BuildEventData.builder() + .buildNo(buildNo) + .message(logLine) + .step(logLevel) // 使用 step 字段存储日志级别 (info, warn, error, debug) + .build(); + publishBuildEvent(buildId, ContainerEventType.BUILD_LOG, data); + } + + /** + * 发布 BUILD_COMPLETED 事件 + */ + public void publishBuildCompleted(String buildId, String buildNo, String imageTag, Long durationMs) { + BuildEventData data = BuildEventData.builder() + .buildNo(buildNo) + .imageTag(imageTag) + .durationMs(durationMs) + .build(); + publishBuildEvent(buildId, ContainerEventType.BUILD_COMPLETED, data); + } + + /** + * 发布 BUILD_FAILED 事件 + */ + public void 
publishBuildFailed(String buildId, String buildNo, String errorCode, + String errorMessage, Long durationMs) { + BuildEventData data = BuildEventData.builder() + .buildNo(buildNo) + .errorCode(errorCode) + .errorMessage(errorMessage) + .durationMs(durationMs) + .build(); + publishBuildEvent(buildId, ContainerEventType.BUILD_FAILED, data); + } + + /** + * 发布 BUILD_PUSHING 事件 + */ + public void publishBuildPushing(String buildId, String buildNo, String imageTag) { + BuildEventData data = BuildEventData.builder() + .buildNo(buildNo) + .imageTag(imageTag) + .message("Pushing image to registry...") + .build(); + publishBuildEvent(buildId, ContainerEventType.BUILD_PUSHING, data); + } + + /** + * 发布 BUILD_PUSHED 事件 + */ + public void publishBuildPushed(String buildId, String buildNo, String imageTag) { + BuildEventData data = BuildEventData.builder() + .buildNo(buildNo) + .imageTag(imageTag) + .message("Image pushed successfully") + .build(); + publishBuildEvent(buildId, ContainerEventType.BUILD_PUSHED, data); + } + + // ==================== 工具方法 ==================== + + /** + * 格式化时间戳为 ISO 8601 格式,精确到微秒 + */ + private String formatTimestamp(Instant instant) { + return TIMESTAMP_FORMATTER.format(instant); + } +} diff --git a/back/src/main/java/com/linkwork/service/SecurityPolicyService.java b/back/src/main/java/com/linkwork/service/SecurityPolicyService.java new file mode 100644 index 0000000..a9bc5a3 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/SecurityPolicyService.java @@ -0,0 +1,186 @@ +package com.linkwork.service; + +import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.linkwork.mapper.SecurityPolicyMapper; +import com.linkwork.model.entity.SecurityPolicy; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import 
org.springframework.stereotype.Service; +import org.springframework.transaction.annotation.Transactional; + +import java.time.LocalDateTime; +import java.util.*; +import java.util.stream.Collectors; + +/** + * 安全策略服务 + */ +@Slf4j +@Service +@RequiredArgsConstructor +public class SecurityPolicyService { + + private final SecurityPolicyMapper policyMapper; + private final ObjectMapper objectMapper; + + /** + * 获取所有安全策略 + */ + public List> listPolicies() { + LambdaQueryWrapper wrapper = new LambdaQueryWrapper<>(); + wrapper.orderByAsc(SecurityPolicy::getType) // system 排前面 + .orderByDesc(SecurityPolicy::getCreatedAt); + List policies = policyMapper.selectList(wrapper); + return policies.stream().map(this::toResponse).collect(Collectors.toList()); + } + + /** + * 获取单个策略 + */ + public Map getPolicy(Long id) { + SecurityPolicy policy = policyMapper.selectById(id); + if (policy == null) { + throw new IllegalArgumentException("策略不存在: " + id); + } + return toResponse(policy); + } + + /** + * 创建自定义策略 + */ + @Transactional + public Map createPolicy(Map request, String creatorId, String creatorName) { + SecurityPolicy policy = new SecurityPolicy(); + policy.setName((String) request.get("name")); + policy.setDescription((String) request.get("description")); + policy.setType("custom"); + policy.setEnabled(true); + policy.setCreatorId(creatorId); + policy.setCreatorName(creatorName); + policy.setCreatedAt(LocalDateTime.now()); + policy.setUpdatedAt(LocalDateTime.now()); + policy.setIsDeleted(0); + + // 序列化规则 + Object rules = request.get("rules"); + if (rules != null) { + try { + policy.setRulesJson(objectMapper.writeValueAsString(rules)); + } catch (JsonProcessingException e) { + log.error("序列化策略规则失败", e); + } + } else { + policy.setRulesJson("[]"); + } + + policyMapper.insert(policy); + log.info("安全策略创建成功: id={}, name={}", policy.getId(), policy.getName()); + return toResponse(policy); + } + + /** + * 更新策略 + */ + @Transactional + public Map updatePolicy(Long id, Map request) { + 
SecurityPolicy policy = policyMapper.selectById(id); + if (policy == null) { + throw new IllegalArgumentException("策略不存在: " + id); + } + if ("system".equals(policy.getType())) { + throw new IllegalArgumentException("系统策略不可编辑"); + } + + if (request.containsKey("name")) { + policy.setName((String) request.get("name")); + } + if (request.containsKey("description")) { + policy.setDescription((String) request.get("description")); + } + if (request.containsKey("enabled")) { + policy.setEnabled((Boolean) request.get("enabled")); + } + if (request.containsKey("rules")) { + try { + policy.setRulesJson(objectMapper.writeValueAsString(request.get("rules"))); + } catch (JsonProcessingException e) { + log.error("序列化策略规则失败", e); + } + } + + policy.setUpdatedAt(LocalDateTime.now()); + policyMapper.updateById(policy); + log.info("安全策略更新成功: id={}, name={}", policy.getId(), policy.getName()); + return toResponse(policy); + } + + /** + * 切换策略启用/禁用 + */ + @Transactional + public Map togglePolicy(Long id) { + SecurityPolicy policy = policyMapper.selectById(id); + if (policy == null) { + throw new IllegalArgumentException("策略不存在: " + id); + } + if ("system".equals(policy.getType()) && Boolean.TRUE.equals(policy.getEnabled())) { + throw new IllegalArgumentException("系统策略不能禁用"); + } + + policy.setEnabled(!policy.getEnabled()); + policy.setUpdatedAt(LocalDateTime.now()); + policyMapper.updateById(policy); + log.info("安全策略状态切换: id={}, enabled={}", policy.getId(), policy.getEnabled()); + return toResponse(policy); + } + + /** + * 删除策略 + */ + @Transactional + public void deletePolicy(Long id) { + SecurityPolicy policy = policyMapper.selectById(id); + if (policy == null) { + throw new IllegalArgumentException("策略不存在: " + id); + } + if ("system".equals(policy.getType())) { + throw new IllegalArgumentException("系统策略不可删除"); + } + policyMapper.deleteById(id); + log.info("安全策略删除: id={}, name={}", id, policy.getName()); + } + + /** + * 转换为响应格式 + */ + private Map toResponse(SecurityPolicy policy) { + 
Map map = new LinkedHashMap<>(); + map.put("id", policy.getId()); + map.put("name", policy.getName()); + map.put("description", policy.getDescription()); + map.put("type", policy.getType()); + map.put("enabled", policy.getEnabled()); + map.put("creatorName", policy.getCreatorName()); + map.put("createdAt", policy.getCreatedAt()); + map.put("updatedAt", policy.getUpdatedAt()); + + // 解析规则 JSON + if (policy.getRulesJson() != null) { + try { + List> rules = objectMapper.readValue( + policy.getRulesJson(), new TypeReference>>() {}); + map.put("rules", rules); + } catch (JsonProcessingException e) { + log.error("解析策略规则失败: id={}", policy.getId(), e); + map.put("rules", List.of()); + } + } else { + map.put("rules", List.of()); + } + + return map; + } +} diff --git a/back/src/main/java/com/linkwork/service/ServiceResumeService.java b/back/src/main/java/com/linkwork/service/ServiceResumeService.java new file mode 100644 index 0000000..0d5a8f1 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/ServiceResumeService.java @@ -0,0 +1,100 @@ +package com.linkwork.service; + +import com.linkwork.model.dto.ServiceBuildRequest; +import com.linkwork.model.dto.ServiceBuildResult; +import com.linkwork.model.dto.ServiceResumeResult; +import com.linkwork.model.dto.ServiceSnapshot; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Service; + +import java.util.Optional; + +/** + * 服务快速恢复服务 + * + * 利用镜像缓存 + 节点亲和调度实现快速重启: + * 1. 查询服务快照 + * 2. 还原原始请求 + * 3. 设置 preferredNode(优先调度到原节点) + * 4. 
调用 Build 服务 + * + * 效果:命中镜像缓存时,启动时间从 ~90s 降到 ~20s + */ +@Service +@Slf4j +public class ServiceResumeService { + + private final ServiceSnapshotService snapshotService; + private final ServiceScheduleService scheduleService; + + public ServiceResumeService(ServiceSnapshotService snapshotService, + ServiceScheduleService scheduleService) { + this.snapshotService = snapshotService; + this.scheduleService = scheduleService; + } + + /** + * 快速恢复 Service + * + * @param serviceId 服务 ID + * @return 恢复结果 + */ + public ServiceResumeResult resume(String serviceId) { + log.info("Resuming service {}", serviceId); + + // 1. 查询快照 + Optional snapshotOpt = snapshotService.findActiveSnapshot(serviceId); + + if (snapshotOpt.isEmpty()) { + log.info("No active snapshot for service {}, requires full build request", serviceId); + return ServiceResumeResult.snapshotNotFound(serviceId); + } + + ServiceSnapshot snapshot = snapshotOpt.get(); + String lastScheduledNode = snapshot.getLastScheduledNode(); + + // 2. 还原原始请求 + ServiceBuildRequest request = snapshotService.restoreRequest(snapshot); + if (request == null) { + return ServiceResumeResult.failed(serviceId, "RESTORE_FAILED", + "无法还原原始请求"); + } + + // 3. 设置优先调度节点(关键!用于命中镜像缓存) + request.setPreferredNode(lastScheduledNode); + log.info("Resume service {}: setting preferredNode={}", serviceId, lastScheduledNode); + + // 4. 调用 Build 服务 + try { + ServiceBuildResult buildResult = scheduleService.build(request); + + if (!buildResult.isSuccess()) { + return ServiceResumeResult.failed(serviceId, + buildResult.getErrorCode(), buildResult.getErrorMessage()); + } + + // 5. 更新快照 + snapshotService.onServiceResumed(serviceId, buildResult.getScheduledNode()); + + // 6. 
判断是否命中缓存 + boolean imageCached = lastScheduledNode != null + && lastScheduledNode.equals(buildResult.getScheduledNode()); + + log.info("Service {} resumed successfully, scheduledNode={}, imageCached={}", + serviceId, buildResult.getScheduledNode(), imageCached); + + return ServiceResumeResult.success( + serviceId, + buildResult.getPodGroupName(), + buildResult.getPodNames(), + buildResult.getScheduledNode(), + imageCached + ); + + } catch (Exception e) { + log.error("Failed to resume service {}: {}", serviceId, e.getMessage(), e); + return ServiceResumeResult.failed(serviceId, "RESUME_FAILED", + "恢复失败: " + e.getMessage()); + } + } +} diff --git a/back/src/main/java/com/linkwork/service/ServiceScaleService.java b/back/src/main/java/com/linkwork/service/ServiceScaleService.java new file mode 100644 index 0000000..a2f0184 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/ServiceScaleService.java @@ -0,0 +1,506 @@ +package com.linkwork.service; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.linkwork.model.dto.*; +import com.linkwork.model.entity.BuildRecordEntity; +import com.linkwork.model.enums.DeployMode; +import com.linkwork.model.enums.PodMode; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Service; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Map; + +/** + * 服务动态伸缩服务 + */ +@Service +@Slf4j +public class ServiceScaleService { + + private final K8sOrchestrator k8sOrchestrator; + private final ServiceSnapshotService snapshotService; + private final ConfigMergeService configMergeService; + private final DistributedLockService lockService; + private final BuildRecordService buildRecordService; + private final ObjectMapper objectMapper; + + private static final int SCALE_UP_MAX_RETRIES = 3; + + public ServiceScaleService(K8sOrchestrator k8sOrchestrator, + ServiceSnapshotService snapshotService, + ConfigMergeService configMergeService, + DistributedLockService 
lockService, + BuildRecordService buildRecordService, + ObjectMapper objectMapper) { + this.k8sOrchestrator = k8sOrchestrator; + this.snapshotService = snapshotService; + this.configMergeService = configMergeService; + this.lockService = lockService; + this.buildRecordService = buildRecordService; + this.objectMapper = objectMapper; + } + + public ScaleResult scaleDown(String serviceId, String podName, String source) { + log.info("Scale down request for service {}, podName={}, source={}", + serviceId, podName, source); + + String lockValue = lockService.tryAcquireLock(serviceId); + if (lockValue == null) { + return ScaleResult.failed(serviceId, + "Failed to acquire lock, another scaling operation may be in progress"); + } + + try { + return doScaleDown(serviceId, podName); + } finally { + lockService.releaseLock(serviceId, lockValue); + } + } + + private ScaleResult doScaleDown(String serviceId, String podName) { + ServiceSnapshot snapshot = snapshotService.getSnapshot(serviceId); + if (snapshot == null) { + return ScaleResult.failed(serviceId, "Snapshot not found for service"); + } + + int previousCount = snapshot.getCurrentPodCount() != null ? snapshot.getCurrentPodCount() : 0; + int maxPodCount = snapshot.getMaxPodCount() != null ? 
snapshot.getMaxPodCount() : previousCount; + + ScaleResult k8sResult = k8sOrchestrator.scaleDown(serviceId, podName); + if (!k8sResult.isSuccess()) { + log.error("Failed to delete pod {} for service {}: {}", + podName, serviceId, k8sResult.getErrorMessage()); + return k8sResult; + } + + int currentCount = k8sResult.getCurrentPodCount(); + snapshot.setCurrentPodCount(currentCount); + + if (snapshot.getRunningPodNames() != null) { + snapshot.getRunningPodNames().remove(podName); + } + + snapshotService.updateSnapshot(snapshot); + + log.info("Scale down result for service {}: {} -> {} pods", + serviceId, previousCount, currentCount); + + return ScaleResult.success( + serviceId, + "SCALE_DOWN", + previousCount, + currentCount, + maxPodCount, + k8sResult.getRunningPods(), + null, + List.of(podName) + ); + } + + public ScaleResult scaleUp(String serviceId, Integer targetPodCount, String source) { + log.info("Scale up request for service {}, targetPodCount={}, source={}", + serviceId, targetPodCount, source); + + String lockValue = lockService.tryAcquireLock(serviceId); + if (lockValue == null) { + return ScaleResult.failed(serviceId, + "Failed to acquire lock, another scaling operation may be in progress"); + } + + try { + return doScaleUp(serviceId, targetPodCount); + } finally { + lockService.releaseLock(serviceId, lockValue); + } + } + + private ScaleResult doScaleUp(String serviceId, Integer targetPodCount) { + ServiceSnapshot snapshot = snapshotService.getSnapshot(serviceId); + if (snapshot == null) { + snapshot = tryRecoverSnapshotFromSources(serviceId); + } + if (snapshot == null) { + return ScaleResult.failed(serviceId, "Snapshot not found, cannot scale up"); + } + + int maxPodCount = snapshot.getMaxPodCount() != null ? snapshot.getMaxPodCount() : 1; + int target = targetPodCount != null ? targetPodCount : maxPodCount; + int currentCount = snapshot.getCurrentPodCount() != null ? 
snapshot.getCurrentPodCount() : 0; + + if (target > maxPodCount) { + log.warn("Target pod count {} exceeds max {}, using max", target, maxPodCount); + target = maxPodCount; + } + + if (currentCount >= target) { + List runningPods = k8sOrchestrator.getRunningPods(serviceId); + return ScaleResult.noChange(serviceId, currentCount, maxPodCount, runningPods); + } + + MergedConfig config = restoreConfig(snapshot); + if (config == null) { + return ScaleResult.failed(serviceId, "Failed to restore config from snapshot"); + } + + if (snapshot.getLastScheduledNode() != null) { + config.setPreferredNode(snapshot.getLastScheduledNode()); + } + + int toAdd = target - currentCount; + List addedPods = new ArrayList<>(); + + for (int i = 0; i < toAdd; i++) { + String newPodName = createPodWithRetry(serviceId, config, snapshot); + if (newPodName != null) { + addedPods.add(newPodName); + + if (snapshot.getRunningPodNames() == null) { + snapshot.setRunningPodNames(new HashSet<>()); + } + snapshot.getRunningPodNames().add(newPodName); + } else { + log.warn("Failed to create pod after {} retries, stopping scale-up", SCALE_UP_MAX_RETRIES); + break; + } + } + + int newCount = currentCount + addedPods.size(); + if (!addedPods.isEmpty()) { + snapshot.setCurrentPodCount(newCount); + snapshotService.updateSnapshot(snapshot); + } + + log.info("Scale up result for service {}: {} -> {} pods (target={}, max={}), added={}", + serviceId, currentCount, newCount, target, maxPodCount, addedPods); + + List runningPods = k8sOrchestrator.getRunningPods(serviceId); + return ScaleResult.builder() + .serviceId(serviceId) + .success(!addedPods.isEmpty()) + .scaleType("SCALE_UP") + .previousPodCount(currentCount) + .currentPodCount(newCount) + .maxPodCount(maxPodCount) + .runningPods(runningPods) + .addedPods(addedPods) + .errorMessage(addedPods.isEmpty() ? 
"Failed to create any pod" : null) + .build(); + } + + private String createPodWithRetry(String serviceId, MergedConfig config, ServiceSnapshot snapshot) { + for (int retry = 0; retry < SCALE_UP_MAX_RETRIES; retry++) { + try { + ScaleResult result = k8sOrchestrator.scaleUp( + serviceId, + (snapshot.getCurrentPodCount() != null ? snapshot.getCurrentPodCount() : 0) + 1, + config + ); + + if (result.isSuccess() && result.getAddedPods() != null && !result.getAddedPods().isEmpty()) { + return result.getAddedPods().get(0); + } + + log.warn("Create pod failed for service {}, retry {}/{}", + serviceId, retry + 1, SCALE_UP_MAX_RETRIES); + + } catch (Exception e) { + log.warn("Create pod exception for service {}, retry {}/{}: {}", + serviceId, retry + 1, SCALE_UP_MAX_RETRIES, e.getMessage()); + } + + if (retry < SCALE_UP_MAX_RETRIES - 1) { + try { + Thread.sleep(1000L * (retry + 1)); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + return null; + } + } + } + return null; + } + + public ScaleResult scale(String serviceId, int targetPodCount, String source) { + log.info("Scale request for service {}, targetPodCount={}, source={}", + serviceId, targetPodCount, source); + + String lockValue = lockService.tryAcquireLock(serviceId); + if (lockValue == null) { + return ScaleResult.failed(serviceId, + "Failed to acquire lock, another scaling operation may be in progress"); + } + + try { + return doScale(serviceId, targetPodCount, source); + } finally { + lockService.releaseLock(serviceId, lockValue); + } + } + + private ScaleResult doScale(String serviceId, int targetPodCount, String source) { + List runningPods = k8sOrchestrator.getRunningPods(serviceId); + int currentCount = runningPods.size(); + + if (targetPodCount < currentCount) { + int podsToRemove = currentCount - targetPodCount; + ScaleResult lastResult = null; + + for (int i = 0; i < podsToRemove; i++) { + List currentPods = k8sOrchestrator.getRunningPods(serviceId); + if 
(currentPods.isEmpty()) { + break; + } + String podToDelete = currentPods.get(currentPods.size() - 1); + + lastResult = doScaleDown(serviceId, podToDelete); + if (!lastResult.isSuccess()) { + return lastResult; + } + } + + return lastResult; + + } else if (targetPodCount > currentCount) { + return doScaleUp(serviceId, targetPodCount); + + } else { + ServiceSnapshot snapshot = snapshotService.getSnapshot(serviceId); + int maxPodCount = snapshot != null && snapshot.getMaxPodCount() != null + ? snapshot.getMaxPodCount() : currentCount; + return ScaleResult.noChange(serviceId, currentCount, maxPodCount, runningPods); + } + } + + public ScaleResult getScaleStatus(String serviceId) { + List runningPods = k8sOrchestrator.getRunningPods(serviceId); + ServiceSnapshot snapshot = snapshotService.getSnapshot(serviceId); + + int maxPodCount = snapshot != null && snapshot.getMaxPodCount() != null + ? snapshot.getMaxPodCount() : runningPods.size(); + + return ScaleResult.noChange(serviceId, runningPods.size(), maxPodCount, runningPods); + } + + /** + * 任务入队时确保岗位下有可用 Pod。 + * 如果快照存在且当前 Pod 数量为 0,按岗位配置的 maxPodCount 扩容。 + * + * @param serviceId 服务 ID(= roleId) + * @return 扩容结果;快照不存在或已有 Pod 时返回 null(无需扩容) + */ + public ScaleResult ensurePodsForRole(String serviceId) { + ServiceSnapshot snapshot = snapshotService.getSnapshot(serviceId); + if (snapshot == null) { + snapshot = tryRecoverSnapshotFromSources(serviceId); + if (snapshot == null) { + log.warn("ensurePodsForRole: 无法恢复 Snapshot,跳过自动扩容: serviceId={}", serviceId); + return null; + } + } + + int currentCount = snapshot.getCurrentPodCount() != null ? 
snapshot.getCurrentPodCount() : 0; + if (currentCount > 0) { + return null; + } + + // 二次确认:快照可能与 K8s 实际状态不同步 + List actualPods = k8sOrchestrator.getRunningPods(serviceId); + if (!actualPods.isEmpty()) { + snapshot.setCurrentPodCount(actualPods.size()); + snapshotService.updateSnapshot(snapshot); + log.info("ensurePodsForRole: snapshot was stale for service {}, actual pods={}", + serviceId, actualPods.size()); + return null; + } + + log.info("ensurePodsForRole: no pods for service {}, triggering scale-up to maxPodCount", serviceId); + return scaleUp(serviceId, null, "task_enqueue"); + } + + private MergedConfig restoreConfig(ServiceSnapshot snapshot) { + try { + String json = snapshot.getOriginalRequestJson(); + + if (json == null || json.isEmpty()) { + json = snapshotService.getOriginalRequestJsonFromRedis(snapshot.getServiceId()); + if (json != null) { + snapshot.setOriginalRequestJson(json); + snapshotService.updateSnapshot(snapshot); + } + } + + if (json == null || json.isEmpty()) { + json = rebuildRequestFromBuildRecord(snapshot); + if (json != null) { + snapshot.setOriginalRequestJson(json); + snapshotService.updateSnapshot(snapshot); + } + } + + if (json == null || json.isEmpty()) { + log.error("Original request JSON is empty for service {} and all fallbacks failed", + snapshot.getServiceId()); + return null; + } + + ServiceBuildRequest request = objectMapper.readValue(json, ServiceBuildRequest.class); + MergedConfig config = configMergeService.merge(request); + + if (snapshot.getAgentImage() != null && !snapshot.getAgentImage().isEmpty()) { + config.setAgentImage(snapshot.getAgentImage()); + log.info("Restored built agent image for service {}: {}", + snapshot.getServiceId(), snapshot.getAgentImage()); + } else { + String builtImage = resolveBuiltImageFromRecord(snapshot.getServiceId()); + if (builtImage != null) { + config.setAgentImage(builtImage); + snapshot.setAgentImage(builtImage); + snapshotService.updateSnapshot(snapshot); + log.info("Restored built 
agent image from DB for service {}: {}", + snapshot.getServiceId(), builtImage); + } + } + + return config; + + } catch (Exception e) { + log.error("Failed to restore config from snapshot: {}", e.getMessage(), e); + return null; + } + } + + private String resolveBuiltImageFromRecord(String serviceId) { + try { + Long roleId = Long.parseLong(serviceId); + BuildRecordEntity record = buildRecordService.getLatestByRoleId(roleId); + if (record != null && BuildRecordEntity.STATUS_SUCCESS.equals(record.getStatus()) + && record.getImageTag() != null) { + return record.getImageTag(); + } + } catch (Exception e) { + log.warn("Failed to resolve built image from DB for service {}: {}", serviceId, e.getMessage()); + } + return null; + } + + /** + * 当内存中无 Snapshot 时,尝试从 Redis + DB 恢复一个可用的 Snapshot。 + * 典型场景:后端重启后 Pod 已结束(Succeeded),SnapshotSyncTask 不会重建该服务的 Snapshot。 + */ + @SuppressWarnings("unchecked") + private ServiceSnapshot tryRecoverSnapshotFromSources(String serviceId) { + try { + Long roleId = Long.parseLong(serviceId); + BuildRecordEntity record = buildRecordService.getLatestByRoleId(roleId); + if (record == null) { + log.warn("tryRecoverSnapshot: 岗位无构建记录,无法恢复 Snapshot: serviceId={}", serviceId); + return null; + } + if (!BuildRecordEntity.STATUS_SUCCESS.equals(record.getStatus())) { + log.warn("tryRecoverSnapshot: 最新构建记录状态非 SUCCESS,无法恢复: serviceId={}, buildNo={}, status={}", + serviceId, record.getBuildNo(), record.getStatus()); + return null; + } + + String originalRequestJson = snapshotService.getOriginalRequestJsonFromRedis(serviceId); + + if (originalRequestJson == null) { + log.info("tryRecoverSnapshot: Redis 中无 originalRequestJson,尝试从 DB 构建记录重建: serviceId={}", serviceId); + originalRequestJson = rebuildRequestJsonFromRecord(serviceId, record); + } + if (originalRequestJson == null) { + log.warn("tryRecoverSnapshot: 从 Redis 和 DB 均无法恢复 originalRequestJson: serviceId={}, buildNo={}", + serviceId, record.getBuildNo()); + return null; + } + + Map config = 
record.getConfigSnapshot(); + String podMode = config != null && config.get("podMode") != null ? + (String) config.get("podMode") : "SIDECAR"; + Integer podCount = config != null && config.get("podCount") != null ? + ((Number) config.get("podCount")).intValue() : 1; + + ServiceSnapshot snapshot = ServiceSnapshot.builder() + .serviceId(serviceId) + .userId(record.getCreatorId()) + .originalRequestJson(originalRequestJson) + .agentImage(record.getImageTag()) + .podMode(podMode) + .maxPodCount(podCount) + .currentPodCount(0) + .runningPodNames(new HashSet<>()) + .nextPodIndex(0) + .createdAt(java.time.Instant.now()) + .lastActiveAt(java.time.Instant.now()) + .status(com.linkwork.model.enums.SnapshotStatus.ACTIVE) + .resumeCount(0) + .build(); + + snapshotService.updateSnapshot(snapshot); + log.info("Recovered snapshot from Redis/DB for service {}, maxPodCount={}", + serviceId, podCount); + return snapshot; + + } catch (Exception e) { + log.warn("Failed to recover snapshot for service {}: {}", serviceId, e.getMessage()); + return null; + } + } + + @SuppressWarnings("unchecked") + private String rebuildRequestJsonFromRecord(String serviceId, BuildRecordEntity record) { + try { + Map config = record.getConfigSnapshot(); + if (config == null) { + return null; + } + ServiceBuildRequest request = ServiceBuildRequest.builder() + .serviceId(serviceId) + .userId(record.getCreatorId()) + .roleId(record.getRoleId()) + .roleName(record.getRoleName()) + .deployMode(DeployMode.K8S) + .buildEnvVars(config.containsKey("buildEnvVars") ? + (Map) config.get("buildEnvVars") : Map.of()) + .podMode(config.containsKey("podMode") && config.get("podMode") != null ? + PodMode.valueOf((String) config.get("podMode")) : PodMode.SIDECAR) + .podCount(config.containsKey("podCount") && config.get("podCount") != null ? + ((Number) config.get("podCount")).intValue() : 1) + .runnerBaseImage(config.containsKey("runnerBaseImage") ? 
+ (String) config.get("runnerBaseImage") : null) + .build(); + return objectMapper.writeValueAsString(request); + } catch (Exception e) { + log.error("Failed to build request JSON from record for service {}: {}", serviceId, e.getMessage()); + return null; + } + } + + private String rebuildRequestFromBuildRecord(ServiceSnapshot snapshot) { + String serviceId = snapshot.getServiceId(); + try { + Long roleId = Long.parseLong(serviceId); + BuildRecordEntity record = buildRecordService.getLatestByRoleId(roleId); + if (record == null || !BuildRecordEntity.STATUS_SUCCESS.equals(record.getStatus())) { + log.warn("No successful build record found for role {}", roleId); + return null; + } + String json = rebuildRequestJsonFromRecord(serviceId, record); + if (json != null) { + log.info("Rebuilt ServiceBuildRequest from DB build record for service {}, buildNo={}", + serviceId, record.getBuildNo()); + } + return json; + } catch (NumberFormatException e) { + log.warn("ServiceId {} is not a valid roleId, cannot reconstruct from build record", serviceId); + return null; + } catch (Exception e) { + log.error("Failed to rebuild request from build record for service {}: {}", + serviceId, e.getMessage(), e); + return null; + } + } +} diff --git a/back/src/main/java/com/linkwork/service/ServiceScheduleService.java b/back/src/main/java/com/linkwork/service/ServiceScheduleService.java new file mode 100644 index 0000000..f7d558f --- /dev/null +++ b/back/src/main/java/com/linkwork/service/ServiceScheduleService.java @@ -0,0 +1,953 @@ +package com.linkwork.service; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.linkwork.config.BuildQueueConfig; +import com.linkwork.config.ImageBuildConfig; +import com.linkwork.model.dto.BuildTask; +import com.linkwork.model.dto.GeneratedSpec; +import com.linkwork.model.dto.ImageBuildResult; +import com.linkwork.model.dto.MergedConfig; +import com.linkwork.model.dto.ServiceBuildRequest; +import com.linkwork.model.dto.ServiceBuildResult; 
+import com.linkwork.model.entity.McpServerEntity; +import com.linkwork.model.entity.RoleEntity; +import com.linkwork.model.entity.SkillEntity; +import com.linkwork.model.enums.DeployMode; +import com.linkwork.context.UserContext; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.scheduling.annotation.Async; +import org.springframework.stereotype.Service; +import org.springframework.util.StringUtils; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.stream.Collectors; + +/** + * 服务调度服务 + */ +@Service +@Slf4j +public class ServiceScheduleService { + + private final ConfigMergeService configMergeService; + private final K8sOrchestrator orchestrator; + private final DockerComposeGenerator composeGenerator; + private final ImageBuildService imageBuildService; + private final ImageBuildConfig imageBuildConfig; + private final BuildRecordService buildRecordService; + private final ScheduleEventPublisher eventPublisher; + private final GitLabAuthService gitLabAuthService; + private final BuildQueueService buildQueueService; + private final BuildQueueConfig buildQueueConfig; + private final McpServerService mcpServerService; + private final SkillService skillService; + private final RoleService roleService; + private final ObjectMapper objectMapper; + private final ServiceSnapshotService snapshotService; + + @Value("${robot.skills.repo-url:}") + private String skillsRepoUrl; + + @Value("${robot.skills.deploy-token:}") + private String skillsDeployToken; + + public ServiceScheduleService(ConfigMergeService configMergeService, + K8sOrchestrator orchestrator, + DockerComposeGenerator composeGenerator, + ImageBuildService imageBuildService, + ImageBuildConfig imageBuildConfig, + BuildRecordService buildRecordService, + ScheduleEventPublisher eventPublisher, + GitLabAuthService gitLabAuthService, + 
BuildQueueService buildQueueService, + BuildQueueConfig buildQueueConfig, + McpServerService mcpServerService, + SkillService skillService, + RoleService roleService, + ObjectMapper objectMapper, + ServiceSnapshotService snapshotService) { + this.configMergeService = configMergeService; + this.orchestrator = orchestrator; + this.composeGenerator = composeGenerator; + this.imageBuildService = imageBuildService; + this.imageBuildConfig = imageBuildConfig; + this.buildRecordService = buildRecordService; + this.eventPublisher = eventPublisher; + this.gitLabAuthService = gitLabAuthService; + this.buildQueueService = buildQueueService; + this.buildQueueConfig = buildQueueConfig; + this.mcpServerService = mcpServerService; + this.skillService = skillService; + this.roleService = roleService; + this.objectMapper = objectMapper; + this.snapshotService = snapshotService; + } + + /** + * 构建服务(同步方式,立即返回,实际构建异步执行) + * + * 如果启用了构建队列,任务会进入队列排队; + * 否则直接异步执行(兼容旧行为)。 + */ + public ServiceBuildResult build(ServiceBuildRequest request) { + String serviceId = request.getServiceId(); + + // 确保 buildId 存在(如果前端未传,则后端生成) + String buildId = request.getBuildId(); + if (!StringUtils.hasText(buildId)) { + buildId = UUID.randomUUID().toString(); + request.setBuildId(buildId); + } + + try { + // 0. 自动注入 GitLab token(从用户绑定的 GitLab 账户获取) + injectGitLabToken(request); + + // 0.5 自动注入 MCP 配置(从岗位 configJson.mcp 生成) + injectMcpConfig(request); + + // 0.6 自动注入 Skills 配置(从岗位 configJson.skills 生成) + injectSkillsConfig(request); + + // 1. 配置融合(快速验证) + MergedConfig config = configMergeService.merge(request); + + // 2. 计算 PodGroup 名称(提前返回给调用方) + String podGroupName = "svc-" + serviceId + "-pg"; + String queueName = config.getQueueName(); + + // 3. 
创建构建记录 + if (request.getRoleId() != null) { + Map configSnapshot = createConfigSnapshot(request, config); + buildRecordService.createBuildRecord( + buildId, + request.getRoleId(), + request.getRoleName(), + configSnapshot, + request.getUserId(), + UserContext.getCurrentUserName() + ); + } + + // 4. 提交构建任务 + if (buildQueueConfig.isEnabled()) { + // 使用构建队列 + try { + BuildTask task = buildQueueService.submit(request, config); + int queuePosition = buildQueueService.getWaitingCount(); + log.info("Build task queued: serviceId={}, buildId={}, queuePosition={}", + serviceId, buildId, queuePosition); + + String message = queuePosition > 1 + ? String.format("Task queued at position %d, waiting for resources", queuePosition) + : "Task submitted, starting build..."; + + return ServiceBuildResult.building(serviceId, buildId, podGroupName, queueName, message); + } catch (IllegalStateException e) { + // 队列已满 + log.warn("Build queue full, rejecting request: serviceId={}", serviceId); + return ServiceBuildResult.failed(serviceId, "QUEUE_FULL", e.getMessage()); + } + } else { + // 直接异步执行(兼容旧行为) + buildAsync(request, config); + log.info("Build task submitted (direct async): serviceId={}, buildId={}", serviceId, buildId); + return ServiceBuildResult.building(serviceId, buildId, podGroupName, queueName, + "Task submitted, starting build..."); + } + + } catch (IllegalArgumentException e) { + log.error("Invalid request for service {}: {}", serviceId, e.getMessage()); + // 更新构建记录为失败 + if (request.getRoleId() != null) { + buildRecordService.markFailed(buildId, e.getMessage(), 0L); + } + return ServiceBuildResult.failed(serviceId, "INVALID_REQUEST", e.getMessage()); + } catch (Exception e) { + log.error("Failed to submit build task for service {}: {}", serviceId, e.getMessage(), e); + // 更新构建记录为失败 + if (request.getRoleId() != null) { + buildRecordService.markFailed(buildId, e.getMessage(), 0L); + } + return ServiceBuildResult.failed(serviceId, "INTERNAL_ERROR", e.getMessage()); + } + } + + 
/** + * 自动注入 GitLab token 到 buildEnvVars + * 从用户绑定的 GitLab 账户获取有效的 access token + */ + private void injectGitLabToken(ServiceBuildRequest request) { + String userId = request.getUserId(); + if (!StringUtils.hasText(userId)) { + log.debug("No userId provided, skipping GitLab token injection"); + return; + } + + // 获取用户的 GitLab token + String gitLabToken = gitLabAuthService.getAccessToken(userId); + if (!StringUtils.hasText(gitLabToken)) { + log.debug("No GitLab token found for userId: {}", userId); + return; + } + + // 注入到 buildEnvVars + Map envVars = request.getBuildEnvVars(); + if (envVars == null) { + envVars = new HashMap<>(); + request.setBuildEnvVars(envVars); + } + + // 覆盖前端传入的 GIT_TOKEN(前端传的可能是错误的登录 JWT) + envVars.put("GIT_TOKEN", gitLabToken); + log.info("Injected GitLab token for userId: {}", userId); + } + + /** + * 自动注入 MCP 配置到 buildEnvVars + * 从岗位 configJson.mcp 中获取 MCP ID 列表,生成 SDK 兼容的 mcp.json JSON 字符串, + * 写入 MCP_CONFIG 环境变量供 build.sh 使用。 + * + *

如果岗位未配置 MCP(configJson.mcp 为空或不存在),正常跳过不报错。 + * 如果岗位配置了 MCP 但注入失败(ID 无效、Server 不存在等),抛出异常中断构建。

+ */ + private void injectMcpConfig(ServiceBuildRequest request) { + Long roleId = request.getRoleId(); + if (roleId == null) { + log.debug("No roleId provided, skipping MCP config injection"); + return; + } + + RoleEntity role = roleService.getById(roleId); + if (role == null) { + log.debug("Role not found for roleId: {}, skipping MCP config injection", roleId); + return; + } + + // 从岗位 configJson.mcp 取 MCP 标识列表(兼容数字 ID 和名称字符串两种格式) + List mcpRefs = Collections.emptyList(); + if (role.getConfigJson() != null && role.getConfigJson().getMcp() != null) { + mcpRefs = role.getConfigJson().getMcp().stream() + .filter(s -> s != null && !s.isBlank()) + .collect(Collectors.toList()); + } + + // 岗位未配置 MCP → 正常跳过,不报错 + if (mcpRefs.isEmpty()) { + log.debug("No MCP refs configured for role {}, skipping MCP config injection", roleId); + return; + } + + // --- 以下:岗位配置了 MCP,注入失败则中断构建 --- + + // 分离数字 ID 和名称字符串 + List mcpIds = new java.util.ArrayList<>(); + List mcpNames = new java.util.ArrayList<>(); + for (String ref : mcpRefs) { + try { + mcpIds.add(Long.parseLong(ref)); + } catch (NumberFormatException e) { + mcpNames.add(ref); + } + } + + // 按名称查询 MCP Server ID(兼容前端 mock 名称格式) + if (!mcpNames.isEmpty()) { + List byNames = mcpServerService.list( + new com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper() + .in(McpServerEntity::getName, mcpNames) + ); + for (McpServerEntity entity : byNames) { + mcpIds.add(entity.getId()); + } + if (byNames.size() < mcpNames.size()) { + List foundNames = byNames.stream() + .map(McpServerEntity::getName) + .collect(Collectors.toList()); + List missingNames = mcpNames.stream() + .filter(n -> !foundNames.contains(n)) + .collect(Collectors.toList()); + throw new IllegalArgumentException( + String.format("岗位 [%d] 配置的 MCP Server 名称不存在: %s", roleId, missingNames)); + } + } + + if (mcpIds.isEmpty()) { + throw new IllegalArgumentException( + String.format("岗位 [%d] 配置的 MCP 引用均无法解析为有效 ID: %s", roleId, mcpRefs)); + } + + // 生成 SDK 兼容的 mcp.json 
配置 + try { + Map mcpConfig = mcpServerService.generateMcpConfig(mcpIds); + String mcpJsonString = objectMapper.writeValueAsString(mcpConfig); + + // 注入到 buildEnvVars + Map envVars = request.getBuildEnvVars(); + if (envVars == null) { + envVars = new HashMap<>(); + request.setBuildEnvVars(envVars); + } + envVars.put("MCP_CONFIG", mcpJsonString); + log.info("Injected MCP config for roleId: {} ({} servers, resolved from {} refs)", + roleId, mcpIds.size(), mcpRefs.size()); + } catch (IllegalArgumentException e) { + // 直接抛出,不包装 + throw e; + } catch (Exception e) { + throw new IllegalArgumentException( + String.format("岗位 [%d] 的 MCP 配置生成失败: %s", roleId, e.getMessage()), e); + } + } + + /** + * 自动注入 Skills 配置到 buildEnvVars + * 从岗位 configJson.skills 中获取 Skill ID 列表,查询 DB 获取 branchName + latestCommit, + * 生成 SKILLS_CONFIG JSON 字符串注入环境变量供 build.sh 使用。 + * + *

如果岗位未配置 Skills(configJson.skills 为空或不存在),正常跳过不报错。 + * 如果岗位配置了 Skills 但注入失败(ID 无效、Skill 不存在等),抛出异常中断构建。

+ */ + private void injectSkillsConfig(ServiceBuildRequest request) { + Long roleId = request.getRoleId(); + if (roleId == null) { + log.debug("No roleId provided, skipping Skills config injection"); + return; + } + + RoleEntity role = roleService.getById(roleId); + if (role == null) { + log.debug("Role not found for roleId: {}, skipping Skills config injection", roleId); + return; + } + + // 从岗位 configJson.skills 取 Skill 引用列表 + List skillRefs = Collections.emptyList(); + if (role.getConfigJson() != null && role.getConfigJson().getSkills() != null) { + skillRefs = role.getConfigJson().getSkills().stream() + .filter(s -> s != null && !s.isBlank()) + .collect(Collectors.toList()); + } + + // 岗位未配置 Skills → 正常跳过 + if (skillRefs.isEmpty()) { + log.debug("No Skill refs configured for role {}, skipping Skills config injection", roleId); + return; + } + + // --- 以下:岗位配置了 Skills,注入失败则中断构建 --- + + // 分离数字 ID 和名称字符串 + List skillIds = new java.util.ArrayList<>(); + List skillNames = new java.util.ArrayList<>(); + for (String ref : skillRefs) { + try { + skillIds.add(Long.parseLong(ref)); + } catch (NumberFormatException e) { + skillNames.add(ref); + } + } + + // 按名称查询 Skill ID + if (!skillNames.isEmpty()) { + List byNames = skillService.list( + new com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper() + .in(SkillEntity::getName, skillNames) + ); + for (SkillEntity entity : byNames) { + skillIds.add(entity.getId()); + } + if (byNames.size() < skillNames.size()) { + List foundNames = byNames.stream() + .map(SkillEntity::getName) + .collect(Collectors.toList()); + List missingNames = skillNames.stream() + .filter(n -> !foundNames.contains(n)) + .collect(Collectors.toList()); + throw new IllegalArgumentException( + String.format("岗位 [%d] 配置的 Skill 名称不存在: %s", roleId, missingNames)); + } + } + + if (skillIds.isEmpty()) { + throw new IllegalArgumentException( + String.format("岗位 [%d] 配置的 Skill 引用均无法解析为有效 ID: %s", roleId, skillRefs)); + } + + // 查询所有 SkillEntity + List 
skills = skillService.listByIds(skillIds); + if (skills.size() < skillIds.size()) { + List foundIds = skills.stream().map(SkillEntity::getId).collect(Collectors.toList()); + List missingIds = skillIds.stream() + .filter(id -> !foundIds.contains(id)) + .collect(Collectors.toList()); + throw new IllegalArgumentException( + String.format("岗位 [%d] 配置的 Skill ID 不存在: %s", roleId, missingIds)); + } + + // 校验所有 Skill 都有 branchName + for (SkillEntity skill : skills) { + if (!StringUtils.hasText(skill.getBranchName())) { + throw new IllegalArgumentException( + String.format("Skill [%s] 尚未关联 Git 分支,请先同步", skill.getName())); + } + } + + // 生成 SKILLS_CONFIG JSON + try { + List> skillsList = new java.util.ArrayList<>(); + for (SkillEntity skill : skills) { + Map item = new java.util.LinkedHashMap<>(); + item.put("name", skill.getName()); + item.put("branch", skill.getBranchName()); + item.put("commit", skill.getLatestCommit()); + skillsList.add(item); + } + + Map skillsConfig = new java.util.LinkedHashMap<>(); + skillsConfig.put("repoUrl", skillsRepoUrl); + skillsConfig.put("token", skillsDeployToken); + skillsConfig.put("skills", skillsList); + + String skillsJsonString = objectMapper.writeValueAsString(skillsConfig); + + // 注入到 buildEnvVars + Map envVars = request.getBuildEnvVars(); + if (envVars == null) { + envVars = new HashMap<>(); + request.setBuildEnvVars(envVars); + } + envVars.put("SKILLS_CONFIG", skillsJsonString); + log.info("Injected Skills config for roleId: {} ({} skills, resolved from {} refs)", + roleId, skills.size(), skillRefs.size()); + } catch (IllegalArgumentException e) { + throw e; + } catch (Exception e) { + throw new IllegalArgumentException( + String.format("岗位 [%d] 的 Skills 配置生成失败: %s", roleId, e.getMessage()), e); + } + } + + /** + * 创建配置快照 + */ + private Map createConfigSnapshot(ServiceBuildRequest request, MergedConfig config) { + Map snapshot = new HashMap<>(); + snapshot.put("serviceId", request.getServiceId()); + snapshot.put("deployMode", 
request.getDeployMode() != null ? request.getDeployMode().name() : null); + snapshot.put("runnerBaseImage", request.getRunnerBaseImage()); + snapshot.put("agentImage", config.getAgentImage()); + snapshot.put("podMode", config.getPodMode() != null ? config.getPodMode().name() : null); + snapshot.put("runtimeMode", config.getPodMode() != null ? config.getPodMode().name() : null); + snapshot.put("zzMode", config.getPodMode() != null && config.getPodMode().name().equals("SIDECAR") ? "ssh" : "local"); + snapshot.put("runnerImage", config.getRunnerImage()); + snapshot.put("podCount", config.getPodCount()); + snapshot.put("buildEnvVars", request.getBuildEnvVars()); + return snapshot; + } + + /** + * 异步构建服务(镜像构建 + K8s 资源创建) + */ + @Async + public void buildAsync(ServiceBuildRequest request, MergedConfig config) { + String serviceId = request.getServiceId(); + String buildId = request.getBuildId(); + Long roleId = request.getRoleId(); + String roleName = request.getRoleName(); + long startTime = System.currentTimeMillis(); + + try { + log.info("Async build started for service {}, buildId: {}", serviceId, buildId); + + // 更新构建记录状态为 BUILDING + if (roleId != null && StringUtils.hasText(buildId)) { + buildRecordService.markBuilding(buildId); + } + + // 1. 
镜像构建(如果启用,仅构建 Agent 镜像) + if (imageBuildConfig.isEnabled() && request.getDeployMode() == DeployMode.K8S) { + log.info("Image build enabled, starting image build for service {}", serviceId); + + // 发布 BUILD_STARTED 事件 + if (StringUtils.hasText(buildId)) { + eventPublisher.publishBuildStarted(buildId, buildId, roleId, roleName, + config.getAgentImage()); + eventPublisher.publishBuildProgress(buildId, buildId, "dockerfile", + "Generating Dockerfile..."); + } + + ImageBuildResult buildResult = imageBuildService.buildImages(request); + + if (!buildResult.isSuccess()) { + log.error("Image build failed for service {}: {}", serviceId, buildResult.getErrorMessage()); + + // 发布 BUILD_FAILED 事件并更新记录 + long durationMs = System.currentTimeMillis() - startTime; + if (StringUtils.hasText(buildId)) { + eventPublisher.publishBuildFailed(buildId, buildId, "BUILD_ERROR", + buildResult.getErrorMessage(), durationMs); + if (roleId != null) { + buildRecordService.markFailed(buildId, buildResult.getErrorMessage(), durationMs); + } + } + return; + } + + // 只更新 Agent 镜像地址(Runner 镜像保持 runnerBaseImage 不变) + config.setAgentImage(buildResult.getAgentImageTag()); + config.setImageBuildDurationMs(buildResult.getBuildDurationMs()); + + // 发布镜像推送事件(如果已推送) + if (buildResult.isPushed() && StringUtils.hasText(buildId)) { + eventPublisher.publishBuildPushed(buildId, buildId, buildResult.getAgentImageTag()); + } + + log.info("Image build completed for service {}, agentImage: {}, runnerImage: {}, duration: {}ms", + serviceId, buildResult.getAgentImageTag(), config.getRunnerImage(), buildResult.getBuildDurationMs()); + } else { + log.info("Image build disabled or not K8s mode, using base image for service {}", serviceId); + } + + // 2. 
创建 K8s 资源 + ServiceBuildResult result = orchestrator.buildService(config); + + long durationMs = System.currentTimeMillis() - startTime; + + if (result.isSuccess()) { + log.info("Async build completed successfully for service {}", serviceId); + + // 保存 Snapshot 到内存 + Redis,确保后端重启后能恢复 + try { + snapshotService.saveSnapshot(request, result); + } catch (Exception snapshotErr) { + log.warn("异步构建后保存 Snapshot 失败(不影响构建结果): serviceId={}, error={}", + serviceId, snapshotErr.getMessage()); + } + + // 发布 BUILD_COMPLETED 事件并更新记录 + if (StringUtils.hasText(buildId)) { + eventPublisher.publishBuildCompleted(buildId, buildId, config.getAgentImage(), durationMs); + if (roleId != null) { + buildRecordService.markSuccess(buildId, config.getAgentImage(), durationMs); + } + } + } else { + log.error("Async build failed for service {}: {}", serviceId, result.getErrorMessage()); + + // 发布 BUILD_FAILED 事件并更新记录 + if (StringUtils.hasText(buildId)) { + eventPublisher.publishBuildFailed(buildId, buildId, "K8S_ERROR", + result.getErrorMessage(), durationMs); + if (roleId != null) { + buildRecordService.markFailed(buildId, result.getErrorMessage(), durationMs); + } + } + } + + } catch (Exception e) { + log.error("Async build error for service {}: {}", serviceId, e.getMessage(), e); + + long durationMs = System.currentTimeMillis() - startTime; + + // 发布 BUILD_FAILED 事件并更新记录 + if (StringUtils.hasText(buildId)) { + eventPublisher.publishBuildFailed(buildId, buildId, "INTERNAL_ERROR", + e.getMessage(), durationMs); + if (roleId != null) { + buildRecordService.markFailed(buildId, e.getMessage(), durationMs); + } + } + } + } + + /** + * 预览生成的 Spec(不实际创建) + */ + public GeneratedSpec preview(ServiceBuildRequest request) { + MergedConfig config = configMergeService.merge(request); + return orchestrator.previewSpec(config); + } + + /** + * 生成 Compose 构建包(tar.gz,包含完整的本地构建和部署材料) + * + * 不在服务端构建镜像,而是将构建所需的全部材料打包给用户, + * 用户解压后执行 start.sh 即可一键完成镜像构建和容器启动。 + * + * tar.gz 内容: + * docker-compose.yaml - Compose 
编排文件(build: . 方式)
     * Dockerfile - 动态生成(基于 Compose 专用基础镜像)
     * build.sh - Dockerfile 内部执行的镜像构建脚本
     * config.json - Agent 默认配置
     * cedar-policies/ - Cedar 策略文件
     * start.sh - 一键构建部署脚本
     * README.md - 详细使用说明
     */
    public ServiceBuildResult generateComposePackage(ServiceBuildRequest request) {
        String serviceId = request.getServiceId();

        try {
            log.info("Generating Compose package for service {}", serviceId);

            // 1. 注入凭证和配置
            injectGitLabToken(request);
            injectMcpConfig(request);
            injectSkillsConfig(request);

            // 2. 配置融合
            MergedConfig config = configMergeService.merge(request);

            // 3. 生成各文件内容
            String composeYaml = composeGenerator.generateComposeYaml(config);
            String dockerfile = imageBuildService.generateDockerfile(
                    imageBuildConfig.getComposeBaseImage(),
                    request.getBuildEnvVars());
            String startScript = generateStartScript(serviceId);
            String readme = generateReadme(config);

            // 4. 打包 tar.gz
            byte[] tarBytes = buildComposeTar(serviceId, composeYaml, dockerfile, startScript, readme);

            log.info("Compose package generated for service {}, size: {} bytes", serviceId, tarBytes.length);
            return ServiceBuildResult.successCompose(serviceId, tarBytes);

        } catch (Exception e) {
            log.error("Compose package generation failed for service {}: {}", serviceId, e.getMessage(), e);
            return ServiceBuildResult.failed(serviceId, "INTERNAL_ERROR", e.getMessage());
        }
    }

    // ==================== Compose 打包内部方法 ====================

    // 组装 gzip 压缩的 tar 包:动态生成文件 + classpath 静态资源,统一放在 ai-worker-<serviceId>/ 前缀下
    private byte[] buildComposeTar(String serviceId, String composeYaml, String dockerfile,
                                   String startScript, String readme) throws java.io.IOException {
        String prefix = "ai-worker-" + serviceId + "/";
        var baos = new java.io.ByteArrayOutputStream();
        try (var gzos = new java.util.zip.GZIPOutputStream(baos);
             var tos = new org.apache.commons.compress.archivers.tar.TarArchiveOutputStream(gzos)) {
            // POSIX 长文件名支持,避免超长条目名被截断
            tos.setLongFileMode(org.apache.commons.compress.archivers.tar.TarArchiveOutputStream.LONGFILE_POSIX);

            addTarEntry(tos, prefix + "docker-compose.yaml", composeYaml);
            addTarEntry(tos, prefix + "Dockerfile", dockerfile);
            addTarEntry(tos, prefix + "README.md", readme);
            addTarEntryExecutable(tos, prefix + "start.sh", startScript);

            addClasspathResourceToTar(tos, "scripts/build.sh", prefix + "build.sh");
            addClasspathResourceToTar(tos, "scripts/config.json", prefix + "config.json");
            addClasspathResourceToTar(tos, "scripts/00-platform.cedar", prefix + "cedar-policies/00-platform.cedar");

            tos.finish();
        }
        return baos.toByteArray();
    }

    // 写入一个 0644 权限的普通文本条目
    private void addTarEntry(org.apache.commons.compress.archivers.tar.TarArchiveOutputStream tos,
                             String name, String content) throws java.io.IOException {
        byte[] data = content.getBytes(java.nio.charset.StandardCharsets.UTF_8);
        var entry = new org.apache.commons.compress.archivers.tar.TarArchiveEntry(name);
        entry.setSize(data.length);
        entry.setMode(0644);
        tos.putArchiveEntry(entry);
        tos.write(data);
        tos.closeArchiveEntry();
    }

    // 写入一个 0755 权限的可执行条目(用于 start.sh)
    private void addTarEntryExecutable(org.apache.commons.compress.archivers.tar.TarArchiveOutputStream tos,
                                       String name, String content) throws java.io.IOException {
        byte[] data = content.getBytes(java.nio.charset.StandardCharsets.UTF_8);
        var entry = new org.apache.commons.compress.archivers.tar.TarArchiveEntry(name);
        entry.setSize(data.length);
        entry.setMode(0755);
        tos.putArchiveEntry(entry);
        tos.write(data);
        tos.closeArchiveEntry();
    }

    // 将 classpath 下的静态资源写入 tar;资源缺失时仅告警跳过,不中断打包
    private void addClasspathResourceToTar(org.apache.commons.compress.archivers.tar.TarArchiveOutputStream tos,
                                           String classpathPath, String entryName) throws java.io.IOException {
        var resource = new org.springframework.core.io.ClassPathResource(classpathPath);
        if (resource.exists()) {
            try (var is = resource.getInputStream()) {
                byte[] data = is.readAllBytes();
                var entry = new org.apache.commons.compress.archivers.tar.TarArchiveEntry(entryName);
                entry.setSize(data.length);
                entry.setMode(0644);
                tos.putArchiveEntry(entry);
                tos.write(data);
                tos.closeArchiveEntry();
            }
        } else {
            log.warn("Classpath resource not found: {}, skipping", classpathPath);
        }
    }

    /**
     * 生成一键构建部署脚本
     */
    private String generateStartScript(String serviceId) {
        return String.format("""
#!/bin/bash
# =============================================================================
# AI Worker 一键构建部署脚本
# Service ID: %1$s
#
# 用法:
# ./start.sh 首次部署(构建镜像 + 启动容器)
# ./start.sh rebuild 强制重新构建镜像
# ./start.sh stop 停止服务
# ./start.sh logs 查看实时日志
# ./start.sh status 查看容器状态
# ./start.sh clean 停止服务并删除镜像和数据卷
# =============================================================================
set -e

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
cd "$SCRIPT_DIR"

SERVICE_ID="%1$s"
CONTAINER_NAME="ai-worker-${SERVICE_ID}"

log_info() { echo -e "\\033[32m[INFO]\\033[0m $(date '+%%H:%%M:%%S') $*"; }
log_warn() { echo -e "\\033[33m[WARN]\\033[0m $(date '+%%H:%%M:%%S') $*"; }
log_error() { echo -e "\\033[31m[ERROR]\\033[0m $(date '+%%H:%%M:%%S') $*" >&2; }

check_docker() {
    if ! command -v docker &>/dev/null; then
        log_error "Docker 未安装,请先安装 Docker"
        exit 1
    fi
    if ! docker compose version &>/dev/null; then
        log_error "Docker Compose V2 不可用,请升级 Docker"
        exit 1
    fi
}

do_build_and_start() {
    log_info "开始构建镜像并启动服务(Service ID: ${SERVICE_ID})..."
    log_info "首次构建需要拉取基础镜像和安装依赖,预计耗时 5-10 分钟"
    echo ""
    docker compose up --build -d
    echo ""
    log_info "服务已启动"
    log_info "容器名称: ${CONTAINER_NAME}"
    log_info "查看日志: ./start.sh logs"
    log_info "停止服务: ./start.sh stop"
}

do_rebuild() {
    log_info "强制重新构建镜像..."
    docker compose build --no-cache
    docker compose up -d
    log_info "重新构建完成,服务已启动"
}

do_stop() {
    log_info "停止服务..."
    docker compose down
    log_info "服务已停止"
}

do_logs() {
    docker compose logs -f agent
}

do_status() {
    docker compose ps
}

do_clean() {
    log_warn "将停止服务、删除容器、镜像和数据卷"
    read -rp "确认? [y/N] " confirm
    if [[ "$confirm" =~ ^[Yy]$ ]]; then
        docker compose down -v --rmi local
        log_info "清理完成"
    else
        log_info "已取消"
    fi
}

check_docker

case "${1:-}" in
    rebuild) do_rebuild ;;
    stop) do_stop ;;
    logs) do_logs ;;
    status) do_status ;;
    clean) do_clean ;;
    "") do_build_and_start ;;
    *)
        echo "用法: $0 {rebuild|stop|logs|status|clean}"
        echo " (无参数) 首次构建并启动"
        echo " rebuild 强制重新构建镜像"
        echo " stop 停止服务"
        echo " logs 查看实时日志"
        echo " status 查看容器状态"
        echo " clean 停止并清理所有资源"
        exit 1
        ;;
esac
""", serviceId);
    }

    /**
     * 生成详细使用说明文档
     */
    private String generateReadme(MergedConfig config) {
        return String.format("""
# AI Worker 本地部署包

## 概述

本压缩包包含在本地服务器上以 Docker Compose 方式部署 AI Worker 的全部文件。
镜像在本地构建,无需从镜像仓库拉取构建好的镜像。

- **Service ID**: %s
- **User ID**: %s
- **基础镜像**: %s
- **运行模式**: Alone(单容器,ZZD_MODE=local)

## 文件清单

| 文件 | 说明 |
|------|------|
| `docker-compose.yaml` | Docker Compose 编排文件 |
| `Dockerfile` | 镜像构建定义(基于基础镜像,安装 SDK、zzd 等) |
| `build.sh` | Dockerfile 内部执行的构建脚本(安装依赖、配置权限等) |
| `config.json` | Agent 配置文件(claude_settings、workspace 等) |
| `cedar-policies/` | Cedar 安全策略文件 |
| `start.sh` | **一键构建部署脚本** |
| `README.md` | 本说明文档 |

## 环境要求

- Docker Engine 20.10+(需支持 Compose V2)
- 可访问 `docker.momo.com`(拉取基础镜像)
- 可访问 `git.wemomo.com`(构建时克隆 SDK 仓库)
- 建议内存 ≥ %s,CPU ≥ %s 核

## 快速开始

### 一键部署

```bash
# 解压
tar xzf ai-worker-%s.tar.gz
cd ai-worker-%s

# 首次部署(构建镜像 + 启动容器)
./start.sh
```

首次构建需要拉取基础镜像和安装依赖,预计耗时 **5-10 分钟**。
后续启动(镜像已构建过)只需几秒。

### 常用操作

```bash
# 查看实时日志
./start.sh logs

# 查看容器状态
./start.sh status

# 停止服务
./start.sh stop

# 强制重新构建(修改配置后)
./start.sh rebuild

# 清理所有资源(容器 + 镜像 + 数据卷)
./start.sh clean
```

### 手动操作(等效命令)

```bash
# 构建并启动
docker compose up --build -d

# 查看日志
docker compose logs -f agent

# 停止
docker compose down
```

## 运行时环境变量

以下环境变量已在 `docker-compose.yaml` 中配置:

| 变量 | 值 | 说明 |
|------|-----|------|
| `WORKSTATION_ID` | %s | 工位 ID |
| `SERVICE_ID` | %s | 服务 ID |
| `REDIS_URL` | %s | Redis 连接地址 |
| `API_BASE_URL` | %s | API 网关地址 |
| `WS_BASE_URL` | %s | WebSocket 网关地址 |
| `LLM_GATEWAY_URL` | %s | LLM 网关地址 |
| `CONFIG_FILE` | /opt/agent/config.json | Agent 配置文件路径 |
| `IDLE_TIMEOUT` | 86400 | 空闲超时(秒) |

## 资源配置

| 资源 | 请求 | 上限 |
|------|------|------|
| CPU | %s | %s |
| 内存 | %s | %s |

如需调整,修改 `docker-compose.yaml` 中 `deploy.resources` 部分。

## 数据持久化

工作目录挂载为 Docker named volume `workspace`,容器重启后数据保留。
执行 `./start.sh clean` 或 `docker compose down -v` 会删除数据卷。

## 故障排查

```bash
# 查看容器状态
docker compose ps

# 查看最近日志(不 follow)
docker compose logs --tail 100 agent

# 进入容器调试
docker compose exec agent bash

# 查看资源使用
docker stats ai-worker-%s
```
""",
                config.getServiceId(),
                config.getUserId(),
                imageBuildConfig.getComposeBaseImage(),
                config.getAgentResources().getMemoryLimit(),
                config.getAgentResources().getCpuLimit(),
                config.getServiceId(),
                config.getServiceId(),
                config.getWorkstationId() != null ? config.getWorkstationId() : config.getServiceId(),
                config.getServiceId(),
                config.getRedisUrl() != null ? config.getRedisUrl() : "",
                config.getApiBaseUrl() != null ? config.getApiBaseUrl() : "",
                config.getWsBaseUrl() != null ? config.getWsBaseUrl() : "",
                config.getLlmGatewayUrl() != null ? config.getLlmGatewayUrl() : "",
                config.getAgentResources().getCpuRequest(),
                config.getAgentResources().getCpuLimit(),
                config.getAgentResources().getMemoryRequest(),
                config.getAgentResources().getMemoryLimit(),
                config.getServiceId());
    }

    /**
     * 获取融合后的配置(用于调试)
     */
    public MergedConfig getMergedConfig(ServiceBuildRequest request) {
        return configMergeService.merge(request);
    }
}
diff --git a/back/src/main/java/com/linkwork/service/ServiceSnapshotService.java b/back/src/main/java/com/linkwork/service/ServiceSnapshotService.java
new file mode 100644
index 0000000..1e74919
--- /dev/null
+++ b/back/src/main/java/com/linkwork/service/ServiceSnapshotService.java
@@ -0,0 +1,364 @@
package com.linkwork.service;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.linkwork.config.EnvConfig;
import com.linkwork.model.dto.ServiceBuildRequest;
import com.linkwork.model.dto.ServiceBuildResult;
import com.linkwork.model.dto.ServiceSnapshot;
import com.linkwork.model.enums.DeployMode;
import com.linkwork.model.enums.PodMode;
import com.linkwork.model.enums.SnapshotStatus;
import lombok.extern.slf4j.Slf4j;
import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.stereotype.Service;

import java.time.Duration;
import java.time.Instant;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;

/**
 * 服务快照管理服务
 *
 * 用于快速重启功能:
 * 1. Service 创建成功后保存快照(原始请求、运行节点)
 * 2. Service 关闭时更新快照(记录 shutdownAt)
 * 3.
Resume: look up the snapshot to restore the original request and the last scheduled node.
 *
 * Note: snapshots live in process memory; the original request JSON is additionally
 * persisted to Redis so it can be recovered after a backend restart (see rebuildFromK8s).
 */
@Service
@Slf4j
public class ServiceSnapshotService {

    private final ObjectMapper objectMapper;
    private final EnvConfig envConfig;
    private final StringRedisTemplate redisTemplate;

    private static final String REDIS_KEY_PREFIX = "service:snapshot:request:";
    private static final Duration REDIS_TTL = Duration.ofDays(30);

    // In-memory snapshot store keyed by serviceId. Generic parameters restored
    // (they were stripped in the source rendering, leaving raw types).
    private final Map<String, ServiceSnapshot> snapshotStore = new ConcurrentHashMap<>();

    public ServiceSnapshotService(ObjectMapper objectMapper, EnvConfig envConfig,
                                  StringRedisTemplate redisTemplate) {
        this.objectMapper = objectMapper;
        this.envConfig = envConfig;
        this.redisTemplate = redisTemplate;
    }

    /**
     * Save a snapshot after a service has been created successfully.
     *
     * @param request the original build request
     * @param result  the build result
     */
    public void saveSnapshot(ServiceBuildRequest request, ServiceBuildResult result) {
        try {
            String originalRequestJson = objectMapper.writeValueAsString(request);

            // The initially configured pod count doubles as the historical maximum.
            int podCount = request.getPodCount() != null && request.getPodCount() > 0
                    ? request.getPodCount() : 1;

            // Initialize the set of running pod names from the build result.
            Set<String> runningPodNames = new HashSet<>();
            if (result.getPodNames() != null) {
                runningPodNames.addAll(result.getPodNames());
            }

            ServiceSnapshot snapshot = ServiceSnapshot.builder()
                    .serviceId(request.getServiceId())
                    .userId(request.getUserId())
                    .originalRequestJson(originalRequestJson)
                    // Agent image produced by the build, used for fast resume.
                    .agentImage(result.getBuiltAgentImage())
                    .podMode(resolvePodMode(request).name())
                    .lastScheduledNode(result.getScheduledNode())
                    .maxPodCount(podCount)          // initially configured maximum pod count
                    .currentPodCount(podCount)      // current pod count
                    .runningPodNames(runningPodNames)
                    .nextPodIndex(podCount)         // next pod ordinal to hand out
                    .createdAt(Instant.now())
                    .lastActiveAt(Instant.now())
                    .status(SnapshotStatus.ACTIVE)
                    .resumeCount(0)
                    .build();

            snapshotStore.put(request.getServiceId(), snapshot);

            persistRequestToRedis(request.getServiceId(), originalRequestJson);

            log.info("Saved snapshot for service {}, scheduledNode={}, maxPodCount={}, pods={}",
                    request.getServiceId(), result.getScheduledNode(), podCount, runningPodNames);

        } catch (JsonProcessingException e) {
            // Best-effort persistence: log the full cause (not just the message)
            // so serialization failures are diagnosable.
            log.error("Failed to serialize request for service {}", request.getServiceId(), e);
        }
    }

    /**
     * Resolve the pod mode for a request: explicit value wins, COMPOSE implies ALONE,
     * otherwise fall back to the configured default (failing fast if none is set).
     */
    private PodMode resolvePodMode(ServiceBuildRequest request) {
        if (request.getPodMode() != null) {
            return request.getPodMode();
        }
        if (request.getDeployMode() == DeployMode.COMPOSE) {
            return PodMode.ALONE;
        }
        if (envConfig.getPodModeRules() == null || envConfig.getPodModeRules().getDefaultMode() == null) {
            throw new IllegalStateException("未配置默认运行模式,无法保存服务快照");
        }
        return envConfig.getPodModeRules().getDefaultMode();
    }

    /**
     * Record the shutdown time when a service is stopped.
     *
     * @param serviceId service ID
     */
    public void onServiceShutdown(String serviceId) {
        ServiceSnapshot snapshot = snapshotStore.get(serviceId);
        if (snapshot != null) {
            snapshot.setShutdownAt(Instant.now());
            log.info("Updated snapshot shutdown time for service {}", serviceId);
        }
    }

    /**
     * Look up a usable snapshot.
     *
     * @param serviceId service ID
     * @return the snapshot if it exists, is ACTIVE, and has not expired
     */
    public Optional<ServiceSnapshot> findActiveSnapshot(String serviceId) {
        ServiceSnapshot snapshot = snapshotStore.get(serviceId);

        if (snapshot == null) {
            log.debug("No snapshot found for service {}", serviceId);
            return Optional.empty();
        }

        if (snapshot.getStatus() != SnapshotStatus.ACTIVE) {
            log.debug("Snapshot for service {} is not active: {}", serviceId, snapshot.getStatus());
            return Optional.empty();
        }

        if (snapshot.isExpired()) {
            // Lazily mark expiry at lookup time.
            log.info("Snapshot for service {} is expired", serviceId);
            snapshot.setStatus(SnapshotStatus.EXPIRED);
            return Optional.empty();
        }

        return Optional.of(snapshot);
    }

    /**
     * Deserialize the original build request stored in a snapshot.
     *
     * @param snapshot the snapshot
     * @return the original request, or null if deserialization fails
     */
    public ServiceBuildRequest restoreRequest(ServiceSnapshot snapshot) {
        try {
            return objectMapper.readValue(snapshot.getOriginalRequestJson(), ServiceBuildRequest.class);
        } catch (JsonProcessingException e) {
            log.error("Failed to deserialize request for service {}: {}",
                    snapshot.getServiceId(), e.getMessage());
            return null;
        }
    }

    /**
     * Update the snapshot after a service has been resumed.
     *
     * @param serviceId service ID
     * @param newNode   the node the resumed service was scheduled on
     */
    public void onServiceResumed(String serviceId, String newNode) {
        ServiceSnapshot snapshot = snapshotStore.get(serviceId);
        if (snapshot != null) {
            snapshot.setLastScheduledNode(newNode);
            snapshot.setLastActiveAt(Instant.now());
            snapshot.setShutdownAt(null);
            snapshot.setResumeCount(snapshot.getResumeCount() + 1);
            log.info("Updated snapshot for resumed service {}, newNode={}, resumeCount={}",
                    serviceId, newNode, snapshot.getResumeCount());
        }
    }

    /**
     * Remove a snapshot.
     *
     * @param serviceId service ID
     */
    public void deleteSnapshot(String serviceId) {
        snapshotStore.remove(serviceId);
        log.info("Deleted snapshot for service {}", serviceId);
    }

    /**
     * Fetch a snapshot without any status/expiry filtering.
     *
     * @param serviceId service ID
     * @return the snapshot, or null if none exists
     */
    public ServiceSnapshot getSnapshot(String serviceId) {
        return snapshotStore.get(serviceId);
    }

    /**
     * Store (replace) a snapshot.
     *
     * @param snapshot the snapshot; ignored if null or missing a serviceId
     */
    public void updateSnapshot(ServiceSnapshot snapshot) {
        if (snapshot != null && snapshot.getServiceId() != null) {
            snapshotStore.put(snapshot.getServiceId(), snapshot);
            log.debug("Updated snapshot for service {}", snapshot.getServiceId());
        }
    }

    /**
     * Snapshots that still need periodic synchronization:
     * 1. active snapshots with running pods;
     * 2. snapshots with currentPodCount == 0 whose shutdownAt has not been set yet
     *    (so the sync pass can stamp the expiry time).
     *
     * @return snapshots requiring sync
     */
    public List<ServiceSnapshot> findAllActive() {
        return snapshotStore.values().stream()
                .filter(s -> s.getStatus() == SnapshotStatus.ACTIVE)
                .filter(s -> !s.isExpired())
                .filter(s -> {
                    int podCount = s.getCurrentPodCount() != null ? s.getCurrentPodCount() : 0;
                    if (podCount > 0) {
                        return true;
                    }
                    // No pods left but shutdownAt not stamped yet — still needs sync.
                    return s.getShutdownAt() == null;
                })
                .collect(Collectors.toList());
    }

    /**
     * Return the next pod ordinal for a service and advance the counter.
     *
     * @param serviceId service ID
     * @return the ordinal to use for the next pod (0 when no snapshot exists)
     */
    public int getAndIncrementNextPodIndex(String serviceId) {
        ServiceSnapshot snapshot = snapshotStore.get(serviceId);
        if (snapshot == null) {
            return 0;
        }
        int nextIndex = snapshot.getNextPodIndex() != null ? snapshot.getNextPodIndex() : 0;
        snapshot.setNextPodIndex(nextIndex + 1);
        return nextIndex;
    }

    /**
     * Rebuild a snapshot from the live K8s state (used to recover after a backend restart).
     *
     * Tries to recover originalRequestJson from Redis; it stays null when absent.
     *
     * @param serviceId       service ID
     * @param runningPodNames names of the currently running pods
     * @param userId          user ID (extracted from pod labels)
     * @param podMode         pod mode, e.g. "sidecar"/"alone" (extracted from pod labels)
     * @param scheduledNode   node the pods are scheduled on (extracted from pod spec)
     * @return the rebuilt snapshot
     */
    public ServiceSnapshot rebuildFromK8s(String serviceId, Set<String> runningPodNames,
                                          String userId, String podMode, String scheduledNode) {
        int podCount = runningPodNames.size();

        // Highest numeric suffix among pod names determines the next pod ordinal.
        int maxIndex = runningPodNames.stream()
                .map(name -> {
                    String[] parts = name.split("-");
                    try {
                        return Integer.parseInt(parts[parts.length - 1]);
                    } catch (NumberFormatException e) {
                        return -1;
                    }
                })
                .filter(i -> i >= 0)
                .max(Integer::compareTo)
                .orElse(0);

        String originalRequestJson = loadRequestFromRedis(serviceId);

        String agentImage = null;
        if (originalRequestJson != null) {
            try {
                ServiceBuildRequest req = objectMapper.readValue(originalRequestJson, ServiceBuildRequest.class);
                agentImage = req.getBuildEnvVars() != null
                        ? (String) req.getBuildEnvVars().get("AGENT_IMAGE") : null;
            } catch (Exception e) {
                log.warn("Failed to parse restored request for service {}: {}", serviceId, e.getMessage());
            }
        }

        ServiceSnapshot snapshot = ServiceSnapshot.builder()
                .serviceId(serviceId)
                .userId(userId)
                .originalRequestJson(originalRequestJson)
                .agentImage(agentImage)
                .podMode(podMode)
                .lastScheduledNode(scheduledNode)
                .maxPodCount(podCount)
                .currentPodCount(podCount)
                .runningPodNames(runningPodNames)
                .nextPodIndex(maxIndex + 1)
                .createdAt(Instant.now())
                .lastActiveAt(Instant.now())
                .status(SnapshotStatus.ACTIVE)
                .resumeCount(0)
                .build();

        snapshotStore.put(serviceId, snapshot);
        log.info("Rebuilt snapshot from K8s for service {}: pods={}, node={}, podMode={}, hasOriginalRequest={}",
                serviceId, runningPodNames, scheduledNode, podMode, originalRequestJson != null);

        return snapshot;
    }

    /**
     * Whether a snapshot exists for the given serviceId.
     */
    public boolean hasSnapshot(String serviceId) {
        return snapshotStore.containsKey(serviceId);
    }

    /**
     * Load originalRequestJson from Redis (for callers that find the in-memory snapshot missing).
     */
    public String getOriginalRequestJsonFromRedis(String serviceId) {
        return loadRequestFromRedis(serviceId);
    }

    // Best-effort write-through to Redis; failures are logged, never propagated.
    private void persistRequestToRedis(String serviceId, String requestJson) {
        try {
            redisTemplate.opsForValue().set(REDIS_KEY_PREFIX + serviceId, requestJson, REDIS_TTL);
        } catch (Exception e) {
            log.warn("Failed to persist request to Redis for service {}: {}", serviceId, e.getMessage());
        }
    }

    // Best-effort read from Redis; returns null on miss or any Redis error.
    private String loadRequestFromRedis(String serviceId) {
        try {
            String json = redisTemplate.opsForValue().get(REDIS_KEY_PREFIX + serviceId);
            if (json != null) {
                log.info("Recovered originalRequestJson from Redis for service {}", serviceId);
            }
            return json;
        } catch (Exception e) {
            log.warn("Failed to load request from Redis for service {}: {}", serviceId, e.getMessage());
            return null;
        }
    }
}
diff --git 
a/back/src/main/java/com/linkwork/service/SkillGitLabService.java b/back/src/main/java/com/linkwork/service/SkillGitLabService.java new file mode 100644 index 0000000..963786b --- /dev/null +++ b/back/src/main/java/com/linkwork/service/SkillGitLabService.java @@ -0,0 +1,248 @@ +package com.linkwork.service; + +import com.linkwork.agent.skill.core.SkillClient; +import com.linkwork.agent.skill.core.SkillException; +import com.linkwork.agent.skill.core.model.CommitInfo; +import com.linkwork.agent.skill.core.model.FileNode; +import com.linkwork.agent.skill.core.model.SkillInfo; +import com.linkwork.common.ResourceNotFoundException; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Service; +import org.springframework.util.StringUtils; + +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +/** + * Skill Git provider adapter. + * + * 使用 linkwork-skill-starter 的 SkillClient 覆盖替换原有直连 GitLab RestTemplate 实现, + * 对上层 SkillService 继续提供原有 Map/List 兼容接口。 + */ +@Slf4j +@Service +public class SkillGitLabService { + + private final SkillClient skillClient; + + public SkillGitLabService(SkillClient skillClient) { + this.skillClient = skillClient; + } + + /** + * 返回兼容 GitLab branch API 结构。 + */ + public List> listBranches() { + List skills = skillClient.listSkills(); + List> branches = new ArrayList<>(); + for (SkillInfo skill : skills) { + Map branch = new LinkedHashMap<>(); + branch.put("name", skill.name()); + String commitId = skill.lastCommitId(); + if (StringUtils.hasText(commitId)) { + branch.put("id", commitId); + branch.put("commit_id", commitId); + branch.put("commit", Map.of("id", commitId)); + } + branches.add(branch); + } + return branches; + } + + /** + * 返回兼容 GitLab branch detail 结构。 + */ + public Map getBranchInfo(String branchName) { + String commitId = null; + try { + commitId = skillClient.getHeadCommitId(branchName); + } 
catch (RuntimeException ex) { + // 兼容 resolveDefaultBranch: main/master 在目录模型下可能并非真实 skill。 + if (!"main".equals(branchName) && !"master".equals(branchName)) { + throw new ResourceNotFoundException("Branch not found: " + branchName); + } + } + + Map branch = new LinkedHashMap<>(); + branch.put("name", branchName); + if (StringUtils.hasText(commitId)) { + branch.put("id", commitId); + branch.put("commit_id", commitId); + branch.put("commit", Map.of("id", commitId)); + } + return branch; + } + + public String getFileContent(String branchName, String filePath) { + try { + return skillClient.getFile(branchName, filePath); + } catch (RuntimeException ex) { + throw mapNotFound(ex, "File not found: " + filePath + " on branch " + branchName); + } + } + + /** + * 返回兼容 GitLab repository tree 结构。 + */ + public List> getTree(String branchName) { + List nodes = skillClient.getTree(branchName); + List> tree = new ArrayList<>(); + for (FileNode node : nodes) { + Map item = new LinkedHashMap<>(); + item.put("id", node.sha()); + item.put("name", node.name()); + item.put("path", node.path()); + item.put("type", node.type() == FileNode.NodeType.DIRECTORY ? "tree" : "blob"); + item.put("mode", node.type() == FileNode.NodeType.DIRECTORY ? 
"040000" : "100644"); + if (node.size() != null) { + item.put("size", node.size()); + } + tree.add(item); + } + return tree; + } + + /** + * 兼容旧接口:lastCommitId 用于乐观锁校验。 + */ + public Map commitFile(String branchName, String filePath, String content, + String commitMessage, String lastCommitId) { + if (StringUtils.hasText(lastCommitId)) { + String current = skillClient.getHeadCommitId(branchName); + if (StringUtils.hasText(current) && !lastCommitId.equals(current)) { + throw new IllegalStateException("Commit conflict: expected " + lastCommitId + " but current is " + current); + } + } + + CommitInfo commit = skillClient.upsertFile(branchName, filePath, content, commitMessage); + return commitToMap(commit); + } + + public Map createFile(String branchName, String filePath, String content, String commitMessage) { + CommitInfo commit = skillClient.upsertFile(branchName, filePath, content, commitMessage); + return commitToMap(commit); + } + + /** + * 兼容旧分支模型:通过扩展能力创建 skill 工作区。 + */ + public Map createBranch(String branchName, String ref) { + if (!skillClient.supportsExtendedOps()) { + return Map.of("name", branchName); + } + CommitInfo commit = skillClient.createSkillBranch(branchName, ref); + Map result = new LinkedHashMap<>(); + result.put("name", branchName); + if (commit != null && StringUtils.hasText(commit.id())) { + result.put("id", commit.id()); + result.put("commit", Map.of("id", commit.id())); + } + return result; + } + + public void deleteBranch(String branchName) { + if (!skillClient.supportsExtendedOps()) { + return; + } + skillClient.deleteSkillBranch(branchName); + } + + /** + * 返回兼容 GitLab commit 列表结构。 + */ + public List> getCommitHistory(String branchName) { + List commits = skillClient.listCommits(branchName, 1, 50); + List> result = new ArrayList<>(); + for (CommitInfo commit : commits) { + Map item = new LinkedHashMap<>(); + item.put("id", commit.id()); + item.put("short_id", shortSha(commit.id())); + item.put("title", commit.title()); + 
item.put("message", commit.message()); + item.put("author_name", commit.authorName()); + item.put("author_email", ""); + item.put("created_at", toIso(commit.authoredAt())); + item.put("web_url", commit.webUrl()); + result.add(item); + } + return result; + } + + /** + * 新增:显式 skillName,避免仅靠 commitSha 反查 skill 的不确定性。 + */ + public String getFileAtCommit(String skillName, String commitSha, String filePath) { + try { + return skillClient.getFileAtCommit(skillName, filePath, commitSha); + } catch (RuntimeException ex) { + throw mapNotFound(ex, "File not found: " + filePath + " at commit " + commitSha); + } + } + + /** + * 兼容旧签名:根据 commitSha 在技能列表中顺序尝试。 + */ + public String getFileAtCommit(String commitSha, String filePath) { + List skills; + try { + skills = skillClient.listSkills(); + } catch (RuntimeException ex) { + skills = Collections.emptyList(); + } + + for (SkillInfo skill : skills) { + try { + return skillClient.getFileAtCommit(skill.name(), filePath, commitSha); + } catch (RuntimeException ignored) { + } + } + throw new ResourceNotFoundException("File not found: " + filePath + " at commit " + commitSha); + } + + private Map commitToMap(CommitInfo commit) { + if (commit == null) { + return new LinkedHashMap<>(); + } + Map mapped = new LinkedHashMap<>(); + mapped.put("id", commit.id()); + mapped.put("commit_id", commit.id()); + mapped.put("short_id", shortSha(commit.id())); + mapped.put("title", commit.title()); + mapped.put("message", commit.message()); + mapped.put("author_name", commit.authorName()); + mapped.put("author_email", ""); + mapped.put("created_at", toIso(commit.authoredAt())); + mapped.put("web_url", commit.webUrl()); + mapped.put("commit", Map.of( + "id", commit.id() == null ? "" : commit.id(), + "message", commit.message() == null ? 
"" : commit.message() + )); + return mapped; + } + + private String shortSha(String sha) { + if (!StringUtils.hasText(sha)) { + return null; + } + return sha.substring(0, Math.min(8, sha.length())); + } + + private String toIso(Instant instant) { + return instant == null ? null : instant.toString(); + } + + private RuntimeException mapNotFound(RuntimeException ex, String message) { + String lower = ex.getMessage() == null ? "" : ex.getMessage().toLowerCase(); + if (lower.contains("404") || lower.contains("not found")) { + return new ResourceNotFoundException(message); + } + if (ex instanceof SkillException) { + return new RuntimeException(ex.getMessage(), ex); + } + return ex; + } +} diff --git a/back/src/main/java/com/linkwork/service/SkillService.java b/back/src/main/java/com/linkwork/service/SkillService.java new file mode 100644 index 0000000..0936167 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/SkillService.java @@ -0,0 +1,708 @@ +package com.linkwork.service; + +import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper; +import com.baomidou.mybatisplus.extension.plugins.pagination.Page; +import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl; +import com.linkwork.common.ForbiddenOperationException; +import com.linkwork.common.ResourceNotFoundException; +import com.linkwork.mapper.SkillMapper; +import com.linkwork.model.entity.SkillEntity; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Service; +import org.springframework.util.StringUtils; + +import java.time.LocalDateTime; +import java.util.*; +import java.util.regex.*; +import java.util.stream.Collectors; + +/** + * 技能 Service - 支持 Git 同步 + */ +@Slf4j +@Service +public class SkillService extends ServiceImpl { + + private static final Set SUPPORTED_SKILL_STATUSES = Set.of("draft", "ready", "disabled"); + private final SkillGitLabService skillGitLabService; + private final AdminAccessService adminAccessService; + + public 
SkillService(SkillGitLabService skillGitLabService, AdminAccessService adminAccessService) { + this.skillGitLabService = skillGitLabService; + this.adminAccessService = adminAccessService; + } + + // ==================== Git Sync Methods ==================== + + /** + * 从 Git 同步所有技能分支 + * + * @return 同步的技能数量 + */ + public int syncAllFromGit() { + List> branches = skillGitLabService.listBranches(); + int syncedCount = 0; + + // Collect all branch names for later disabling orphaned records + Set gitBranchNames = new HashSet<>(); + + for (Map branch : branches) { + String branchName = (String) branch.get("name"); + if (branchName == null) { + continue; + } + // Skip the default branch (main/master) + if ("main".equals(branchName) || "master".equals(branchName)) { + continue; + } + gitBranchNames.add(branchName); + + try { + String fileContent = skillGitLabService.getFileContent(branchName, "SKILL.md"); + Map frontmatter = parseFrontmatter(fileContent); + + String skillName = frontmatter.getOrDefault("name", branchName); + String displayName = frontmatter.getOrDefault("displayName", skillName); + String description = frontmatter.getOrDefault("description", ""); + + // Extract latest commit from branch info + String latestCommit = extractCommitSha(branch); + + // Find existing entity by branchName + SkillEntity entity = findByBranchName(branchName); + + if (entity != null) { + // Update existing + entity.setName(skillName); + entity.setDisplayName(displayName); + entity.setDescription(description); + entity.setImplementation(truncateContent(fileContent)); + entity.setLatestCommit(latestCommit); + entity.setLastSyncedAt(LocalDateTime.now()); + entity.setStatus("ready"); + this.updateById(entity); + } else { + // Create new + entity = new SkillEntity(); + entity.setSkillNo("SKL-" + System.currentTimeMillis()); + entity.setName(skillName); + entity.setDisplayName(displayName); + entity.setDescription(description); + entity.setImplementation(truncateContent(fileContent)); + 
entity.setBranchName(branchName); + entity.setLatestCommit(latestCommit); + entity.setLastSyncedAt(LocalDateTime.now()); + entity.setStatus("ready"); + // Git 全量同步导入的历史技能默认公开,避免迁移后不可见 + entity.setIsPublic(true); + this.save(entity); + } + + syncedCount++; + log.debug("Synced skill from branch: {}", branchName); + } catch (Exception e) { + log.warn("Failed to sync branch {}: {}", branchName, e.getMessage()); + } + } + + // Disable DB records whose branchName is not in the Git branches list + disableOrphanedSkills(gitBranchNames); + + log.info("Synced {} skills from Git ({} branches total)", syncedCount, gitBranchNames.size()); + return syncedCount; + } + + /** + * 同步单个技能 + */ + public SkillEntity syncSingle(String skillName) { + Map branchInfo = skillGitLabService.getBranchInfo(skillName); + String latestCommit = extractCommitSha(branchInfo); + + String fileContent = skillGitLabService.getFileContent(skillName, "SKILL.md"); + Map frontmatter = parseFrontmatter(fileContent); + + String name = frontmatter.getOrDefault("name", skillName); + String displayName = frontmatter.getOrDefault("displayName", name); + String description = frontmatter.getOrDefault("description", ""); + + SkillEntity entity = findByName(skillName); + if (entity == null) { + entity = findByBranchName(skillName); + } + + if (entity != null) { + entity.setName(name); + entity.setDisplayName(displayName); + entity.setDescription(description); + entity.setImplementation(truncateContent(fileContent)); + entity.setLatestCommit(latestCommit); + entity.setLastSyncedAt(LocalDateTime.now()); + entity.setStatus("ready"); + this.updateById(entity); + } else { + entity = new SkillEntity(); + entity.setSkillNo("SKL-" + System.currentTimeMillis()); + entity.setName(name); + entity.setDisplayName(displayName); + entity.setDescription(description); + entity.setImplementation(truncateContent(fileContent)); + entity.setBranchName(skillName); + entity.setLatestCommit(latestCommit); + 
entity.setLastSyncedAt(LocalDateTime.now()); + entity.setStatus("ready"); + entity.setIsPublic(true); + this.save(entity); + } + + log.info("Synced single skill: {}", skillName); + return entity; + } + + // ==================== CRUD Methods ==================== + + /** + * 创建技能(通过 Git 分支) + */ + public SkillEntity createSkill(String name, String description, Boolean isPublic, String userId, String userName) { + if (!StringUtils.hasText(userId)) { + throw new ForbiddenOperationException("用户未登录或登录态失效"); + } + if (!StringUtils.hasText(name)) { + throw new IllegalArgumentException("Skill 名称不能为空"); + } + // Validate name: alphanumeric + hyphens + underscores, must start with letter + if (!name.matches("^[a-zA-Z][a-zA-Z0-9_\\-]*$")) { + throw new IllegalArgumentException( + "Skill 名称只能包含英文字母、数字、连字符和下划线,且以字母开头: " + name); + } + + String skillDescription = description == null ? "" : description; + boolean publicVisible = Boolean.TRUE.equals(isPublic); + + // Create Git branch from main + // 从默认分支创建(兼容 main 和 master) + String defaultBranch = resolveDefaultBranch(); + skillGitLabService.createBranch(name, defaultBranch); + + // Generate SKILL.md template + String content = "---\n" + + "name: " + name + "\n" + + "displayName: " + name + "\n" + + "description: " + skillDescription + "\n" + + "---\n\n" + + "# " + name + "\n\n" + + skillDescription + "\n"; + + // Create SKILL.md in the new branch + Map fileResponse = skillGitLabService.createFile( + name, "SKILL.md", content, "Initialize skill: " + name); + + // Extract commit SHA from response + String latestCommit = null; + if (fileResponse != null) { + if (fileResponse.containsKey("commit_id")) { + latestCommit = (String) fileResponse.get("commit_id"); + } else if (fileResponse.containsKey("id")) { + latestCommit = (String) fileResponse.get("id"); + } + } + + // Create DB record + SkillEntity entity = new SkillEntity(); + entity.setSkillNo("SKL-" + System.currentTimeMillis()); + entity.setName(name); + 
entity.setDisplayName(name); + entity.setDescription(skillDescription); + entity.setImplementation(truncateContent(content)); + entity.setBranchName(name); + entity.setLatestCommit(latestCommit); + entity.setLastSyncedAt(LocalDateTime.now()); + entity.setStatus("ready"); + entity.setIsPublic(publicVisible); + entity.setCreatorId(userId); + entity.setCreatorName(userName); + this.save(entity); + + log.info("Created skill: {} (branch: {}) by user {}", entity.getSkillNo(), name, userId); + return entity; + } + + /** + * 删除技能(同时删除 Git 分支) + */ + public void deleteSkill(String name, String userId) { + SkillEntity entity = requireSkillForWrite(name, userId); + + // Delete Git branch + try { + skillGitLabService.deleteBranch(name); + } catch (Exception e) { + log.warn("Failed to delete Git branch for skill {}: {}", name, e.getMessage()); + } + + // Remove DB record + this.removeById(entity.getId()); + log.info("Deleted skill: {} (branch: {})", entity.getSkillNo(), name); + } + + /** + * 更新技能(直接 DB 更新,用于同步期间) + */ + public SkillEntity updateSkill(Long id, Map request, String userId, String userName) { + SkillEntity entity = this.getById(id); + if (entity == null) { + throw new ResourceNotFoundException("Skill not found: " + id); + } + + if (request.containsKey("name")) { + entity.setName((String) request.get("name")); + } + if (request.containsKey("displayName")) { + entity.setDisplayName((String) request.get("displayName")); + } + if (request.containsKey("description")) { + entity.setDescription((String) request.get("description")); + } + if (request.containsKey("implementation")) { + entity.setImplementation((String) request.get("implementation")); + } + if (request.containsKey("status")) { + entity.setStatus((String) request.get("status")); + } + + entity.setUpdaterId(userId); + entity.setUpdaterName(userName); + + this.updateById(entity); + log.info("Updated skill: {} by user {}", entity.getSkillNo(), userId); + return entity; + } + + /** + * 更新技能元数据(描述、公开性) + */ + 
public SkillEntity updateSkillMeta(String name, Map request, String userId, String userName) { + SkillEntity entity = requireSkillForWrite(name, userId); + + if (request.containsKey("description")) { + entity.setDescription((String) request.get("description")); + } + if (request.containsKey("isPublic")) { + Object value = request.get("isPublic"); + if (!(value instanceof Boolean)) { + throw new IllegalArgumentException("isPublic 必须为布尔值"); + } + entity.setIsPublic((Boolean) value); + } + if (request.containsKey("status")) { + entity.setStatus(normalizeSkillStatus(request.get("status"))); + } + + entity.setUpdaterId(userId); + entity.setUpdaterName(userName); + this.updateById(entity); + log.info("Updated skill metadata: {} by user {}", name, userId); + return entity; + } + + // ==================== Detail & File Operations ==================== + + /** + * 获取技能详情(含文件列表) + */ + public Map getSkillDetail(String name, String userId) { + SkillEntity entity = requireSkillForRead(name, userId); + + List> files = skillGitLabService.getTree(name); + + Map result = toResponseMap(entity); + result.remove("branchName"); + result.remove("lastSyncedAt"); + result.put("files", files); + return result; + } + + /** + * 获取文件内容 + */ + public String getFile(String name, String path, String userId) { + requireSkillForRead(name, userId); + return skillGitLabService.getFileContent(name, path); + } + + /** + * 获取技能最新 commit(用于前端乐观锁) + */ + public String getLatestCommitId(String name, String userId) { + // 优先从 DB 取缓存值 + SkillEntity entity = requireSkillForRead(name, userId); + if (entity != null && entity.getLatestCommit() != null) { + return entity.getLatestCommit(); + } + + // fallback: 从 GitLab 获取 + try { + Map branchInfo = skillGitLabService.getBranchInfo(name); + return extractCommitSha(branchInfo); + } catch (Exception e) { + log.warn("Failed to get latest commit for skill {}: {}", name, e.getMessage()); + return null; + } + } + + /** + * 提交文件变更 + */ + public Map commitFile(String 
name, String path, String content, + String commitMessage, String lastCommitId, String userId) { + requireSkillForWrite(name, userId); + Map response = skillGitLabService.commitFile( + name, path, content, commitMessage, lastCommitId); + + // Update DB record after successful commit + SkillEntity entity = findByName(name); + if (entity != null) { + String newCommit = null; + if (response != null) { + if (response.containsKey("id")) { + newCommit = (String) response.get("id"); + } else if (response.containsKey("commit_id")) { + newCommit = (String) response.get("commit_id"); + } + } + if (newCommit != null) { + entity.setLatestCommit(newCommit); + } + entity.setLastSyncedAt(LocalDateTime.now()); + this.updateById(entity); + } + + return response; + } + + // ==================== History & Revert ==================== + + /** + * 获取提交历史 + */ + public List> getHistory(String name, String userId) { + requireSkillForRead(name, userId); + List> raw = skillGitLabService.getCommitHistory(name); + return raw.stream().map(commit -> { + Map mapped = new LinkedHashMap<>(); + mapped.put("sha", commit.get("id")); + mapped.put("shortSha", commit.get("short_id")); + mapped.put("message", commit.get("message")); + mapped.put("authorName", commit.get("author_name")); + mapped.put("authorEmail", commit.get("author_email")); + mapped.put("createdAt", commit.get("created_at")); + return mapped; + }).collect(Collectors.toList()); + } + + /** + * 回退到指定 commit + */ + public void revertToCommit(String name, String commitSha, String userId) { + requireSkillForWrite(name, userId); + // Get SKILL.md content at the target commit + String oldContent = skillGitLabService.getFileAtCommit(name, commitSha, "SKILL.md"); + + // Get current branch info for lastCommitId + Map branchInfo = skillGitLabService.getBranchInfo(name); + String currentCommit = extractCommitSha(branchInfo); + + // Commit the old content as a new commit (revert) + String revertMessage = "Revert to " + commitSha.substring(0, 
Math.min(8, commitSha.length())); + skillGitLabService.commitFile(name, "SKILL.md", oldContent, revertMessage, currentCommit); + + // Sync to update DB + syncSingle(name); + log.info("Reverted skill {} to commit {}", name, commitSha.substring(0, Math.min(8, commitSha.length()))); + } + + // ==================== List Methods ==================== + + /** + * 获取技能列表(分页) + */ + public Map listSkills(int page, int pageSize, String status, String keyword, String userId) { + Page pageObj = new Page<>(page, pageSize); + + LambdaQueryWrapper wrapper = new LambdaQueryWrapper<>(); + applyVisibilityFilter(wrapper, userId); + if (StringUtils.hasText(status)) { + wrapper.eq(SkillEntity::getStatus, status); + } + if (StringUtils.hasText(keyword)) { + wrapper.and(w -> w.like(SkillEntity::getName, keyword) + .or().like(SkillEntity::getDisplayName, keyword) + .or().like(SkillEntity::getDescription, keyword)); + } + wrapper.orderByDesc(SkillEntity::getCreatedAt); + + Page result = this.page(pageObj, wrapper); + + List> items = result.getRecords().stream() + .map(this::toResponseMap) + .collect(Collectors.toList()); + + Map response = new HashMap<>(); + response.put("items", items); + response.put("pagination", Map.of( + "page", result.getCurrent(), + "pageSize", result.getSize(), + "total", result.getTotal(), + "totalPages", result.getPages() + )); + return response; + } + + /** + * 获取所有可用的技能(用于下拉选择 - 向后兼容) + */ + public List> listAllAvailable(String userId) { + LambdaQueryWrapper wrapper = new LambdaQueryWrapper<>(); + applyVisibilityFilter(wrapper, userId); + wrapper.eq(SkillEntity::getStatus, "ready"); + wrapper.orderByDesc(SkillEntity::getCreatedAt); + + return this.list(wrapper).stream() + .map(this::toSimpleMap) + .collect(Collectors.toList()); + } + + // ==================== Helper Methods ==================== + + /** + * 解析仓库默认分支(兼容 main / master) + */ + private String resolveDefaultBranch() { + try { + List> branches = skillGitLabService.listBranches(); + // listBranches 过滤了 
main/master,所以直接查一下 + } catch (Exception e) { + // ignore + } + // 先尝试 main,失败再用 master + try { + skillGitLabService.getBranchInfo("main"); + return "main"; + } catch (Exception e) { + return "master"; + } + } + + /** + * 可见性过滤:自己创建 or 公开 + */ + private void applyVisibilityFilter(LambdaQueryWrapper wrapper, String userId) { + if (StringUtils.hasText(userId)) { + if (adminAccessService.isAdmin(userId)) { + return; + } + wrapper.and(w -> w.eq(SkillEntity::getCreatorId, userId) + .or().eq(SkillEntity::getIsPublic, true)); + return; + } + // 未登录仅可见公开资源 + wrapper.eq(SkillEntity::getIsPublic, true); + } + + private SkillEntity requireSkillForRead(String name, String userId) { + SkillEntity entity = findByName(name); + if (entity == null) { + throw new ResourceNotFoundException("Skill not found: " + name); + } + if (!canRead(entity, userId)) { + throw new ForbiddenOperationException("无权限访问该 Skill"); + } + return entity; + } + + private SkillEntity requireSkillForWrite(String name, String userId) { + SkillEntity entity = findByName(name); + if (entity == null) { + throw new ResourceNotFoundException("Skill not found: " + name); + } + if (!canWrite(entity, userId)) { + throw new ForbiddenOperationException("仅 Skill 创建者或管理员可执行该操作"); + } + return entity; + } + + private boolean canRead(SkillEntity entity, String userId) { + return adminAccessService.isAdmin(userId) + || Boolean.TRUE.equals(entity.getIsPublic()) + || isOwner(entity, userId); + } + + private boolean isOwner(SkillEntity entity, String userId) { + return StringUtils.hasText(userId) && userId.equals(entity.getCreatorId()); + } + + private boolean canWrite(SkillEntity entity, String userId) { + return adminAccessService.isAdmin(userId) || isOwner(entity, userId); + } + + private String normalizeSkillStatus(Object rawStatus) { + if (rawStatus == null || !StringUtils.hasText(String.valueOf(rawStatus))) { + throw new IllegalArgumentException("status 不能为空"); + } + String normalized = 
String.valueOf(rawStatus).trim().toLowerCase(Locale.ROOT); + if (!SUPPORTED_SKILL_STATUSES.contains(normalized)) { + throw new IllegalArgumentException("非法 Skill 状态: " + rawStatus + ",仅支持 draft/ready/disabled"); + } + return normalized; + } + + /** + * 解析 YAML frontmatter(--- 标记之间的内容) + */ + private Map parseFrontmatter(String content) { + Map result = new HashMap<>(); + if (content == null) { + return result; + } + + Pattern pattern = Pattern.compile("^---\\s*\\n(.*?)\\n---", Pattern.DOTALL); + Matcher matcher = pattern.matcher(content); + if (matcher.find()) { + String yaml = matcher.group(1); + for (String line : yaml.split("\\n")) { + line = line.trim(); + if (line.isEmpty() || !line.contains(":")) { + continue; + } + int colonIdx = line.indexOf(':'); + String key = line.substring(0, colonIdx).trim(); + String value = line.substring(colonIdx + 1).trim(); + // Remove surrounding quotes if present + if (value.length() >= 2 + && ((value.startsWith("\"") && value.endsWith("\"")) + || (value.startsWith("'") && value.endsWith("'")))) { + value = value.substring(1, value.length() - 1); + } + result.put(key, value); + } + } + + return result; + } + + /** + * 按 name 查找技能 + */ + private SkillEntity findByName(String name) { + LambdaQueryWrapper wrapper = new LambdaQueryWrapper<>(); + wrapper.eq(SkillEntity::getName, name); + return this.getOne(wrapper, false); + } + + /** + * 按 branchName 查找技能 + */ + private SkillEntity findByBranchName(String branchName) { + LambdaQueryWrapper wrapper = new LambdaQueryWrapper<>(); + wrapper.eq(SkillEntity::getBranchName, branchName); + return this.getOne(wrapper, false); + } + + /** + * 从分支信息中提取 commit SHA + */ + private String extractCommitSha(Map branchInfo) { + if (branchInfo == null) { + return null; + } + // GitLab branch API returns commit info nested under "commit" + Object commitObj = branchInfo.get("commit"); + if (commitObj instanceof Map) { + @SuppressWarnings("unchecked") + Map commitMap = (Map) commitObj; + return (String) 
commitMap.get("id"); + } + // Fallback: direct "id" or "commit_id" field + if (branchInfo.containsKey("commit_id")) { + return (String) branchInfo.get("commit_id"); + } + return (String) branchInfo.get("id"); + } + + /** + * 截断内容作为预览 + */ + private String truncateContent(String content) { + if (content == null) { + return null; + } + int maxLength = 2000; + if (content.length() <= maxLength) { + return content; + } + return content.substring(0, maxLength) + "..."; + } + + /** + * 禁用不在 Git 分支列表中的技能 + */ + private void disableOrphanedSkills(Set gitBranchNames) { + LambdaQueryWrapper wrapper = new LambdaQueryWrapper<>(); + wrapper.isNotNull(SkillEntity::getBranchName); + wrapper.ne(SkillEntity::getStatus, "disabled"); + + List allWithBranch = this.list(wrapper); + for (SkillEntity entity : allWithBranch) { + if (entity.getBranchName() != null && !gitBranchNames.contains(entity.getBranchName())) { + entity.setStatus("disabled"); + this.updateById(entity); + log.info("Disabled orphaned skill: {} (branch: {})", entity.getName(), entity.getBranchName()); + } + } + } + + /** + * 转换为响应 Map(含完整字段) + */ + private Map toResponseMap(SkillEntity entity) { + Map map = new HashMap<>(); + map.put("id", entity.getId().toString()); + map.put("skillNo", entity.getSkillNo()); + map.put("name", entity.getName()); + map.put("displayName", entity.getDisplayName()); + map.put("description", entity.getDescription()); + map.put("implementation", entity.getImplementation()); + map.put("status", entity.getStatus()); + map.put("isPublic", entity.getIsPublic()); + map.put("branchName", entity.getBranchName()); + map.put("latestCommit", entity.getLatestCommit()); + map.put("lastSyncedAt", formatDateTime(entity.getLastSyncedAt())); + map.put("creatorId", entity.getCreatorId()); + map.put("creatorName", entity.getCreatorName()); + map.put("createdAt", formatDateTime(entity.getCreatedAt())); + map.put("updatedAt", formatDateTime(entity.getUpdatedAt())); + return map; + } + + /** + * 转换为简单 
Map(用于下拉选择) + */ + private Map toSimpleMap(SkillEntity entity) { + Map map = new HashMap<>(); + map.put("id", entity.getId().toString()); + map.put("name", entity.getName()); + map.put("displayName", entity.getDisplayName()); + map.put("description", entity.getDescription()); + map.put("status", entity.getStatus()); + map.put("isPublic", entity.getIsPublic()); + map.put("skillNo", entity.getSkillNo()); + map.put("createdAt", formatDateTime(entity.getCreatedAt())); + map.put("updatedAt", formatDateTime(entity.getUpdatedAt())); + return map; + } + + private String formatDateTime(LocalDateTime value) { + return value == null ? null : value.toString(); + } +} diff --git a/back/src/main/java/com/linkwork/service/SnapshotSyncTask.java b/back/src/main/java/com/linkwork/service/SnapshotSyncTask.java new file mode 100644 index 0000000..041ad49 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/SnapshotSyncTask.java @@ -0,0 +1,233 @@ +package com.linkwork.service; + +import com.linkwork.config.EnvConfig; +import com.linkwork.model.dto.ServiceSnapshot; +import io.fabric8.kubernetes.api.model.Pod; +import io.fabric8.kubernetes.client.KubernetesClient; +import jakarta.annotation.PostConstruct; +import lombok.extern.slf4j.Slf4j; +import org.springframework.scheduling.annotation.Scheduled; +import org.springframework.stereotype.Component; + +import java.util.*; +import java.util.stream.Collectors; + +/** + * Snapshot 与 K8s 状态定期同步任务 + * + * 包含两个方向的同步: + * 1. 正向同步:已有 Snapshot → 从 K8s 校验/更新 Pod 状态 + * 2. 
反向发现:扫描 K8s 中运行的服务 → 为缺失 Snapshot 的服务自动重建 + * (解决后端重启后 Snapshot 丢失导致 scale-down/stop 等操作失败的问题) + */ +@Component +@Slf4j +public class SnapshotSyncTask { + + private final ServiceSnapshotService snapshotService; + private final K8sOrchestrator k8sOrchestrator; + private final KubernetesClient kubernetesClient; + private final EnvConfig envConfig; + + public SnapshotSyncTask(ServiceSnapshotService snapshotService, + K8sOrchestrator k8sOrchestrator, + KubernetesClient kubernetesClient, + EnvConfig envConfig) { + this.snapshotService = snapshotService; + this.k8sOrchestrator = k8sOrchestrator; + this.kubernetesClient = kubernetesClient; + this.envConfig = envConfig; + } + + /** + * 启动时立即执行一次反向发现,恢复所有 K8s 中运行的服务 + */ + @PostConstruct + public void onStartup() { + try { + log.info("SnapshotSyncTask startup: discovering running services from K8s..."); + int rebuilt = discoverAndRebuildSnapshots(); + if (rebuilt > 0) { + log.info("Startup discovery complete: rebuilt {} snapshots from K8s", rebuilt); + } else { + log.info("Startup discovery complete: no orphan services found in K8s"); + } + } catch (Exception e) { + log.error("Startup snapshot discovery failed (non-fatal): {}", e.getMessage(), e); + } + } + + @Scheduled(fixedRate = 60000) + public void syncSnapshotWithK8s() { + // 1. 反向发现:为 K8s 中存在但内存中没有 Snapshot 的服务重建 + try { + discoverAndRebuildSnapshots(); + } catch (Exception e) { + log.error("Snapshot reverse discovery failed: {}", e.getMessage()); + } + + // 2. 
正向同步:已有 Snapshot → 校验 K8s 实际状态 + List snapshots = snapshotService.findAllActive(); + + if (snapshots.isEmpty()) { + return; + } + + log.debug("Starting snapshot sync task, checking {} active services", snapshots.size()); + + for (ServiceSnapshot snapshot : snapshots) { + try { + syncSingleService(snapshot); + } catch (Exception e) { + log.error("Failed to sync snapshot for service {}: {}", + snapshot.getServiceId(), e.getMessage()); + } + } + } + + /** + * 反向发现:扫描 K8s namespace 中所有运行的服务, + * 为没有 Snapshot 的服务从 Pod label/spec 中重建 Snapshot + * + * @return 本次重建的 Snapshot 数量 + */ + private int discoverAndRebuildSnapshots() { + List allServiceIds = k8sOrchestrator.listAllServiceIds(); + + if (allServiceIds.isEmpty()) { + return 0; + } + + int rebuiltCount = 0; + String namespace = envConfig.getCluster().getNamespace(); + + for (String serviceId : allServiceIds) { + if (snapshotService.hasSnapshot(serviceId)) { + continue; + } + + // 内存中没有这个 Snapshot,需要从 K8s 重建 + try { + rebuiltCount += rebuildSnapshotForService(serviceId, namespace) ? 1 : 0; + } catch (Exception e) { + log.error("Failed to rebuild snapshot for orphan service {}: {}", + serviceId, e.getMessage()); + } + } + + return rebuiltCount; + } + + /** + * 从 K8s Pod 信息重建单个服务的 Snapshot + */ + private boolean rebuildSnapshotForService(String serviceId, String namespace) { + List pods = kubernetesClient.pods() + .inNamespace(namespace) + .withLabel("service-id", serviceId) + .list() + .getItems() + .stream() + .filter(this::isReadyPod) + .collect(Collectors.toList()); + + if (pods.isEmpty()) { + log.debug("No running pods for orphan service {}, skip rebuild", serviceId); + return false; + } + + // 从第一个 Pod 的 label 和 spec 中提取信息 + Pod firstPod = pods.get(0); + Map labels = firstPod.getMetadata().getLabels(); + + String userId = labels != null ? labels.getOrDefault("user-id", "unknown") : "unknown"; + String podMode = labels != null ? 
labels.getOrDefault("pod-mode", "sidecar") : "sidecar"; + String scheduledNode = firstPod.getSpec() != null ? firstPod.getSpec().getNodeName() : null; + + Set runningPodNames = pods.stream() + .map(pod -> pod.getMetadata().getName()) + .collect(Collectors.toCollection(HashSet::new)); + + snapshotService.rebuildFromK8s(serviceId, runningPodNames, userId, podMode, scheduledNode); + + log.warn("Rebuilt snapshot for orphan service {} from K8s: pods={}, userId={}, node={}", + serviceId, runningPodNames, userId, scheduledNode); + + return true; + } + + private void syncSingleService(ServiceSnapshot snapshot) { + String serviceId = snapshot.getServiceId(); + + List actualPodNames = k8sOrchestrator.getRunningPods(serviceId); + int actualCount = actualPodNames.size(); + int snapshotCount = snapshot.getCurrentPodCount() != null ? snapshot.getCurrentPodCount() : 0; + + if (actualCount != snapshotCount) { + log.warn("State inconsistency detected for service {}: snapshot={}, actual={}", + serviceId, snapshotCount, actualCount); + + snapshot.setCurrentPodCount(actualCount); + snapshot.setRunningPodNames(new HashSet<>(actualPodNames)); + + if (actualCount == 0 && snapshot.getShutdownAt() == null) { + snapshot.setShutdownAt(java.time.Instant.now()); + log.info("Service {} has no running pods, marked for expiration in 24 hours", serviceId); + } + + snapshotService.updateSnapshot(snapshot); + + log.info("Snapshot synced for service {}: currentPodCount={}, pods={}", + serviceId, actualCount, actualPodNames); + return; + } + + Set snapshotPodNames = snapshot.getRunningPodNames(); + if (snapshotPodNames == null) { + snapshotPodNames = new HashSet<>(); + } + + Set actualPodSet = new HashSet<>(actualPodNames); + if (!snapshotPodNames.equals(actualPodSet)) { + log.warn("Pod names mismatch for service {}: snapshot={}, actual={}", + serviceId, snapshotPodNames, actualPodSet); + + snapshot.setRunningPodNames(actualPodSet); + snapshotService.updateSnapshot(snapshot); + + log.info("Snapshot 
pod names synced for service {}: {}", serviceId, actualPodSet); + } + } + + public void manualSync(String serviceId) { + if (serviceId != null && !serviceId.isEmpty()) { + ServiceSnapshot snapshot = snapshotService.getSnapshot(serviceId); + if (snapshot != null) { + syncSingleService(snapshot); + } else { + // 手动触发时,如果 Snapshot 不存在,尝试从 K8s 重建 + String namespace = envConfig.getCluster().getNamespace(); + rebuildSnapshotForService(serviceId, namespace); + } + } else { + syncSnapshotWithK8s(); + } + } + + private boolean isReadyPod(Pod pod) { + if (pod == null || pod.getMetadata() == null || pod.getMetadata().getDeletionTimestamp() != null) { + return false; + } + if (pod.getStatus() == null || pod.getStatus().getPhase() == null) { + return false; + } + if (!"Running".equals(pod.getStatus().getPhase())) { + return false; + } + if (pod.getStatus().getConditions() == null) { + return false; + } + return pod.getStatus().getConditions().stream() + .anyMatch(condition -> "Ready".equals(condition.getType()) && "True".equals(condition.getStatus())); + } +} diff --git a/back/src/main/java/com/linkwork/service/SystemResourceMonitor.java b/back/src/main/java/com/linkwork/service/SystemResourceMonitor.java new file mode 100644 index 0000000..723f1d5 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/SystemResourceMonitor.java @@ -0,0 +1,142 @@ +package com.linkwork.service; + +import com.linkwork.model.dto.ResourceStatus; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Service; + +import java.lang.management.ManagementFactory; + +/** + * 系统资源监控服务 + * 使用 JDK 内置 OperatingSystemMXBean 获取系统 CPU 和内存使用率 + */ +@Service +@Slf4j +public class SystemResourceMonitor { + + private final com.sun.management.OperatingSystemMXBean osBean; + + public SystemResourceMonitor() { + this.osBean = (com.sun.management.OperatingSystemMXBean) + ManagementFactory.getOperatingSystemMXBean(); + log.info("SystemResourceMonitor initialized"); + } + + /** + * 获取系统 CPU 使用率 
(0.0 ~ 1.0)
     * Returns -1 when the value cannot be obtained.
     */
    public double getSystemCpuLoad() {
        double cpuLoad = osBean.getCpuLoad();
        // getCpuLoad() may return a negative value when the metric is unavailable
        if (cpuLoad < 0) {
            // Fall back to the system load average
            double loadAvg = osBean.getSystemLoadAverage();
            if (loadAvg >= 0) {
                // Convert load average to a usage ratio (scaled by processor count)
                int processors = osBean.getAvailableProcessors();
                cpuLoad = Math.min(loadAvg / processors, 1.0);
            } else {
                return -1;
            }
        }
        return cpuLoad;
    }

    /**
     * System memory usage (0.0 ~ 1.0).
     * Prefers MemAvailable from /proc/meminfo (more accurate on Linux, since it
     * accounts for reclaimable caches); falls back to the JDK API elsewhere.
     * Returns -1 when total memory cannot be determined.
     */
    public double getMemoryUsage() {
        long total = osBean.getTotalMemorySize();
        if (total <= 0) {
            return -1;
        }

        // Try MemAvailable from /proc/meminfo (Linux-specific, more accurate)
        try {
            java.nio.file.Path meminfo = java.nio.file.Paths.get("/proc/meminfo");
            if (java.nio.file.Files.exists(meminfo)) {
                for (String line : java.nio.file.Files.readAllLines(meminfo)) {
                    if (line.startsWith("MemAvailable:")) {
                        // Line format: "MemAvailable:   43681980 kB"
                        String[] parts = line.split("\\s+");
                        if (parts.length >= 2) {
                            long availableKb = Long.parseLong(parts[1]);
                            long available = availableKb * 1024;
                            return (double) (total - available) / total;
                        }
                    }
                }
            }
        } catch (Exception e) {
            log.debug("Failed to read /proc/meminfo: {}", e.getMessage());
        }

        // Fall back to the JDK API (non-Linux, or when the read failed)
        long free = osBean.getFreeMemorySize();
        return (double) (total - free) / total;
    }

    /**
     * Free physical memory, in bytes.
     */
    public long getFreeMemory() {
        return osBean.getFreeMemorySize();
    }

    /**
     * Total physical memory, in bytes.
     */
    public long getTotalMemory() {
        return osBean.getTotalMemorySize();
    }

    /**
     * Number of processors available to the JVM.
     */
    public int getAvailableProcessors() {
        return osBean.getAvailableProcessors();
    }

    /**
     * Check whether there is enough headroom to run a new build task.
     *
     * @param cpuThreshold    CPU usage threshold (0.0 ~ 1.0)
     * @param memoryThreshold memory usage threshold (0.0 ~ 1.0)
     * @return true if resources are sufficient (also true when metrics are
     *         unavailable, to avoid blocking work in container environments)
     */
    public boolean hasAvailableResources(double cpuThreshold, double memoryThreshold) {
        double cpu = getSystemCpuLoad();
        double memory = getMemoryUsage();

        // If metrics are unavailable, default to allowing execution
        // (containers may not report accurate host values)
        if (cpu < 0 || memory < 0) {
            log.warn("无法获取系统资源信息: CPU={}, Memory={},默认允许执行", cpu, memory);
            return true;
        }

        boolean available = cpu < cpuThreshold && memory < memoryThreshold;

        if (!available) {
            log.debug("资源不足: CPU={}% (阈值{}%), 内存={}% (阈值{}%)",
                    String.format("%.1f", cpu * 100), String.format("%.1f", cpuThreshold * 100),
                    String.format("%.1f", memory * 100), String.format("%.1f", memoryThreshold * 100));
        }

        return available;
    }

    /**
     * Current resource status snapshot (for API responses).
     */
    public ResourceStatus getStatus() {
        return ResourceStatus.builder()
                .cpuUsage(getSystemCpuLoad())
                .memoryUsage(getMemoryUsage())
                .totalMemory(getTotalMemory())
                .freeMemory(getFreeMemory())
                .availableProcessors(getAvailableProcessors())
                .build();
    }
}
diff --git a/back/src/main/java/com/linkwork/service/TaskBillingUsageService.java b/back/src/main/java/com/linkwork/service/TaskBillingUsageService.java
new file mode 100644
index 0000000..b9af129
--- /dev/null
+++ b/back/src/main/java/com/linkwork/service/TaskBillingUsageService.java
@@ -0,0 +1,149 @@
package com.linkwork.service;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.web.client.RestTemplateBuilder;
import org.springframework.http.ResponseEntity;
import org.springframework.stereotype.Service;
import org.springframework.util.StringUtils;
import org.springframework.web.client.RestTemplate;

import java.math.BigDecimal;
import java.time.Duration;
import java.util.Map;
import java.util.Optional;

/**
 * Terminal-state reconciliation: pulls per-task token usage from the billing gateway.
 */
@Slf4j
@Service
@RequiredArgsConstructor
public class TaskBillingUsageService {

@Value("${robot.billing.gateway-url-template:http://ai-agent-gateway.momo.com/v1/billing/tasks/{taskId}}") + private String gatewayUrlTemplate; + + @Value("${robot.billing.timeout-ms:3000}") + private long timeoutMs; + + @Value("${robot.billing.sync-retries:3}") + private int syncRetries; + + @Value("${robot.billing.retry-interval-ms:200}") + private long retryIntervalMs; + + private final RestTemplateBuilder restTemplateBuilder; + private final ObjectMapper objectMapper; + + public Optional fetchTaskUsage(String taskId) { + if (!StringUtils.hasText(taskId)) { + return Optional.empty(); + } + String url = gatewayUrlTemplate.replace("{taskId}", taskId.trim()); + RestTemplate restTemplate = restTemplateBuilder + .setConnectTimeout(Duration.ofMillis(timeoutMs)) + .setReadTimeout(Duration.ofMillis(timeoutMs)) + .build(); + + int maxAttempts = Math.max(1, syncRetries); + Exception lastError = null; + + for (int attempt = 1; attempt <= maxAttempts; attempt++) { + try { + ResponseEntity response = restTemplate.getForEntity(url, String.class); + if (!response.getStatusCode().is2xxSuccessful()) { + throw new IllegalStateException("billing response status=" + response.getStatusCode().value()); + } + String body = response.getBody(); + if (!StringUtils.hasText(body)) { + throw new IllegalStateException("billing response empty"); + } + + Map payload = objectMapper.readValue(body, new TypeReference>() { + }); + UsageSnapshot snapshot = UsageSnapshot.from(payload); + if (snapshot.tokenUsed() == null) { + throw new IllegalStateException("billing tokenUsed missing"); + } + return Optional.of(snapshot); + } catch (Exception ex) { + lastError = ex; + if (attempt < maxAttempts && retryIntervalMs > 0) { + try { + Thread.sleep(retryIntervalMs); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + break; + } + } + } + } + + log.warn("任务计费同步失败: taskId={}, url={}, attempts={}, error={}", + taskId, url, maxAttempts, lastError == null ? 
"unknown" : lastError.getMessage()); + return Optional.empty(); + } + + public record UsageSnapshot( + Integer tokenUsed, + Integer inputTokens, + Integer outputTokens, + Integer requestCount, + Long tokenLimit, + BigDecimal usagePercent + ) { + static UsageSnapshot from(Map source) { + return new UsageSnapshot( + parseInteger(source.get("tokenUsed")), + parseInteger(source.get("inputTokens")), + parseInteger(source.get("outputTokens")), + parseInteger(source.get("requestCount")), + parseLong(source.get("tokenLimit")), + parseBigDecimal(source.get("usagePercent")) + ); + } + + private static Integer parseInteger(Object value) { + if (value == null) { + return null; + } + if (value instanceof Number number) { + return number.intValue(); + } + try { + return Integer.parseInt(String.valueOf(value).trim()); + } catch (Exception ignore) { + return null; + } + } + + private static Long parseLong(Object value) { + if (value == null) { + return null; + } + if (value instanceof Number number) { + return number.longValue(); + } + try { + return Long.parseLong(String.valueOf(value).trim()); + } catch (Exception ignore) { + return null; + } + } + + private static BigDecimal parseBigDecimal(Object value) { + if (value == null) { + return null; + } + try { + return new BigDecimal(String.valueOf(value).trim()); + } catch (Exception ignore) { + return null; + } + } + } +} diff --git a/back/src/main/java/com/linkwork/service/TaskDispatchConsumer.java b/back/src/main/java/com/linkwork/service/TaskDispatchConsumer.java new file mode 100644 index 0000000..6ca3e75 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/TaskDispatchConsumer.java @@ -0,0 +1,198 @@ +package com.linkwork.service; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.linkwork.config.DispatchConfig; +import com.linkwork.model.enums.TaskStatus; + +import java.util.Map; +import jakarta.annotation.PostConstruct; +import 
jakarta.annotation.PreDestroy; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.data.redis.core.StringRedisTemplate; +import org.springframework.stereotype.Service; + +import java.time.Duration; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * 任务调度队列消费者 + * 从 Redis 队列中消费任务,并分发给 Agent 执行器处理 + */ +@Slf4j +@Service +@RequiredArgsConstructor +public class TaskDispatchConsumer { + + private final StringRedisTemplate redisTemplate; + private final ObjectMapper objectMapper; + private final TaskService taskService; + private final DispatchConfig dispatchConfig; + + private final AtomicBoolean running = new AtomicBoolean(true); + private ExecutorService executorService; + + /** + * 队列阻塞等待超时时间(秒) + */ + private static final int QUEUE_TIMEOUT_SECONDS = 30; + + /** + * 消费前延迟时间(毫秒),便于观测队列消息 + * TODO: 生产环境设为 0 + */ + private static final int PRE_CONSUME_DELAY_MS = 5000; + + /** + * 是否启用后端消费者 + * 设为 false 时,消息留在队列供外部 momo-worker 消费 + */ + private static final boolean CONSUMER_ENABLED = false; + + @PostConstruct + public void startConsumer() { + if (!CONSUMER_ENABLED) { + log.info("后端消费者已禁用,消息将由外部 momo-worker 消费,队列: {}", dispatchConfig.getTaskQueueKey()); + return; + } + + executorService = Executors.newSingleThreadExecutor(r -> { + Thread thread = new Thread(r, "task-dispatch-consumer"); + thread.setDaemon(true); + return thread; + }); + + executorService.submit(this::consumeLoop); + log.info("任务调度消费者已启动,监听队列: {}", dispatchConfig.getTaskQueueKey()); + } + + @PreDestroy + public void stopConsumer() { + running.set(false); + if (executorService != null) { + executorService.shutdownNow(); + } + log.info("任务调度消费者已停止"); + } + + /** + * 消费循环 + */ + private void consumeLoop() { + while (running.get()) { + try { + // 每次循环开始时延迟,便于观测队列消息(生产环境应设为 0) + if (PRE_CONSUME_DELAY_MS > 0) { + Thread.sleep(PRE_CONSUME_DELAY_MS); + } + + // 
非阻塞弹出,便于延迟生效 + String messageJson = redisTemplate.opsForList().rightPop(dispatchConfig.getTaskQueueKey()); + + if (messageJson != null) { + log.info("从队列取出消息,开始处理..."); + processTask(messageJson); + } + } catch (Exception e) { + if (running.get()) { + log.error("消费任务时发生异常", e); + // 避免异常风暴,等待一小段时间后重试 + try { + Thread.sleep(1000); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + break; + } + } + } + } + } + + /** + * 处理单个任务 + * 消息格式: {"task_id": "xxx", "content": "xxx"} + */ + private void processTask(String messageJson) { + try { + // 解析简化的消息格式 + Map message = objectMapper.readValue( + messageJson, new TypeReference>() {}); + + String taskId = message.get("task_id"); + String content = message.get("content"); + + log.info("收到任务调度消息: task_id={}, content={}", + taskId, content != null && content.length() > 50 + ? content.substring(0, 50) + "..." : content); + + // 1. 更新任务状态为 RUNNING + updateTaskStatus(taskId, TaskStatus.RUNNING); + + // 2. 发布状态变更事件 + publishEvent(taskId, "TASK_STARTED"); + + // 3. 
TODO: 调用 Agent 执行器 (gRPC 或 HTTP) + // 这里是占位逻辑,实际应该调用 Agent 执行器 + executeTask(taskId, content); + + log.info("任务调度完成: task_id={}", taskId); + } catch (Exception e) { + log.error("处理任务消息失败: {}", messageJson, e); + } + } + + /** + * 更新任务状态 + */ + private void updateTaskStatus(String taskNo, TaskStatus status) { + try { + taskService.updateStatus(taskNo, status); + } catch (Exception e) { + log.error("更新任务状态失败: taskNo={}", taskNo, e); + } + } + + /** + * 发布任务事件到 Redis Stream + * 统一使用 stream:task:{taskNo} 格式,与 momo-worker 保持一致 + */ + private void publishEvent(String taskNo, String eventType) { + Long roleId = null; + try { + roleId = taskService.getTaskByNo(taskNo).getRoleId(); + } catch (Exception e) { + log.debug("发布事件时未找到任务 roleId,回退默认 workstation: taskNo={}", taskNo); + } + String streamKey = dispatchConfig.getLogStreamKey(roleId, taskNo); + redisTemplate.opsForStream().add(streamKey, + Map.of("event_type", eventType, "task_no", taskNo)); + log.debug("发布事件: streamKey={}, eventType={}", streamKey, eventType); + } + + /** + * 执行任务 (占位实现) + * TODO: 对接真正的 Agent 执行器 + * + * @param taskId 任务 ID + * @param content 任务内容 + */ + private void executeTask(String taskId, String content) { + log.info("准备执行任务: task_id={}, content={}", taskId, + content != null && content.length() > 100 + ? content.substring(0, 100) + "..." 
+ : content); + + // TODO: 实际调用 Agent 执行器 + // agentExecutor.execute(taskId, content); + } + + /** + * 获取当前队列长度(用于监控) + */ + public Long getQueueLength() { + return redisTemplate.opsForList().size(dispatchConfig.getTaskQueueKey()); + } +} diff --git a/back/src/main/java/com/linkwork/service/TaskEventBroadcaster.java b/back/src/main/java/com/linkwork/service/TaskEventBroadcaster.java new file mode 100644 index 0000000..a6996cb --- /dev/null +++ b/back/src/main/java/com/linkwork/service/TaskEventBroadcaster.java @@ -0,0 +1,55 @@ +package com.linkwork.service; + +import lombok.extern.slf4j.Slf4j; +import org.springframework.data.redis.connection.stream.MapRecord; +import org.springframework.stereotype.Component; + +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; + +/** + * 进程内任务事件广播器。 + * + * 事件消费和 WebSocket 推送解耦: + * - 后端常驻消费者只负责消费并广播 + * - WebSocket handler 只负责订阅并推送给在线客户端 + */ +@Slf4j +@Component +public class TaskEventBroadcaster { + + @FunctionalInterface + public interface TaskEventListener { + void onEvent(String taskNo, MapRecord record); + } + + private final Map listeners = new ConcurrentHashMap<>(); + + public String register(TaskEventListener listener) { + String listenerId = UUID.randomUUID().toString(); + listeners.put(listenerId, listener); + return listenerId; + } + + public void unregister(String listenerId) { + if (listenerId == null || listenerId.isBlank()) { + return; + } + listeners.remove(listenerId); + } + + public void broadcast(String taskNo, MapRecord record) { + if (taskNo == null || taskNo.isBlank() || record == null || listeners.isEmpty()) { + return; + } + listeners.forEach((listenerId, listener) -> { + try { + listener.onEvent(taskNo, record); + } catch (Exception e) { + log.warn("TaskEventBroadcaster listener failed: listenerId={}, taskNo={}, err={}", + listenerId, taskNo, e.getMessage(), e); + } + }); + } +} diff --git a/back/src/main/java/com/linkwork/service/TaskEventConsumerService.java 
b/back/src/main/java/com/linkwork/service/TaskEventConsumerService.java new file mode 100644 index 0000000..dd957f0 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/TaskEventConsumerService.java @@ -0,0 +1,291 @@ +package com.linkwork.service; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.linkwork.config.DispatchConfig; +import com.linkwork.model.entity.Task; +import com.linkwork.model.enums.TaskStatus; +import com.baomidou.mybatisplus.extension.plugins.pagination.Page; +import jakarta.annotation.PostConstruct; +import jakarta.annotation.PreDestroy; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.data.redis.connection.stream.*; +import org.springframework.data.redis.core.StringRedisTemplate; +import org.springframework.scheduling.annotation.Scheduled; +import org.springframework.stereotype.Service; + +import java.time.Duration; +import java.time.LocalDateTime; +import java.time.ZoneOffset; +import java.util.*; +import java.util.concurrent.*; + +/** + * 常驻任务事件消费者。 + * + * 职责: + * 1) 消费 Redis Stream 任务事件(不依赖 WS 在线) + * 2) 同步任务状态 + * 3) 同步 TASK_OUTPUT_PATHLIST_READY 到文件索引 + * 4) 广播事件给 WebSocket 层做实时展示 + */ +@Slf4j +@Service +@RequiredArgsConstructor +public class TaskEventConsumerService { + + private static final String CONSUMER_GROUP = "backend-core"; + private static final int SCAN_PAGE_SIZE = 200; + private static final long LISTENER_RETAIN_MS = 300_000L; + + private final StringRedisTemplate redisTemplate; + private final TaskService taskService; + private final DispatchConfig dispatchConfig; + private final TaskStatusSyncService taskStatusSyncService; + private final TaskPathlistSyncService taskPathlistSyncService; + private final TaskEventBroadcaster taskEventBroadcaster; + + private final ObjectMapper objectMapper = new ObjectMapper(); + private final ExecutorService workerPool = Executors.newCachedThreadPool(); + private final Map listeners = new ConcurrentHashMap<>(); + + 
@PostConstruct + public void init() { + discoverAndMaintainListeners(); + } + + @PreDestroy + public void shutdown() { + listeners.values().forEach(listener -> listener.future().cancel(true)); + workerPool.shutdownNow(); + } + + @Scheduled(fixedDelayString = "${robot.task-event-consumer.scan-interval-ms:5000}") + public void discoverAndMaintainListeners() { + try { + long now = System.currentTimeMillis(); + Set activeTasks = new HashSet<>(); + + for (TaskStatus status : List.of(TaskStatus.PENDING, TaskStatus.RUNNING, TaskStatus.PENDING_AUTH)) { + long current = 1; + while (true) { + Page page = taskService.listTasks(null, status.name(), (int) current, SCAN_PAGE_SIZE); + List records = page.getRecords(); + if (records == null || records.isEmpty()) { + break; + } + for (Task task : records) { + String taskNo = task.getTaskNo(); + if (taskNo == null || taskNo.isBlank()) { + continue; + } + activeTasks.add(taskNo); + startListenerIfAbsent(task); + ListenerState existed = listeners.get(taskNo); + if (existed != null) { + existed.touch(now); + } + } + if (current >= page.getPages()) { + break; + } + current++; + } + } + + // 避免“短任务在一次扫描周期内已终态”导致漏消费: + // 额外追踪最近窗口内到达终态的任务,保留监听一段时间接收尾部事件。 + for (TaskStatus terminalStatus : List.of(TaskStatus.COMPLETED, TaskStatus.FAILED, TaskStatus.ABORTED)) { + Page page = taskService.listTasks(null, terminalStatus.name(), 1, SCAN_PAGE_SIZE); + List records = page.getRecords(); + if (records == null || records.isEmpty()) { + continue; + } + for (Task task : records) { + if (!isRecentlyUpdated(task, now, LISTENER_RETAIN_MS)) { + continue; + } + startListenerIfAbsent(task); + ListenerState existed = listeners.get(task.getTaskNo()); + if (existed != null) { + existed.touch(now); + } + } + } + + listeners.forEach((taskNo, state) -> { + if (activeTasks.contains(taskNo)) { + return; + } + if (now - state.lastActiveAt() < LISTENER_RETAIN_MS) { + return; + } + try { + Task task = taskService.getTaskByNo(taskNo); + if (task.getStatus() == 
TaskStatus.PENDING + || task.getStatus() == TaskStatus.RUNNING + || task.getStatus() == TaskStatus.PENDING_AUTH) { + state.touch(now); + return; + } + } catch (Exception e) { + log.debug("listener cleanup skip taskNo={} because task lookup failed: {}", taskNo, e.getMessage()); + } + + state.future().cancel(true); + listeners.remove(taskNo); + log.info("task event listener removed: taskNo={}", taskNo); + }); + } catch (Exception e) { + log.error("discover task listeners failed: {}", e.getMessage(), e); + } + } + + private boolean isRecentlyUpdated(Task task, long nowMillis, long thresholdMillis) { + LocalDateTime updatedAt = task.getUpdatedAt(); + if (updatedAt == null) { + return false; + } + long updatedMillis = updatedAt.toInstant(ZoneOffset.UTC).toEpochMilli(); + return nowMillis - updatedMillis <= thresholdMillis; + } + + private void startListenerIfAbsent(Task task) { + listeners.computeIfAbsent(task.getTaskNo(), taskNo -> { + List streamKeys = buildStreamKeys(task); + String consumerName = "core-" + taskNo; + for (String streamKey : streamKeys) { + try { + redisTemplate.opsForStream().createGroup(streamKey, ReadOffset.from("0"), CONSUMER_GROUP); + } catch (Exception ignored) { + // stream/group may already exist + } + } + Future future = workerPool.submit(() -> consumeLoop(taskNo, streamKeys, consumerName)); + log.info("task event listener started: taskNo={}, streamKeys={}", taskNo, streamKeys); + return new ListenerState(future, System.currentTimeMillis()); + }); + } + + private List buildStreamKeys(Task task) { + List keys = new ArrayList<>(); + keys.add(dispatchConfig.getLogStreamKey(task.getRoleId(), task.getTaskNo())); + keys.add("stream:task:" + task.getTaskNo()); + keys.add("stream:task:" + task.getTaskNo() + ":events"); + keys.add("stream:build:" + task.getTaskNo()); + return keys; + } + + private void consumeLoop(String taskNo, List streamKeys, String consumerName) { + while (!Thread.currentThread().isInterrupted()) { + try { + for (String streamKey 
: streamKeys) { + List> records; + try { + records = redisTemplate.opsForStream().read( + Consumer.from(CONSUMER_GROUP, consumerName), + StreamReadOptions.empty().count(20).block(Duration.ofMillis(500)), + StreamOffset.create(streamKey, ReadOffset.lastConsumed()) + ); + } catch (Exception e) { + continue; + } + if (records == null || records.isEmpty()) { + continue; + } + for (MapRecord record : records) { + processRecord(taskNo, record); + try { + redisTemplate.opsForStream().acknowledge(streamKey, CONSUMER_GROUP, record.getId()); + } catch (Exception e) { + log.debug("ack stream record failed: streamKey={}, recordId={}, err={}", + streamKey, record.getId(), e.getMessage()); + } + } + } + } catch (Exception e) { + if (!Thread.currentThread().isInterrupted()) { + log.warn("task event consume loop error: taskNo={}, err={}", taskNo, e.getMessage(), e); + try { + Thread.sleep(1000); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + break; + } + } + } + } + } + + private void processRecord(String taskNo, MapRecord record) { + Map eventData = extractEventData(record); + taskStatusSyncService.onEvent(taskNo, eventData); + taskPathlistSyncService.onEvent(taskNo, eventData); + taskEventBroadcaster.broadcast(taskNo, record); + } + + @SuppressWarnings("unchecked") + private Map extractEventData(MapRecord record) { + Map rawValues = record.getValue(); + + Object payloadObj = rawValues.get("payload"); + if (payloadObj instanceof String payloadStr && payloadStr.startsWith("{")) { + try { + Map parsed = objectMapper.readValue(payloadStr, Map.class); + Object innerData = parsed.get("data"); + if (innerData instanceof String dataStr && (dataStr.startsWith("{") || dataStr.startsWith("["))) { + try { + parsed.put("data", objectMapper.readValue(dataStr, Object.class)); + } catch (Exception ignored) { + } + } + Object dataObj = parsed.get("data"); + if (dataObj instanceof Map dataMap) { + dataMap.forEach((k, v) -> parsed.putIfAbsent(String.valueOf(k), v)); 
+ } + return parsed; + } catch (Exception e) { + log.debug("parse payload failed, fallback flat event: {}", e.getMessage()); + } + } + + Map event = new HashMap<>(); + rawValues.forEach((k, v) -> event.put(String.valueOf(k), v)); + + Object dataObj = event.get("data"); + if (dataObj instanceof String dataStr && (dataStr.startsWith("{") || dataStr.startsWith("["))) { + try { + event.put("data", objectMapper.readValue(dataStr, Object.class)); + } catch (Exception ignored) { + } + } + + Object dataMapObj = event.get("data"); + if (dataMapObj instanceof Map dataMap) { + dataMap.forEach((k, v) -> event.putIfAbsent(String.valueOf(k), v)); + } + return event; + } + + private static final class ListenerState { + private final Future future; + private volatile long lastActiveAt; + + private ListenerState(Future future, long lastActiveAt) { + this.future = future; + this.lastActiveAt = lastActiveAt; + } + + private Future future() { + return future; + } + + private long lastActiveAt() { + return lastActiveAt; + } + + private void touch(long ts) { + this.lastActiveAt = ts; + } + } +} diff --git a/back/src/main/java/com/linkwork/service/TaskGitAuthService.java b/back/src/main/java/com/linkwork/service/TaskGitAuthService.java new file mode 100644 index 0000000..ef325ac --- /dev/null +++ b/back/src/main/java/com/linkwork/service/TaskGitAuthService.java @@ -0,0 +1,81 @@ +package com.linkwork.service; + +import com.linkwork.mapper.TaskGitAuthMapper; +import com.linkwork.model.entity.GitLabAuthEntity; +import com.linkwork.model.entity.TaskGitAuthEntity; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Service; +import org.springframework.util.StringUtils; + +import java.time.LocalDateTime; + +/** + * 任务与 Git 认证映射服务 + */ +@Slf4j +@Service +@RequiredArgsConstructor +public class TaskGitAuthService { + + public static final String PROVIDER_GITLAB = "gitlab"; + + private final TaskGitAuthMapper taskGitAuthMapper; + 
private final GitLabAuthService gitLabAuthService; + + /** + * 在任务创建时绑定最新的 GitLab 认证记录。 + */ + public void bindTaskWithLatestGitAuth(String taskId, String userId) { + if (!StringUtils.hasText(taskId) || !StringUtils.hasText(userId)) { + return; + } + + GitLabAuthEntity latestAuth = gitLabAuthService.getLatestAuth(userId); + if (latestAuth == null) { + log.info("任务未绑定 Git 认证(用户暂无授权): taskId={}, userId={}", taskId, userId); + return; + } + + TaskGitAuthEntity entity = new TaskGitAuthEntity(); + entity.setTaskId(taskId); + entity.setUserId(userId); + entity.setProvider(PROVIDER_GITLAB); + entity.setGitlabAuthId(latestAuth.getId()); + entity.setExpiresAt(latestAuth.getExpiresAt()); + entity.setUpdatedAt(LocalDateTime.now()); + entity.setIsDeleted(false); + + TaskGitAuthEntity existing = taskGitAuthMapper.selectById(taskId); + if (existing == null) { + entity.setCreatedAt(LocalDateTime.now()); + taskGitAuthMapper.insert(entity); + } else { + entity.setCreatedAt(existing.getCreatedAt()); + taskGitAuthMapper.updateById(entity); + } + + log.info("任务绑定 Git 认证成功: taskId={}, userId={}, provider={}, gitlabAuthId={}", + taskId, userId, PROVIDER_GITLAB, latestAuth.getId()); + } + + public TaskGitAuthEntity getByTaskId(String taskId) { + if (!StringUtils.hasText(taskId)) { + return null; + } + return taskGitAuthMapper.selectById(taskId); + } + + public void updateExpiresAt(String taskId, LocalDateTime expiresAt) { + if (!StringUtils.hasText(taskId) || expiresAt == null) { + return; + } + TaskGitAuthEntity entity = taskGitAuthMapper.selectById(taskId); + if (entity == null) { + return; + } + entity.setExpiresAt(expiresAt); + entity.setUpdatedAt(LocalDateTime.now()); + taskGitAuthMapper.updateById(entity); + } +} diff --git a/back/src/main/java/com/linkwork/service/TaskGitTokenService.java b/back/src/main/java/com/linkwork/service/TaskGitTokenService.java new file mode 100644 index 0000000..7ef16ae --- /dev/null +++ b/back/src/main/java/com/linkwork/service/TaskGitTokenService.java 
@@ -0,0 +1,106 @@ +package com.linkwork.service; + +import com.linkwork.common.ForbiddenOperationException; +import com.linkwork.common.ResourceNotFoundException; +import com.linkwork.model.dto.TaskGitTokenResponse; +import com.linkwork.model.entity.TaskGitAuthEntity; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Service; +import org.springframework.util.StringUtils; + +import java.time.Duration; +import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; + +/** + * 任务运行期 Git token 获取服务(供 zzd 调用) + */ +@Slf4j +@Service +@RequiredArgsConstructor +public class TaskGitTokenService { + + private static final Duration REFRESH_AHEAD_WINDOW = Duration.ofMinutes(5); + private static final String SCOPE_READ_REPOSITORY = "read_repository"; + private static final String SCOPE_WRITE_REPOSITORY = "write_repository"; + private static final String SCOPE_API = "api"; + + private final TaskService taskService; + private final TaskGitAuthService taskGitAuthService; + private final GitLabAuthService gitLabAuthService; + + public TaskGitTokenResponse getTaskGitToken(String taskId) { + ensureTaskExists(taskId); + + TaskGitAuthEntity binding = taskGitAuthService.getByTaskId(taskId); + if (binding == null || binding.getGitlabAuthId() == null) { + throw new ResourceNotFoundException("任务未绑定 Git 认证: taskId=" + taskId); + } + + if (!TaskGitAuthService.PROVIDER_GITLAB.equalsIgnoreCase(binding.getProvider())) { + throw new ForbiddenOperationException("仅支持 gitlab provider,当前 provider=" + binding.getProvider()); + } + + GitLabAuthService.ValidToken token = + gitLabAuthService.getValidTokenByAuthId(binding.getGitlabAuthId(), REFRESH_AHEAD_WINDOW); + if (token == null) { + throw new ResourceNotFoundException("任务未绑定有效 Git 认证: taskId=" + taskId); + } + + if (!StringUtils.hasText(token.getToken())) { + throw new IllegalStateException("任务 Git token 不可用: taskId=" + taskId); + } + + ensureScopeSufficient(token.getScope(), 
taskId); + GitLabAuthService.CommitIdentity commitIdentity = gitLabAuthService.resolveCommitIdentity(token.getToken()); + + taskGitAuthService.updateExpiresAt(taskId, token.getExpiresAt()); + log.info("任务 Git token 发放: taskId={}, provider={}, gitlabAuthId={}, tokenType={}, scope={}, tokenAlias={}, commitUser={}", + taskId, + binding.getProvider(), + binding.getGitlabAuthId(), + token.getTokenType(), + token.getScope(), + token.getTokenAlias(), + commitIdentity.getUsername()); + + TaskGitTokenResponse response = new TaskGitTokenResponse(); + response.setProvider(TaskGitAuthService.PROVIDER_GITLAB); + response.setTokenType(token.getTokenType()); + response.setToken(token.getToken()); + response.setExpiresAt(DateTimeFormatter.ISO_INSTANT.format(token.getExpiresAt().atOffset(ZoneOffset.UTC).toInstant())); + response.setCommitUserName(commitIdentity.getUsername()); + response.setCommitUserEmail(commitIdentity.getEmail()); + return response; + } + + /** + * Scope fail-fast:发放前校验 scope 是否满足仓库访问要求。 + * 至少包含 read_repository / write_repository / api 之一。 + * GitLab 中 write_repository 隐含读能力,api 隐含仓库读写权限。 + * 旧授权若 scope 不足,直接拒绝并引导重新授权,避免任务在 GIT_PRE 阶段才失败。 + */ + private void ensureScopeSufficient(String scope, String taskId) { + if (!StringUtils.hasText(scope)) { + log.warn("Git token scope 为空,跳过校验: taskId={}", taskId); + return; + } + boolean hasRepoAccess = scope.contains(SCOPE_READ_REPOSITORY) + || scope.contains(SCOPE_WRITE_REPOSITORY) + || scope.contains(SCOPE_API); + if (!hasRepoAccess) { + log.warn("Git token scope 不满足仓库访问要求: taskId={}, scope={}", taskId, scope); + throw new ForbiddenOperationException( + "Git 授权 scope 不足(缺少 read_repository、write_repository 或 api),请删除旧授权后重新授权: taskId=" + taskId); + } + } + + private void ensureTaskExists(String taskId) { + try { + taskService.getTaskByNo(taskId); + } catch (IllegalArgumentException e) { + throw new ResourceNotFoundException("任务不存在: " + taskId); + } + } +} diff --git 
a/back/src/main/java/com/linkwork/service/TaskOutputEstimatorAgent.java b/back/src/main/java/com/linkwork/service/TaskOutputEstimatorAgent.java new file mode 100644 index 0000000..fcf3e8b --- /dev/null +++ b/back/src/main/java/com/linkwork/service/TaskOutputEstimatorAgent.java @@ -0,0 +1,797 @@ +package com.linkwork.service; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.linkwork.model.dto.TaskCreateRequest; +import com.linkwork.model.entity.RoleEntity; +import com.linkwork.model.enums.TaskOutputType; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.stereotype.Service; +import org.springframework.util.StringUtils; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.net.URI; +import java.net.http.HttpClient; +import java.net.http.HttpRequest; +import java.net.http.HttpResponse; +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Locale; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * 任务产出预估 Agent + * + * 在任务执行前基于用户诉求、岗位 system prompt 与任务属性进行预估: + * 1) 优先调用 LLM Gateway 输出结构化结果; + * 2) LLM 不可用时,降级到规则引擎,避免阻断任务创建。 + */ +@Slf4j +@Service +public class TaskOutputEstimatorAgent { + + private static final String DIALOG_CONCLUSION_CODE = TaskOutputType.DIALOG_CONCLUSION.getCode(); + private static final String GIT_BRANCH_CODE = TaskOutputType.GIT_BRANCH.getCode(); + private static final String DONE_TOKEN = "[DONE]"; + private static final int MAX_BRANCH_LENGTH = 64; + private static final int MAX_PROMPT_SNIPPET = 1200; + + private static final Set DIALOG_ONLY_KEYWORDS = Set.of( + "只要结论", "仅回复", "仅对话", "不用文件", "无须文件", 
"不要文件", "只给建议", + "only conclusion", "only answer", "dialog only", "no file output" + ); + + private static final Set API_RESULT_KEYWORDS = Set.of( + "api调用", "api call", "webhook", "callback", "回调", "curl", "http request", "openapi", "postman" + ); + + private static final Set GIT_KEYWORDS = Set.of( + "git", "branch", "commit", "push", "分支", "提交" + ); + + private static final Set PULL_REQUEST_KEYWORDS = Set.of( + "pull request", "merge request", "创建pr", "提pr", "提交pr", "pr链接", "mr链接" + ); + + private static final Set EXCEL_KEYWORDS = Set.of( + "excel", "xlsx", "xls", "sheet", "数据透视", "表格" + ); + + private static final Set CSV_KEYWORDS = Set.of( + "csv", "逗号分隔" + ); + + private static final Set WORD_KEYWORDS = Set.of( + "word", "doc", "docx", "技术文档", "文档" + ); + + private static final Set PPT_KEYWORDS = Set.of( + "ppt", "pptx", "slide", "slides", "演示文稿" + ); + + private static final Set PDF_KEYWORDS = Set.of( + "pdf" + ); + + private static final Set MARKDOWN_KEYWORDS = Set.of( + "markdown", "readme", "md文档", ".md" + ); + + private static final Set TXT_KEYWORDS = Set.of( + "txt", "纯文本", "text file", "日志文件" + ); + + private static final Set PYTHON_KEYWORDS = Set.of( + "python", "pandas", "jupyter", "notebook", ".py" + ); + + private static final Set JAVA_KEYWORDS = Set.of( + "java", "spring", "springboot", "maven", "gradle", "jvm" + ); + + private static final Set JAVASCRIPT_KEYWORDS = Set.of( + "javascript", "nodejs", "node.js", "frontend js", ".js" + ); + + private static final Set TYPESCRIPT_KEYWORDS = Set.of( + "typescript", "tsx", ".ts" + ); + + private static final Set SQL_KEYWORDS = Set.of( + "sql", "ddl", "dml", "select ", "insert ", "update ", "delete " + ); + + private static final Set SHELL_KEYWORDS = Set.of( + "shell", "bash", "zsh", "sh脚本", "命令脚本" + ); + + private static final Set CONFIG_KEYWORDS = Set.of( + "配置文件", "yaml", "yml", "toml", "ini", "properties", ".env" + ); + + private static final Set JSON_KEYWORDS = Set.of( + "json", "jsonl" + ); + + 
private static final Set PNG_KEYWORDS = Set.of( + "png", "image", "chart", "plot", "graph", "可视化", "截图", "图表" + ); + + private static final Set ARCHIVE_KEYWORDS = Set.of( + "zip", "tar", "tar.gz", "压缩包", "归档" + ); + + private final ObjectMapper objectMapper = new ObjectMapper(); + + @Value("${robot.output-estimator.llm.enabled:true}") + private boolean llmEnabled; + + @Value("${robot.output-estimator.llm.gateway-url:}") + private String gatewayUrl; + + @Value("${robot.output-estimator.llm.model:minimax-m2.1}") + private String llmModel; + + @Value("${robot.output-estimator.llm.max-tokens:256}") + private int llmMaxTokens; + + @Value("${robot.output-estimator.llm.stream:true}") + private boolean llmStream; + + @Value("${robot.output-estimator.llm.connect-timeout-ms:3000}") + private int connectTimeoutMs; + + @Value("${robot.output-estimator.llm.read-timeout-ms:12000}") + private int readTimeoutMs; + + @Value("${robot.output-estimator.llm.auth-token:}") + private String authToken; + + @Value("${robot.output-estimator.llm.x-litellm-api-key:}") + private String xLitellmApiKey; + + public record EstimateResult(List estimatedOutput, String branchName, String source) { + public EstimateResult { + estimatedOutput = estimatedOutput == null ? List.of() : List.copyOf(estimatedOutput); + source = StringUtils.hasText(source) ? source : "rule"; + } + } + + public List estimate(TaskCreateRequest request, RoleEntity role) { + return estimateWithBranch(null, request, role).estimatedOutput(); + } + + public EstimateResult estimateWithBranch(String taskNo, TaskCreateRequest request, RoleEntity role) { + String promptRaw = request != null ? request.getPrompt() : null; + String prompt = normalize(promptRaw); + String systemPrompt = normalize(role != null ? 
role.getPrompt() : null); + String mergedText = prompt + "\n" + systemPrompt; + + if (containsAny(mergedText, DIALOG_ONLY_KEYWORDS)) { + return new EstimateResult(List.of(DIALOG_CONCLUSION_CODE), null, "rule"); + } + + LinkedHashSet ruleEstimated = new LinkedHashSet<>(); + addByTextRules(mergedText, ruleEstimated); + addByTaskAttributes(request, ruleEstimated); + + if (ruleEstimated.isEmpty()) { + ruleEstimated.add(TaskOutputType.DIALOG_CONCLUSION); + } + + EstimateResult llmEstimate = estimateByLlm(taskNo, request, role, promptRaw); + if (llmEstimate != null && !llmEstimate.estimatedOutput().isEmpty()) { + LinkedHashSet mergedOutputs = new LinkedHashSet<>(llmEstimate.estimatedOutput()); + LinkedHashSet attributeOutputs = new LinkedHashSet<>(); + addByTaskAttributes(request, attributeOutputs); + for (TaskOutputType type : attributeOutputs) { + mergedOutputs.add(type.getCode()); + } + normalizeDialogOutput(mergedOutputs); + + String branchName = llmEstimate.branchName(); + if (mergedOutputs.contains(GIT_BRANCH_CODE) && !StringUtils.hasText(branchName)) { + branchName = buildFallbackBranchName(taskNo, promptRaw); + } + if (!mergedOutputs.contains(GIT_BRANCH_CODE)) { + branchName = null; + } + return new EstimateResult(new ArrayList<>(mergedOutputs), branchName, "llm"); + } + + LinkedHashSet fallbackOutputs = new LinkedHashSet<>(toCodes(ruleEstimated)); + normalizeDialogOutput(fallbackOutputs); + String fallbackBranchName = fallbackOutputs.contains(GIT_BRANCH_CODE) + ? 
buildFallbackBranchName(taskNo, promptRaw) + : null; + return new EstimateResult(new ArrayList<>(fallbackOutputs), fallbackBranchName, "rule"); + } + + private EstimateResult estimateByLlm(String taskNo, TaskCreateRequest request, RoleEntity role, String promptRaw) { + if (!llmEnabled || !StringUtils.hasText(gatewayUrl)) { + return null; + } + + String token = resolveGatewayToken(); + if (!StringUtils.hasText(token)) { + return null; + } + + try { + String systemPrompt = buildEstimatorSystemPrompt(); + String userPrompt = buildEstimatorUserPrompt(taskNo, request, role); + String completion = callLlmGateway(systemPrompt, userPrompt, token, resolveEstimatorModel(request)); + if (!StringUtils.hasText(completion)) { + return null; + } + return parseLlmEstimate(completion, taskNo, promptRaw); + } catch (Exception e) { + log.warn("LLM 产物预估失败,降级规则模式: taskNo={}, error={}", taskNo, e.getMessage()); + return null; + } + } + + private String callLlmGateway(String systemPrompt, String userPrompt, String token, String modelId) throws IOException, InterruptedException { + JsonNode payload = buildGatewayPayload(systemPrompt, userPrompt, modelId); + String requestBody = objectMapper.writeValueAsString(payload); + + HttpClient client = HttpClient.newBuilder() + .connectTimeout(Duration.ofMillis(Math.max(connectTimeoutMs, 1000))) + .build(); + + HttpRequest.Builder requestBuilder = HttpRequest.newBuilder(URI.create(gatewayUrl)) + .timeout(Duration.ofMillis(Math.max(readTimeoutMs, 2000))) + .header("Authorization", "Bearer " + token) + .header("Content-Type", "application/json"); + + if (StringUtils.hasText(xLitellmApiKey)) { + requestBuilder.header("x-litellm-api-key", xLitellmApiKey.trim()); + } + + HttpRequest request = requestBuilder + .POST(HttpRequest.BodyPublishers.ofString(requestBody, StandardCharsets.UTF_8)) + .build(); + + if (llmStream) { + HttpResponse response = client.send(request, HttpResponse.BodyHandlers.ofInputStream()); + if (response.statusCode() >= 400) { + 
String errorBody = readErrorBody(response.body()); + throw new IllegalStateException("LLM Gateway 请求失败: status=" + response.statusCode() + ", body=" + errorBody); + } + return readStreamCompletion(response.body()); + } + + HttpResponse response = client.send(request, HttpResponse.BodyHandlers.ofString(StandardCharsets.UTF_8)); + if (response.statusCode() >= 400) { + throw new IllegalStateException("LLM Gateway 请求失败: status=" + response.statusCode() + ", body=" + response.body()); + } + + String text = extractTextFromGatewayPayload(response.body()); + return StringUtils.hasText(text) ? text : response.body(); + } + + private JsonNode buildGatewayPayload(String systemPrompt, String userPrompt, String modelId) { + var root = objectMapper.createObjectNode(); + root.put("model", modelId); + root.put("max_tokens", llmMaxTokens); + root.put("stream", llmStream); + + var messages = root.putArray("messages"); + var system = messages.addObject(); + system.put("role", "system"); + system.put("content", systemPrompt); + + var user = messages.addObject(); + user.put("role", "user"); + user.put("content", userPrompt); + return root; + } + + private String readStreamCompletion(InputStream bodyStream) throws IOException { + StringBuilder completion = new StringBuilder(); + StringBuilder raw = new StringBuilder(); + + try (BufferedReader reader = new BufferedReader(new InputStreamReader(bodyStream, StandardCharsets.UTF_8))) { + String line; + while ((line = reader.readLine()) != null) { + raw.append(line).append('\n'); + String trimmed = line.trim(); + if (trimmed.isEmpty() || trimmed.startsWith(":")) { + continue; + } + if (trimmed.startsWith("data:")) { + trimmed = trimmed.substring(5).trim(); + } + if (!StringUtils.hasText(trimmed) || DONE_TOKEN.equals(trimmed)) { + if (DONE_TOKEN.equals(trimmed)) { + break; + } + continue; + } + + String delta = extractTextFromGatewayPayload(trimmed); + if (StringUtils.hasText(delta)) { + completion.append(delta); + } + } + } + + if 
(completion.length() > 0) { + return completion.toString(); + } + return raw.toString(); + } + + private String extractTextFromGatewayPayload(String payload) { + if (!StringUtils.hasText(payload)) { + return ""; + } + + String trimmed = payload.trim(); + if (!trimmed.startsWith("{")) { + return trimmed; + } + + try { + JsonNode root = objectMapper.readTree(trimmed); + StringBuilder text = new StringBuilder(); + + appendTextNode(root.path("output_text"), text); + appendTextNode(root.path("content"), text); + + JsonNode choices = root.path("choices"); + if (choices.isArray()) { + for (JsonNode choice : choices) { + appendTextNode(choice.path("delta").path("content"), text); + appendTextNode(choice.path("message").path("content"), text); + appendTextNode(choice.path("text"), text); + } + } + + JsonNode data = root.path("data"); + if (data.isTextual()) { + appendTextNode(data, text); + } else if (data.isObject()) { + appendTextNode(data.path("content"), text); + } + + if (text.length() > 0) { + return text.toString(); + } + if (root.isTextual()) { + return root.asText(); + } + return ""; + } catch (Exception ignore) { + return trimmed; + } + } + + private void appendTextNode(JsonNode node, StringBuilder builder) { + if (node == null || node.isMissingNode() || node.isNull()) { + return; + } + + if (node.isTextual()) { + builder.append(node.asText()); + return; + } + + if (node.isArray()) { + for (JsonNode child : node) { + appendTextNode(child, builder); + } + return; + } + + if (node.isObject()) { + appendTextNode(node.path("text"), builder); + appendTextNode(node.path("content"), builder); + appendTextNode(node.path("value"), builder); + } + } + + private EstimateResult parseLlmEstimate(String completion, String taskNo, String promptRaw) { + String jsonText = extractJsonObject(completion); + if (!StringUtils.hasText(jsonText)) { + log.warn("LLM 产物预估解析失败(未找到 JSON): taskNo={}, completion={}...", taskNo, truncate(completion, 240)); + return null; + } + + try { + JsonNode 
root = objectMapper.readTree(jsonText); + LinkedHashSet outputs = new LinkedHashSet<>(); + collectOutputCodes(root.path("estimatedOutput"), outputs); + collectOutputCodes(root.path("estimated_output"), outputs); + collectOutputCodes(root.path("outputs"), outputs); + + if (outputs.isEmpty()) { + addAllowedCode(root.path("outputType").asText(null), outputs); + addAllowedCode(root.path("output_type").asText(null), outputs); + } + + String branchName = firstNonBlank( + root.path("branchName").asText(null), + root.path("branch_name").asText(null), + root.path("estimatedBranchName").asText(null) + ); + branchName = sanitizeBranchName(branchName); + + if (StringUtils.hasText(branchName)) { + outputs.add(GIT_BRANCH_CODE); + } + + if (outputs.isEmpty()) { + return null; + } + normalizeDialogOutput(outputs); + + if (outputs.contains(GIT_BRANCH_CODE) && !StringUtils.hasText(branchName)) { + branchName = buildFallbackBranchName(taskNo, promptRaw); + } + if (!outputs.contains(GIT_BRANCH_CODE)) { + branchName = null; + } + + return new EstimateResult(new ArrayList<>(outputs), branchName, "llm"); + } catch (JsonProcessingException e) { + log.warn("LLM 产物预估解析失败(JSON 非法): taskNo={}, error={}", taskNo, e.getMessage()); + return null; + } + } + + private void collectOutputCodes(JsonNode node, LinkedHashSet outputs) { + if (node == null || node.isMissingNode() || node.isNull()) { + return; + } + + if (node.isArray()) { + for (JsonNode item : node) { + if (item.isTextual()) { + addAllowedCode(item.asText(), outputs); + continue; + } + addAllowedCode(item.path("code").asText(null), outputs); + addAllowedCode(item.path("type").asText(null), outputs); + } + return; + } + + if (node.isTextual()) { + String text = node.asText(); + for (String piece : text.split("[,,\\s]+")) { + addAllowedCode(piece, outputs); + } + return; + } + + if (node.isObject()) { + addAllowedCode(node.path("code").asText(null), outputs); + addAllowedCode(node.path("type").asText(null), outputs); + } + } + + private 
void addAllowedCode(String code, LinkedHashSet outputs) { + if (!StringUtils.hasText(code)) { + return; + } + TaskOutputType.fromCode(code.trim()) + .map(TaskOutputType::getCode) + .ifPresent(outputs::add); + } + + private void normalizeDialogOutput(LinkedHashSet outputs) { + if (outputs.size() > 1) { + outputs.remove(DIALOG_CONCLUSION_CODE); + } + if (outputs.isEmpty()) { + outputs.add(DIALOG_CONCLUSION_CODE); + } + } + + private String extractJsonObject(String text) { + if (!StringUtils.hasText(text)) { + return null; + } + + String trimmed = text.trim(); + if (trimmed.startsWith("```") && trimmed.endsWith("```")) { + int firstBrace = trimmed.indexOf('{'); + int lastBrace = trimmed.lastIndexOf('}'); + if (firstBrace >= 0 && lastBrace > firstBrace) { + return trimmed.substring(firstBrace, lastBrace + 1); + } + } + + if (trimmed.startsWith("{") && trimmed.endsWith("}")) { + return trimmed; + } + + int start = trimmed.indexOf('{'); + int end = trimmed.lastIndexOf('}'); + if (start >= 0 && end > start) { + return trimmed.substring(start, end + 1); + } + return null; + } + + private String buildEstimatorSystemPrompt() { + String allowedOutputs = Arrays.stream(TaskOutputType.values()) + .map(type -> "- " + type.getCode() + ": " + type.getDescription()) + .collect(Collectors.joining("\\n")); + + return """ + 你是任务产物预估助手,只负责判断任务执行后可能产出的结果类型。 + 必须遵守以下规则: + 1) 只返回 JSON,不要 markdown,不要解释文字。 + 2) JSON 字段固定为: estimatedOutput(array), branchName(string|null), reason(string)。 + 3) estimatedOutput 只能使用下面白名单枚举,允许多个并存。 + 4) 若只会给对话结论,则 estimatedOutput 仅保留 dialog_conclusion。 + 5) 只有当预计有代码提交时才返回 branchName;否则 branchName 必须为 null。 + 6) branchName 使用小写 kebab-case,建议格式 auto/{topic}-{taskShortId}。 + + 允许的 estimatedOutput: + """ + allowedOutputs; + } + + private String buildEstimatorUserPrompt(String taskNo, TaskCreateRequest request, RoleEntity role) throws JsonProcessingException { + var root = objectMapper.createObjectNode(); + root.put("taskNo", taskNo == null ? 
"" : taskNo); + root.put("userPrompt", truncate(request != null ? request.getPrompt() : null, MAX_PROMPT_SNIPPET)); + root.put("modelId", request != null ? request.getModelId() : ""); + + var fileIds = root.putArray("fileIds"); + if (request != null && request.getFileIds() != null) { + for (String fileId : request.getFileIds()) { + fileIds.add(fileId); + } + } + + var roleNode = root.putObject("role"); + roleNode.put("roleId", role != null && role.getId() != null ? role.getId() : 0L); + roleNode.put("roleName", role != null ? role.getName() : ""); + roleNode.put("rolePrompt", truncate(role != null ? role.getPrompt() : null, MAX_PROMPT_SNIPPET)); + + var gitRepos = roleNode.putArray("gitRepos"); + if (role != null && role.getConfigJson() != null && role.getConfigJson().getGitRepos() != null) { + for (RoleEntity.RoleConfig.GitRepo repo : role.getConfigJson().getGitRepos()) { + var repoNode = gitRepos.addObject(); + repoNode.put("url", repo.getUrl()); + repoNode.put("branch", repo.getBranch()); + } + } + + return "请基于以下任务要素做产物预估,按 system 要求返回 JSON:\n" + + objectMapper.writerWithDefaultPrettyPrinter().writeValueAsString(root); + } + + private String resolveEstimatorModel(TaskCreateRequest request) { + if (request != null && StringUtils.hasText(request.getModelId())) { + return request.getModelId().trim(); + } + return llmModel; + } + + private String resolveGatewayToken() { + if (StringUtils.hasText(authToken)) { + return authToken.trim(); + } + return null; + } + + private String readErrorBody(InputStream inputStream) { + if (inputStream == null) { + return ""; + } + try (BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8))) { + StringBuilder sb = new StringBuilder(); + String line; + while ((line = reader.readLine()) != null) { + sb.append(line); + } + return sb.toString(); + } catch (Exception e) { + return ""; + } + } + + private String buildFallbackBranchName(String taskNo, String prompt) { + if 
(!StringUtils.hasText(taskNo)) {
            return null;
        }

        // Topic slug derived from the prompt, defaulting to "task".
        String topic = slugify(prompt);
        if (!StringUtils.hasText(topic)) {
            topic = "task";
        }

        // Last 8 alphanumeric chars of the task number, lower-cased.
        String taskShort = taskNo.replaceAll("[^a-zA-Z0-9]", "").toLowerCase(Locale.ROOT);
        if (taskShort.length() > 8) {
            taskShort = taskShort.substring(taskShort.length() - 8);
        }

        return sanitizeBranchName("auto/" + topic + "-" + taskShort);
    }

    /**
     * Normalizes a proposed branch name into safe lower-case kebab/slash form.
     * Returns null when nothing usable remains after sanitizing.
     */
    private String sanitizeBranchName(String branchName) {
        if (!StringUtils.hasText(branchName)) {
            return null;
        }

        String sanitized = branchName.trim().toLowerCase(Locale.ROOT)
                .replace('\\', '/')
                .replace(' ', '-')
                .replaceAll("[^a-z0-9/_\\-.]+", "-")
                .replaceAll("/{2,}", "/")
                .replaceAll("-{2,}", "-")
                .replaceAll("^/+", "")
                .replaceAll("/+$", "");

        if (!StringUtils.hasText(sanitized)) {
            return null;
        }

        // Unprefixed names get the auto/ namespace.
        if (!sanitized.contains("/")) {
            sanitized = "auto/" + sanitized;
        }

        if (sanitized.length() > MAX_BRANCH_LENGTH) {
            sanitized = sanitized.substring(0, MAX_BRANCH_LENGTH).replaceAll("[-/]+$", "");
        }

        return StringUtils.hasText(sanitized) ? sanitized : null;
    }

    /** Lower-case ASCII slug of at most 24 chars; "" when nothing usable remains. */
    private String slugify(String text) {
        if (!StringUtils.hasText(text)) {
            return "";
        }

        String slug = text.toLowerCase(Locale.ROOT)
                .replaceAll("[^a-z0-9]+", "-")
                .replaceAll("^-+", "")
                .replaceAll("-+$", "");

        return slug.length() > 24 ? slug.substring(0, 24).replaceAll("-+$", "") : slug;
    }

    /** Returns the first argument that has visible text, or null. */
    private String firstNonBlank(String...
values) {
        if (values == null) {
            return null;
        }
        for (String candidate : values) {
            if (StringUtils.hasText(candidate)) {
                return candidate;
            }
        }
        return null;
    }

    /**
     * Keyword-based fallback estimation: scans the normalized task text for
     * known keyword groups and records the matching output types. Insertion
     * order into the LinkedHashSet is significant (it drives the final code
     * list order), so the checks below are NOT reordered.
     */
    private void addByTextRules(String text, LinkedHashSet<TaskOutputType> estimated) {
        // Version-control outputs; a PR implies a branch as well.
        if (containsAny(text, GIT_KEYWORDS)) {
            estimated.add(TaskOutputType.GIT_BRANCH);
        }
        if (containsAny(text, PULL_REQUEST_KEYWORDS)) {
            estimated.add(TaskOutputType.PULL_REQUEST);
            estimated.add(TaskOutputType.GIT_BRANCH);
        }

        // Source-code file types.
        if (containsAny(text, PYTHON_KEYWORDS)) {
            estimated.add(TaskOutputType.PYTHON_FILE);
        }
        if (containsAny(text, JAVA_KEYWORDS)) {
            estimated.add(TaskOutputType.JAVA_FILE);
        }
        if (containsAny(text, JAVASCRIPT_KEYWORDS)) {
            estimated.add(TaskOutputType.JAVASCRIPT_FILE);
        }
        if (containsAny(text, TYPESCRIPT_KEYWORDS)) {
            estimated.add(TaskOutputType.TYPESCRIPT_FILE);
        }
        if (containsAny(text, SQL_KEYWORDS)) {
            estimated.add(TaskOutputType.SQL_FILE);
        }
        if (containsAny(text, SHELL_KEYWORDS)) {
            estimated.add(TaskOutputType.SHELL_SCRIPT);
        }
        if (containsAny(text, CONFIG_KEYWORDS)) {
            estimated.add(TaskOutputType.CONFIG_FILE);
        }

        // Document types.
        if (containsAny(text, MARKDOWN_KEYWORDS)) {
            estimated.add(TaskOutputType.MARKDOWN);
        }
        if (containsAny(text, TXT_KEYWORDS)) {
            estimated.add(TaskOutputType.TXT);
        }
        if (containsAny(text, WORD_KEYWORDS)) {
            estimated.add(TaskOutputType.WORD);
        }
        if (containsAny(text, EXCEL_KEYWORDS)) {
            estimated.add(TaskOutputType.EXCEL);
        }
        if (containsAny(text, CSV_KEYWORDS)) {
            estimated.add(TaskOutputType.CSV);
        }
        if (containsAny(text, PPT_KEYWORDS)) {
            estimated.add(TaskOutputType.PPT);
        }
        if (containsAny(text, PDF_KEYWORDS)) {
            estimated.add(TaskOutputType.PDF);
        }

        // Data / media / packaging types.
        if (containsAny(text, JSON_KEYWORDS)) {
            estimated.add(TaskOutputType.JSON);
        }
        if (containsAny(text, PNG_KEYWORDS)) {
            estimated.add(TaskOutputType.PNG);
        }
        if (containsAny(text, ARCHIVE_KEYWORDS)) {
            estimated.add(TaskOutputType.ARCHIVE);
        }

        if
(containsAny(text, API_RESULT_KEYWORDS)) { + estimated.add(TaskOutputType.API_CALL_RESULT); + } + } + + private void addByTaskAttributes(TaskCreateRequest request, LinkedHashSet estimated) { + if (request != null && request.getFileIds() != null) { + for (String fileId : request.getFileIds()) { + TaskOutputType.fromFileName(fileId).ifPresent(estimated::add); + } + } + } + + private List toCodes(LinkedHashSet estimated) { + List output = new ArrayList<>(estimated.size()); + for (TaskOutputType type : estimated) { + output.add(type.getCode()); + } + return output; + } + + private String normalize(String text) { + if (text == null) { + return ""; + } + return text.toLowerCase(Locale.ROOT); + } + + private boolean containsAny(String text, Set keywords) { + if (text.isBlank()) { + return false; + } + for (String keyword : keywords) { + if (text.contains(keyword)) { + return true; + } + } + return false; + } + + private String truncate(String text, int maxLength) { + if (!StringUtils.hasText(text)) { + return ""; + } + if (text.length() <= maxLength) { + return text; + } + return text.substring(0, maxLength); + } +} diff --git a/back/src/main/java/com/linkwork/service/TaskOutputWorkspaceSyncService.java b/back/src/main/java/com/linkwork/service/TaskOutputWorkspaceSyncService.java new file mode 100644 index 0000000..23543db --- /dev/null +++ b/back/src/main/java/com/linkwork/service/TaskOutputWorkspaceSyncService.java @@ -0,0 +1,835 @@ +package com.linkwork.service; + +import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper; +import com.linkwork.config.DispatchConfig; +import com.linkwork.mapper.FileNodeMapper; +import com.linkwork.mapper.WorkspaceFileMapper; +import com.linkwork.model.entity.FileNodeEntity; +import com.linkwork.model.entity.WorkspaceFile; +import com.linkwork.model.entity.Task; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Service; +import 
org.springframework.transaction.annotation.Transactional; +import org.springframework.util.StringUtils; + +import java.nio.file.Files; +import java.nio.file.Path; +import java.time.LocalDateTime; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.UUID; + +/** + * 将 TASK_OUTPUT_READY 的 OSS 产物同步写入文件管理索引(WORKSTATION 空间)。 + */ +@Slf4j +@Service +@RequiredArgsConstructor +public class TaskOutputWorkspaceSyncService { + + private static final int NODE_NAME_MAX_LENGTH = 512; + private static final String DELETED_NAME_MARKER = "__deleted__"; + private static final String SPACE_TYPE_WORKSTATION = "WORKSTATION"; + private static final String SPACE_TYPE_USER = "USER"; + private static final String ROOT_DIR_NAME = "任务产出"; + private static final String ROOT_DIR_FALLBACK_NAME = "任务产出_系统"; + + private final TaskService taskService; + private final DispatchConfig dispatchConfig; + private final NfsStorageService nfsStorageService; + private final WorkspaceFileMapper workspaceFileMapper; + private final FileNodeMapper fileNodeMapper; + + public record WorkspaceSyncContext(String workstationId, String parentNodeId, String taskNodeId) {} + + @Transactional(rollbackFor = Exception.class) + public void syncTaskPathListArtifacts( + String taskNo, + String userId, + String workstationId, + List> artifacts + ) { + if (artifacts == null || artifacts.isEmpty()) { + return; + } + for (Map artifact : artifacts) { + if (artifact == null) { + continue; + } + String relativePath = normalizeWorkspaceRelativePath(String.valueOf(artifact.getOrDefault("relative_path", ""))); + if (!StringUtils.hasText(relativePath)) { + continue; + } + + if (relativePath.startsWith("logs/")) { + continue; + } + if (relativePath.equalsIgnoreCase("user/MEMORY.md") + || relativePath.equalsIgnoreCase("workstation/MEMORY.md")) { + continue; + } + + String action = 
String.valueOf(artifact.getOrDefault("action", "upsert")).trim().toLowerCase(Locale.ROOT); + String objectName = normalizeOssPath(String.valueOf(artifact.getOrDefault("object_name", ""))); + if (!StringUtils.hasText(objectName)) { + objectName = buildObjectNameFromRelative(relativePath, userId, workstationId); + } + + if ("deleted".equals(action)) { + markDeletedByObjectName(userId, workstationId, objectName); + continue; + } + + upsertPathListArtifact(userId, workstationId, relativePath, objectName, extractSize(artifact.get("size"))); + } + } + + @Transactional(rollbackFor = Exception.class) + public Optional syncTaskOutput(String taskNo, Map outputData) { + if (outputData == null) { + return Optional.empty(); + } + + String outputType = String.valueOf(outputData.getOrDefault("output_type", "")).trim().toLowerCase(Locale.ROOT); + if (!"oss".equals(outputType)) { + return Optional.empty(); + } + + String ossPath = normalizeOssPath(String.valueOf(outputData.getOrDefault("oss_path", ""))); + if (!StringUtils.hasText(ossPath)) { + return Optional.empty(); + } + String resolvedOssPath = normalizeOssPath(String.valueOf(outputData.getOrDefault("oss_path_resolved", ""))); + + TaskContext context = resolveTaskContext(taskNo, ossPath); + if (!context.isValid()) { + log.warn("TASK_OUTPUT_READY 索引跳过:任务上下文缺失 taskNo={}, ossPath={}", taskNo, ossPath); + return Optional.empty(); + } + + FileNodeEntity outputRoot = ensureRootDirectory(context.userId(), context.workstationId()); + FileNodeEntity taskDirectory = ensureTaskDirectory(context.userId(), context.workstationId(), + outputRoot.getNodeId(), context.taskNo()); + + List objectNames = List.of(); + for (String prefix : buildCandidatePrefixes(context.workstationId(), ossPath, resolvedOssPath)) { + objectNames = nfsStorageService.listObjects(prefix); + if (!objectNames.isEmpty()) { + break; + } + } + for (String objectName : objectNames) { + String fileName = extractFileName(objectName); + if (!StringUtils.hasText(fileName)) { + 
continue; + } + WorkspaceFile workspaceFile = ensureWorkspaceFile( + context.userId(), + context.workstationId(), + objectName, + fileName, + resolveFileSize(objectName)); + ensureFileNode(context.userId(), context.workstationId(), taskDirectory.getNodeId(), workspaceFile, fileName); + } + + return Optional.of(new WorkspaceSyncContext( + context.workstationId(), + outputRoot.getNodeId(), + taskDirectory.getNodeId())); + } + + private TaskContext resolveTaskContext(String taskNo, String ossPath) { + String safeTaskNo = StringUtils.hasText(taskNo) ? taskNo : extractTaskNoFromPath(ossPath); + String workstationId = null; + String userId = null; + + if (StringUtils.hasText(safeTaskNo)) { + try { + Task task = taskService.getTaskByNo(safeTaskNo); + workstationId = dispatchConfig.resolveWorkstationId(task.getRoleId()); + userId = task.getCreatorId(); + } catch (Exception e) { + log.debug("TASK_OUTPUT_READY 任务上下文查询失败,回退路径解析: taskNo={}, error={}", + safeTaskNo, e.getMessage()); + } + } + + if (!StringUtils.hasText(workstationId)) { + workstationId = extractWorkstationIdFromPath(ossPath); + } + if (!StringUtils.hasText(userId)) { + userId = extractUserIdFromPath(ossPath); + } + + return new TaskContext(safeTaskNo, workstationId, userId); + } + + private FileNodeEntity ensureRootDirectory(String userId, String workstationId) { + FileNodeEntity primary = findNodeByName(userId, workstationId, null, ROOT_DIR_NAME); + if (primary != null) { + if ("DIR".equals(primary.getEntryType())) { + return primary; + } + FileNodeEntity fallback = findNodeByName(userId, workstationId, null, ROOT_DIR_FALLBACK_NAME); + if (fallback != null && "DIR".equals(fallback.getEntryType())) { + return fallback; + } + if (fallback == null) { + return createDirectory(userId, workstationId, null, ROOT_DIR_FALLBACK_NAME); + } + String uniqueName = resolveUniqueName(userId, workstationId, null, ROOT_DIR_FALLBACK_NAME); + return createDirectory(userId, workstationId, null, uniqueName); + } + return 
createDirectory(userId, workstationId, null, ROOT_DIR_NAME); + } + + private FileNodeEntity ensureTaskDirectory(String userId, String workstationId, String parentId, String taskNo) { + String safeTaskNo = StringUtils.hasText(taskNo) ? taskNo : "未命名任务"; + FileNodeEntity existed = findNodeByName(userId, workstationId, parentId, safeTaskNo); + if (existed != null) { + if ("DIR".equals(existed.getEntryType())) { + return existed; + } + String fallbackName = safeTaskNo + "_产出"; + FileNodeEntity fallback = findNodeByName(userId, workstationId, parentId, fallbackName); + if (fallback != null && "DIR".equals(fallback.getEntryType())) { + return fallback; + } + if (fallback == null) { + return createDirectory(userId, workstationId, parentId, fallbackName); + } + String uniqueName = resolveUniqueName(userId, workstationId, parentId, fallbackName); + return createDirectory(userId, workstationId, parentId, uniqueName); + } + return createDirectory(userId, workstationId, parentId, safeTaskNo); + } + + private WorkspaceFile ensureWorkspaceFile( + String userId, + String workstationId, + String objectName, + String fileName, + long fileSize) { + WorkspaceFile existed = workspaceFileMapper.selectOne(new LambdaQueryWrapper() + .eq(WorkspaceFile::getUserId, userId) + .eq(WorkspaceFile::getSpaceType, SPACE_TYPE_WORKSTATION) + .eq(WorkspaceFile::getWorkstationId, workstationId) + .eq(WorkspaceFile::getOssPath, objectName) + .isNull(WorkspaceFile::getDeletedAt) + .last("limit 1")); + if (existed != null) { + boolean changed = false; + if (!fileName.equals(existed.getFileName())) { + existed.setFileName(fileName); + changed = true; + } + String fileType = extractFileType(fileName); + if (!fileType.equals(existed.getFileType())) { + existed.setFileType(fileType); + changed = true; + } + if (!Long.valueOf(fileSize).equals(existed.getFileSize())) { + existed.setFileSize(fileSize); + changed = true; + } + if (changed) { + existed.setUpdatedAt(LocalDateTime.now()); + 
workspaceFileMapper.updateById(existed); + } + return existed; + } + + WorkspaceFile created = new WorkspaceFile(); + created.setFileId(UUID.randomUUID().toString().replace("-", "")); + created.setFileName(fileName); + created.setFileSize(fileSize); + created.setFileType(extractFileType(fileName)); + created.setSpaceType(SPACE_TYPE_WORKSTATION); + created.setWorkstationId(workstationId); + created.setUserId(userId); + created.setOssPath(objectName); + created.setParseStatus("SKIP"); + created.setMemoryIndexStatus("SKIP"); + created.setCreatedAt(LocalDateTime.now()); + created.setUpdatedAt(LocalDateTime.now()); + workspaceFileMapper.insert(created); + return created; + } + + private void ensureFileNode(String userId, String workstationId, String parentId, WorkspaceFile workspaceFile, String fileName) { + FileNodeEntity existed = fileNodeMapper.selectOne(new LambdaQueryWrapper() + .eq(FileNodeEntity::getUserId, userId) + .eq(FileNodeEntity::getSpaceType, SPACE_TYPE_WORKSTATION) + .eq(FileNodeEntity::getWorkstationId, workstationId) + .eq(FileNodeEntity::getParentId, parentId) + .eq(FileNodeEntity::getFileId, workspaceFile.getFileId()) + .isNull(FileNodeEntity::getDeletedAt) + .last("limit 1")); + if (existed != null) { + if (!fileName.equals(existed.getName())) { + existed.setName(fileName); + existed.setUpdatedAt(LocalDateTime.now()); + fileNodeMapper.updateById(existed); + } + return; + } + + String finalName = resolveUniqueName(userId, workstationId, parentId, fileName); + + FileNodeEntity node = new FileNodeEntity(); + node.setNodeId(UUID.randomUUID().toString().replace("-", "")); + node.setParentId(parentId); + node.setEntryType("FILE"); + node.setName(finalName); + node.setSpaceType(SPACE_TYPE_WORKSTATION); + node.setWorkstationId(workstationId); + node.setUserId(userId); + node.setFileId(workspaceFile.getFileId()); + node.setCreatedAt(LocalDateTime.now()); + node.setUpdatedAt(LocalDateTime.now()); + fileNodeMapper.insert(node); + } + + private String 
resolveUniqueName(String userId, String workstationId, String parentId, String expectedName) { + FileNodeEntity conflict = findNodeByName(userId, workstationId, parentId, expectedName); + if (conflict == null) { + return expectedName; + } + String base; + String ext; + int dotIdx = expectedName.lastIndexOf('.'); + if (dotIdx > 0 && dotIdx < expectedName.length() - 1) { + base = expectedName.substring(0, dotIdx); + ext = expectedName.substring(dotIdx); + } else { + base = expectedName; + ext = ""; + } + for (int i = 1; i <= 100; i++) { + String candidate = base + " (" + i + ")" + ext; + if (findNodeByName(userId, workstationId, parentId, candidate) == null) { + return candidate; + } + } + return base + "-" + UUID.randomUUID().toString().substring(0, 8) + ext; + } + + private FileNodeEntity createDirectory(String userId, String workstationId, String parentId, String name) { + FileNodeEntity node = new FileNodeEntity(); + node.setNodeId(UUID.randomUUID().toString().replace("-", "")); + node.setParentId(parentId); + node.setEntryType("DIR"); + node.setName(name); + node.setSpaceType(SPACE_TYPE_WORKSTATION); + node.setWorkstationId(workstationId); + node.setUserId(userId); + node.setCreatedAt(LocalDateTime.now()); + node.setUpdatedAt(LocalDateTime.now()); + fileNodeMapper.insert(node); + return node; + } + + private FileNodeEntity findNodeByName(String userId, String workstationId, String parentId, String name) { + LambdaQueryWrapper wrapper = new LambdaQueryWrapper() + .eq(FileNodeEntity::getUserId, userId) + .eq(FileNodeEntity::getSpaceType, SPACE_TYPE_WORKSTATION) + .eq(FileNodeEntity::getWorkstationId, workstationId) + .eq(FileNodeEntity::getName, name) + .isNull(FileNodeEntity::getDeletedAt) + .last("limit 1"); + + if (StringUtils.hasText(parentId)) { + wrapper.eq(FileNodeEntity::getParentId, parentId); + } else { + wrapper.isNull(FileNodeEntity::getParentId); + } + return fileNodeMapper.selectOne(wrapper); + } + + private String extractFileName(String objectName) 
{
        // Last path segment; a trailing-slash or slash-free input is returned as-is.
        if (!StringUtils.hasText(objectName)) {
            return "";
        }
        int idx = objectName.lastIndexOf('/');
        if (idx < 0 || idx == objectName.length() - 1) {
            return objectName;
        }
        return objectName.substring(idx + 1);
    }

    /** Lower-cased file extension without the dot, or "" when absent. */
    private String extractFileType(String fileName) {
        if (!StringUtils.hasText(fileName)) {
            return "";
        }
        int idx = fileName.lastIndexOf('.');
        if (idx < 0 || idx == fileName.length() - 1) {
            return "";
        }
        return fileName.substring(idx + 1).toLowerCase(Locale.ROOT);
    }

    // Log-style OSS paths have the shape logs/{workstationId}/{userId}/{taskNo}/...;
    // the helpers below pick out single segments, or null when the path does not
    // match that shape. Consolidated into logsPathSegment to remove triplicated
    // parsing logic.

    private String extractWorkstationIdFromPath(String ossPath) {
        return logsPathSegment(ossPath, 1);
    }

    private String extractUserIdFromPath(String ossPath) {
        return logsPathSegment(ossPath, 2);
    }

    private String extractTaskNoFromPath(String ossPath) {
        return logsPathSegment(ossPath, 3);
    }

    /**
     * Returns segment {@code index} of a "logs/..." path that has at least four
     * segments; null otherwise.
     */
    private String logsPathSegment(String ossPath, int index) {
        String[] parts = safeSplitPath(ossPath);
        if (parts.length >= 4 && "logs".equals(parts[0])) {
            return parts[index];
        }
        return null;
    }

    /** Strips any leading slashes; never returns null. */
    private String normalizeOssPath(String rawOssPath) {
        String normalized = rawOssPath == null ?
"" : rawOssPath.trim(); + while (normalized.startsWith("/")) { + normalized = normalized.substring(1); + } + return normalized; + } + + private List buildCandidatePrefixes(String workstationId, String ossPath, String resolvedOssPath) { + LinkedHashSet candidates = new LinkedHashSet<>(); + if (StringUtils.hasText(resolvedOssPath)) { + candidates.add(resolvedOssPath); + } + if (StringUtils.hasText(ossPath)) { + candidates.add(ossPath); + if (!ossPath.startsWith("system/") && StringUtils.hasText(workstationId)) { + candidates.add("system/" + workstationId + "/" + ossPath); + } + } + return List.copyOf(candidates); + } + + private long resolveFileSize(String objectName) { + if (!nfsStorageService.isConfigured()) { + return 0L; + } + try { + Path path = nfsStorageService.getAbsolutePath(objectName); + return Files.exists(path) ? Files.size(path) : 0L; + } catch (Exception e) { + log.debug("读取任务产出文件大小失败: objectName={}, error={}", objectName, e.getMessage()); + return 0L; + } + } + + private String[] safeSplitPath(String ossPath) { + if (!StringUtils.hasText(ossPath)) { + return new String[0]; + } + return ossPath.split("/"); + } + + private void upsertPathListArtifact( + String userId, + String workstationId, + String relativePath, + String objectName, + long fileSize + ) { + SpaceResolution resolution = resolveSpace(relativePath, workstationId); + if (!resolution.valid()) { + return; + } + + String fileName = extractFileName(resolution.subPath()); + if (!StringUtils.hasText(fileName)) { + return; + } + String parentDir = extractParentPath(resolution.subPath()); + String parentId = ensureDirectoryChain(userId, resolution.workstationId(), resolution.spaceType(), parentDir); + + WorkspaceFile workspaceFile = ensureWorkspaceFileGeneric( + userId, + resolution.workstationId(), + resolution.spaceType(), + objectName, + fileName, + fileSize + ); + ensureFileNodeGeneric( + userId, + resolution.workstationId(), + resolution.spaceType(), + parentId, + workspaceFile, + fileName + 
); + } + + private void markDeletedByObjectName(String userId, String workstationId, String objectName) { + if (!StringUtils.hasText(objectName)) { + return; + } + WorkspaceFile existed = workspaceFileMapper.selectOne(new LambdaQueryWrapper() + .eq(WorkspaceFile::getUserId, userId) + .eq(WorkspaceFile::getOssPath, objectName) + .isNull(WorkspaceFile::getDeletedAt) + .last("limit 1")); + if (existed == null) { + return; + } + LocalDateTime now = LocalDateTime.now(); + existed.setDeletedAt(now); + existed.setUpdatedAt(now); + workspaceFileMapper.updateById(existed); + + List nodes = fileNodeMapper.selectList(new LambdaQueryWrapper() + .eq(FileNodeEntity::getFileId, existed.getFileId()) + .isNull(FileNodeEntity::getDeletedAt)); + for (FileNodeEntity node : nodes) { + node.setDeletedAt(now); + node.setUpdatedAt(now); + node.setName(buildDeletedTombstoneName(node.getName(), node.getNodeId())); + fileNodeMapper.updateById(node); + } + } + + private String buildDeletedTombstoneName(String originalName, String nodeId) { + String baseName = StringUtils.hasText(originalName) ? originalName : "node"; + String safeNodeId = StringUtils.hasText(nodeId) + ? 
nodeId + : UUID.randomUUID().toString().replace("-", ""); + String suffix = DELETED_NAME_MARKER + safeNodeId; + int allowedBaseLength = NODE_NAME_MAX_LENGTH - suffix.length(); + if (allowedBaseLength <= 0) { + return suffix.substring(Math.max(0, suffix.length() - NODE_NAME_MAX_LENGTH)); + } + if (baseName.length() > allowedBaseLength) { + baseName = baseName.substring(0, allowedBaseLength); + } + return baseName + suffix; + } + + private WorkspaceFile ensureWorkspaceFileGeneric( + String userId, + String workstationId, + String spaceType, + String objectName, + String fileName, + long fileSize + ) { + LambdaQueryWrapper query = new LambdaQueryWrapper() + .eq(WorkspaceFile::getUserId, userId) + .eq(WorkspaceFile::getSpaceType, spaceType) + .eq(WorkspaceFile::getOssPath, objectName) + .isNull(WorkspaceFile::getDeletedAt) + .last("limit 1"); + if (SPACE_TYPE_WORKSTATION.equals(spaceType)) { + query.eq(WorkspaceFile::getWorkstationId, workstationId); + } else { + query.isNull(WorkspaceFile::getWorkstationId); + } + + WorkspaceFile existed = workspaceFileMapper.selectOne(query); + if (existed != null) { + boolean changed = false; + String fileType = extractFileType(fileName); + if (!fileName.equals(existed.getFileName())) { + existed.setFileName(fileName); + changed = true; + } + if (!fileType.equals(existed.getFileType())) { + existed.setFileType(fileType); + changed = true; + } + if (!Long.valueOf(fileSize).equals(existed.getFileSize())) { + existed.setFileSize(fileSize); + changed = true; + } + if (changed) { + existed.setUpdatedAt(LocalDateTime.now()); + workspaceFileMapper.updateById(existed); + } + return existed; + } + + WorkspaceFile created = new WorkspaceFile(); + created.setFileId(UUID.randomUUID().toString().replace("-", "")); + created.setFileName(fileName); + created.setFileSize(fileSize); + created.setFileType(extractFileType(fileName)); + created.setSpaceType(spaceType); + created.setWorkstationId(SPACE_TYPE_WORKSTATION.equals(spaceType) ? 
workstationId : null); + created.setUserId(userId); + created.setOssPath(objectName); + created.setParseStatus("SKIP"); + created.setMemoryIndexStatus("SKIP"); + created.setCreatedAt(LocalDateTime.now()); + created.setUpdatedAt(LocalDateTime.now()); + workspaceFileMapper.insert(created); + return created; + } + + private void ensureFileNodeGeneric( + String userId, + String workstationId, + String spaceType, + String parentId, + WorkspaceFile workspaceFile, + String fileName + ) { + String desiredName = fileName; + LambdaQueryWrapper query = new LambdaQueryWrapper() + .eq(FileNodeEntity::getUserId, userId) + .eq(FileNodeEntity::getSpaceType, spaceType) + .eq(FileNodeEntity::getFileId, workspaceFile.getFileId()) + .isNull(FileNodeEntity::getDeletedAt) + .last("limit 1"); + if (SPACE_TYPE_WORKSTATION.equals(spaceType)) { + query.eq(FileNodeEntity::getWorkstationId, workstationId); + } else { + query.isNull(FileNodeEntity::getWorkstationId); + } + + FileNodeEntity existed = fileNodeMapper.selectOne(query); + if (existed != null) { + if (!Objects.equals(existed.getParentId(), parentId)) { + desiredName = resolveUniqueNameGenericExceptNode( + userId, workstationId, spaceType, parentId, fileName, existed.getNodeId()); + } else if (!fileName.equals(existed.getName())) { + desiredName = resolveUniqueNameGenericExceptNode( + userId, workstationId, spaceType, parentId, fileName, existed.getNodeId()); + } + + boolean changed = false; + if (!Objects.equals(existed.getParentId(), parentId)) { + existed.setParentId(parentId); + changed = true; + } + if (!desiredName.equals(existed.getName())) { + existed.setName(desiredName); + changed = true; + } + if (changed) { + existed.setUpdatedAt(LocalDateTime.now()); + fileNodeMapper.updateById(existed); + } + return; + } + + String finalName = resolveUniqueNameGeneric(userId, workstationId, spaceType, parentId, desiredName); + FileNodeEntity node = new FileNodeEntity(); + node.setNodeId(UUID.randomUUID().toString().replace("-", "")); + 
node.setParentId(parentId); + node.setEntryType("FILE"); + node.setName(finalName); + node.setSpaceType(spaceType); + node.setWorkstationId(SPACE_TYPE_WORKSTATION.equals(spaceType) ? workstationId : null); + node.setUserId(userId); + node.setFileId(workspaceFile.getFileId()); + node.setCreatedAt(LocalDateTime.now()); + node.setUpdatedAt(LocalDateTime.now()); + fileNodeMapper.insert(node); + } + + private String ensureDirectoryChain( + String userId, + String workstationId, + String spaceType, + String parentPath + ) { + if (!StringUtils.hasText(parentPath)) { + return null; + } + String parentId = null; + for (String segment : parentPath.split("/")) { + if (!StringUtils.hasText(segment)) { + continue; + } + FileNodeEntity existed = findNodeByNameGeneric(userId, workstationId, spaceType, parentId, segment); + if (existed != null && "DIR".equals(existed.getEntryType())) { + parentId = existed.getNodeId(); + continue; + } + String dirName = resolveUniqueNameGeneric(userId, workstationId, spaceType, parentId, segment); + FileNodeEntity dir = createDirectoryGeneric(userId, workstationId, spaceType, parentId, dirName); + parentId = dir.getNodeId(); + } + return parentId; + } + + private FileNodeEntity findNodeByNameGeneric( + String userId, + String workstationId, + String spaceType, + String parentId, + String name + ) { + LambdaQueryWrapper wrapper = new LambdaQueryWrapper() + .eq(FileNodeEntity::getUserId, userId) + .eq(FileNodeEntity::getSpaceType, spaceType) + .eq(FileNodeEntity::getName, name) + .isNull(FileNodeEntity::getDeletedAt) + .last("limit 1"); + if (SPACE_TYPE_WORKSTATION.equals(spaceType)) { + wrapper.eq(FileNodeEntity::getWorkstationId, workstationId); + } else { + wrapper.isNull(FileNodeEntity::getWorkstationId); + } + if (StringUtils.hasText(parentId)) { + wrapper.eq(FileNodeEntity::getParentId, parentId); + } else { + wrapper.isNull(FileNodeEntity::getParentId); + } + return fileNodeMapper.selectOne(wrapper); + } + + private String 
resolveUniqueNameGeneric( + String userId, + String workstationId, + String spaceType, + String parentId, + String expectedName + ) { + return resolveUniqueNameGenericExceptNode( + userId, workstationId, spaceType, parentId, expectedName, null); + } + + private String resolveUniqueNameGenericExceptNode( + String userId, + String workstationId, + String spaceType, + String parentId, + String expectedName, + String ignoredNodeId + ) { + FileNodeEntity conflict = findNodeByNameGeneric(userId, workstationId, spaceType, parentId, expectedName); + if (conflict == null || Objects.equals(conflict.getNodeId(), ignoredNodeId)) { + return expectedName; + } + String base; + String ext; + int dotIdx = expectedName.lastIndexOf('.'); + if (dotIdx > 0 && dotIdx < expectedName.length() - 1) { + base = expectedName.substring(0, dotIdx); + ext = expectedName.substring(dotIdx); + } else { + base = expectedName; + ext = ""; + } + for (int i = 1; i <= 100; i++) { + String candidate = base + " (" + i + ")" + ext; + FileNodeEntity candidateConflict = findNodeByNameGeneric( + userId, workstationId, spaceType, parentId, candidate); + if (candidateConflict == null || Objects.equals(candidateConflict.getNodeId(), ignoredNodeId)) { + return candidate; + } + } + return base + "-" + UUID.randomUUID().toString().substring(0, 8) + ext; + } + + private FileNodeEntity createDirectoryGeneric( + String userId, + String workstationId, + String spaceType, + String parentId, + String name + ) { + FileNodeEntity node = new FileNodeEntity(); + node.setNodeId(UUID.randomUUID().toString().replace("-", "")); + node.setParentId(parentId); + node.setEntryType("DIR"); + node.setName(name); + node.setSpaceType(spaceType); + node.setWorkstationId(SPACE_TYPE_WORKSTATION.equals(spaceType) ? 
workstationId : null); + node.setUserId(userId); + node.setCreatedAt(LocalDateTime.now()); + node.setUpdatedAt(LocalDateTime.now()); + fileNodeMapper.insert(node); + return node; + } + + private String normalizeWorkspaceRelativePath(String path) { + String normalized = path == null ? "" : path.trim().replace('\\', '/'); + if (normalized.startsWith("/workspace/")) { + normalized = normalized.substring("/workspace/".length()); + } + while (normalized.startsWith("/")) { + normalized = normalized.substring(1); + } + while (normalized.endsWith("/")) { + normalized = normalized.substring(0, normalized.length() - 1); + } + return normalized; + } + + private String extractParentPath(String path) { + if (!StringUtils.hasText(path)) { + return ""; + } + int idx = path.lastIndexOf('/'); + if (idx <= 0) { + return ""; + } + return path.substring(0, idx); + } + + private long extractSize(Object value) { + if (value == null) { + return 0L; + } + try { + return Long.parseLong(String.valueOf(value).trim()); + } catch (Exception ignore) { + return 0L; + } + } + + private String buildObjectNameFromRelative(String relativePath, String userId, String workstationId) { + if (relativePath.startsWith("user/")) { + String subPath = relativePath.substring("user/".length()); + if (StringUtils.hasText(subPath) && StringUtils.hasText(userId)) { + return "user-files/" + userId + "/" + subPath; + } + } + if (relativePath.startsWith("workstation/")) { + String subPath = relativePath.substring("workstation/".length()); + if (StringUtils.hasText(subPath) && StringUtils.hasText(userId) && StringUtils.hasText(workstationId)) { + return "workstation/" + workstationId + "/" + userId + "/" + subPath; + } + } + return ""; + } + + private SpaceResolution resolveSpace(String relativePath, String workstationId) { + if (relativePath.startsWith("user/")) { + return new SpaceResolution(SPACE_TYPE_USER, null, relativePath.substring("user/".length())); + } + if (relativePath.startsWith("workstation/")) { + 
return new SpaceResolution(SPACE_TYPE_WORKSTATION, workstationId, relativePath.substring("workstation/".length())); + } + return new SpaceResolution("", "", ""); + } + + private record SpaceResolution(String spaceType, String workstationId, String subPath) { + private boolean valid() { + return StringUtils.hasText(spaceType) && StringUtils.hasText(subPath); + } + } + + private record TaskContext(String taskNo, String workstationId, String userId) { + private boolean isValid() { + return StringUtils.hasText(taskNo) && StringUtils.hasText(workstationId) && StringUtils.hasText(userId); + } + } +} diff --git a/back/src/main/java/com/linkwork/service/TaskPathlistSyncService.java b/back/src/main/java/com/linkwork/service/TaskPathlistSyncService.java new file mode 100644 index 0000000..d6d4b55 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/TaskPathlistSyncService.java @@ -0,0 +1,263 @@ +package com.linkwork.service; + +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Service; +import org.springframework.util.StringUtils; + +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +/** + * 处理 TASK_OUTPUT_PATHLIST_READY 事件: + * - 统一构建前端可展示 artifacts + * - 同步写入 USER/WORKSTATION 文件索引 + */ +@Slf4j +@Service +@RequiredArgsConstructor +public class TaskPathlistSyncService { + + private final TaskOutputWorkspaceSyncService taskOutputWorkspaceSyncService; + private final TaskService taskService; + private final NfsStorageService nfsStorageService; + private final Map taskWorkstationCache = new ConcurrentHashMap<>(); + + public void onEvent(String taskNo, Map eventData) { + try { + PathlistContext context = buildContext(taskNo, eventData); + if (!context.validForSync()) { + return; + } + long upsertCount = context.artifacts().stream() + .filter(item -> 
!"deleted".equalsIgnoreCase(asText(item.get("action")))) + .count(); + long deletedCount = context.artifacts().size() - upsertCount; + taskOutputWorkspaceSyncService.syncTaskPathListArtifacts( + context.taskNo(), + context.userId(), + context.workstationId(), + context.artifacts() + ); + log.info("TASK_OUTPUT_PATHLIST_READY synced: taskNo={}, userId={}, workstationId={}, path_count={}, upsert_count={}, deleted_count={}, skipped_count={}", + context.taskNo(), + context.userId(), + context.workstationId(), + context.artifacts().size(), + upsertCount, + deletedCount, + context.skippedCount()); + } catch (Exception e) { + log.error("sync TASK_OUTPUT_PATHLIST_READY failed: taskNo={}, err={}", taskNo, e.getMessage(), e); + } + } + + public void enrichEventForDisplay(String taskNo, Map eventData) { + try { + PathlistContext context = buildContext(taskNo, eventData); + if (!context.validForDisplay()) { + return; + } + @SuppressWarnings("unchecked") + Map data = (Map) eventData.get("data"); + data.put("artifacts", context.artifacts()); + data.put("count", context.artifacts().size()); + data.put("artifacts_pending", Boolean.FALSE); + } catch (Exception e) { + log.warn("enrich TASK_OUTPUT_PATHLIST_READY failed: taskNo={}, err={}", taskNo, e.getMessage(), e); + } + } + + private PathlistContext buildContext(String taskNo, Map eventData) { + String eventType = asText(eventData.get("event_type")); + if (!"TASK_OUTPUT_PATHLIST_READY".equals(eventType)) { + return PathlistContext.empty(); + } + + Object dataObj = eventData.get("data"); + if (!(dataObj instanceof Map dataMap)) { + return PathlistContext.empty(); + } + + @SuppressWarnings("unchecked") + Map data = (Map) dataMap; + Object rawPathList = data.get("path_list"); + if (!(rawPathList instanceof List pathList) || pathList.isEmpty()) { + return PathlistContext.empty(); + } + + String safeTaskNo = firstNonBlank(taskNo, asText(eventData.get("task_no")), asText(eventData.get("task_id"))); + String userId = firstNonBlank( + 
asText(data.get("user_id")), + asText(eventData.get("user_id")) + ); + String workstationId = resolveWorkstationId(safeTaskNo); + + List> artifacts = new ArrayList<>(); + int skippedCount = 0; + for (Object itemObj : pathList) { + if (!(itemObj instanceof Map itemMap)) { + skippedCount++; + continue; + } + String relativePath = asText(itemMap.get("relative_path")); + if (!StringUtils.hasText(relativePath)) { + relativePath = normalizeWorkspaceRelativePath(asText(itemMap.get("path"))); + } + if (!StringUtils.hasText(relativePath)) { + skippedCount++; + continue; + } + + String action = asText(itemMap.get("action")); + if (!StringUtils.hasText(action)) { + action = "upsert"; + } + + Map artifact = new LinkedHashMap<>(); + artifact.put("name", extractFileName(relativePath)); + artifact.put("relative_path", relativePath); + artifact.put("path", "/workspace/" + relativePath); + artifact.put("category", asText(itemMap.get("category"))); + artifact.put("action", action); + artifact.put("size", itemMap.get("size")); + + String objectName = resolveObjectName(relativePath, safeTaskNo, userId, workstationId); + if (StringUtils.hasText(objectName)) { + artifact.put("object_name", objectName); + if (nfsStorageService.isConfigured() && !"deleted".equalsIgnoreCase(action)) { + artifact.put("download_url", nfsStorageService.buildTaskOutputDownloadUrl(objectName)); + artifact.put("nfs_path", nfsStorageService.getAbsolutePath(objectName).toString()); + } + } + artifacts.add(artifact); + } + + return new PathlistContext(safeTaskNo, userId, workstationId, artifacts, skippedCount); + } + + private String resolveWorkstationId(String taskNo) { + if (!StringUtils.hasText(taskNo)) { + return null; + } + String cached = taskWorkstationCache.get(taskNo); + if (StringUtils.hasText(cached)) { + return cached; + } + try { + Long roleId = taskService.getTaskByNo(taskNo).getRoleId(); + if (roleId == null) { + return null; + } + String workstationId = String.valueOf(roleId); + 
taskWorkstationCache.put(taskNo, workstationId); + return workstationId; + } catch (Exception e) { + log.debug("resolve workstation id failed: taskNo={}, err={}", taskNo, e.getMessage()); + return null; + } + } + + private String resolveObjectName(String relativePath, String taskNo, String userId, String workstationId) { + String normalized = normalizeWorkspaceRelativePath(relativePath); + if (!StringUtils.hasText(normalized)) { + return ""; + } + + if (normalized.startsWith("logs/")) { + String subPath = normalized.substring("logs/".length()); + if (!StringUtils.hasText(subPath) + || !StringUtils.hasText(taskNo) + || !StringUtils.hasText(userId) + || !StringUtils.hasText(workstationId)) { + return ""; + } + return String.format("system/%s/logs/%s/%s/%s", workstationId, userId, taskNo, subPath); + } + + if (normalized.startsWith("user/")) { + String subPath = normalized.substring("user/".length()); + if (!StringUtils.hasText(subPath) || !StringUtils.hasText(userId)) { + return ""; + } + return String.format("user-files/%s/%s", userId, subPath); + } + + if (normalized.startsWith("workstation/")) { + String subPath = normalized.substring("workstation/".length()); + if (!StringUtils.hasText(subPath) + || !StringUtils.hasText(userId) + || !StringUtils.hasText(workstationId)) { + return ""; + } + return String.format("workstation/%s/%s/%s", workstationId, userId, subPath); + } + + return ""; + } + + private String extractFileName(String relativePath) { + String normalized = normalizeWorkspaceRelativePath(relativePath); + int index = normalized.lastIndexOf('/'); + if (index < 0 || index >= normalized.length() - 1) { + return normalized; + } + return normalized.substring(index + 1); + } + + private String normalizeWorkspaceRelativePath(String path) { + String normalized = asText(path).replace('\\', '/'); + if (normalized.startsWith("/workspace/")) { + normalized = normalized.substring("/workspace/".length()); + } + while (normalized.startsWith("/")) { + normalized = 
normalized.substring(1); + } + while (normalized.endsWith("/")) { + normalized = normalized.substring(0, normalized.length() - 1); + } + return normalized; + } + + private String asText(Object value) { + if (value == null) { + return ""; + } + return String.valueOf(value).trim(); + } + + private String firstNonBlank(String... values) { + for (String value : values) { + if (StringUtils.hasText(value)) { + return value.trim(); + } + } + return null; + } + + private record PathlistContext( + String taskNo, + String userId, + String workstationId, + List> artifacts, + int skippedCount + ) { + private static PathlistContext empty() { + return new PathlistContext(null, null, null, List.of(), 0); + } + + private boolean validForDisplay() { + return artifacts != null && !artifacts.isEmpty(); + } + + private boolean validForSync() { + return validForDisplay() + && StringUtils.hasText(taskNo) + && StringUtils.hasText(userId) + && StringUtils.hasText(workstationId); + } + } +} diff --git a/back/src/main/java/com/linkwork/service/TaskService.java b/back/src/main/java/com/linkwork/service/TaskService.java new file mode 100644 index 0000000..16e40ae --- /dev/null +++ b/back/src/main/java/com/linkwork/service/TaskService.java @@ -0,0 +1,1777 @@ +package com.linkwork.service; + +import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper; +import com.baomidou.mybatisplus.extension.plugins.pagination.Page; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.linkwork.common.ForbiddenOperationException; +import com.linkwork.common.SnowflakeIdGenerator; +import com.linkwork.config.DispatchConfig; +import com.linkwork.mapper.WorkspaceFileMapper; +import com.linkwork.mapper.TaskMapper; +import com.linkwork.model.dto.ScaleResult; +import com.linkwork.model.dto.TaskCompleteRequest; +import com.linkwork.model.dto.TaskCreateRequest; +import 
com.linkwork.model.dto.TaskResponse; +import com.linkwork.model.entity.McpServerEntity; +import com.linkwork.model.entity.WorkspaceFile; +import com.linkwork.model.entity.RoleEntity; +import com.linkwork.model.entity.SkillEntity; +import com.linkwork.model.entity.Task; +import com.linkwork.model.enums.TaskOutputType; +import com.linkwork.model.enums.TaskStatus; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.data.redis.connection.stream.MapRecord; +import org.springframework.data.redis.connection.stream.StreamOffset; +import org.springframework.data.redis.core.StringRedisTemplate; +import org.springframework.stereotype.Service; +import org.springframework.transaction.annotation.Transactional; +import org.springframework.transaction.support.TransactionSynchronization; +import org.springframework.transaction.support.TransactionSynchronizationManager; +import org.springframework.util.StringUtils; + +import java.time.LocalDateTime; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashSet; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.UUID; +import java.util.stream.Collectors; + +/** + * 任务服务 + */ +@Slf4j +@Service +@RequiredArgsConstructor +public class TaskService { + private static final String DELIVERY_MODE_GIT = "git"; + private static final String DELIVERY_MODE_OSS = "oss"; + private static final String OUTPUT_CODE_GIT_BRANCH = TaskOutputType.GIT_BRANCH.getCode(); + private static final String OUTPUT_CODE_PULL_REQUEST = TaskOutputType.PULL_REQUEST.getCode(); + + private final TaskMapper taskMapper; + private final WorkspaceFileMapper workspaceFileMapper; + private final StringRedisTemplate 
redisTemplate; + private final ObjectMapper objectMapper; + private final RoleService roleService; + private final RuntimeModeService runtimeModeService; + private final SnowflakeIdGenerator idGenerator; + private final DispatchConfig dispatchConfig; + private final TaskOutputEstimatorAgent taskOutputEstimatorAgent; + private final TaskGitAuthService taskGitAuthService; + private final CronJobService cronJobService; + private final UserSoulService userSoulService; + private final TaskBillingUsageService taskBillingUsageService; + private final AdminAccessService adminAccessService; + @Autowired + private ServiceScaleService serviceScaleService; + @Autowired + private McpServerService mcpServerService; + + @Autowired + private SkillService skillService; + + @Value("${robot.prompt.platform:遵循平台工程规范:禁止吞异常;优先给结论和可执行步骤;涉及风险时明确说明并给出回滚方案。}") + private String platformPrompt = "遵循平台工程规范:禁止吞异常;优先给结论和可执行步骤;涉及风险时明确说明并给出回滚方案。"; + + @Value("${robot.prompt.user-soul-default:默认用户偏好:请使用中文沟通;先给结论,再给可执行步骤;信息不足时明确说明假设与风险。}") + private String defaultUserSoulPrompt = "默认用户偏好:请使用中文沟通;先给结论,再给可执行步骤;信息不足时明确说明假设与风险。"; + + /** + * 创建任务 + */ + @Transactional + public Task createTask(TaskCreateRequest request, String creatorId, String creatorName) { + return createTask(request, creatorId, creatorName, null, true, "MANUAL", null); + } + + /** + * 创建任务(带创建触发 IP) + */ + @Transactional + public Task createTask(TaskCreateRequest request, String creatorId, String creatorName, String creatorIp) { + return createTask(request, creatorId, creatorName, creatorIp, true, "MANUAL", null); + } + + /** + * 创建任务 + * + * 简化版本:前端只传 prompt, roleId, modelId, fileIds + * 其他配置(image, mcp, skills 等)由后端根据岗位自动填充 + * + * @param creatorIp 任务创建执行触发来源 IP + * @param sendNotify 是否发送创建通知(当前实现忽略) + */ + @Transactional + public Task createTask(TaskCreateRequest request, String creatorId, String creatorName, String creatorIp, boolean sendNotify) { + return createTask(request, creatorId, creatorName, creatorIp, sendNotify, 
"MANUAL", null); + } + + @Transactional + public Task createTask(TaskCreateRequest request, String creatorId, String creatorName, String creatorIp, + boolean sendNotify, String source, Long cronJobId) { + // 使用雪花算法生成分布式唯一任务编号 + String taskNo = idGenerator.nextTaskNo(); + + // 查询岗位信息(岗位包含默认镜像、MCP、技能等配置) + RoleEntity role = roleService.getById(request.getRoleId()); + if (role == null) { + throw new IllegalArgumentException("指定的岗位不存在: roleId=" + request.getRoleId()); + } + PromptLayers promptLayers = buildPromptLayers(role, creatorId); + String systemPromptAppend = toSystemPromptAppend(promptLayers); + + List selectedFiles = loadSelectedFiles(request.getFileIds(), creatorId); + List taskInputFiles = buildTaskInputFiles(selectedFiles); + String resolvedContent = buildResolvedTaskContent(request.getPrompt(), taskInputFiles); + + // 构建任务实体 + Task task = new Task(); + task.setTaskNo(taskNo); + task.setRoleId(request.getRoleId()); + task.setRoleName(role.getName()); + task.setPrompt(request.getPrompt()); + task.setStatus(TaskStatus.PENDING); + task.setSource(normalizeTaskSource(source)); + task.setCronJobId("CRON".equals(task.getSource()) ? cronJobId : null); + // 从岗位获取默认镜像,如果岗位没配置则使用默认值 + task.setImage(role.getImage() != null ? 
role.getImage() : "ubuntu-22.04-python3.10"); + task.setSelectedModel(request.getModelId()); + task.setCreatorId(creatorId); + task.setCreatorName(creatorName); + task.setCreatorIp(creatorIp); + task.setTokensUsed(0); + task.setDurationMs(0L); + task.setCreatedAt(LocalDateTime.now()); + task.setUpdatedAt(LocalDateTime.now()); + task.setIsDeleted(0); + + // 任务运行模式快照(严格双态:SIDECAR / ALONE) + RuntimeModeService.RuntimeSnapshot runtimeSnapshot = runtimeModeService.resolveForRole(role); + + // 关键:先落库任务核心字段,避免网关按 taskId 反查模型时出现“任务不存在” + taskMapper.insert(task); + + // 执行前产出预估:优先 LLM Gateway,失败兜底规则 + TaskOutputEstimatorAgent.EstimateResult estimateResult = + taskOutputEstimatorAgent.estimateWithBranch(taskNo, request, role); + List estimatedOutput = estimateResult.estimatedOutput(); + String branchName = estimateResult.branchName(); + String deliveryMode = resolveDeliveryMode(estimatedOutput, hasValidRoleGitRepo(role), taskNo); + + // 序列化配置(包含 fileIds、岗位继承配置和预估产出) + Map configMap = new HashMap<>(); + configMap.put("modelId", request.getModelId()); + configMap.put("fileIds", selectedFiles.stream().map(WorkspaceFile::getFileId).toList()); + configMap.put("systemPromptAppend", systemPromptAppend); + configMap.put("promptLayers", toPromptLayerMap(promptLayers)); + if (!selectedFiles.isEmpty()) { + List> fileDetails = new ArrayList<>(); + Map inputFileRefMap = new HashMap<>(); + Map aliasMap = new LinkedHashMap<>(); + for (TaskInputFileRef inputFile : taskInputFiles) { + inputFileRefMap.put(inputFile.fileId(), inputFile); + aliasMap.put(inputFile.runtimePath(), inputFile.realPath()); + } + for (WorkspaceFile rf : selectedFiles) { + TaskInputFileRef ref = inputFileRefMap.get(rf.getFileId()); + fileDetails.add(Map.of( + "fileId", rf.getFileId(), + "fileName", rf.getFileName(), + "fileType", rf.getFileType(), + "ossPath", rf.getOssPath(), + "spaceType", rf.getSpaceType(), + "workstationId", rf.getWorkstationId() == null ? 
"" : rf.getWorkstationId(), + "taskInputObject", "", + "taskInputPath", ref != null ? ref.runtimePath() : "")); + } + configMap.put("files", fileDetails); + configMap.put("resolvedContent", resolvedContent); + configMap.put("aliasMap", aliasMap); + } + configMap.put("image", task.getImage()); + configMap.put("runtimeMode", runtimeSnapshot.getRuntimeMode()); + configMap.put("zzMode", runtimeSnapshot.getZzMode()); + configMap.put("runnerImage", runtimeSnapshot.getRunnerImage()); + configMap.put("estimatedOutput", estimatedOutput); + configMap.put("deliveryMode", deliveryMode); + if (StringUtils.hasText(branchName)) { + configMap.put("branchName", branchName); + } + // 从岗位 configJson 继承 MCP、技能等配置 + if (role.getConfigJson() != null) { + configMap.put("mcp", role.getConfigJson().getMcp()); + configMap.put("skills", role.getConfigJson().getSkills()); + configMap.put("knowledge", role.getConfigJson().getKnowledge()); + configMap.put("gitRepos", role.getConfigJson().getGitRepos()); + configMap.put("env", role.getConfigJson().getEnv()); + } + try { + task.setConfigJson(objectMapper.writeValueAsString(configMap)); + } catch (JsonProcessingException e) { + throw new IllegalStateException("序列化任务配置失败: taskNo=" + taskNo, e); + } + + // 回写完整任务配置(含预估产物、执行模式等) + int updated = taskMapper.updateById(task); + if (updated != 1) { + throw new IllegalStateException("更新任务配置失败: taskNo=" + taskNo); + } + + // 绑定任务与 Git 认证映射(用户未授权时跳过) + taskGitAuthService.bindTaskWithLatestGitAuth(taskNo, creatorId); + + // 创建 Redis Stream (用于 WebSocket 事件) + // 动态格式: logs:{workstationId}:{taskNo} + String streamKey = dispatchConfig.getLogStreamKey(task.getRoleId(), taskNo); + publishTaskEvent(streamKey, "TASK_CREATED", taskNo, Map.of("message", "任务已创建")); + + Map estimateData = new HashMap<>(); + estimateData.put("estimated_output", estimatedOutput); + estimateData.put("domains", resolveOutputDomains(estimatedOutput)); + estimateData.put("delivery_mode", deliveryMode); + if (StringUtils.hasText(branchName)) 
{ + estimateData.put("branch_name", branchName); + } + estimateData.put("message", "已完成任务产物预估"); + publishTaskEvent(streamKey, "TASK_OUTPUT_ESTIMATED", taskNo, estimateData); + + // 关键:事务提交后再入队,避免消费者查不到任务 + final Task dispatchTask = task; + TransactionSynchronizationManager.registerSynchronization(new TransactionSynchronization() { + @Override + public void afterCommit() { + pushToDispatchQueue(dispatchTask); + } + }); + + log.info("任务创建成功: taskNo={}, roleId={}, roleName={}, modelId={}, deliveryMode={}, estimatedOutput={}, branchName={}, estimateSource={}", + taskNo, task.getRoleId(), role.getName(), request.getModelId(), deliveryMode, estimatedOutput, branchName, estimateResult.source()); + + // 预留 sendNotify 参数,当前不发送外部 IM 通知 + + return task; + } + + private String normalizeTaskSource(String source) { + if (!StringUtils.hasText(source)) { + return "MANUAL"; + } + String normalized = source.trim().toUpperCase(Locale.ROOT); + if (!"MANUAL".equals(normalized) && !"CRON".equals(normalized)) { + throw new IllegalArgumentException("非法任务来源: source=" + source); + } + return normalized; + } + + /** + * 将任务推入调度队列(统一消息契约供 worker 消费) + */ + private void pushToDispatchQueue(Task task) { + try { + String queueKey = dispatchConfig.getTaskQueueKey(task.getRoleId()); + + Map message = new HashMap<>(); + message.put("task_id", task.getTaskNo()); + + String userId = task.getCreatorId(); + if (!StringUtils.hasText(userId)) { + userId = "system"; + log.warn("任务创建人为空,使用兜底 user_id=system 下发: taskNo={}", task.getTaskNo()); + } + message.put("user_id", userId); + message.put("content", resolveDispatchContent(task)); + message.put("system_prompt_append", resolveDispatchSystemPromptAppend(task)); + message.put("prompt_layers", resolveDispatchPromptLayers(task)); + message.put("source", task.getSource()); + message.put("cron_job_id", task.getCronJobId()); + if (task.getRoleId() != null) { + message.put("role_id", String.valueOf(task.getRoleId())); + } + if 
(StringUtils.hasText(task.getSelectedModel())) { + message.put("selected_model", task.getSelectedModel().trim()); + } + + List> gitConfig = buildDispatchGitConfig(task); + String deliveryMode = resolveDispatchDeliveryMode(task, gitConfig); + if (DELIVERY_MODE_GIT.equals(deliveryMode) && gitConfig.isEmpty()) { + log.warn("任务下发降级为 oss:delivery_mode=git 但 git_config 为空, taskNo={}", task.getTaskNo()); + deliveryMode = DELIVERY_MODE_OSS; + } + message.put("delivery_mode", deliveryMode); + if (DELIVERY_MODE_GIT.equals(deliveryMode)) { + message.put("git_config", gitConfig); + } + List> filePathMappings = parseDispatchFilePathMappings(task); + if (!filePathMappings.isEmpty()) { + message.put("file_path_mappings", filePathMappings); + } + + String messageJson = objectMapper.writeValueAsString(message); + Long queueLength = redisTemplate.opsForList().rightPush(queueKey, messageJson); + + log.info("任务入队成功: taskNo={}, queueKey={}, queueLength={}", + task.getTaskNo(), queueKey, queueLength); + + ensurePodsForRole(task.getRoleId(), task.getTaskNo()); + } catch (JsonProcessingException e) { + log.error("任务入队失败,序列化错误: taskNo={}", task.getTaskNo(), e); + } + } + + /** + * 任务入队后检查岗位下是否有存活 Pod,无则按岗位配置扩容。 + * 异步执行,扩容失败不影响任务入队。 + */ + private void ensurePodsForRole(Long roleId, String taskNo) { + if (roleId == null) { + return; + } + String serviceId = String.valueOf(roleId); + try { + ScaleResult result = serviceScaleService.ensurePodsForRole(serviceId); + if (result != null) { + if (result.isSuccess()) { + log.info("任务入队触发自动扩容: taskNo={}, serviceId={}, pods={}", + taskNo, serviceId, result.getCurrentPodCount()); + } else { + log.warn("任务入队触发自动扩容失败: taskNo={}, serviceId={}, error={}", + taskNo, serviceId, result.getErrorMessage()); + } + } + } catch (Exception e) { + log.warn("任务入队扩容检查异常(不影响任务下发): taskNo={}, serviceId={}, error={}", + taskNo, serviceId, e.getMessage()); + } + } + + /** + * 根据任务编号获取任务详情 + */ + public Task getTaskByNo(String taskNo) { + LambdaQueryWrapper wrapper = new 
LambdaQueryWrapper<>(); + wrapper.eq(Task::getTaskNo, taskNo); + Task task = taskMapper.selectOne(wrapper); + if (task == null) { + throw new IllegalArgumentException("任务不存在: " + taskNo); + } + return task; + } + + /** + * 根据任务编号获取任务详情(按创建人隔离) + */ + public Task getTaskByNo(String taskNo, String creatorId) { + Task task = getTaskByNo(taskNo); + assertTaskOwner(task, creatorId); + return task; + } + + /** + * 根据ID获取任务详情 + */ + public Task getTask(Long taskId) { + Task task = taskMapper.selectById(taskId); + if (task == null) { + throw new IllegalArgumentException("任务不存在: " + taskId); + } + return task; + } + + /** + * 获取任务列表 + * + * @param roleId 按岗位 ID 筛选(可选) + * @param status 按状态筛选(可选) + * @param page 页码 + * @param pageSize 每页大小 + */ + public Page listTasks(Long roleId, String status, Integer page, Integer pageSize) { + return listTasks(roleId, status, page, pageSize, null); + } + + /** + * 获取任务列表(按创建人隔离) + */ + public Page listTasks(Long roleId, String status, Integer page, Integer pageSize, String creatorId) { + LambdaQueryWrapper wrapper = new LambdaQueryWrapper<>(); + + if (StringUtils.hasText(creatorId) && !isAdmin(creatorId)) { + wrapper.eq(Task::getCreatorId, creatorId); + } + if (roleId != null) { + wrapper.eq(Task::getRoleId, roleId); + } + if (StringUtils.hasText(status)) { + wrapper.eq(Task::getStatus, TaskStatus.valueOf(status.toUpperCase())); + } + wrapper.orderByDesc(Task::getCreatedAt); + + return taskMapper.selectPage(new Page<>(page, pageSize), wrapper); + } + + /** + * 更新任务状态 + */ + @Transactional + public Task updateStatus(String taskNo, TaskStatus status) { + return updateStatusWithUsage(taskNo, status, null, null); + } + + /** + * 更新任务状态(可选补齐 tokens/duration,并在终态补齐计费字段) + */ + @Transactional + public Task updateStatusWithUsage(String taskNo, TaskStatus status, Integer tokensUsed, Long durationMs) { + Task task = getTaskByNo(taskNo); + task.setStatus(status); + + if (tokensUsed != null && tokensUsed >= 0) { + Integer currentTokens = 
task.getTokensUsed(); + if (tokensUsed > 0 || currentTokens == null || currentTokens <= 0) { + task.setTokensUsed(tokensUsed); + } + } + if (durationMs != null && durationMs >= 0) { + Long currentDuration = task.getDurationMs(); + if (durationMs > 0 || currentDuration == null || currentDuration <= 0) { + task.setDurationMs(durationMs); + } + } + + if (isTerminalStatus(status)) { + TaskBillingUsageService.UsageSnapshot billingSnapshot = taskBillingUsageService.fetchTaskUsage(taskNo).orElse(null); + if (billingSnapshot != null) { + if (billingSnapshot.tokenUsed() != null) { + task.setTokensUsed(billingSnapshot.tokenUsed()); + } + task.setInputTokens(billingSnapshot.inputTokens()); + task.setOutputTokens(billingSnapshot.outputTokens()); + task.setRequestCount(billingSnapshot.requestCount()); + task.setTokenLimit(billingSnapshot.tokenLimit()); + task.setUsagePercent(billingSnapshot.usagePercent()); + } + } + + task.setUpdatedAt(LocalDateTime.now()); + taskMapper.updateById(task); + cronJobService.onTaskStatusChanged(task, status); + log.info("任务状态更新: taskNo={}, status={}, tokensUsed={}, inputTokens={}, outputTokens={}, requestCount={}, durationMs={}", + taskNo, status, task.getTokensUsed(), task.getInputTokens(), task.getOutputTokens(), task.getRequestCount(), task.getDurationMs()); + return task; + } + + private boolean isTerminalStatus(TaskStatus status) { + return status == TaskStatus.COMPLETED || status == TaskStatus.FAILED || status == TaskStatus.ABORTED; + } + + /** + * 任务完成回写 + * + * Worker 执行完毕后调用,回写 tokens_used, duration_ms, report_json, status + */ + @Transactional + public Task completeTask(String taskNo, TaskCompleteRequest request) { + Task task = getTaskByNo(taskNo); + + // 校验状态:只有 RUNNING 或 PENDING 的任务可以回写完成 + if (task.getStatus() != TaskStatus.RUNNING && task.getStatus() != TaskStatus.PENDING) { + throw new IllegalArgumentException("任务当前状态不允许回写完成: " + task.getStatus()); + } + + // 解析目标状态 + TaskStatus targetStatus; + try { + targetStatus = 
TaskStatus.valueOf(request.getStatus().toUpperCase()); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("无效的任务状态: " + request.getStatus() + ",允许值: COMPLETED, FAILED"); + } + if (targetStatus != TaskStatus.COMPLETED && targetStatus != TaskStatus.FAILED) { + throw new IllegalArgumentException("回写状态只能为 COMPLETED 或 FAILED,收到: " + request.getStatus()); + } + + int finalTokensUsed = request.getTokensUsed(); + TaskBillingUsageService.UsageSnapshot billingSnapshot = taskBillingUsageService.fetchTaskUsage(taskNo).orElse(null); + UsageFallback streamUsage = resolveUsageFromStream(task); + if (billingSnapshot != null && billingSnapshot.tokenUsed() != null) { + finalTokensUsed = billingSnapshot.tokenUsed(); + } else if (finalTokensUsed <= 0 && streamUsage.tokensUsed() != null && streamUsage.tokensUsed() > 0) { + finalTokensUsed = streamUsage.tokensUsed(); + } + Long finalDurationMs = request.getDurationMs(); + if ((finalDurationMs == null || finalDurationMs <= 0) + && streamUsage.durationMs() != null + && streamUsage.durationMs() > 0) { + finalDurationMs = streamUsage.durationMs(); + } + if ((finalDurationMs == null || finalDurationMs <= 0) && task.getCreatedAt() != null) { + finalDurationMs = Math.max(0, Duration.between(task.getCreatedAt(), LocalDateTime.now()).toMillis()); + } + + // 更新字段(优先使用计费网关真实值) + task.setStatus(targetStatus); + task.setTokensUsed(finalTokensUsed); + task.setInputTokens(billingSnapshot != null ? billingSnapshot.inputTokens() : null); + task.setOutputTokens(billingSnapshot != null ? billingSnapshot.outputTokens() : null); + task.setRequestCount(billingSnapshot != null ? billingSnapshot.requestCount() : null); + task.setTokenLimit(billingSnapshot != null ? billingSnapshot.tokenLimit() : null); + task.setUsagePercent(billingSnapshot != null ? 
billingSnapshot.usagePercent() : null); + task.setDurationMs(finalDurationMs); + task.setUpdatedAt(LocalDateTime.now()); + + Map reportMap = buildTaskReport(task, request, targetStatus, finalTokensUsed); + try { + task.setReportJson(objectMapper.writeValueAsString(reportMap)); + } catch (JsonProcessingException e) { + throw new IllegalStateException("序列化任务报告失败: taskNo=" + taskNo, e); + } + + taskMapper.updateById(task); + cronJobService.onTaskStatusChanged(task, targetStatus); + + // 发送完成事件到 Redis Stream: logs:{workstationId}:{taskNo} + String streamKey = dispatchConfig.getLogStreamKey(task.getRoleId(), taskNo); + Map completionData = new HashMap<>(); + completionData.put("tokens_used", finalTokensUsed); + completionData.put("input_tokens", task.getInputTokens()); + completionData.put("output_tokens", task.getOutputTokens()); + completionData.put("request_count", task.getRequestCount()); + completionData.put("duration_ms", finalDurationMs); + publishTaskEvent(streamKey, + targetStatus == TaskStatus.COMPLETED ? 
"TASK_COMPLETED" : "TASK_FAILED", + taskNo, + completionData); + + Map reportReadyData = new HashMap<>(); + reportReadyData.put("report", reportMap); + reportReadyData.put("message", "执行报告已生成"); + publishTaskEvent(streamKey, "TASK_REPORT_READY", taskNo, reportReadyData); + + log.info("任务完成回写: taskNo={}, status={}, tokensUsed={}, inputTokens={}, outputTokens={}, requestCount={}, durationMs={}", + taskNo, targetStatus, finalTokensUsed, task.getInputTokens(), task.getOutputTokens(), task.getRequestCount(), finalDurationMs); + + return task; + } + + private UsageFallback resolveUsageFromStream(Task task) { + if (task == null || !StringUtils.hasText(task.getTaskNo())) { + return new UsageFallback(null, null); + } + + List streamKeys = List.of( + dispatchConfig.getLogStreamKey(task.getRoleId(), task.getTaskNo()), + "stream:task:" + task.getTaskNo() + ":events", + "stream:task:" + task.getTaskNo() + ); + + Integer tokensUsed = null; + Long durationMs = null; + for (String streamKey : streamKeys) { + try { + List> records = redisTemplate.opsForStream() + .read(StreamOffset.fromStart(streamKey)); + if (records == null || records.isEmpty()) { + continue; + } + for (MapRecord record : records) { + Map eventData = extractEventData(record); + Integer eventTokens = resolveIntByKeys(eventData, "tokens_used", "tokensUsed", "token_usage"); + Long eventDuration = resolveLongByKeys(eventData, "duration_ms", "durationMs", "elapsed_ms"); + if (eventTokens != null && eventTokens >= 0) { + tokensUsed = eventTokens; + } + if (eventDuration != null && eventDuration > 0) { + durationMs = eventDuration; + } + } + } catch (Exception e) { + log.debug("从 Stream 提取 usage 失败: taskNo={}, streamKey={}, error={}", + task.getTaskNo(), streamKey, e.getMessage()); + } + } + return new UsageFallback(tokensUsed, durationMs); + } + + @SuppressWarnings("unchecked") + private Map extractEventData(MapRecord record) { + Map rawEvent = new HashMap<>(); + record.getValue().forEach((k, v) -> 
rawEvent.put(k.toString(), v)); + + Object payloadObj = rawEvent.get("payload"); + if (payloadObj instanceof String payloadStr && payloadStr.startsWith("{")) { + try { + Map payloadMap = objectMapper.readValue(payloadStr, Map.class); + Object innerData = payloadMap.get("data"); + if (innerData instanceof String dataStr && (dataStr.startsWith("{") || dataStr.startsWith("["))) { + try { + payloadMap.put("data", objectMapper.readValue(dataStr, Object.class)); + } catch (Exception ignored) { + } + } + Object payloadData = payloadMap.get("data"); + if (payloadData instanceof Map dataMap) { + dataMap.forEach((k, v) -> payloadMap.putIfAbsent(String.valueOf(k), v)); + } + return payloadMap; + } catch (Exception e) { + log.debug("解析 payload 失败,回退扁平事件: recordId={}, error={}", record.getId(), e.getMessage()); + } + } + + Object dataObj = rawEvent.get("data"); + if (dataObj instanceof String dataStr && (dataStr.startsWith("{") || dataStr.startsWith("["))) { + try { + rawEvent.put("data", objectMapper.readValue(dataStr, Object.class)); + } catch (Exception ignored) { + } + } + Object flatData = rawEvent.get("data"); + if (flatData instanceof Map dataMap) { + dataMap.forEach((k, v) -> rawEvent.putIfAbsent(String.valueOf(k), v)); + } + + return rawEvent; + } + + private Integer resolveIntByKeys(Map eventData, String... keys) { + Long value = resolveLongByKeys(eventData, keys); + if (value == null || value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) { + return null; + } + return value.intValue(); + } + + private Long resolveLongByKeys(Map eventData, String... 
keys) { + if (eventData == null || keys == null) { + return null; + } + for (String key : keys) { + Object raw = eventData.get(key); + if (raw == null) { + continue; + } + try { + return Long.parseLong(String.valueOf(raw)); + } catch (NumberFormatException ignored) { + } + } + return null; + } + + private record UsageFallback(Integer tokensUsed, Long durationMs) {} + + /** + * 终止任务 + * + * terminate 接口只负责受理并下发终止命令,不会直接把任务写为 ABORTED。 + */ + @Transactional + public Task abortTask(String taskNo, String updaterId, String updaterName) { + Task task = StringUtils.hasText(updaterId) ? getTaskByNo(taskNo, updaterId) : getTaskByNo(taskNo); + + // 只有运行中或等待中的任务可以终止 + if (task.getStatus() != TaskStatus.RUNNING && task.getStatus() != TaskStatus.PENDING) { + throw new IllegalArgumentException("任务当前状态不允许终止: " + task.getStatus()); + } + + task.setUpdaterId(updaterId); + task.setUpdaterName(updaterName); + task.setUpdatedAt(LocalDateTime.now()); + taskMapper.updateById(task); + + TerminateEnqueueResult enqueueResult = pushTerminateCommand(task, updaterName); + + // 发布终止入队事件到 Redis Stream: logs:{workstationId}:{taskNo} + String streamKey = dispatchConfig.getLogStreamKey(task.getRoleId(), taskNo); + Map enqueueData = new HashMap<>(); + enqueueData.put("queue_key", enqueueResult.queueKey()); + enqueueData.put("request_id", enqueueResult.requestId()); + enqueueData.put("message", "任务终止命令已入队"); + publishTaskEvent(streamKey, "TASK_TERMINATE_ENQUEUED", taskNo, enqueueData); + + log.info("任务终止请求已受理: taskNo={}, queueKey={}, requestId={}", + taskNo, enqueueResult.queueKey(), enqueueResult.requestId()); + + return task; + } + + private boolean isAdmin(String userId) { + return adminAccessService.isAdmin(userId); + } + + private void assertTaskOwner(Task task, String creatorId) { + if (!StringUtils.hasText(creatorId)) { + throw new ForbiddenOperationException("用户未登录或登录态失效"); + } + if (isAdmin(creatorId)) { + return; + } + if (!StringUtils.hasText(task.getCreatorId()) || 
!creatorId.equals(task.getCreatorId())) { + throw new ForbiddenOperationException("无权限访问该任务"); + } + } + + private TerminateEnqueueResult pushTerminateCommand(Task task, String updaterName) { + String taskNo = task.getTaskNo(); + String queueKey = dispatchConfig.getTaskControlQueueKey(task.getRoleId()); + String requestId = "TRQ-" + UUID.randomUUID(); + + Map message = new HashMap<>(); + message.put("type", "TASK_TERMINATE_REQUEST"); + message.put("request_id", requestId); + message.put("task_id", taskNo); + message.put("reason", "terminated_by_user"); + message.put("operator", StringUtils.hasText(updaterName) ? updaterName : "system"); + message.put("requested_at", LocalDateTime.now().toString()); + + final String messageJson; + try { + messageJson = objectMapper.writeValueAsString(message); + } catch (JsonProcessingException e) { + throw new IllegalStateException("序列化任务终止命令失败: taskNo=" + taskNo, e); + } + + Long queueLength = redisTemplate.opsForList().rightPush(queueKey, messageJson); + if (queueLength == null) { + throw new IllegalStateException("任务终止命令入队失败: taskNo=" + taskNo); + } + + log.info("任务终止命令入队成功: taskNo={}, queueKey={}, queueLength={}, requestId={}", + taskNo, queueKey, queueLength, requestId); + return new TerminateEnqueueResult(queueKey, requestId); + } + + + /** + * 转换为响应 DTO + */ + public TaskResponse toResponse(Task task) { + TaskResponse response = new TaskResponse(); + response.setTaskNo(task.getTaskNo()); + response.setRoleId(task.getRoleId()); + response.setRoleName(task.getRoleName()); + response.setPrompt(task.getPrompt()); + response.setStatus(task.getStatus()); + response.setSource(task.getSource()); + response.setImage(task.getImage()); + response.setSelectedModel(task.getSelectedModel()); + response.setCreator(task.getCreatorName()); + response.setCreatedAt(task.getCreatedAt() != null ? 
task.getCreatedAt().toString() : null); + + // 构建 Usage + TaskResponse.Usage usage = new TaskResponse.Usage(); + usage.setTokensUsed(task.getTokensUsed()); + usage.setInputTokens(task.getInputTokens()); + usage.setOutputTokens(task.getOutputTokens()); + usage.setRequestCount(task.getRequestCount()); + usage.setTokenLimit(task.getTokenLimit()); + usage.setUsagePercent(task.getUsagePercent()); + usage.setDuration(formatDuration(task.getDurationMs())); + response.setUsage(usage); + + // 解析报告 + if (StringUtils.hasText(task.getReportJson())) { + try { + response.setReport(objectMapper.readValue(task.getReportJson(), TaskResponse.Report.class)); + } catch (JsonProcessingException e) { + log.error("解析任务报告失败", e); + } + } + + com.fasterxml.jackson.databind.JsonNode configNode = null; + if (StringUtils.hasText(task.getConfigJson())) { + try { + configNode = objectMapper.readTree(task.getConfigJson()); + } catch (Exception e) { + throw new IllegalStateException("解析任务配置失败: taskNo=" + task.getTaskNo(), e); + } + } + + // 运行模式字段强校验:仅 SIDECAR / ALONE,不允许第三态 + RuntimeModeService.RuntimeSnapshot runtimeSnapshot = resolveTaskRuntimeSnapshot(task, configNode); + response.setRuntimeMode(runtimeSnapshot.getRuntimeMode()); + response.setZzMode(runtimeSnapshot.getZzMode()); + response.setRunnerImage(runtimeSnapshot.getRunnerImage()); + + // 从 configJson 解析扩展字段 (repo/branch/mcp/skills/knowledge/env) + if (configNode != null) { + String branchName = null; + String gitRepoBranch = null; + + // estimatedOutput + var estimatedOutputNode = configNode.get("estimatedOutput"); + if (estimatedOutputNode != null && estimatedOutputNode.isArray()) { + response.setEstimatedOutput(objectMapper.convertValue(estimatedOutputNode, + objectMapper.getTypeFactory().constructCollectionType(List.class, String.class))); + } + + var branchNameNode = configNode.get("branchName"); + if (branchNameNode != null && branchNameNode.isTextual()) { + branchName = branchNameNode.asText(); + if 
(StringUtils.hasText(branchName)) { + branchName = branchName.trim(); + response.setBranchName(branchName); + } + } + var deliveryModeNode = configNode.get("deliveryMode"); + if (deliveryModeNode != null && deliveryModeNode.isTextual()) { + response.setDeliveryMode(normalizeDeliveryMode(deliveryModeNode.asText(), task.getTaskNo())); + } + + // gitRepos → repo + branch + var gitRepos = configNode.get("gitRepos"); + if (gitRepos != null && gitRepos.isArray() && !gitRepos.isEmpty()) { + var first = gitRepos.get(0); + if (first.has("url")) { + response.setRepo(first.get("url").asText()); + } + if (first.has("branch")) { + gitRepoBranch = first.get("branch").asText(); + if (StringUtils.hasText(gitRepoBranch)) { + gitRepoBranch = gitRepoBranch.trim(); + } + } + } + + // branch(兼容历史配置) + var branchNode = configNode.get("branch"); + if (!StringUtils.hasText(gitRepoBranch) && branchNode != null && branchNode.isTextual()) { + gitRepoBranch = branchNode.asText(); + if (StringUtils.hasText(gitRepoBranch)) { + gitRepoBranch = gitRepoBranch.trim(); + } + } + + if (StringUtils.hasText(branchName)) { + response.setBranch(branchName); + } else if (StringUtils.hasText(gitRepoBranch)) { + response.setBranch(gitRepoBranch); + } + + // mcp + var mcpNode = configNode.get("mcp"); + if (mcpNode != null && mcpNode.isArray()) { + response.setMcp(resolveMcpDisplayNames(mcpNode)); + } + + // skills + var skillsNode = configNode.get("skills"); + if (skillsNode != null && skillsNode.isArray()) { + response.setSkills(resolveSkillDisplayNames(skillsNode)); + } + + // knowledge + var knowledgeNode = configNode.get("knowledge"); + if (knowledgeNode != null && knowledgeNode.isArray()) { + response.setKnowledge(objectMapper.convertValue(knowledgeNode, + objectMapper.getTypeFactory().constructCollectionType(List.class, String.class))); + } + + // env: [{key, value}] → Map + var envNode = configNode.get("env"); + if (envNode != null && envNode.isArray()) { + var envMap = new LinkedHashMap(); + for (var 
item : envNode) { + String k = item.has("key") ? item.get("key").asText() : null; + String v = item.has("value") ? item.get("value").asText() : ""; + if (k != null && !k.isEmpty()) { + envMap.put(k, v); + } + } + if (!envMap.isEmpty()) { + response.setEnv(envMap); + } + } + } + + if (!StringUtils.hasText(response.getDeliveryMode())) { + response.setDeliveryMode(resolveDispatchDeliveryMode(task, buildDispatchGitConfig(task))); + } + + return response; + } + + /** + * 访客分享视图:移除敏感配置字段,仅保留任务执行详情。 + */ + public TaskResponse toShareResponse(Task task) { + TaskResponse response = toResponse(task); + response.setEnv(null); + response.setKnowledge(null); + return response; + } + + private List resolveMcpDisplayNames(com.fasterxml.jackson.databind.JsonNode refsNode) { + List refs = extractConfigRefs(refsNode); + if (refs.isEmpty()) { + return List.of(); + } + + Map displayByRef = new HashMap<>(); + List idRefs = new ArrayList<>(); + List nameRefs = new ArrayList<>(); + + for (String ref : refs) { + try { + idRefs.add(Long.parseLong(ref)); + } catch (NumberFormatException ex) { + nameRefs.add(ref); + } + } + + if (!idRefs.isEmpty()) { + List entities = mcpServerService.list( + new LambdaQueryWrapper().in(McpServerEntity::getId, idRefs)); + for (McpServerEntity entity : entities) { + String displayName = StringUtils.hasText(entity.getName()) + ? entity.getName() + : String.valueOf(entity.getId()); + displayByRef.put(String.valueOf(entity.getId()), displayName); + if (StringUtils.hasText(entity.getName())) { + displayByRef.put(entity.getName(), displayName); + } + } + } + + if (!nameRefs.isEmpty()) { + List entities = mcpServerService.list( + new LambdaQueryWrapper().in(McpServerEntity::getName, nameRefs)); + for (McpServerEntity entity : entities) { + String displayName = StringUtils.hasText(entity.getName()) + ? 
entity.getName() + : String.valueOf(entity.getId()); + displayByRef.put(entity.getName(), displayName); + displayByRef.put(String.valueOf(entity.getId()), displayName); + } + } + + List result = new ArrayList<>(); + for (String ref : refs) { + result.add(displayByRef.getOrDefault(ref, ref)); + } + return result; + } + + private List resolveSkillDisplayNames(com.fasterxml.jackson.databind.JsonNode refsNode) { + List refs = extractConfigRefs(refsNode); + if (refs.isEmpty()) { + return List.of(); + } + + Map displayByRef = new HashMap<>(); + List idRefs = new ArrayList<>(); + List nameRefs = new ArrayList<>(); + + for (String ref : refs) { + try { + idRefs.add(Long.parseLong(ref)); + } catch (NumberFormatException ex) { + nameRefs.add(ref); + } + } + + if (!idRefs.isEmpty()) { + List entities = skillService.list( + new LambdaQueryWrapper().in(SkillEntity::getId, idRefs)); + for (SkillEntity entity : entities) { + String displayName = resolveSkillDisplayName(entity); + displayByRef.put(String.valueOf(entity.getId()), displayName); + if (StringUtils.hasText(entity.getName())) { + displayByRef.put(entity.getName(), displayName); + } + } + } + + if (!nameRefs.isEmpty()) { + List entities = skillService.list( + new LambdaQueryWrapper().in(SkillEntity::getName, nameRefs)); + for (SkillEntity entity : entities) { + String displayName = resolveSkillDisplayName(entity); + displayByRef.put(entity.getName(), displayName); + displayByRef.put(String.valueOf(entity.getId()), displayName); + } + } + + List result = new ArrayList<>(); + for (String ref : refs) { + result.add(displayByRef.getOrDefault(ref, ref)); + } + return result; + } + + private String resolveSkillDisplayName(SkillEntity entity) { + if (StringUtils.hasText(entity.getDisplayName())) { + return entity.getDisplayName(); + } + if (StringUtils.hasText(entity.getName())) { + return entity.getName(); + } + return String.valueOf(entity.getId()); + } + + private List 
extractConfigRefs(com.fasterxml.jackson.databind.JsonNode refsNode) { + if (refsNode == null || !refsNode.isArray()) { + return List.of(); + } + + List refs = new ArrayList<>(); + for (var refNode : refsNode) { + String ref = extractConfigRef(refNode); + if (StringUtils.hasText(ref)) { + refs.add(ref.trim()); + } + } + return refs; + } + + private String extractConfigRef(com.fasterxml.jackson.databind.JsonNode refNode) { + if (refNode == null || refNode.isNull()) { + return null; + } + if (refNode.isTextual() || refNode.isNumber()) { + return refNode.asText(); + } + if (!refNode.isObject()) { + return null; + } + + String[] candidateKeys = {"id", "value", "name", "label", "key"}; + for (String key : candidateKeys) { + var valueNode = refNode.get(key); + if (valueNode == null || valueNode.isNull()) { + continue; + } + if (valueNode.isTextual() || valueNode.isNumber()) { + String value = valueNode.asText(); + if (StringUtils.hasText(value)) { + return value; + } + } + } + return null; + } + + /** + * 批量转换为响应 DTO + */ + public List toResponseList(List tasks) { + return tasks.stream().map(this::toResponse).collect(Collectors.toList()); + } + + private List loadSelectedFiles(List fileIds, String creatorId) { + if (fileIds == null || fileIds.isEmpty()) { + return Collections.emptyList(); + } + + LinkedHashSet uniqueIds = new LinkedHashSet<>(); + for (String fileId : fileIds) { + if (!StringUtils.hasText(fileId)) { + continue; + } + uniqueIds.add(fileId.trim()); + } + if (uniqueIds.isEmpty()) { + return Collections.emptyList(); + } + + List files = new ArrayList<>(); + for (String fileId : uniqueIds) { + WorkspaceFile file = workspaceFileMapper.selectOne( + new LambdaQueryWrapper() + .eq(WorkspaceFile::getFileId, fileId) + .isNull(WorkspaceFile::getDeletedAt) + .last("limit 1")); + if (file == null) { + throw new IllegalArgumentException("文件不存在: " + fileId); + } + if (!creatorId.equals(file.getUserId())) { + throw new IllegalArgumentException("无权引用该文件: " + fileId); + } + 
files.add(file); + } + return files; + } + + private List buildTaskInputFiles(List files) { + if (files == null || files.isEmpty()) { + return Collections.emptyList(); + } + + Map nameCount = files.stream() + .collect(Collectors.groupingBy(WorkspaceFile::getFileName, Collectors.counting())); + + Map nameSeq = new HashMap<>(); + + List refs = new ArrayList<>(); + for (WorkspaceFile file : files) { + String displayName = file.getFileName(); + if (nameCount.getOrDefault(displayName, 0L) > 1) { + int seq = nameSeq.merge(displayName, 1, Integer::sum); + if (seq > 1) { + displayName = appendSequence(displayName, seq - 1); + } + } + String readablePath = buildReadableRuntimePath(file, displayName); + String realPath = buildRuntimeFilePath(file); + refs.add(new TaskInputFileRef(file.getFileId(), file.getFileName(), readablePath, realPath)); + } + return refs; + } + + private String appendSequence(String fileName, int seq) { + int dotIdx = fileName.lastIndexOf('.'); + if (dotIdx > 0) { + return fileName.substring(0, dotIdx) + " (" + seq + ")" + fileName.substring(dotIdx); + } + return fileName + " (" + seq + ")"; + } + + private String buildReadableRuntimePath(WorkspaceFile file, String displayName) { + return buildWorkspaceFileBase(file) + displayName; + } + + private String buildResolvedTaskContent(String prompt, List refs) { + String basePrompt = prompt == null ? "" : prompt; + if (refs == null || refs.isEmpty()) { + return basePrompt; + } + + Map nameCount = refs.stream().collect( + Collectors.groupingBy(TaskInputFileRef::originalFileName, LinkedHashMap::new, Collectors.counting())); + + String resolved = basePrompt; + List sorted = refs.stream() + .sorted(Comparator.comparingInt( + (TaskInputFileRef ref) -> ref.originalFileName() == null ? 
0 : ref.originalFileName().length()) + .reversed()) + .toList(); + + for (TaskInputFileRef ref : sorted) { + String originalName = ref.originalFileName(); + if (!StringUtils.hasText(originalName)) { + continue; + } + if (StringUtils.hasText(ref.fileId())) { + resolved = resolved.replace("@" + ref.fileId(), ref.runtimePath()); + } + if (nameCount.getOrDefault(originalName, 0L) != 1L) { + continue; + } + resolved = resolved.replace("@" + originalName, ref.runtimePath()); + } + + StringBuilder appendix = new StringBuilder(); + appendix.append("\n\n可用附件路径:"); + for (TaskInputFileRef ref : refs) { + appendix.append("\n- ") + .append(ref.originalFileName()) + .append(" => ") + .append(ref.runtimePath()); + } + return resolved + appendix; + } + + private String resolveDispatchContent(Task task) { + if (task == null) { + return ""; + } + String resolved = parseResolvedContentFromConfig(task); + if (StringUtils.hasText(resolved)) { + return resolved; + } + return task.getPrompt(); + } + + private String resolveDispatchSystemPromptAppend(Task task) { + if (task == null || !StringUtils.hasText(task.getConfigJson())) { + throw new IllegalStateException("任务缺少 systemPromptAppend: taskNo=" + (task == null ? "" : task.getTaskNo())); + } + try { + var configNode = objectMapper.readTree(task.getConfigJson()); + var node = configNode.get("systemPromptAppend"); + if (node != null && node.isTextual() && StringUtils.hasText(node.asText())) { + return node.asText(); + } + } catch (Exception e) { + throw new IllegalStateException("解析 systemPromptAppend 失败: taskNo=" + task.getTaskNo(), e); + } + throw new IllegalStateException("任务缺少 systemPromptAppend: taskNo=" + task.getTaskNo()); + } + + private Map resolveDispatchPromptLayers(Task task) { + if (task == null || !StringUtils.hasText(task.getConfigJson())) { + throw new IllegalStateException("任务缺少 promptLayers: taskNo=" + (task == null ? 
"" : task.getTaskNo())); + } + try { + var configNode = objectMapper.readTree(task.getConfigJson()); + var layersNode = configNode.get("promptLayers"); + if (layersNode == null || !layersNode.isObject()) { + throw new IllegalStateException("任务缺少 promptLayers: taskNo=" + task.getTaskNo()); + } + String rolePrompt = readRequiredLayer(layersNode, "rolePrompt", task.getTaskNo()); + String platformPromptText = readRequiredLayer(layersNode, "platformPrompt", task.getTaskNo()); + String userSoul = readRequiredLayer(layersNode, "userSoul", task.getTaskNo()); + + Map layers = new LinkedHashMap<>(); + layers.put("platform_prompt", platformPromptText); + layers.put("role_prompt", rolePrompt); + layers.put("user_soul", userSoul); + return layers; + } catch (IllegalStateException e) { + throw e; + } catch (Exception e) { + throw new IllegalStateException("解析 promptLayers 失败: taskNo=" + task.getTaskNo(), e); + } + } + + private String readRequiredLayer(com.fasterxml.jackson.databind.JsonNode layersNode, String field, String taskNo) { + var node = layersNode.get(field); + if (node == null || !node.isTextual() || !StringUtils.hasText(node.asText())) { + throw new IllegalStateException("任务 promptLayer 缺失: taskNo=" + taskNo + ", field=" + field); + } + return node.asText(); + } + + private PromptLayers buildPromptLayers(RoleEntity role, String creatorId) { + String rolePrompt = normalizePromptLayer(role == null ? null : role.getPrompt()); + if (!StringUtils.hasText(rolePrompt)) { + Long roleId = role == null ? 
null : role.getId(); + throw new IllegalArgumentException("ROLE_PROMPT_INVALID: 岗位 Prompt 不能为空, roleId=" + roleId); + } + + String platformPromptText = normalizePromptLayer(platformPrompt); + if (!StringUtils.hasText(platformPromptText)) { + throw new IllegalArgumentException("PLATFORM_PROMPT_INVALID: 平台 Prompt 不能为空"); + } + + String userSoul = normalizePromptLayer(userSoulService.getOptionalSoulContent(creatorId)); + if (!StringUtils.hasText(userSoul)) { + userSoul = normalizePromptLayer(defaultUserSoulPrompt); + log.info("用户 Soul 缺失,使用默认用户层 Prompt: userId={}", creatorId); + } + if (!StringUtils.hasText(userSoul)) { + throw new IllegalArgumentException("SOUL_CONTENT_INVALID: Soul 内容不能为空"); + } + + return new PromptLayers(rolePrompt, platformPromptText, userSoul); + } + + private String toSystemPromptAppend(PromptLayers promptLayers) { + return "\n" + + "\n" + + " \n" + + " \n" + + promptLayers.platformPrompt() + + "\n \n\n" + + " \n" + + " \n" + + promptLayers.rolePrompt() + + "\n \n\n" + + " \n" + + " \n" + + promptLayers.userSoul() + + "\n \n" + + ""; + } + + private Map toPromptLayerMap(PromptLayers promptLayers) { + Map map = new LinkedHashMap<>(); + map.put("platformPrompt", promptLayers.platformPrompt()); + map.put("rolePrompt", promptLayers.rolePrompt()); + map.put("userSoul", promptLayers.userSoul()); + return map; + } + + private String normalizePromptLayer(String text) { + return text == null ? 
"" : text.trim(); + } + + private String parseResolvedContentFromConfig(Task task) { + if (task == null || !StringUtils.hasText(task.getConfigJson())) { + return ""; + } + try { + var configNode = objectMapper.readTree(task.getConfigJson()); + var resolvedContentNode = configNode.get("resolvedContent"); + if (resolvedContentNode != null && resolvedContentNode.isTextual()) { + return resolvedContentNode.asText(); + } + } catch (Exception e) { + log.warn("解析 resolvedContent 失败: taskNo={}, error={}", task.getTaskNo(), e.getMessage()); + } + return ""; + } + + private List> parseDispatchFilePathMappings(Task task) { + if (task == null || !StringUtils.hasText(task.getConfigJson())) { + return Collections.emptyList(); + } + try { + var configNode = objectMapper.readTree(task.getConfigJson()); + var aliasNode = configNode.get("aliasMap"); + if (aliasNode == null || !aliasNode.isObject()) { + return Collections.emptyList(); + } + + Map aliasMap = objectMapper.convertValue( + aliasNode, + new TypeReference>() { + }); + if (aliasMap == null || aliasMap.isEmpty()) { + return Collections.emptyList(); + } + + List> mappings = new ArrayList<>(); + for (Map.Entry entry : aliasMap.entrySet()) { + String runtimePath = entry.getKey(); + String realPath = entry.getValue(); + if (!StringUtils.hasText(runtimePath) || !StringUtils.hasText(realPath)) { + continue; + } + mappings.add(Map.of( + "runtime_path", runtimePath, + "real_path", realPath)); + } + return mappings; + } catch (Exception e) { + log.warn("解析 file_path_mappings 失败: taskNo={}, error={}", task.getTaskNo(), e.getMessage()); + return Collections.emptyList(); + } + } + + private String buildRuntimeFilePath(WorkspaceFile file) { + String fileId = file == null ? 
null : file.getFileId(); + if (!StringUtils.hasText(fileId)) { + throw new IllegalArgumentException("文件标识缺失,无法生成容器路径"); + } + String extension = normalizeFileExtension(file.getFileType()); + return buildWorkspaceFileBase(file) + fileId + extension; + } + + private String buildWorkspaceFileBase(WorkspaceFile file) { + return "WORKSTATION".equalsIgnoreCase(file.getSpaceType()) + ? "/workspace/workstation/original/" + : "/workspace/user/original/"; + } + + private String normalizeFileExtension(String fileType) { + if (!StringUtils.hasText(fileType)) { + return ""; + } + String cleaned = fileType.trim().toLowerCase(Locale.ROOT).replaceAll("[^a-z0-9]", ""); + if (!StringUtils.hasText(cleaned)) { + return ""; + } + return "." + cleaned; + } + + private RuntimeModeService.RuntimeSnapshot resolveTaskRuntimeSnapshot( + Task task, + com.fasterxml.jackson.databind.JsonNode configNode) { + + String runtimeModeRaw = readConfigText(configNode, "runtimeMode"); + if (!StringUtils.hasText(runtimeModeRaw)) { + runtimeModeRaw = readConfigText(configNode, "podMode"); + } + + String runnerImageRaw = readConfigText(configNode, "runnerImage"); + if (!StringUtils.hasText(runnerImageRaw)) { + runnerImageRaw = readConfigText(configNode, "runnerBaseImage"); + } + + RuntimeModeService.RuntimeSnapshot roleSnapshot = null; + RuntimeModeService.RuntimeSnapshot runtimeSnapshot; + if (StringUtils.hasText(runtimeModeRaw)) { + String effectiveRunner = runnerImageRaw; + if (!StringUtils.hasText(effectiveRunner) && "SIDECAR".equalsIgnoreCase(runtimeModeRaw)) { + roleSnapshot = runtimeModeService.resolveForRole(task.getRoleId()); + effectiveRunner = roleSnapshot.getRunnerImage(); + } + runtimeSnapshot = runtimeModeService.resolveFromRaw( + runtimeModeRaw, + effectiveRunner, + "taskNo=" + task.getTaskNo()); + } else { + roleSnapshot = runtimeModeService.resolveForRole(task.getRoleId()); + runtimeSnapshot = roleSnapshot; + } + + String zzModeRaw = readConfigText(configNode, "zzMode"); + if 
(StringUtils.hasText(zzModeRaw) + && !zzModeRaw.equalsIgnoreCase(runtimeSnapshot.getZzMode())) { + throw new IllegalStateException("任务运行模式与 zz 通道不一致: taskNo=" + task.getTaskNo()); + } + + return runtimeSnapshot; + } + + private String readConfigText(com.fasterxml.jackson.databind.JsonNode node, String key) { + if (node == null || key == null || !node.has(key) || node.get(key) == null) { + return null; + } + String value = node.get(key).asText(); + return StringUtils.hasText(value) ? value : null; + } + + private Map buildTaskReport(Task task, TaskCompleteRequest request, TaskStatus targetStatus, int finalTokensUsed) { + TaskCompleteRequest.Report workerReport = request.getReport(); + List estimatedOutput = parseEstimatedOutput(task); + + Map reportMap = new HashMap<>(); + reportMap.put("summary", resolveReportSummary(workerReport, targetStatus)); + reportMap.put("tokens", finalTokensUsed); + reportMap.put("duration", formatDuration(request.getDurationMs())); + reportMap.put("completion", resolveReportCompletion(workerReport, targetStatus)); + reportMap.put("audit", resolveReportAudit(workerReport, targetStatus)); + reportMap.put("artifacts", resolveReportArtifacts(workerReport, estimatedOutput)); + reportMap.put("branch", workerReport != null ? workerReport.getBranch() : null); + reportMap.put("commit", workerReport != null ? 
workerReport.getCommit() : null); + return reportMap; + } + + private List resolveReportArtifacts(TaskCompleteRequest.Report workerReport, List estimatedOutput) { + if (workerReport != null && workerReport.getArtifacts() != null && !workerReport.getArtifacts().isEmpty()) { + return new ArrayList<>(workerReport.getArtifacts()); + } + + LinkedHashSet uniqueOutputs = new LinkedHashSet<>(estimatedOutput); + List artifacts = new ArrayList<>(); + for (String outputCode : uniqueOutputs) { + if (!StringUtils.hasText(outputCode)) { + continue; + } + Map artifact = buildFallbackArtifact(outputCode); + if (!artifact.isEmpty()) { + artifacts.add(artifact); + } + } + + if (artifacts.isEmpty()) { + artifacts.add(buildFallbackArtifact(TaskOutputType.DIALOG_CONCLUSION.getCode())); + } + return artifacts; + } + + private Map buildFallbackArtifact(String outputCode) { + Map artifact = new HashMap<>(); + if (!StringUtils.hasText(outputCode)) { + artifact.put("name", "dialog-conclusion.md"); + artifact.put("url", "/workspace/dialog-conclusion.md"); + return artifact; + } + + switch (outputCode) { + case "git_branch" -> { + artifact.put("name", "git-branch.txt"); + artifact.put("url", "/workspace/git-branch.txt"); + } + case "pull_request" -> { + artifact.put("name", "pull-request.txt"); + artifact.put("url", "/workspace/pull-request.txt"); + } + case "python_file" -> { + artifact.put("name", "output.py"); + artifact.put("url", "/workspace/output.py"); + } + case "java_file" -> { + artifact.put("name", "Output.java"); + artifact.put("url", "/workspace/Output.java"); + } + case "javascript_file" -> { + artifact.put("name", "output.js"); + artifact.put("url", "/workspace/output.js"); + } + case "typescript_file" -> { + artifact.put("name", "output.ts"); + artifact.put("url", "/workspace/output.ts"); + } + case "sql_file" -> { + artifact.put("name", "output.sql"); + artifact.put("url", "/workspace/output.sql"); + } + case "shell_script" -> { + artifact.put("name", "output.sh"); + 
artifact.put("url", "/workspace/output.sh"); + } + case "config_file" -> { + artifact.put("name", "output.yaml"); + artifact.put("url", "/workspace/output.yaml"); + } + case "txt" -> { + artifact.put("name", "output.txt"); + artifact.put("url", "/workspace/output.txt"); + } + case "markdown" -> { + artifact.put("name", "output.md"); + artifact.put("url", "/workspace/output.md"); + } + case "word" -> { + artifact.put("name", "output.docx"); + artifact.put("url", "/workspace/output.docx"); + } + case "excel" -> { + artifact.put("name", "output.xlsx"); + artifact.put("url", "/workspace/output.xlsx"); + } + case "ppt" -> { + artifact.put("name", "output.pptx"); + artifact.put("url", "/workspace/output.pptx"); + } + case "pdf" -> { + artifact.put("name", "output.pdf"); + artifact.put("url", "/workspace/output.pdf"); + } + case "json" -> { + artifact.put("name", "output.json"); + artifact.put("url", "/workspace/output.json"); + } + case "csv" -> { + artifact.put("name", "output.csv"); + artifact.put("url", "/workspace/output.csv"); + } + case "png" -> { + artifact.put("name", "output.png"); + artifact.put("url", "/workspace/output.png"); + } + case "archive" -> { + artifact.put("name", "output.zip"); + artifact.put("url", "/workspace/output.zip"); + } + case "api_call_result" -> { + artifact.put("name", "api-result.json"); + artifact.put("url", "/workspace/api-result.json"); + } + case "dialog_conclusion" -> { + artifact.put("name", "dialog-conclusion.md"); + artifact.put("url", "/workspace/dialog-conclusion.md"); + } + default -> { + artifact.put("name", outputCode + ".txt"); + artifact.put("url", "/workspace/" + outputCode + ".txt"); + } + } + return artifact; + } + + private String resolveReportSummary(TaskCompleteRequest.Report workerReport, TaskStatus targetStatus) { + if (workerReport != null && StringUtils.hasText(workerReport.getSummary())) { + return workerReport.getSummary(); + } + return targetStatus == TaskStatus.COMPLETED ? 
"任务执行完成" : "任务执行失败"; + } + + private Integer resolveReportCompletion(TaskCompleteRequest.Report workerReport, TaskStatus targetStatus) { + if (workerReport != null && workerReport.getCompletion() != null) { + return workerReport.getCompletion(); + } + return targetStatus == TaskStatus.COMPLETED ? 100 : 0; + } + + private String resolveReportAudit(TaskCompleteRequest.Report workerReport, TaskStatus targetStatus) { + if (workerReport != null && StringUtils.hasText(workerReport.getAudit())) { + return workerReport.getAudit(); + } + return targetStatus == TaskStatus.COMPLETED ? "A" : "C"; + } + + private List parseEstimatedOutput(Task task) { + if (task == null || !StringUtils.hasText(task.getConfigJson())) { + return Collections.emptyList(); + } + try { + var configNode = objectMapper.readTree(task.getConfigJson()); + var estimatedOutputNode = configNode.get("estimatedOutput"); + if (estimatedOutputNode == null || !estimatedOutputNode.isArray()) { + return Collections.emptyList(); + } + return objectMapper.convertValue(estimatedOutputNode, + objectMapper.getTypeFactory().constructCollectionType(List.class, String.class)); + } catch (Exception e) { + log.warn("解析 estimatedOutput 失败: taskNo={}, error={}", task.getTaskNo(), e.getMessage()); + return Collections.emptyList(); + } + } + + private List resolveOutputDomains(List estimatedOutput) { + if (estimatedOutput == null || estimatedOutput.isEmpty()) { + return Collections.emptyList(); + } + + LinkedHashSet domains = new LinkedHashSet<>(); + for (String outputCode : estimatedOutput) { + TaskOutputType.fromCode(outputCode).map(TaskOutputType::getDomain).ifPresent(domains::add); + } + return new ArrayList<>(domains); + } + + private List> buildDispatchGitConfig(Task task) { + if (!StringUtils.hasText(task.getConfigJson())) { + return Collections.emptyList(); + } + + try { + var configNode = objectMapper.readTree(task.getConfigJson()); + var gitRepos = configNode.get("gitRepos"); + if (gitRepos == null || 
!gitRepos.isArray() || gitRepos.isEmpty()) { + return Collections.emptyList(); + } + + List> result = new ArrayList<>(); + for (var repoNode : gitRepos) { + String repo = repoNode.has("url") ? repoNode.get("url").asText("").trim() : ""; + String originBranch = repoNode.has("branch") ? repoNode.get("branch").asText("").trim() : ""; + if (!StringUtils.hasText(repo) || !StringUtils.hasText(originBranch)) { + log.warn("跳过无效 gitRepos 配置: taskNo={}, repo={}, branch={}", + task.getTaskNo(), repo, originBranch); + continue; + } + + Map gitConfig = new HashMap<>(); + gitConfig.put("repo", repo); + gitConfig.put("origin_branch", originBranch); + gitConfig.put("task_branch", "feat/" + task.getTaskNo()); + result.add(gitConfig); + } + return result; + } catch (Exception e) { + log.warn("解析任务 gitRepos 失败,忽略 git_config 下发: taskNo={}, error={}", + task.getTaskNo(), e.getMessage()); + return Collections.emptyList(); + } + } + + private String resolveDispatchDeliveryMode(Task task, List> gitConfig) { + String snapshotMode = parseDeliveryModeFromConfig(task); + if (StringUtils.hasText(snapshotMode)) { + return normalizeDeliveryMode(snapshotMode, task.getTaskNo()); + } + return resolveDeliveryMode(parseEstimatedOutput(task), !gitConfig.isEmpty(), task.getTaskNo()); + } + + private String parseDeliveryModeFromConfig(Task task) { + if (task == null || !StringUtils.hasText(task.getConfigJson())) { + return ""; + } + try { + var configNode = objectMapper.readTree(task.getConfigJson()); + var deliveryModeNode = configNode.get("deliveryMode"); + if (deliveryModeNode != null && deliveryModeNode.isTextual()) { + return deliveryModeNode.asText(); + } + } catch (Exception e) { + log.warn("解析 deliveryMode 失败: taskNo={}, error={}", task.getTaskNo(), e.getMessage()); + } + return ""; + } + + private String resolveDeliveryMode(List estimatedOutput, boolean hasGitConfig, String taskNo) { + if (!hasGitConfig) { + return DELIVERY_MODE_OSS; + } + + if (containsGitDeliverySignal(estimatedOutput)) { + 
return DELIVERY_MODE_GIT; + } + + if (estimatedOutput == null || estimatedOutput.isEmpty()) { + log.warn("estimatedOutput 为空但任务存在 git 配置,兼容回退为 git: taskNo={}", taskNo); + return DELIVERY_MODE_GIT; + } + + return DELIVERY_MODE_OSS; + } + + private boolean containsGitDeliverySignal(List estimatedOutput) { + if (estimatedOutput == null || estimatedOutput.isEmpty()) { + return false; + } + for (String outputCode : estimatedOutput) { + if (OUTPUT_CODE_GIT_BRANCH.equalsIgnoreCase(outputCode) + || OUTPUT_CODE_PULL_REQUEST.equalsIgnoreCase(outputCode)) { + return true; + } + } + return false; + } + + private boolean hasValidRoleGitRepo(RoleEntity role) { + if (role == null || role.getConfigJson() == null || role.getConfigJson().getGitRepos() == null) { + return false; + } + return role.getConfigJson().getGitRepos().stream().anyMatch( + gitRepo -> gitRepo != null + && StringUtils.hasText(gitRepo.getUrl()) + && StringUtils.hasText(gitRepo.getBranch())); + } + + private String normalizeDeliveryMode(String rawMode, String taskNo) { + if (!StringUtils.hasText(rawMode)) { + return DELIVERY_MODE_OSS; + } + String mode = rawMode.trim().toLowerCase(); + if (DELIVERY_MODE_GIT.equals(mode) || DELIVERY_MODE_OSS.equals(mode)) { + return mode; + } + log.warn("未知 deliveryMode,回退为 oss: taskNo={}, rawMode={}", taskNo, rawMode); + return DELIVERY_MODE_OSS; + } + + private record TaskInputFileRef( + String fileId, + String originalFileName, + String runtimePath, + String realPath) { + } + + private record PromptLayers( + String rolePrompt, + String platformPrompt, + String userSoul) { + } + + private record TerminateEnqueueResult(String queueKey, String requestId) {} + + private void publishTaskEvent(String streamKey, String eventType, String taskNo, Map data) { + Map fields = new HashMap<>(); + fields.put("event_type", eventType); + fields.put("task_no", taskNo); + fields.put("timestamp", LocalDateTime.now().toString()); + + if (data != null && !data.isEmpty()) { + try { + 
fields.put("data", objectMapper.writeValueAsString(data)); + } catch (JsonProcessingException e) { + throw new IllegalStateException("序列化任务事件失败: eventType=" + eventType + ", taskNo=" + taskNo, e); + } + } + + redisTemplate.opsForStream().add(streamKey, fields); + } + + /** + * 格式化时长 + */ + private String formatDuration(Long durationMs) { + if (durationMs == null || durationMs == 0) { + return "0s"; + } + long seconds = durationMs / 1000; + if (seconds < 60) { + return seconds + "s"; + } + long minutes = seconds / 60; + seconds = seconds % 60; + return minutes + "m " + seconds + "s"; + } +} diff --git a/back/src/main/java/com/linkwork/service/TaskShareLinkService.java b/back/src/main/java/com/linkwork/service/TaskShareLinkService.java new file mode 100644 index 0000000..b31f45e --- /dev/null +++ b/back/src/main/java/com/linkwork/service/TaskShareLinkService.java @@ -0,0 +1,166 @@ +package com.linkwork.service; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.linkwork.model.dto.TaskShareLinkResponse; +import com.linkwork.model.entity.Task; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.stereotype.Service; +import org.springframework.util.StringUtils; +import org.springframework.web.util.UriUtils; + +import javax.crypto.Mac; +import javax.crypto.spec.SecretKeySpec; +import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; +import java.time.Instant; +import java.time.LocalDateTime; +import java.time.ZoneOffset; +import java.util.Base64; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.UUID; + +/** + * 任务临时分享链接服务(签名 token,无状态校验)。 + */ +@Slf4j +@Service +@RequiredArgsConstructor +public class TaskShareLinkService { + + @Value("${robot.task-share.secret:}") + private String shareSecret; + + @Value("${robot.task-share.base-url:}") + private 
String shareBaseUrl; + + @Value("${robot.task-share.default-expire-hours:24}") + private int defaultExpireHours; + + @Value("${robot.task-share.max-expire-hours:168}") + private int maxExpireHours; + + private final TaskService taskService; + private final ObjectMapper objectMapper; + + public TaskShareLinkResponse createShareLink(String taskNo, String creatorId, Integer expireHours) { + Task task = taskService.getTaskByNo(taskNo, creatorId); + int resolvedExpireHours = resolveExpireHours(expireHours); + Instant expiresAt = Instant.now().plusSeconds(resolvedExpireHours * 3600L); + + String token = buildToken(task.getTaskNo(), expiresAt.getEpochSecond()); + TaskShareLinkResponse response = new TaskShareLinkResponse(); + response.setTaskId(task.getTaskNo()); + response.setToken(token); + response.setShareUrl(buildShareUrl(task.getTaskNo(), token)); + response.setExpiresAt(LocalDateTime.ofInstant(expiresAt, ZoneOffset.UTC)); + return response; + } + + public void validateShareToken(String taskNo, String token) { + if (!StringUtils.hasText(taskNo) || !StringUtils.hasText(token)) { + throw new IllegalArgumentException("分享链接参数缺失"); + } + String[] parts = token.split("\\."); + if (parts.length != 2) { + throw new IllegalArgumentException("分享链接无效"); + } + + String payloadEncoded = parts[0]; + String expectedSignature = signPayload(payloadEncoded); + String actualSignature = parts[1]; + if (!MessageDigest.isEqual( + expectedSignature.getBytes(StandardCharsets.UTF_8), + actualSignature.getBytes(StandardCharsets.UTF_8))) { + throw new IllegalArgumentException("分享链接无效"); + } + + try { + String payloadJson = new String(Base64.getUrlDecoder().decode(payloadEncoded), StandardCharsets.UTF_8); + Map payload = objectMapper.readValue(payloadJson, new TypeReference>() { + }); + + String tokenTaskNo = String.valueOf(payload.get("taskNo")); + long exp = parseLong(payload.get("exp")); + if (!taskNo.equals(tokenTaskNo)) { + throw new IllegalArgumentException("分享链接与任务不匹配"); + } + if (exp 
<= Instant.now().getEpochSecond()) { + throw new IllegalArgumentException("分享链接已过期"); + } + } catch (IllegalArgumentException e) { + throw e; + } catch (Exception e) { + throw new IllegalArgumentException("分享链接无效"); + } + } + + private int resolveExpireHours(Integer expireHours) { + int resolved = expireHours == null ? defaultExpireHours : expireHours; + if (resolved <= 0) { + resolved = defaultExpireHours; + } + if (resolved > maxExpireHours) { + resolved = maxExpireHours; + } + return resolved; + } + + private String buildToken(String taskNo, long expEpochSecond) { + try { + Map payload = new LinkedHashMap<>(); + payload.put("taskNo", taskNo); + payload.put("exp", expEpochSecond); + payload.put("nonce", UUID.randomUUID().toString().replace("-", "")); + String payloadJson = objectMapper.writeValueAsString(payload); + String payloadEncoded = Base64.getUrlEncoder() + .withoutPadding() + .encodeToString(payloadJson.getBytes(StandardCharsets.UTF_8)); + return payloadEncoded + "." + signPayload(payloadEncoded); + } catch (Exception e) { + throw new IllegalStateException("生成任务分享链接失败: taskNo=" + taskNo, e); + } + } + + private String signPayload(String payloadEncoded) { + try { + Mac mac = Mac.getInstance("HmacSHA256"); + String secret = requireShareSecret(); + mac.init(new SecretKeySpec(secret.getBytes(StandardCharsets.UTF_8), "HmacSHA256")); + byte[] signature = mac.doFinal(payloadEncoded.getBytes(StandardCharsets.UTF_8)); + return Base64.getUrlEncoder().withoutPadding().encodeToString(signature); + } catch (Exception e) { + throw new IllegalStateException("签名分享链接失败", e); + } + } + + private String buildShareUrl(String taskNo, String token) { + String base = shareBaseUrl == null ? 
"" : shareBaseUrl.trim(); + if (base.endsWith("/")) { + base = base.substring(0, base.length() - 1); + } + String encodedTaskNo = UriUtils.encodePathSegment(taskNo, StandardCharsets.UTF_8); + String encodedToken = UriUtils.encodeQueryParam(token, StandardCharsets.UTF_8); + return String.format("%s/share/task/%s?token=%s", base, encodedTaskNo, encodedToken); + } + + private String requireShareSecret() { + if (!StringUtils.hasText(shareSecret)) { + throw new IllegalStateException("robot.task-share.secret 未配置"); + } + return shareSecret.trim(); + } + + private long parseLong(Object value) { + if (value == null) { + throw new IllegalArgumentException("分享链接无效"); + } + if (value instanceof Number number) { + return number.longValue(); + } + return Long.parseLong(String.valueOf(value)); + } +} diff --git a/back/src/main/java/com/linkwork/service/TaskStatusSyncService.java b/back/src/main/java/com/linkwork/service/TaskStatusSyncService.java new file mode 100644 index 0000000..1743411 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/TaskStatusSyncService.java @@ -0,0 +1,500 @@ +package com.linkwork.service; + +import com.baomidou.mybatisplus.extension.plugins.pagination.Page; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.linkwork.config.DispatchConfig; +import com.linkwork.model.dto.TaskCompleteRequest; +import com.linkwork.model.dto.TaskResponse; +import com.linkwork.model.entity.Task; +import com.linkwork.model.enums.TaskStatus; +import jakarta.annotation.PostConstruct; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.data.redis.connection.stream.MapRecord; +import org.springframework.data.redis.connection.stream.StreamOffset; +import org.springframework.data.redis.core.StringRedisTemplate; +import org.springframework.scheduling.annotation.Scheduled; +import org.springframework.stereotype.Service; + +import java.util.*; +import java.util.concurrent.ConcurrentHashMap; + +/** + * 任务状态同步服务 + 
* + * 监听 Redis Stream 中的关键事件,将任务状态同步到数据库。 + * + * 背景:momo-worker (外部 Agent 执行器) 执行任务时只向 Redis Stream 写入事件, + * 不会回调后端更新数据库。导致数据库中的任务状态永远停留在 PENDING。 + * + * 本服务既在 WebSocket 转发事件时实时同步,也通过后台定时扫描做兜底补偿, + * 避免无人订阅时终态无法落库。 + * + * 事件 → 状态映射: + * TASK_STARTED / SESSION_START → RUNNING + * TASK_COMPLETED / SESSION_END (exit_code=0) → COMPLETED + * TASK_FAILED / SESSION_END (exit_code!=0) → FAILED + * TASK_ABORTED → ABORTED + */ +@Slf4j +@Service +@RequiredArgsConstructor +public class TaskStatusSyncService { + + private final TaskService taskService; + private final StringRedisTemplate redisTemplate; + private final DispatchConfig dispatchConfig; + private final ObjectMapper objectMapper = new ObjectMapper(); + + /** + * 启动时扫描数据库中 PENDING / RUNNING 状态的任务, + * 从 Redis Stream 中读取历史事件补齐状态。 + */ + @PostConstruct + public void syncHistoryOnStartup() { + Thread.ofVirtual().name("task-status-sync-init").start(() -> { + try { + // 等待 Spring 容器完全启动 + Thread.sleep(5000); + syncActiveTasks("startup"); + } catch (Exception e) { + log.error("启动时状态补齐异常", e); + } + }); + } + + /** + * 后台常驻扫描(不依赖 WebSocket 订阅): + * 周期性补齐 PENDING / RUNNING 任务状态,避免终态长期未落库。 + */ + @Scheduled(fixedDelayString = "${robot.task-status-sync.scan-interval-ms:15000}") + public void syncHistoryPeriodically() { + syncActiveTasks("schedule"); + } + + private void syncActiveTasks(String trigger) { + int scanned = 0; + int synced = 0; + + for (TaskStatus status : ACTIVE_STATES) { + long current = 1; + while (true) { + Page page = taskService.listTasks(null, status.name(), (int) current, TASK_SCAN_PAGE_SIZE); + List records = page.getRecords(); + if (records == null || records.isEmpty()) { + break; + } + scanned += records.size(); + for (Task task : records) { + if (syncSingleTask(task, trigger)) { + synced++; + } + } + if (current >= page.getPages()) { + break; + } + current++; + } + } + + if (synced > 0) { + log.info("任务状态补齐完成: trigger={}, synced={}, scanned={}", trigger, synced, scanned); + } else if 
("startup".equals(trigger)) { + log.info("启动补齐完成: trigger={}, synced=0, scanned={}", trigger, scanned); + } + } + + private boolean syncSingleTask(Task task, String trigger) { + TaskStatus currentStatus = task.getStatus(); + TaskStatus resolved = resolveStatusFromStream(task.getTaskNo()); + if (resolved == null || resolved == currentStatus) { + return false; + } + try { + UsageSnapshot usageSnapshot = resolveUsageFromStream(task.getTaskNo()); + PersistResult persistResult = persistStatusWithUsage( + task.getTaskNo(), + resolved, + usageSnapshot.tokensUsed(), + usageSnapshot.durationMs(), + "sync-" + trigger); + Task updatedTask = persistResult.task(); + syncedStatus.put(task.getTaskNo(), resolved); + if (!persistResult.notifiedByCompleteFlow()) { + log.debug("任务终态由同步流程补齐: trigger={}, taskNo={}, status={}", + trigger, task.getTaskNo(), resolved); + } + log.info("任务状态补齐: trigger={}, taskNo={}, {} -> {}", + trigger, task.getTaskNo(), currentStatus, resolved); + return true; + } catch (Exception e) { + log.error("补齐任务状态失败: trigger={}, taskNo={}, from={}, to={}", + trigger, task.getTaskNo(), currentStatus, resolved, e); + return false; + } + } + + /** + * 从 Redis Stream 历史事件中推断任务的最终状态 + */ + private TaskStatus resolveStatusFromStream(String taskNo) { + Long roleId = resolveRoleId(taskNo); + List streamKeys = List.of( + dispatchConfig.getLogStreamKey(roleId, taskNo), + "stream:task:" + taskNo + ":events", + "stream:task:" + taskNo + ); + + TaskStatus best = null; + for (String streamKey : streamKeys) { + try { + List> records = redisTemplate.opsForStream() + .read(StreamOffset.fromStart(streamKey)); + if (records == null || records.isEmpty()) continue; + + for (MapRecord record : records) { + Map eventData = extractEventData(record); + + String eventType = String.valueOf(eventData.getOrDefault("event_type", "")); + TaskStatus status = resolveTargetStatus(eventType, eventData); + if (shouldUpdateStatus(best, status)) { + best = status; + } + } + } catch (Exception e) { + 
log.debug("读取 Stream {} 失败: {}", streamKey, e.getMessage()); + } + } + return best; + } + + @SuppressWarnings("unchecked") + private Map extractEventData(MapRecord record) { + Map rawEvent = new HashMap<>(); + record.getValue().forEach((k, v) -> rawEvent.put(k.toString(), v)); + + Object payloadObj = rawEvent.get("payload"); + if (payloadObj instanceof String payloadStr && payloadStr.startsWith("{")) { + try { + Map payloadMap = objectMapper.readValue(payloadStr, Map.class); + Object innerData = payloadMap.get("data"); + if (innerData instanceof String dataStr && (dataStr.startsWith("{") || dataStr.startsWith("["))) { + try { + payloadMap.put("data", objectMapper.readValue(dataStr, Object.class)); + } catch (Exception ignored) {} + } + Object payloadData = payloadMap.get("data"); + if (payloadData instanceof Map dataMap) { + dataMap.forEach((k, v) -> payloadMap.putIfAbsent(String.valueOf(k), v)); + } + return payloadMap; + } catch (Exception e) { + log.debug("解析 payload 失败,回退扁平事件: {}", e.getMessage()); + } + } + + Object dataObj = rawEvent.get("data"); + if (dataObj instanceof String dataStr && (dataStr.startsWith("{") || dataStr.startsWith("["))) { + try { + rawEvent.put("data", objectMapper.readValue(dataStr, Object.class)); + } catch (Exception ignored) {} + } + + Object flatData = rawEvent.get("data"); + if (flatData instanceof Map dataMap) { + dataMap.forEach((k, v) -> rawEvent.putIfAbsent(String.valueOf(k), v)); + } + return rawEvent; + } + + private Long resolveRoleId(String taskNo) { + try { + Task task = taskService.getTaskByNo(taskNo); + return task.getRoleId(); + } catch (Exception e) { + log.debug("解析任务 roleId 失败,使用空 roleId 路由: taskNo={}", taskNo); + return null; + } + } + + /** + * 记录每个任务已同步到的最终状态,避免重复更新和状态回退。 + * key=taskNo, value=已同步的最高优先级状态 + */ + private final ConcurrentHashMap syncedStatus = new ConcurrentHashMap<>(); + + /** + * 终态集合:任务到达这些状态后不再变更 + */ + private static final Set TERMINAL_STATES = Set.of( + TaskStatus.COMPLETED, TaskStatus.FAILED, 
TaskStatus.ABORTED + ); + private static final List ACTIVE_STATES = List.of(TaskStatus.PENDING, TaskStatus.RUNNING); + private static final int TASK_SCAN_PAGE_SIZE = 200; + + /** + * 处理一条 Redis Stream 事件,判断是否需要同步任务状态 + * + * @param taskNo 任务编号 + * @param eventData 事件数据(从 Redis Stream record 解析) + */ + public void onEvent(String taskNo, Map eventData) { + if (taskNo == null || taskNo.isEmpty() || eventData == null) return; + + String eventType = String.valueOf(eventData.getOrDefault("event_type", "")); + TaskStatus targetStatus = resolveTargetStatus(eventType, eventData); + + if (targetStatus == null) return; + + // 执行数据库更新 + try { + Integer tokensUsed = resolveTokensUsedFromEvent(eventData); + Long durationMs = resolveDurationMsFromEvent(eventData); + + Task currentTask = taskService.getTaskByNo(taskNo); + TaskStatus currentStatus = currentTask.getStatus(); + boolean shouldSyncStatus = shouldUpdateStatus(currentStatus, targetStatus); + boolean shouldBackfillUsage = shouldBackfillUsage(currentTask, tokensUsed, durationMs); + + // 历史任务详情反复打开会重放 Stream 事件,这里必须幂等避免重复通知。 + if (!shouldSyncStatus && !shouldBackfillUsage) { + return; + } + + TaskStatus statusToPersist = shouldSyncStatus ? 
targetStatus : currentStatus; + PersistResult persistResult = persistStatusWithUsage(taskNo, statusToPersist, tokensUsed, durationMs, "event"); + Task updatedTask = persistResult.task(); + syncedStatus.put(taskNo, statusToPersist); + if (shouldSyncStatus && !persistResult.notifiedByCompleteFlow()) { + log.debug("任务终态由事件同步补齐: taskNo={}, status={}", updatedTask.getTaskNo(), statusToPersist); + } + log.info("任务状态已同步: taskNo={}, event={}, status={}", taskNo, eventType, targetStatus); + + // 如果到达终态,延迟清理内存(避免内存泄漏) + if (TERMINAL_STATES.contains(statusToPersist)) { + // 保留 5 分钟后清理,防止短期内重复事件触发 + Thread.ofVirtual().start(() -> { + try { + Thread.sleep(300_000); // 5 min + } catch (InterruptedException ignored) { + Thread.currentThread().interrupt(); + } + syncedStatus.remove(taskNo); + }); + } + } catch (Exception e) { + log.error("同步任务状态失败: taskNo={}, targetStatus={}", taskNo, targetStatus, e); + } + } + + private PersistResult persistStatusWithUsage(String taskNo, + TaskStatus statusToPersist, + Integer tokensUsed, + Long durationMs, + String trigger) { + Integer safeTokens = tokensUsed != null && tokensUsed >= 0 ? tokensUsed : 0; + Long safeDurationMs = durationMs != null && durationMs >= 0 ? 
durationMs : 0L; + + if (statusToPersist == TaskStatus.COMPLETED || statusToPersist == TaskStatus.FAILED) { + try { + Task currentTask = taskService.getTaskByNo(taskNo); + if (currentTask.getStatus() == TaskStatus.PENDING + || currentTask.getStatus() == TaskStatus.RUNNING + || currentTask.getStatus() == TaskStatus.PENDING_AUTH) { + TaskCompleteRequest completeRequest = new TaskCompleteRequest(); + completeRequest.setStatus(statusToPersist.name()); + completeRequest.setTokensUsed(safeTokens); + completeRequest.setDurationMs(safeDurationMs); + Task completedTask = taskService.completeTask(taskNo, completeRequest); + log.info("状态同步触发 completeTask 补全: trigger={}, taskNo={}, status={}, tokens={}, durationMs={}", + trigger, taskNo, statusToPersist, safeTokens, safeDurationMs); + return new PersistResult(completedTask, true); + } + } catch (Exception ex) { + log.warn("状态同步 completeTask 补全失败,降级 updateStatusWithUsage: trigger={}, taskNo={}, status={}, err={}", + trigger, taskNo, statusToPersist, ex.getMessage()); + } + } + Task updatedTask = taskService.updateStatusWithUsage(taskNo, statusToPersist, safeTokens, safeDurationMs); + return new PersistResult(updatedTask, false); + } + + private boolean shouldBackfillUsage(Task task, Integer eventTokensUsed, Long eventDurationMs) { + if (task == null) { + return false; + } + if (TERMINAL_STATES.contains(task.getStatus()) && needsBillingBackfill(task)) { + return true; + } + boolean canBackfillTokens = eventTokensUsed != null + && eventTokensUsed >= 0 + && (task.getTokensUsed() == null || task.getTokensUsed() <= 0); + boolean canBackfillDuration = eventDurationMs != null + && eventDurationMs > 0 + && (task.getDurationMs() == null || task.getDurationMs() <= 0); + return canBackfillTokens || canBackfillDuration; + } + + private boolean needsBillingBackfill(Task task) { + boolean missingTokens = task.getTokensUsed() == null || task.getTokensUsed() <= 0; + boolean missingUsageFields = task.getInputTokens() == null + || 
task.getOutputTokens() == null + || task.getRequestCount() == null + || task.getTokenLimit() == null + || task.getUsagePercent() == null; + return missingTokens || missingUsageFields; + } + + private UsageSnapshot resolveUsageFromStream(String taskNo) { + Long roleId = resolveRoleId(taskNo); + List streamKeys = List.of( + dispatchConfig.getLogStreamKey(roleId, taskNo), + "stream:task:" + taskNo + ":events", + "stream:task:" + taskNo + ); + + Integer tokensUsed = null; + Long durationMs = null; + for (String streamKey : streamKeys) { + try { + List> records = redisTemplate.opsForStream() + .read(StreamOffset.fromStart(streamKey)); + if (records == null || records.isEmpty()) { + continue; + } + + for (MapRecord record : records) { + Map eventData = extractEventData(record); + Integer eventTokens = resolveTokensUsedFromEvent(eventData); + Long eventDuration = resolveDurationMsFromEvent(eventData); + if (eventTokens != null && eventTokens >= 0) { + tokensUsed = eventTokens; + } + if (eventDuration != null && eventDuration > 0) { + durationMs = eventDuration; + } + } + } catch (Exception e) { + log.debug("读取 Stream usage 失败: streamKey={}, error={}", streamKey, e.getMessage()); + } + } + + return new UsageSnapshot(tokensUsed, durationMs); + } + + private Integer resolveTokensUsedFromEvent(Map eventData) { + Long value = resolveLongByKeys(eventData, "tokens_used", "tokensUsed", "token_usage"); + if (value == null || value < 0 || value > Integer.MAX_VALUE) { + return null; + } + return value.intValue(); + } + + private Long resolveDurationMsFromEvent(Map eventData) { + return resolveLongByKeys(eventData, "duration_ms", "durationMs", "elapsed_ms"); + } + + private Long resolveLongByKeys(Map eventData, String... 
keys) { + if (eventData == null || keys == null) { + return null; + } + for (String key : keys) { + Object raw = eventData.get(key); + if (raw == null) { + continue; + } + try { + return Long.parseLong(String.valueOf(raw)); + } catch (NumberFormatException ignored) { + } + } + return null; + } + + private record UsageSnapshot(Integer tokensUsed, Long durationMs) {} + private record PersistResult(Task task, boolean notifiedByCompleteFlow) {} + + /** + * 根据事件类型和数据推断目标任务状态 + */ + private TaskStatus resolveTargetStatus(String eventType, Map eventData) { + String normalizedEventType = eventType == null ? "" : eventType.trim().toUpperCase(Locale.ROOT); + return switch (normalizedEventType) { + // → RUNNING + case "TASK_STARTED", "SESSION_START" -> TaskStatus.RUNNING; + + // → COMPLETED or FAILED(根据 exit_code 判断) + case "SESSION_END" -> { + Object exitCodeObj = eventData.get("exit_code"); + if (exitCodeObj != null) { + try { + int exitCode = Integer.parseInt(String.valueOf(exitCodeObj)); + yield exitCode == 0 ? 
TaskStatus.COMPLETED : TaskStatus.FAILED; + } catch (NumberFormatException e) { + yield null; + } + } + // SESSION_END 未携带 exit_code 时不推断终态,等待 TASK_COMPLETED/TASK_FAILED/WORKSPACE_ARCHIVED + yield null; + } + + // → COMPLETED + case "TASK_COMPLETED" -> TaskStatus.COMPLETED; + + // → FAILED + case "TASK_FAILED" -> TaskStatus.FAILED; + + // → ABORTED + case "TASK_ABORTED", "TASK_TERMINATED" -> TaskStatus.ABORTED; + + // WORKSPACE 归档事件,status=failed/completed/aborted + case "WORKSPACE_ARCHIVED" -> { + String archivedStatus = String.valueOf(eventData.getOrDefault("status", "")).toLowerCase(Locale.ROOT); + if ("failed".equals(archivedStatus)) { + yield TaskStatus.FAILED; + } + if ("completed".equals(archivedStatus) || "success".equals(archivedStatus)) { + yield TaskStatus.COMPLETED; + } + if ("aborted".equals(archivedStatus) || "cancelled".equals(archivedStatus) + || "canceled".equals(archivedStatus)) { + yield TaskStatus.ABORTED; + } + yield null; + } + + // → RUNNING(终止请求已被执行端接收,但尚未完成终止) + case "TASK_ABORT_ACK" -> TaskStatus.RUNNING; + + // 其他事件:如果是首次出现的工具调用/思考等事件,说明任务已开始执行 + case "TOOL_CALL", "TOOL_RESULT", "THINKING", "ASSISTANT_TEXT", + "SECURITY_ALLOW", "SECURITY_DENY" -> TaskStatus.RUNNING; + + // 不需要同步的事件 + default -> null; + }; + } + + private boolean shouldUpdateStatus(TaskStatus current, TaskStatus target) { + if (target == null) { + return false; + } + if (current == null) { + return true; + } + if (current == target) { + return false; + } + return statusPriority(target) > statusPriority(current); + } + + private int statusPriority(TaskStatus status) { + return switch (status) { + case PENDING -> 0; + case RUNNING, PENDING_AUTH -> 10; + case COMPLETED -> 20; + case FAILED -> 30; + case ABORTED -> 40; + }; + } +} diff --git a/back/src/main/java/com/linkwork/service/UserSoulService.java b/back/src/main/java/com/linkwork/service/UserSoulService.java new file mode 100644 index 0000000..93a5af0 --- /dev/null +++ 
b/back/src/main/java/com/linkwork/service/UserSoulService.java @@ -0,0 +1,195 @@ +package com.linkwork.service; + +import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper; +import com.baomidou.mybatisplus.core.conditions.update.LambdaUpdateWrapper; +import com.linkwork.mapper.UserSoulMapper; +import com.linkwork.model.dto.UserSoulResponse; +import com.linkwork.model.dto.UserSoulUpsertRequest; +import com.linkwork.model.entity.UserSoulEntity; +import lombok.extern.slf4j.Slf4j; +import lombok.RequiredArgsConstructor; +import org.springframework.jdbc.BadSqlGrammarException; +import org.springframework.stereotype.Service; +import org.springframework.transaction.annotation.Transactional; +import org.springframework.util.StringUtils; + +@Service +@RequiredArgsConstructor +@Slf4j +public class UserSoulService { + + private static final int MAX_CONTENT_LENGTH = 8000; + private static final int MAX_PRESET_ID_LENGTH = 64; + private static final int MAX_OPERATOR_NAME_LENGTH = 100; + + private final UserSoulMapper userSoulMapper; + + public UserSoulResponse getCurrentUserSoul(String userId) { + UserSoulEntity entity = findByUserId(userId); + if (entity == null) { + throw new IllegalArgumentException("SOUL_NOT_FOUND: 当前用户尚未配置 Soul"); + } + return toResponse(entity); + } + + public String getRequiredSoulContent(String userId) { + UserSoulEntity entity = findByUserId(userId); + if (entity == null) { + throw new IllegalArgumentException("SOUL_NOT_FOUND: 当前用户尚未配置 Soul"); + } + String content = normalizeContent(entity.getContent()); + if (!StringUtils.hasText(content)) { + throw new IllegalArgumentException("SOUL_CONTENT_INVALID: Soul 内容不能为空"); + } + return content; + } + + public String getOptionalSoulContent(String userId) { + UserSoulEntity entity = findByUserId(userId); + if (entity == null) { + return ""; + } + return normalizeContent(entity.getContent()); + } + + @Transactional + public UserSoulResponse upsertCurrentUserSoul(String userId, String userName, 
UserSoulUpsertRequest request) { + String content = normalizeContent(request.getContent()); + validateContent(content); + String operatorName = normalizeOperatorName(userName, userId); + + UserSoulEntity existing = findByUserId(userId); + if (existing == null) { + if (request.getVersion() != 0L) { + throw new IllegalArgumentException("SOUL_VERSION_CONFLICT: 首次保存 version 必须为 0"); + } + UserSoulEntity entity = new UserSoulEntity(); + entity.setUserId(userId); + entity.setContent(content); + entity.setPresetId(normalizePresetId(request.getPresetId())); + entity.setVersion(1L); + entity.setCreatorId(userId); + entity.setCreatorName(operatorName); + entity.setUpdaterId(userId); + entity.setUpdaterName(operatorName); + int inserted = userSoulMapper.insert(entity); + if (inserted != 1) { + throw new IllegalStateException("创建用户 Soul 失败: userId=" + userId); + } + return toResponse(entity); + } + + Long storedVersion = existing.getVersion(); + Long currentVersion = normalizeVersion(storedVersion); + if (!currentVersion.equals(request.getVersion())) { + throw new IllegalArgumentException("SOUL_VERSION_CONFLICT: Soul 已被更新,请刷新后重试"); + } + + long nextVersion = currentVersion + 1; + LambdaUpdateWrapper wrapper = new LambdaUpdateWrapper() + .set(UserSoulEntity::getContent, content) + .set(UserSoulEntity::getPresetId, normalizePresetId(request.getPresetId())) + .set(UserSoulEntity::getUpdaterId, userId) + .set(UserSoulEntity::getUpdaterName, operatorName) + .set(UserSoulEntity::getVersion, nextVersion); + if (existing.getId() != null) { + wrapper.eq(UserSoulEntity::getId, existing.getId()); + } else { + wrapper.eq(UserSoulEntity::getUserId, userId); + } + if (storedVersion == null) { + wrapper.isNull(UserSoulEntity::getVersion); + } else { + wrapper.eq(UserSoulEntity::getVersion, storedVersion); + } + int updated = userSoulMapper.update(null, wrapper); + if (updated != 1) { + throw new IllegalArgumentException("SOUL_VERSION_CONFLICT: Soul 已被更新,请刷新后重试"); + } + UserSoulEntity 
refreshed = findByUserId(userId); + if (refreshed == null) { + throw new IllegalStateException("更新后读取用户 Soul 失败: userId=" + userId); + } + return toResponse(refreshed); + } + + private UserSoulEntity findByUserId(String userId) { + if (!StringUtils.hasText(userId)) { + throw new IllegalArgumentException("用户未登录或登录态失效"); + } + try { + return userSoulMapper.selectOne(new LambdaQueryWrapper() + .eq(UserSoulEntity::getUserId, userId) + .orderByDesc(UserSoulEntity::getUpdatedAt) + .orderByDesc(UserSoulEntity::getId) + .last("limit 1")); + } catch (BadSqlGrammarException ex) { + if (isCompatColumnMismatch(ex)) { + log.warn("UserSoul schema mismatch detected, fallback to compat query: userId={}, err={}", + userId, ex.getMessage()); + return userSoulMapper.selectLatestCompatByUserId(userId); + } + throw ex; + } + } + + private boolean isCompatColumnMismatch(BadSqlGrammarException ex) { + String msg = ex.getMessage(); + return msg != null + && (msg.contains("Unknown column 'content'") + || msg.contains("Unknown column 'preset_id'") + || msg.contains("Unknown column 'version'") + || msg.contains("Unknown column 'updater_id'") + || msg.contains("Unknown column 'updater_name'")); + } + + private String normalizeContent(String content) { + return content == null ? "" : content.trim(); + } + + private String normalizePresetId(String presetId) { + if (!StringUtils.hasText(presetId)) { + return null; + } + String normalized = presetId.trim(); + if (normalized.length() > MAX_PRESET_ID_LENGTH) { + throw new IllegalArgumentException("SOUL_CONTENT_INVALID: presetId 长度不能超过 " + MAX_PRESET_ID_LENGTH); + } + return normalized; + } + + private String normalizeOperatorName(String userName, String fallbackUserId) { + String normalized = StringUtils.hasText(userName) ? userName.trim() : fallbackUserId; + if (!StringUtils.hasText(normalized)) { + return ""; + } + return normalized.length() <= MAX_OPERATOR_NAME_LENGTH + ? 
normalized + : normalized.substring(0, MAX_OPERATOR_NAME_LENGTH); + } + + private void validateContent(String content) { + if (!StringUtils.hasText(content)) { + throw new IllegalArgumentException("SOUL_CONTENT_INVALID: Soul 内容不能为空"); + } + if (content.length() > MAX_CONTENT_LENGTH) { + throw new IllegalArgumentException("SOUL_CONTENT_INVALID: Soul 内容长度不能超过 " + MAX_CONTENT_LENGTH); + } + } + + private UserSoulResponse toResponse(UserSoulEntity entity) { + UserSoulResponse response = new UserSoulResponse(); + response.setContent(entity.getContent()); + response.setPresetId(entity.getPresetId()); + response.setVersion(normalizeVersion(entity.getVersion())); + response.setUpdatedAt(entity.getUpdatedAt()); + return response; + } + + private Long normalizeVersion(Long version) { + if (version == null || version < 0) { + return 0L; + } + return version; + } +} diff --git a/back/src/main/java/com/linkwork/service/WeeklyReportService.java b/back/src/main/java/com/linkwork/service/WeeklyReportService.java new file mode 100644 index 0000000..7af6472 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/WeeklyReportService.java @@ -0,0 +1,1370 @@ +package com.linkwork.service; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.stereotype.Service; +import org.springframework.util.StringUtils; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.net.URI; +import java.net.http.HttpClient; +import java.net.http.HttpRequest; +import java.net.http.HttpResponse; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.time.DayOfWeek; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.ZoneId; +import 
java.time.format.DateTimeFormatter; +import java.time.temporal.TemporalAdjusters; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +/** + * 读取 git 提交记录并生成周报。 + */ +@Slf4j +@Service +public class WeeklyReportService { + + private static final ZoneId ZONE_ID = ZoneId.systemDefault(); + private static final DateTimeFormatter DATE_FMT = DateTimeFormatter.ofPattern("yyyy-MM-dd"); + private static final DateTimeFormatter GIT_TIME_FMT = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"); + private static final String DONE_TOKEN = "[DONE]"; + + private static final List THEME_RULES = List.of( + new ThemeRule("任务执行与终止链路", Pattern.compile("task|任务|mission|terminate|终止|dispatch|websocket|stream|redis", Pattern.CASE_INSENSITIVE)), + new ThemeRule("审批与风控能力", Pattern.compile("approval|审批|risk|风控|\\bip\\b|\\bipv4\\b|\\bipv6\\b", Pattern.CASE_INSENSITIVE)), + new ThemeRule("GitLab 认证与权限", Pattern.compile("gitlab|oauth|auth|认证|权限", Pattern.CASE_INSENSITIVE)), + new ThemeRule("部署与环境治理", Pattern.compile("deploy|部署|docker|compose|prod|robot_env|environment", Pattern.CASE_INSENSITIVE)), + new ThemeRule("前端体验与可读性", Pattern.compile("front|ui|style|layout|sidebar|table|readability|看板|动画|交互", Pattern.CASE_INSENSITIVE)), + new ThemeRule("文档与规范", Pattern.compile("docs|文档|guideline|spec", Pattern.CASE_INSENSITIVE)) + ); + + private static final List KEYWORD_RULES = List.of( + new KeywordRule("任务终止", Pattern.compile("terminate|终止", Pattern.CASE_INSENSITIVE)), + new KeywordRule("审批风控", Pattern.compile("approval|审批|risk|风控", Pattern.CASE_INSENSITIVE)), + new KeywordRule("GitLab认证", Pattern.compile("gitlab|oauth|认证|auth", Pattern.CASE_INSENSITIVE)), + new KeywordRule("Redis队列", Pattern.compile("redis|queue|stream", 
Pattern.CASE_INSENSITIVE)), + new KeywordRule("部署发布", Pattern.compile("deploy|部署|docker|compose", Pattern.CASE_INSENSITIVE)), + new KeywordRule("Runtime模式", Pattern.compile("runtime|sidecar|alone", Pattern.CASE_INSENSITIVE)), + new KeywordRule("输出估算", Pattern.compile("estimate|estimation|taxonomy|报告", Pattern.CASE_INSENSITIVE)), + new KeywordRule("前端可读性", Pattern.compile("readability|table|layout|sidebar|ui|style", Pattern.CASE_INSENSITIVE)), + new KeywordRule("安全策略", Pattern.compile("security|安全|jwt|策略", Pattern.CASE_INSENSITIVE)) + ); + + private static final Map KEYWORD_PHRASE_MAP = Map.ofEntries( + Map.entry("审批风控", "审批流中心全栈联通"), + Map.entry("任务终止", "任务终止链路闭环(API-队列-流式回显)"), + Map.entry("GitLab认证", "GitLab 认证稳定性与生产部署能力提升"), + Map.entry("Redis队列", "任务队列与流式事件联调收敛"), + Map.entry("部署发布", "生产部署流程统一与环境治理升级"), + Map.entry("Runtime模式", "Sidecar/Alone 运行模式能力贯通"), + Map.entry("输出估算", "任务产出估算与报告自动化"), + Map.entry("前端可读性", "执行流与看板 UI 可读性专项优化"), + Map.entry("安全策略", "风险 IP 审计能力补齐") + ); + + private static final Map AUTHOR_KEYWORD_PHRASE_MAP = Map.ofEntries( + Map.entry("审批风控", "审批流与风险控制能力建设"), + Map.entry("任务终止", "任务终止与执行链路优化"), + Map.entry("GitLab认证", "GitLab 认证与权限治理"), + Map.entry("Redis队列", "Redis 队列与流式通道联调"), + Map.entry("部署发布", "部署发布流程优化"), + Map.entry("Runtime模式", "Runtime 模式能力建设"), + Map.entry("输出估算", "任务产出估算与报告能力"), + Map.entry("前端可读性", "前端交互与可读性优化"), + Map.entry("安全策略", "安全策略与认证治理") + ); + + private static final Map AUTHOR_ALIAS_MAP = Map.of(); + + private final ObjectMapper objectMapper = new ObjectMapper(); + + @Value("${robot.weekly-report.repo-path:}") + private String configuredRepoPath; + + @Value("${robot.weekly-report.llm.enabled:true}") + private boolean llmEnabled; + + @Value("${robot.weekly-report.llm.gateway-url:}") + private String llmGatewayUrl; + + @Value("${robot.weekly-report.llm.model:minimax-m2.1}") + private String llmModel; + + @Value("${robot.weekly-report.llm.max-tokens:900}") + private int llmMaxTokens; + + 
@Value("${robot.weekly-report.llm.stream:true}") + private boolean llmStream; + + @Value("${robot.weekly-report.llm.connect-timeout-ms:3000}") + private int llmConnectTimeoutMs; + + @Value("${robot.weekly-report.llm.read-timeout-ms:12000}") + private int llmReadTimeoutMs; + + @Value("${robot.weekly-report.llm.auth-token:}") + private String llmAuthToken; + + @Value("${robot.weekly-report.llm.x-litellm-api-key:}") + private String xLitellmApiKey; + + public String buildCurrentWeekMarkdown() { + DateWindow window = currentWeekWindow(); + WeeklyReportData data = collectWeeklyData(window); + + String fallbackMarkdown = renderRuleMarkdown(data); + String llmMarkdown = renderLlmMarkdown(data); + if (StringUtils.hasText(llmMarkdown)) { + return llmMarkdown; + } + return fallbackMarkdown; + } + + private WeeklyReportData collectWeeklyData(DateWindow window) { + List branches = listAllBranches(); + Map> authorBranches = collectAuthorBranches(branches, window); + List authorStats = buildAuthorStats(authorBranches); + List allCommits = listAllCommits(window); + int mergeCommitCount = countMergeCommits(window); + DiffStats diffStats = collectDiffStats(window); + List focusModules = summarizeFocusModules(window); + + Set allAuthors = allCommits.stream() + .map(CommitRecord::author) + .collect(Collectors.toCollection(LinkedHashSet::new)); + + List themes = summarizeThemes(allCommits); + List keywords = summarizeKeywords(allCommits); + List authorContributions = buildAuthorContributions(allCommits); + + return new WeeklyReportData( + window.startDate(), + window.endDate(), + allCommits.size(), + mergeCommitCount, + diffStats.addedLines(), + diffStats.deletedLines(), + allAuthors, + countActiveBranches(authorStats), + focusModules, + authorContributions, + themes, + keywords + ); + } + + private String renderRuleMarkdown(WeeklyReportData data) { + StringBuilder sb = new StringBuilder(); + sb.append(String.format("### 本周周报(%s ~ %s)\n\n", + data.startDate().format(DATE_FMT), + 
data.endDate().format(DATE_FMT))); + + sb.append("#### 本周提交作者与产出(按作者分类)\n\n"); + if (data.authorContributions().isEmpty()) { + sb.append("- 本周暂无提交记录\n\n"); + } else { + for (AuthorContribution author : data.authorContributions()) { + sb.append(String.format("**%s**(%d)\n", author.author(), author.commitCount())); + for (String output : author.outputs()) { + sb.append(String.format("- %s\n", output)); + } + sb.append("\n"); + } + } + + sb.append("#### 本周工作周报\n\n"); + List weeklyBullets = buildWeeklyBullets(data); + for (String bullet : weeklyBullets) { + sb.append("- ").append(bullet).append("\n"); + } + sb.append("\n"); + + sb.append("#### 本周产出关键词\n\n"); + List outputKeywords = buildOutputKeywords(data); + if (outputKeywords.isEmpty()) { + sb.append("- 本周暂无可沉淀的关键词产出\n"); + } else { + for (String keyword : outputKeywords) { + sb.append("- ").append(keyword).append("\n"); + } + } + + return sb.toString(); + } + + private List buildWeeklyBullets(WeeklyReportData data) { + List bullets = new ArrayList<>(); + bullets.add(buildSummaryBullet(data)); + + if (data.themeStats().isEmpty()) { + bullets.add("本周以零散修复为主,暂无可聚合主题。"); + return bullets; + } + + List prioritized = data.themeStats().stream() + .filter(theme -> !"文档与规范".equals(theme.name())) + .limit(5) + .toList(); + if (prioritized.isEmpty()) { + prioritized = data.themeStats().stream().limit(5).toList(); + } + + for (ThemeStat theme : prioritized) { + bullets.add(buildThemeBullet(theme)); + } + return bullets; + } + + private String buildSummaryBullet(WeeklyReportData data) { + String modulePhrase = formatFocusModules(data.focusModules()); + return String.format("本周共提交 %d 个非合并 commit(另有 %d 个 merge),累计变更约 +%d/-%d 行,重点集中在 %s。", + data.commitCount(), + data.mergeCommitCount(), + data.addedLines(), + data.deletedLines(), + modulePhrase); + } + + private String formatFocusModules(List modules) { + if (modules == null || modules.isEmpty()) { + return "多个核心模块"; + } + List names = 
modules.stream().map(ModuleCount::module).toList(); + if (names.size() == 1) { + return names.get(0) + " 模块"; + } + if (names.size() == 2) { + return String.join("、", names) + " 两块"; + } + return String.join("、", names) + " 三块"; + } + + private String buildThemeBullet(ThemeStat theme) { + String detail = themeDetailHint(theme.name()); + return switch (theme.name()) { + case "任务执行与终止链路" -> String.format("完成“任务与执行链路”核心能力:%s(相关提交 %d 条)。", detail, theme.count()); + case "审批与风控能力" -> String.format("完成“风险治理与审计”建设:%s(相关提交 %d 条)。", detail, theme.count()); + case "GitLab 认证与权限" -> String.format("完成“GitLab 认证与权限”增强:%s(相关提交 %d 条)。", detail, theme.count()); + case "部署与环境治理" -> String.format("完善“部署与环境治理”能力:%s(相关提交 %d 条)。", detail, theme.count()); + case "前端体验与可读性" -> String.format("持续优化“前端体验与可读性”:%s(相关提交 %d 条)。", detail, theme.count()); + case "文档与规范" -> String.format("同步“文档与规范”沉淀:%s(相关提交 %d 条)。", detail, theme.count()); + default -> String.format("推进“%s”:%s(相关提交 %d 条)。", theme.name(), detail, theme.count()); + }; + } + + private String themeDetailHint(String themeName) { + return switch (themeName) { + case "任务执行与终止链路" -> "覆盖任务终止 API、Redis 队列联动、WebSocket 流式状态同步与执行稳定性修复"; + case "审批与风控能力" -> "覆盖审批流联动、风险等级展示、风险 IP 记录与审计链路补齐"; + case "GitLab 认证与权限" -> "覆盖认证前置校验、读写权限区分、认证提示与失败重试优化"; + case "部署与环境治理" -> "覆盖 ROBOT_ENV 分层、部署脚本统一、镜像与配置路径标准化"; + case "前端体验与可读性" -> "覆盖任务流状态展示、表格可读性提升、侧边栏与详情交互优化"; + case "文档与规范" -> "覆盖 API 设计、任务进展追踪与实施规范同步"; + default -> "覆盖核心功能迭代与稳定性改进"; + }; + } + + private String normalizeCommitSubject(String subject) { + if (!StringUtils.hasText(subject)) { + return ""; + } + String cleaned = subject.trim(); + cleaned = cleaned.replaceFirst("^\\[[^]]+\\]\\s*", ""); + cleaned = cleaned.replaceFirst("^[A-Za-z]+\\([^)]*\\):\\s*", ""); + cleaned = cleaned.replaceFirst("^[A-Za-z]+:\\s*", ""); + cleaned = cleaned.replaceFirst("^(?i)(fix|feat|chore|docs|refactor|style|test|ci|build|perf|revert)\\s+", ""); + cleaned = cleaned.replaceFirst("^[,,;;。\\s]+", ""); + cleaned = 
cleaned.replaceFirst("[。;;\\s]+$", ""); + return truncate(cleaned, 42); + } + + + private String truncate(String value, int maxLen) { + if (!StringUtils.hasText(value)) { + return ""; + } + String trimmed = value.trim(); + return trimmed.length() > maxLen ? trimmed.substring(0, maxLen) + "..." : trimmed; + } + + private List buildOutputKeywords(WeeklyReportData data) { + List keywords = new ArrayList<>(); + for (KeywordCount keyword : data.keywordCounts()) { + if (keyword.count() < 2) { + continue; + } + String phrase = KEYWORD_PHRASE_MAP.get(keyword.keyword()); + if (StringUtils.hasText(phrase) && !keywords.contains(phrase)) { + keywords.add(phrase); + } + if (keywords.size() >= 5) { + return keywords; + } + } + + if (keywords.size() < 5) { + for (ThemeStat theme : data.themeStats()) { + String fallback = switch (theme.name()) { + case "任务执行与终止链路" -> "任务与终止链路稳定性提升"; + case "审批与风控能力" -> "审批风险治理能力持续增强"; + case "GitLab 认证与权限" -> "GitLab 认证与权限治理完善"; + case "部署与环境治理" -> "部署流程与环境配置标准化"; + case "前端体验与可读性" -> "前端交互与可读性专项优化"; + default -> null; + }; + if (StringUtils.hasText(fallback) && !keywords.contains(fallback)) { + keywords.add(fallback); + } + if (keywords.size() >= 5) { + break; + } + } + } + + return keywords; + } + + private String renderLlmMarkdown(WeeklyReportData data) { + if (data.commitCount() == 0) { + return null; + } + if (!llmEnabled || !StringUtils.hasText(llmGatewayUrl) || !StringUtils.hasText(llmAuthToken)) { + return null; + } + + try { + String systemPrompt = buildLlmSystemPrompt(); + String userPrompt = buildLlmUserPrompt(data); + String completion = callLlmGateway(systemPrompt, userPrompt); + String markdown = cleanupLlmMarkdown(completion); + if (!StringUtils.hasText(markdown)) { + return null; + } + if (!isValidWeeklyReportMarkdown(markdown, data)) { + log.warn("LLM 周报格式校验失败,降级规则输出"); + return null; + } + return markdown; + } catch (Exception e) { + log.warn("LLM 周报生成失败,降级规则输出: {}", e.getMessage()); + return null; + } + } + + private String 
buildLlmSystemPrompt() { + return """ + 你是研发团队周报助手。请根据输入 JSON 生成“信息密度高、可直接群发”的 Markdown。 + 输出要求: + 1) 仅输出 Markdown 正文,不要解释,不要代码块。 + 2) 必须包含且仅包含以下三级标题: + - #### 本周提交作者与产出(按作者分类) + - #### 本周工作周报 + - #### 本周产出关键词 + 3) 第二节“本周工作周报”必须使用 5~7 条列表,且第一条固定为统计总览句,句式参考: + 本周共提交 X 个非合并 commit(另有 Y 个 merge),累计变更约 +A/-D 行,重点集中在 M1、M2、M3 三块。 + 4) 第二节其余条目要按主题写“完成/推进/优化 + 具体事项”,不得只写“若干优化/持续迭代”等空话。 + 5) 第三节“本周产出关键词”输出 5 条左右短语,不带计数。 + 6) 第一节按作者分组,仅列举作者本周代表提交产出,不展示分支列表。 + 7) 第一节按提交量控制篇幅:commit>=100 输出 6~8 条,commit>=50 输出 4~6 条,其余输出 2~4 条;禁止流水账式逐条罗列。 + 8) 全文尽量使用中文表达,英文仅保留必要技术名词(如 GitLab、Redis、WebSocket),避免生硬中英混写。 + 9) 数据必须严格来自输入,不得编造作者、分支、提交量。 + """; + } + + private String buildLlmUserPrompt(WeeklyReportData data) throws Exception { + Map payload = new LinkedHashMap<>(); + payload.put("weekStart", data.startDate().format(DATE_FMT)); + payload.put("weekEnd", data.endDate().format(DATE_FMT)); + payload.put("commitCount", data.commitCount()); + payload.put("mergeCommitCount", data.mergeCommitCount()); + payload.put("addedLines", data.addedLines()); + payload.put("deletedLines", data.deletedLines()); + payload.put("authorCount", data.authors().size()); + payload.put("authors", data.authors()); + payload.put("activeBranchCount", data.activeBranchCount()); + + List> focusModules = new ArrayList<>(); + for (ModuleCount module : data.focusModules()) { + Map moduleObj = new LinkedHashMap<>(); + moduleObj.put("module", module.module()); + moduleObj.put("count", module.count()); + focusModules.add(moduleObj); + } + payload.put("focusModules", focusModules); + + List> authors = new ArrayList<>(); + for (AuthorContribution author : data.authorContributions()) { + Map authorObj = new LinkedHashMap<>(); + authorObj.put("author", author.author()); + authorObj.put("commitCount", author.commitCount()); + authorObj.put("minOutputCount", minOutputsForAuthor(author.commitCount())); + authorObj.put("maxOutputCount", maxOutputsForAuthor(author.commitCount())); + authorObj.put("outputs", author.outputs()); + 
authors.add(authorObj); + } + payload.put("authorContributions", authors); + + List> themes = new ArrayList<>(); + for (ThemeStat theme : data.themeStats()) { + Map themeObj = new LinkedHashMap<>(); + themeObj.put("theme", theme.name()); + themeObj.put("count", theme.count()); + themeObj.put("summaryHint", themeDetailHint(theme.name())); + themes.add(themeObj); + } + payload.put("weeklyThemes", themes); + + List> keywords = new ArrayList<>(); + for (KeywordCount keyword : data.keywordCounts()) { + Map keywordObj = new LinkedHashMap<>(); + keywordObj.put("keyword", keyword.keyword()); + keywordObj.put("count", keyword.count()); + keywordObj.put("phraseHint", KEYWORD_PHRASE_MAP.get(keyword.keyword())); + keywords.add(keywordObj); + } + payload.put("keywords", keywords); + payload.put("ruleWeeklyBullets", buildWeeklyBullets(data)); + payload.put("ruleKeywordBullets", buildOutputKeywords(data)); + + return "请严格按 system 要求输出周报 Markdown,输入数据如下:\n" + + objectMapper.writerWithDefaultPrettyPrinter().writeValueAsString(payload); + } + + private String callLlmGateway(String systemPrompt, String userPrompt) throws IOException, InterruptedException { + JsonNode requestBody = buildLlmRequestPayload(systemPrompt, userPrompt); + + HttpClient client = HttpClient.newBuilder() + .connectTimeout(java.time.Duration.ofMillis(Math.max(llmConnectTimeoutMs, 1000))) + .build(); + + HttpRequest.Builder requestBuilder = HttpRequest.newBuilder(URI.create(llmGatewayUrl)) + .timeout(java.time.Duration.ofMillis(Math.max(llmReadTimeoutMs, 2000))) + .header("Authorization", "Bearer " + llmAuthToken.trim()) + .header("Content-Type", "application/json"); + + if (StringUtils.hasText(xLitellmApiKey)) { + requestBuilder.header("x-litellm-api-key", xLitellmApiKey.trim()); + } + + HttpRequest request = requestBuilder + .POST(HttpRequest.BodyPublishers.ofString(objectMapper.writeValueAsString(requestBody), StandardCharsets.UTF_8)) + .build(); + + if (llmStream) { + HttpResponse response = 
client.send(request, HttpResponse.BodyHandlers.ofInputStream()); + if (response.statusCode() >= 400) { + String errorBody = readErrorBody(response.body()); + throw new IllegalStateException("LLM Gateway 请求失败: status=" + response.statusCode() + ", body=" + errorBody); + } + return readStreamCompletion(response.body()); + } + + HttpResponse response = client.send(request, HttpResponse.BodyHandlers.ofString(StandardCharsets.UTF_8)); + if (response.statusCode() >= 400) { + throw new IllegalStateException("LLM Gateway 请求失败: status=" + response.statusCode() + ", body=" + response.body()); + } + + String text = extractTextFromGatewayPayload(response.body()); + return StringUtils.hasText(text) ? text : response.body(); + } + + private JsonNode buildLlmRequestPayload(String systemPrompt, String userPrompt) { + var root = objectMapper.createObjectNode(); + root.put("model", llmModel); + root.put("max_tokens", llmMaxTokens); + root.put("stream", llmStream); + + var messages = root.putArray("messages"); + var system = messages.addObject(); + system.put("role", "system"); + system.put("content", systemPrompt); + + var user = messages.addObject(); + user.put("role", "user"); + user.put("content", userPrompt); + + return root; + } + + private String readStreamCompletion(InputStream bodyStream) throws IOException { + StringBuilder completion = new StringBuilder(); + StringBuilder raw = new StringBuilder(); + + try (BufferedReader reader = new BufferedReader(new InputStreamReader(bodyStream, StandardCharsets.UTF_8))) { + String line; + while ((line = reader.readLine()) != null) { + raw.append(line).append('\n'); + String trimmed = line.trim(); + if (trimmed.isEmpty() || trimmed.startsWith(":")) { + continue; + } + if (trimmed.startsWith("data:")) { + trimmed = trimmed.substring(5).trim(); + } + if (!StringUtils.hasText(trimmed) || DONE_TOKEN.equals(trimmed)) { + if (DONE_TOKEN.equals(trimmed)) { + break; + } + continue; + } + + String delta = extractTextFromGatewayPayload(trimmed); 
+ if (StringUtils.hasText(delta)) { + completion.append(delta); + } + } + } + + if (completion.length() > 0) { + return completion.toString(); + } + return raw.toString(); + } + + private String extractTextFromGatewayPayload(String payload) { + if (!StringUtils.hasText(payload)) { + return ""; + } + + String trimmed = payload.trim(); + if (!trimmed.startsWith("{")) { + return trimmed; + } + + try { + JsonNode root = objectMapper.readTree(trimmed); + StringBuilder text = new StringBuilder(); + + appendTextNode(root.path("output_text"), text); + appendTextNode(root.path("content"), text); + + JsonNode choices = root.path("choices"); + if (choices.isArray()) { + for (JsonNode choice : choices) { + appendTextNode(choice.path("delta").path("content"), text); + appendTextNode(choice.path("message").path("content"), text); + appendTextNode(choice.path("text"), text); + } + } + + JsonNode data = root.path("data"); + if (data.isTextual()) { + appendTextNode(data, text); + } else if (data.isObject()) { + appendTextNode(data.path("content"), text); + } + + if (text.length() > 0) { + return text.toString(); + } + if (root.isTextual()) { + return root.asText(); + } + return ""; + } catch (Exception ignore) { + return trimmed; + } + } + + private void appendTextNode(JsonNode node, StringBuilder builder) { + if (node == null || node.isMissingNode() || node.isNull()) { + return; + } + + if (node.isTextual()) { + builder.append(node.asText()); + return; + } + + if (node.isArray()) { + for (JsonNode child : node) { + appendTextNode(child, builder); + } + return; + } + + if (node.isObject()) { + appendTextNode(node.path("text"), builder); + appendTextNode(node.path("content"), builder); + appendTextNode(node.path("value"), builder); + } + } + + private String cleanupLlmMarkdown(String completion) { + if (!StringUtils.hasText(completion)) { + return null; + } + String markdown = completion.trim(); + if (markdown.startsWith("```")) { + markdown = 
markdown.replaceFirst("^```[a-zA-Z]*\\s*", ""); + markdown = markdown.replaceFirst("\\s*```$", ""); + } + return markdown.trim(); + } + + private boolean isValidWeeklyReportMarkdown(String markdown, WeeklyReportData data) { + String lower = markdown.toLowerCase(Locale.ROOT); + boolean hasSections = lower.contains("本周提交作者与产出") + && lower.contains("本周工作周报") + && lower.contains("本周产出关键词"); + if (!hasSections) { + return false; + } + + boolean hasSummarySentence = markdown.contains("本周共提交") + && markdown.contains(String.valueOf(data.commitCount())) + && markdown.contains(String.valueOf(data.mergeCommitCount())); + int workBullets = countSectionBullets(markdown, "#### 本周工作周报", "#### 本周产出关键词"); + int keywordBullets = countSectionBullets(markdown, "#### 本周产出关键词", null); + return hasSummarySentence && workBullets >= 5 && keywordBullets >= 4; + } + + private int countSectionBullets(String markdown, String startTitle, String endTitle) { + int start = markdown.indexOf(startTitle); + if (start < 0) { + return 0; + } + int from = start + startTitle.length(); + int end = endTitle == null ? 
markdown.length() : markdown.indexOf(endTitle, from); + if (end < 0) { + end = markdown.length(); + } + + int count = 0; + for (String line : markdown.substring(from, end).split("\n")) { + if (line.trim().startsWith("- ")) { + count++; + } + } + return count; + } + + private String readErrorBody(InputStream inputStream) { + if (inputStream == null) { + return ""; + } + try (BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8))) { + StringBuilder sb = new StringBuilder(); + String line; + while ((line = reader.readLine()) != null) { + sb.append(line); + } + return sb.toString(); + } catch (Exception e) { + return ""; + } + } + + private DateWindow currentWeekWindow() { + LocalDate today = LocalDate.now(ZONE_ID); + LocalDate monday = today.with(TemporalAdjusters.previousOrSame(DayOfWeek.MONDAY)); + LocalDate sunday = monday.plusDays(6); + LocalDateTime since = monday.atStartOfDay(); + LocalDateTime until = sunday.plusDays(1).atStartOfDay(); + return new DateWindow(monday, sunday, since, until); + } + + private List listAllBranches() { + List remoteBranches = runGit(List.of("git", "for-each-ref", "--format=%(refname:short)", "refs/remotes/origin")); + List normalizedRemote = remoteBranches.stream() + .map(String::trim) + .filter(s -> !s.isBlank()) + .filter(s -> !"origin/HEAD".equals(s)) + .sorted() + .toList(); + if (!normalizedRemote.isEmpty()) { + return normalizedRemote; + } + + List localBranches = runGit(List.of("git", "for-each-ref", "--format=%(refname:short)", "refs/heads")); + return localBranches.stream() + .map(String::trim) + .filter(s -> !s.isBlank()) + .sorted() + .toList(); + } + + private Map> collectAuthorBranches(List branches, DateWindow window) { + Map> result = new HashMap<>(); + String since = formatGitTime(window.sinceInclusive()); + String until = formatGitTime(window.untilExclusive()); + + for (String branch : branches) { + List lines = runGit(List.of( + "git", "log", branch, + 
"--first-parent", + "--since=" + since, + "--until=" + until, + "--no-merges", + "--pretty=format:%an" + )); + + if (lines.isEmpty()) { + continue; + } + + Map authorCount = lines.stream() + .map(String::trim) + .filter(s -> !s.isBlank()) + .collect(Collectors.groupingBy(s -> s, Collectors.counting())); + + String displayBranch = normalizeBranchName(branch); + authorCount.forEach((author, count) -> result + .computeIfAbsent(author, k -> new HashMap<>()) + .merge(displayBranch, count, Long::sum)); + } + + return result; + } + + private List buildAuthorStats(Map> authorBranches) { + return authorBranches.entrySet().stream() + .map(entry -> { + String author = entry.getKey(); + List branches = entry.getValue().entrySet().stream() + .map(e -> new BranchCount(e.getKey(), e.getValue())) + .sorted(Comparator.comparingLong(BranchCount::count).reversed() + .thenComparing(BranchCount::branch)) + .toList(); + long total = branches.stream().mapToLong(BranchCount::count).sum(); + return new AuthorBranchStat(author, total, branches); + }) + .sorted(Comparator.comparingLong(AuthorBranchStat::totalCommits).reversed() + .thenComparing(AuthorBranchStat::author)) + .toList(); + } + + private List buildAuthorContributions(List commits) { + Map commitCountByAuthor = new HashMap<>(); + Map> subjectsByAuthor = new HashMap<>(); + + for (CommitRecord commit : commits) { + String author = canonicalAuthorName(commit.author()); + commitCountByAuthor.merge(author, 1, Integer::sum); + subjectsByAuthor.computeIfAbsent(author, k -> new ArrayList<>()).add(commit.subject()); + } + + return commitCountByAuthor.entrySet().stream() + .map(entry -> { + String author = entry.getKey(); + List outputs = summarizeAuthorOutputs(subjectsByAuthor.getOrDefault(author, List.of()), entry.getValue()); + return new AuthorContribution(author, entry.getValue(), outputs); + }) + .sorted(Comparator.comparingInt(AuthorContribution::commitCount).reversed() + .thenComparing(AuthorContribution::author)) + .toList(); + } + 
+ private List summarizeAuthorOutputs(List subjects, int commitCount) { + if (subjects == null || subjects.isEmpty()) { + return List.of("常规功能迭代与缺陷修复"); + } + + int maxOutputs = maxOutputsForAuthor(commitCount); + int minOutputs = minOutputsForAuthor(commitCount); + + Set outputSet = new LinkedHashSet<>(); + for (String subject : subjects) { + String output = toLocalizedAuthorOutput(subject); + if (!StringUtils.hasText(output)) { + continue; + } + outputSet.add(output); + if (outputSet.size() >= maxOutputs) { + break; + } + } + + List outputs = new ArrayList<>(outputSet); + + Map phraseCounts = new HashMap<>(); + for (KeywordRule rule : KEYWORD_RULES) { + long count = subjects.stream() + .filter(StringUtils::hasText) + .filter(subject -> rule.pattern().matcher(subject).find()) + .count(); + if (count <= 0) { + continue; + } + String phrase = AUTHOR_KEYWORD_PHRASE_MAP.getOrDefault(rule.name(), rule.name()); + phraseCounts.merge(phrase, count, Long::sum); + } + + List phraseOutputs = phraseCounts.entrySet().stream() + .sorted(Map.Entry.comparingByValue().reversed() + .thenComparing(Map.Entry::getKey)) + .map(Map.Entry::getKey) + .toList(); + + if (outputs.size() < minOutputs) { + for (String phrase : phraseOutputs) { + if (outputs.size() >= maxOutputs) { + break; + } + if (!outputs.contains(phrase)) { + outputs.add(phrase); + } + if (outputs.size() >= minOutputs) { + break; + } + } + } + + if (!outputs.isEmpty()) { + return outputs.stream().limit(maxOutputs).toList(); + } + + if (!phraseOutputs.isEmpty()) { + return phraseOutputs.stream().limit(maxOutputs).toList(); + } + + return List.of("常规功能迭代与缺陷修复"); + } + + private String toLocalizedAuthorOutput(String subject) { + String normalized = normalizeCommitSubject(subject); + if (!StringUtils.hasText(normalized)) { + return ""; + } + + if (containsChinese(normalized)) { + return truncate(normalized, 42); + } + + return translateEnglishSubject(subject, normalized); + } + + private String translateEnglishSubject(String 
rawSubject, String normalizedSubject) { + String lower = rawSubject == null ? "" : rawSubject.toLowerCase(Locale.ROOT); + + if (lower.contains("weekly-report") || lower.contains("weekly report")) { + if (lower.contains("output coverage") || lower.contains("heavy contributors")) { + return "周报作者代表产出覆盖范围提升"; + } + if (lower.contains("author contributions") || lower.contains("branch breakdown")) { + return "周报改为按作者归类展示代表产出"; + } + if (lower.contains("chinese") || lower.contains("localization") || lower.contains("alias")) { + return "周报输出中文化与作者别名归并优化"; + } + if (lower.contains("safe.directory")) { + return "周报仓库 safe.directory 兼容修复"; + } + if (lower.contains("runtime image") || lower.contains("install git")) { + return "周报运行镜像补充 Git 依赖"; + } + if (lower.contains("origin branches") || lower.contains("repo path")) { + return "周报按 origin 分支聚合作者提交"; + } + if (lower.contains("summary") || lower.contains("stats")) { + return "周报统计总览与重点模块信息增强"; + } + return "周报生成能力优化"; + } + + if (lower.contains("gitlab") && (lower.contains("auth") || lower.contains("oauth"))) { + return "GitLab 认证与权限能力优化"; + } + + if ((lower.contains("risk") && lower.contains("ip")) || lower.contains("approval ip")) { + return "风险 IP 审计链路补齐"; + } + + if (lower.contains("deploy") || lower.contains("docker") || lower.contains("compose") || lower.contains("robot_env")) { + return "部署流程与环境治理优化"; + } + + if (lower.contains("readability") || lower.contains("table") || lower.contains("sidebar") || lower.contains("layout")) { + return "前端可读性与交互体验优化"; + } + + if (lower.contains("terminate") || lower.contains("websocket") || lower.contains("redis") || lower.contains("task")) { + return "任务执行与终止链路优化"; + } + + if (lower.contains("approval")) { + return "审批流程能力优化"; + } + + String singleWord = normalizedSubject.toLowerCase(Locale.ROOT); + if (singleWord.matches("^(fix|test|wip|tmp|misc|draft|done)$")) { + return authorThemeLabel(rawSubject); + } + + return authorThemeLabel(rawSubject); + } + + private String 
authorThemeLabel(String subject) { + if (!StringUtils.hasText(subject)) { + return "通用能力迭代"; + } + + String normalizedSubject = subject.toLowerCase(Locale.ROOT); + if (normalizedSubject.contains("weekly-report") || normalizedSubject.contains("weekly report") || normalizedSubject.contains("周报")) { + return "周报能力优化"; + } + + for (ThemeRule rule : THEME_RULES) { + if (!rule.pattern().matcher(subject).find()) { + continue; + } + return switch (rule.name()) { + case "任务执行与终止链路" -> "任务执行与终止链路优化"; + case "审批与风控能力" -> "审批风控能力优化"; + case "GitLab 认证与权限" -> "GitLab 认证与权限优化"; + case "部署与环境治理" -> "部署与环境治理优化"; + case "前端体验与可读性" -> "前端体验与可读性优化"; + case "文档与规范" -> "文档与规范同步"; + default -> "通用能力迭代"; + }; + } + return "通用能力迭代"; + } + + private int maxOutputsForAuthor(int commitCount) { + if (commitCount >= 100) { + return 8; + } + if (commitCount >= 70) { + return 7; + } + if (commitCount >= 40) { + return 6; + } + if (commitCount >= 20) { + return 5; + } + return 4; + } + + private int minOutputsForAuthor(int commitCount) { + if (commitCount >= 100) { + return 6; + } + if (commitCount >= 70) { + return 5; + } + if (commitCount >= 40) { + return 4; + } + if (commitCount >= 20) { + return 3; + } + return 2; + } + + private String canonicalAuthorName(String rawAuthor) { + if (!StringUtils.hasText(rawAuthor)) { + return "未知作者"; + } + String author = rawAuthor.trim(); + String alias = AUTHOR_ALIAS_MAP.get(author); + if (StringUtils.hasText(alias)) { + return alias; + } + + String normalized = normalizeAuthorKey(author); + for (Map.Entry entry : AUTHOR_ALIAS_MAP.entrySet()) { + if (normalizeAuthorKey(entry.getKey()).equals(normalized)) { + return entry.getValue(); + } + } + return author; + } + + private String normalizeAuthorKey(String author) { + return author.toLowerCase(Locale.ROOT).replaceAll("[^a-z0-9\u4e00-\u9fa5]", ""); + } + + private boolean containsChinese(String text) { + if (!StringUtils.hasText(text)) { + return false; + } + for (int i = 0; i < text.length(); i++) { + 
Character.UnicodeScript script = Character.UnicodeScript.of(text.charAt(i));
            if (script == Character.UnicodeScript.HAN) {
                return true;
            }
        }
        return false;
    }

    /**
     * Lists every non-merge commit across all refs inside the window, parsed
     * from tab-separated `git log` output: hash, author name, subject.
     */
    private List<CommitRecord> listAllCommits(DateWindow window) {
        String since = formatGitTime(window.sinceInclusive());
        String until = formatGitTime(window.untilExclusive());

        List<String> rows = runGit(List.of(
                "git", "log", "--all",
                "--since=" + since,
                "--until=" + until,
                "--no-merges",
                "--pretty=format:%H%x09%an%x09%s"
        ));

        List<CommitRecord> commits = new ArrayList<>();
        for (String row : rows) {
            String entry = row.trim();
            if (entry.isEmpty()) {
                continue;
            }
            // Split into at most 3 fields so tabs inside the subject survive.
            String[] fields = entry.split("\\t", 3);
            if (fields.length >= 3) {
                commits.add(new CommitRecord(fields[0], fields[1], fields[2]));
            }
        }
        return commits;
    }

    /** Counts merge commits across all refs inside the window. */
    private int countMergeCommits(DateWindow window) {
        String since = formatGitTime(window.sinceInclusive());
        String until = formatGitTime(window.untilExclusive());

        List<String> lines = runGit(List.of(
                "git", "log", "--all",
                "--since=" + since,
                "--until=" + until,
                "--merges",
                "--pretty=format:%H"
        ));
        return (int) lines.stream().filter(StringUtils::hasText).count();
    }

    private DiffStats collectDiffStats(DateWindow window) {
        String since = formatGitTime(window.sinceInclusive());
        String until = formatGitTime(window.untilExclusive());

        List lines = runGit(List.of(
                "git", "log", "--all",
                "--since=" + since,
                "--until=" + until,
                "--no-merges",
                "--pretty=tformat:",
                "--numstat"
        ));

        long added = 0;
        long deleted = 0;
        for (String line : lines) {
            if (!StringUtils.hasText(line)) {
                continue;
            }
            String[] parts = line.split("\t");
            if (parts.length < 2) {
                continue;
            }
            if (!"-".equals(parts[0])) {
                try {
                    added += Long.parseLong(parts[0]);
                } catch (NumberFormatException ignore) {
                    // ignore malformed numstat
                }
            }
            if (!"-".equals(parts[1])) {
                try {
                    deleted +=
Long.parseLong(parts[1]); + } catch (NumberFormatException ignore) { + // ignore malformed numstat + } + } + } + + return new DiffStats(added, deleted); + } + + private List summarizeFocusModules(DateWindow window) { + String since = formatGitTime(window.sinceInclusive()); + String until = formatGitTime(window.untilExclusive()); + + List lines = runGit(List.of( + "git", "log", "--all", + "--since=" + since, + "--until=" + until, + "--no-merges", + "--name-only", + "--pretty=format:" + )); + + Map moduleCounts = new HashMap<>(); + for (String line : lines) { + String filePath = line == null ? "" : line.trim(); + if (filePath.isEmpty()) { + continue; + } + String module = toModuleName(filePath); + moduleCounts.merge(module, 1L, Long::sum); + } + + return moduleCounts.entrySet().stream() + .map(e -> new ModuleCount(e.getKey(), e.getValue())) + .sorted(Comparator.comparingLong(ModuleCount::count).reversed() + .thenComparing(ModuleCount::module)) + .limit(3) + .toList(); + } + + private String toModuleName(String filePath) { + String normalized = filePath.replace('\\', '/'); + int idx = normalized.indexOf('/'); + if (idx < 0) { + return normalized; + } + String top = normalized.substring(0, idx); + if ("robot-web-front".equals(top) || "robot-web-service".equals(top) || "docs".equals(top)) { + return top; + } + return top; + } + + private List summarizeThemes(List commits) { + Map statMap = new LinkedHashMap<>(); + for (ThemeRule rule : THEME_RULES) { + statMap.put(rule.name(), new ThemeStatBuilder(rule.name())); + } + ThemeStatBuilder other = new ThemeStatBuilder("综合修复与其他"); + + for (CommitRecord commit : commits) { + String subject = commit.subject(); + boolean matched = false; + for (ThemeRule rule : THEME_RULES) { + if (rule.pattern().matcher(subject).find()) { + statMap.get(rule.name()).addSample(subject); + matched = true; + break; + } + } + if (!matched) { + other.addSample(subject); + } + } + + List top = statMap.values().stream() + .filter(v -> v.count > 0) + 
.map(ThemeStatBuilder::build) + .sorted(Comparator.comparingLong(ThemeStat::count).reversed()) + .limit(5) + .collect(Collectors.toCollection(ArrayList::new)); + + ThemeStat otherStat = other.build(); + if (otherStat.count() > 0 && top.size() < 5) { + top.add(otherStat); + } + + return top; + } + + private List summarizeKeywords(List commits) { + List counts = new ArrayList<>(); + for (KeywordRule rule : KEYWORD_RULES) { + long count = commits.stream() + .map(CommitRecord::subject) + .filter(subject -> rule.pattern().matcher(subject).find()) + .count(); + if (count > 0) { + counts.add(new KeywordCount(rule.name(), count)); + } + } + + return counts.stream() + .sorted(Comparator.comparingLong(KeywordCount::count).reversed()) + .limit(8) + .toList(); + } + + private long countActiveBranches(List authorStats) { + Set branches = new LinkedHashSet<>(); + for (AuthorBranchStat author : authorStats) { + for (BranchCount branch : author.branches()) { + branches.add(branch.branch()); + } + } + return branches.size(); + } + + private String formatGitTime(LocalDateTime time) { + return time.format(GIT_TIME_FMT); + } + + private String normalizeBranchName(String raw) { + if (!StringUtils.hasText(raw)) { + return "-"; + } + String branch = raw.trim(); + if (branch.startsWith("origin/")) { + return branch.substring("origin/".length()); + } + return branch; + } + + private List runGit(List command) { + Path repoDir = resolveRepoPath(); + if (!Files.exists(repoDir.resolve(".git"))) { + throw new IllegalStateException("未找到 Git 仓库,请配置 robot.weekly-report.repo-path"); + } + + List actualCommand = withSafeDirectory(command, repoDir); + ProcessBuilder pb = new ProcessBuilder(actualCommand); + pb.directory(repoDir.toFile()); + + try { + Process process = pb.start(); + List lines; + try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) { + lines = reader.lines().toList(); + } + + String err; + try (BufferedReader 
errReader = new BufferedReader(new InputStreamReader(process.getErrorStream(), StandardCharsets.UTF_8))) {
                err = errReader.lines().collect(Collectors.joining("\n"));
            }

            int code = process.waitFor();
            if (code != 0) {
                throw new IllegalStateException(String.format("git 命令执行失败: %s, err=%s", String.join(" ", actualCommand), err));
            }
            return lines;
        } catch (Exception e) {
            throw new IllegalStateException("读取 git 记录失败", e);
        }
    }

    /**
     * Prepends `-c safe.directory=<repo>` to a git command so git accepts a
     * repository owned by another user (container / mounted-volume setups).
     * Non-git or empty commands are returned untouched.
     */
    private List<String> withSafeDirectory(List<String> command, Path repoDir) {
        if (command == null || command.isEmpty() || !"git".equals(command.get(0))) {
            return command;
        }

        List<String> patched = new ArrayList<>(command.size() + 2);
        patched.add("git");
        patched.add("-c");
        patched.add("safe.directory=" + repoDir);
        patched.addAll(command.subList(1, command.size()));
        return patched;
    }

    /**
     * Resolves the repository to inspect: the configured path when it holds a
     * .git directory, otherwise the nearest ancestor of the working directory
     * that does; falls back to the working directory itself.
     */
    private Path resolveRepoPath() {
        if (configuredRepoPath != null && !configuredRepoPath.isBlank()) {
            Path configured = Paths.get(configuredRepoPath).toAbsolutePath().normalize();
            if (Files.exists(configured.resolve(".git"))) {
                return configured;
            }
            log.warn("robot.weekly-report.repo-path 未找到 .git: {}", configured);
        }

        Path current = Paths.get("").toAbsolutePath().normalize();
        for (Path cursor = current; cursor != null; cursor = cursor.getParent()) {
            if (Files.exists(cursor.resolve(".git"))) {
                return cursor;
            }
        }
        return current;
    }

    /** Reporting window: [sinceInclusive, untilExclusive). */
    private record DateWindow(LocalDate startDate,
                              LocalDate endDate,
                              LocalDateTime sinceInclusive,
                              LocalDateTime untilExclusive) {
    }

    private record CommitRecord(String hash, String author, String subject) {
    }

    private record ThemeRule(String name, Pattern pattern) {
    }

    private record KeywordRule(String name, Pattern pattern) {
    }

    private record BranchCount(String branch, long count) {
    }

    private record ModuleCount(String module, long count) {
    }

    private record DiffStats(long addedLines, long deletedLines) {
    }

    private record AuthorBranchStat(String
author, long totalCommits, List branches) {
    }

    /** One author's share of the week: commit count plus representative outputs. */
    private record AuthorContribution(String author, int commitCount, List outputs) {
    }

    /** Aggregated stats for one theme: match count plus up to two sample subjects. */
    private record ThemeStat(String name, long count, List samples) {
    }

    /** How often one keyword rule matched across the window's commit subjects. */
    private record KeywordCount(String keyword, long count) {
    }

    /** Everything the weekly-report renderer needs, gathered into one value object. */
    private record WeeklyReportData(LocalDate startDate,
                                    LocalDate endDate,
                                    int commitCount,
                                    int mergeCommitCount,
                                    long addedLines,
                                    long deletedLines,
                                    Set authors,
                                    long activeBranchCount,
                                    List focusModules,
                                    List authorContributions,
                                    List themeStats,
                                    List keywordCounts) {
    }

    /** Mutable accumulator for one theme's count and sample subjects. */
    private static class ThemeStatBuilder {
        private final String name;
        private long count;
        // At most two sample subjects are kept, each truncated to 80 chars.
        private final List samples = new ArrayList<>();

        private ThemeStatBuilder(String name) {
            this.name = name;
        }

        private void addSample(String sample) {
            this.count++;
            if (samples.size() < 2) {
                samples.add(truncate(sample, 80));
            }
        }

        private ThemeStat build() {
            return new ThemeStat(name, count, samples);
        }

        /** Ellipsis-truncates to maxLen chars; null collapses to "-". */
        private static String truncate(String value, int maxLen) {
            if (value == null) {
                return "-";
            }
            return value.length() > maxLen ? value.substring(0, maxLen) + "..."
: value;
        }
    }
}
diff --git a/back/src/main/java/com/linkwork/service/WorkspaceService.java b/back/src/main/java/com/linkwork/service/WorkspaceService.java new file mode 100644 index 0000000..2e86f59 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/WorkspaceService.java @@ -0,0 +1,54 @@
package com.linkwork.service;

import com.linkwork.model.FileNode;
import org.springframework.stereotype.Service;

import java.util.ArrayList;
import java.util.List;

/**
 * Serves the workspace file tree for a task.
 * Currently returns hard-coded mock data mirroring the front-end fixtures.
 */
@Service
public class WorkspaceService {

    /**
     * Returns the file tree for the given task.
     * The taskId is only interpolated into the mock README content for now.
     */
    public List listFiles(String taskId) {
        // In a real deployment this would read the /tmp/workspace/{taskId} directory.
        // For now it returns mock data matching the front end, for integration testing.
        List files = new ArrayList<>();

        FileNode src = FileNode.builder()
                .name("src")
                .type("directory")
                .children(new ArrayList<>())
                .build();

        src.getChildren().add(FileNode.builder()
                .name("main.py")
                .type("file")
                .content("import os\nimport time\n\ndef main():\n print(\"Robot Agent starting...\")\n time.sleep(1)\n print(\"Initialization complete.\")\n\nif __name__ == \"__main__\":\n main()")
                .size("1.2KB")
                .build());

        src.getChildren().add(FileNode.builder()
                .name("utils.py")
                .type("file")
                .content("def format_bytes(size):\n power = 2**10\n n = 0\n power_labels = {0 : '', 1: 'K', 2: 'M', 3: 'G'}\n while size > power:\n size /= power\n n += 1\n return f\"{size:.2f} {power_labels[n]}B\"")
                .size("0.8KB")
                .build());

        files.add(src);
        files.add(FileNode.builder()
                .name("requirements.txt")
                .type("file")
                .content("anthropic>=0.40.0\nrequests>=2.31.0\npydantic>=2.5.0\nhttpx>=0.26.0")
                .size("0.1KB")
                .build());

        files.add(FileNode.builder()
                .name("README.md")
                .type("file")
                .content("# Robot Agent Project\n\nThis workspace is served from robot-web-service for task: " + taskId)
                .size("0.5KB")
                .build());

        return files;
    }
}
diff --git a/back/src/main/java/com/linkwork/service/ZzdServiceAuthService.java b/back/src/main/java/com/linkwork/service/ZzdServiceAuthService.java new file mode 100644 index 0000000..e3ac982 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/ZzdServiceAuthService.java @@ -0,0 +1,42 @@
package com.linkwork.service;

import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import org.springframework.util.StringUtils;

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;

/**
 * zzd service identity authentication.
 */
@Slf4j
@Service
public class ZzdServiceAuthService {

    // Shared server token; when empty, every request is rejected (fail closed).
    @Value("${robot.zzd.api-server-token:}")
    private String apiServerToken;

    /**
     * Extracts the token from an "Authorization: Bearer &lt;token&gt;" header.
     * Returns null when the header is missing, malformed, or the token is blank.
     */
    public String extractBearerToken(String authorization) {
        if (!StringUtils.hasText(authorization) || !authorization.startsWith("Bearer ")) {
            return null;
        }
        String token = authorization.substring(7).trim();
        return StringUtils.hasText(token) ? token : null;
    }

    /**
     * Validates a caller token against the configured server token.
     * Uses MessageDigest.isEqual for a constant-time comparison (timing-safe).
     */
    public boolean validateToken(String token) {
        if (!StringUtils.hasText(token)) {
            return false;
        }
        if (!StringUtils.hasText(apiServerToken)) {
            log.error("ZZD_API_SERVER_TOKEN 未配置,拒绝访问");
            return false;
        }
        return MessageDigest.isEqual(
                apiServerToken.getBytes(StandardCharsets.UTF_8),
                token.getBytes(StandardCharsets.UTF_8)
        );
    }
}
diff --git a/back/src/main/java/com/linkwork/service/memory/DocumentParserService.java b/back/src/main/java/com/linkwork/service/memory/DocumentParserService.java new file mode 100644 index 0000000..5a07709 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/memory/DocumentParserService.java @@ -0,0 +1,65 @@
package com.linkwork.service.memory;

import lombok.extern.slf4j.Slf4j;
import org.apache.tika.Tika;
import org.apache.tika.exception.TikaException;
import org.springframework.stereotype.Service;

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;

/**
 * Document parser using Apache Tika.
 * Converts PDF, Word, HTML, and other formats to plain text.
+ */ +@Slf4j +@org.springframework.boot.autoconfigure.condition.ConditionalOnProperty(name = "memory.enabled", havingValue = "true", matchIfMissing = true) +@Service +public class DocumentParserService { + + private final Tika tika = new Tika(); + + /** + * Parse a file to plain text. + * For .md/.txt files, reads raw content directly (preserving markdown structure). + * For other formats (PDF, Word, HTML), uses Tika extraction. + */ + public String parseFile(Path filePath) throws IOException { + String fileName = filePath.getFileName().toString().toLowerCase(); + if (fileName.endsWith(".md") || fileName.endsWith(".markdown") || fileName.endsWith(".txt")) { + return Files.readString(filePath); + } + try (InputStream is = Files.newInputStream(filePath)) { + return tika.parseToString(is); + } catch (TikaException e) { + throw new IOException("Tika parsing failed for " + filePath, e); + } + } + + /** + * Detect file type from extension. + */ + public String detectFileType(Path filePath) { + String name = filePath.getFileName().toString().toLowerCase(); + if (name.endsWith(".md") || name.endsWith(".markdown")) return "markdown"; + if (name.endsWith(".txt")) return "text"; + if (name.endsWith(".pdf")) return "pdf"; + if (name.endsWith(".docx") || name.endsWith(".doc")) return "word"; + if (name.endsWith(".html") || name.endsWith(".htm")) return "html"; + if (name.endsWith(".pptx") || name.endsWith(".ppt")) return "ppt"; + if (name.endsWith(".xlsx") || name.endsWith(".xls")) return "excel"; + return "unknown"; + } + + /** + * Check if a file type is supported for memory indexing. 
+ */ + public boolean isIndexable(String fileType) { + return switch (fileType) { + case "markdown", "text", "pdf", "word", "html" -> true; + default -> false; + }; + } +} diff --git a/back/src/main/java/com/linkwork/service/memory/EmbeddingService.java b/back/src/main/java/com/linkwork/service/memory/EmbeddingService.java new file mode 100644 index 0000000..4035284 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/memory/EmbeddingService.java @@ -0,0 +1,87 @@ +package com.linkwork.service.memory; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.linkwork.config.MemoryConfig; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.http.*; +import org.springframework.stereotype.Service; +import org.springframework.web.client.RestTemplate; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +/** + * Embedding service that calls the LLM Gateway's OpenAI-compatible embedding endpoint. + */ +@Slf4j +@org.springframework.boot.autoconfigure.condition.ConditionalOnProperty(name = "memory.enabled", havingValue = "true", matchIfMissing = true) +@Service +@RequiredArgsConstructor +public class EmbeddingService { + + private final MemoryConfig memoryConfig; + private final ObjectMapper objectMapper; + private final RestTemplate restTemplate = new RestTemplate(); + + @Value("${schedule.network.llm-gateway-url:http://llm-gateway:8080}") + private String llmGatewayUrl; + + /** + * Generate embeddings for a batch of texts. + * Calls POST {llmGatewayUrl}/v1/embeddings with OpenAI-compatible format. 
+ */ + public List> embed(List texts) { + if (texts == null || texts.isEmpty()) return List.of(); + + String url = llmGatewayUrl + "/v1/embeddings"; + Map body = Map.of( + "model", memoryConfig.getEmbedding().getModel(), + "input", texts + ); + + HttpHeaders headers = new HttpHeaders(); + headers.setContentType(MediaType.APPLICATION_JSON); + HttpEntity> entity = new HttpEntity<>(body, headers); + + try { + ResponseEntity response = restTemplate.exchange(url, HttpMethod.POST, entity, String.class); + return parseEmbeddingResponse(response.getBody()); + } catch (Exception e) { + log.error("Embedding API call failed: {}", e.getMessage(), e); + throw new RuntimeException("Embedding API call failed", e); + } + } + + /** + * Generate embedding for a single text. + */ + public List embedSingle(String text) { + List> results = embed(List.of(text)); + if (results.isEmpty()) throw new RuntimeException("Empty embedding result"); + return results.get(0); + } + + private List> parseEmbeddingResponse(String responseBody) { + try { + JsonNode root = objectMapper.readTree(responseBody); + JsonNode data = root.get("data"); + List> embeddings = new ArrayList<>(); + for (JsonNode item : data) { + JsonNode embNode = item.get("embedding"); + List embedding = new ArrayList<>(); + for (JsonNode val : embNode) { + embedding.add(val.floatValue()); + } + embeddings.add(embedding); + } + return embeddings; + } catch (Exception e) { + log.error("Failed to parse embedding response: {}", responseBody, e); + throw new RuntimeException("Failed to parse embedding response", e); + } + } +} diff --git a/back/src/main/java/com/linkwork/service/memory/MemoryIndexConsumer.java b/back/src/main/java/com/linkwork/service/memory/MemoryIndexConsumer.java new file mode 100644 index 0000000..049b661 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/memory/MemoryIndexConsumer.java @@ -0,0 +1,95 @@ +package com.linkwork.service.memory; + +import com.fasterxml.jackson.databind.ObjectMapper; +import 
com.linkwork.config.MemoryConfig;
import com.linkwork.model.dto.MemoryIndexJob;
import jakarta.annotation.PostConstruct;
import jakarta.annotation.PreDestroy;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.stereotype.Service;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * Background consumer that polls a Redis list for memory index jobs and
 * hands each JSON payload to MemoryService for synchronous processing.
 */
@Slf4j
@org.springframework.boot.autoconfigure.condition.ConditionalOnProperty(name = "memory.enabled", havingValue = "true", matchIfMissing = true)
@Service
@RequiredArgsConstructor
public class MemoryIndexConsumer {

    private final StringRedisTemplate redisTemplate;
    private final ObjectMapper objectMapper;
    private final MemoryConfig memoryConfig;
    private final MemoryService memoryService;

    // Flag checked by the poll loop; cleared on shutdown.
    private final AtomicBoolean running = new AtomicBoolean(true);
    private ExecutorService executorService;

    // Sleep between empty polls; doubled after an unexpected error.
    private static final int POLL_INTERVAL_MS = 1000;

    /** Starts the single daemon consumer thread unless the feature is disabled. */
    @PostConstruct
    public void start() {
        if (!memoryConfig.isEnabled()) {
            log.info("Memory service disabled, MemoryIndexConsumer not started");
            return;
        }
        executorService = Executors.newSingleThreadExecutor(r -> {
            Thread t = new Thread(r, "memory-index-consumer");
            t.setDaemon(true);
            return t;
        });
        executorService.submit(this::consumeLoop);
        log.info("MemoryIndexConsumer started, queue: {}", memoryConfig.getIndex().getQueueKey());
    }

    /** Stops the loop and interrupts the worker thread. */
    @PreDestroy
    public void stop() {
        running.set(false);
        if (executorService != null) {
            executorService.shutdownNow();
        }
        log.info("MemoryIndexConsumer stopped");
    }

    // Poll loop: RPOP one job at a time; sleep when the queue is empty,
    // back off (2x interval) after unexpected errors, exit on interrupt.
    private void consumeLoop() {
        String queueKey = memoryConfig.getIndex().getQueueKey();
        while (running.get()) {
            try {
                String json = redisTemplate.opsForList().rightPop(queueKey);
                if (json != null) {
                    processJob(json);
                } else {
                    Thread.sleep(POLL_INTERVAL_MS);
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                break;
            } catch (Exception e) {
                if (running.get()) {
                    log.error("Error consuming memory index job", e);
                    try {
                        Thread.sleep(POLL_INTERVAL_MS * 2L);
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt();
                        break;
                    }
                }
            }
        }
    }

    // Deserializes one queued job and delegates to MemoryService.
    // Failures are logged and swallowed so one bad payload cannot kill the loop.
    private void processJob(String json) {
        try {
            MemoryIndexJob job = objectMapper.readValue(json, MemoryIndexJob.class);
            log.info("Processing index job: id={}, type={}, source={}",
                    job.getJobId(), job.getJobType(), job.getSource());
            memoryService.processIndexJob(job);
            log.info("Completed index job: id={}", job.getJobId());
        } catch (Exception e) {
            log.error("Failed to process index job: {}", json, e);
        }
    }
}
diff --git a/back/src/main/java/com/linkwork/service/memory/MemoryService.java b/back/src/main/java/com/linkwork/service/memory/MemoryService.java new file mode 100644 index 0000000..605eca3 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/memory/MemoryService.java @@ -0,0 +1,248 @@
package com.linkwork.service.memory;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.linkwork.config.MemoryConfig;
import com.linkwork.model.dto.MemoryIndexJob;
import com.linkwork.mapper.WorkspaceFileMapper;
import com.linkwork.model.dto.MemoryIndexJob.JobType;
import com.linkwork.model.entity.WorkspaceFile;
import com.linkwork.service.NfsStorageService;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.stereotype.Service;

import java.nio.file.Path;
import java.util.*;

/**
 * Facade for the per-workstation/per-user memory store: enqueues index jobs
 * onto Redis and performs search/ingest/maintenance against Milvus.
 */
@Slf4j
@org.springframework.boot.autoconfigure.condition.ConditionalOnProperty(name = "memory.enabled", havingValue = "true", matchIfMissing = true)
@Service
@RequiredArgsConstructor
public class MemoryService {

    private final MemoryConfig
memoryConfig; + private final MilvusStoreService milvusStore; + private final TextChunkerService chunker; + private final DocumentParserService documentParser; + private final EmbeddingService embeddingService; + private final StringRedisTemplate redisTemplate; + private final ObjectMapper objectMapper; + + @Autowired(required = false) + private NfsStorageService nfsStorageService; + + @Autowired(required = false) + private WorkspaceFileMapper workspaceFileMapper; + + public List> search(String workstationId, String userId, String query, int topK) { + String collection = memoryConfig.collectionName(workstationId, userId); + List embedding = embeddingService.embedSingle(query); + return milvusStore.search(collection, embedding, query, topK); + } + + public void ingest(String workstationId, String userId, String content, String source) { + MemoryIndexJob job = MemoryIndexJob.builder() + .jobId(UUID.randomUUID().toString()) + .workstationId(workstationId) + .userId(userId) + .jobType(JobType.SESSION_SUMMARY) + .content(content) + .source(source.isEmpty() ? 
"session-summary/" + java.time.LocalDate.now() : source) + .build(); + enqueueJob(job); + } + + public void triggerIndexFile(String workstationId, String userId, String filePath) { + Path path = Path.of(filePath); + String fileType = documentParser.detectFileType(path); + MemoryIndexJob job = MemoryIndexJob.builder() + .jobId(UUID.randomUUID().toString()) + .workstationId(workstationId) + .userId(userId) + .jobType(JobType.FILE_UPLOAD) + .filePath(filePath) + .source(filePath) + .fileType(fileType) + .build(); + enqueueJob(job); + } + + public int triggerBatchIndex(String workstationId, String userId, List filePaths) { + int count = 0; + for (String fp : filePaths) { + Path path = Path.of(fp); + String fileType = documentParser.detectFileType(path); + if (!documentParser.isIndexable(fileType)) { + log.debug("Skipping non-indexable file: {} (type={})", fp, fileType); + continue; + } + MemoryIndexJob job = MemoryIndexJob.builder() + .jobId(UUID.randomUUID().toString()) + .workstationId(workstationId) + .userId(userId) + .jobType(JobType.MEMORY_WRITEBACK) + .filePath(fp) + .source(fp) + .fileType(fileType) + .build(); + enqueueJob(job); + count++; + } + log.info("Enqueued batch index: {} files for ws={}, user={}", count, workstationId, userId); + return count; + } + + public List> recent(String workstationId, String userId, int limit) { + String collection = memoryConfig.collectionName(workstationId, userId); + return milvusStore.recent(collection, limit); + } + + public Map stats(String workstationId, String userId) { + String collection = memoryConfig.collectionName(workstationId, userId); + long count = milvusStore.count(collection); + return Map.of( + "collection", collection, + "chunkCount", count, + "workstationId", workstationId, + "userId", userId + ); + } + + public void deleteSource(String workstationId, String userId, String source) { + String collection = memoryConfig.collectionName(workstationId, userId); + milvusStore.deleteBySource(collection, source); 
    }

    /**
     * Process an index job synchronously (called by MemoryIndexConsumer).
     * Incremental: only new/changed chunks upserted, stale chunks deleted.
     */
    public void processIndexJob(MemoryIndexJob job) {
        // Prefer the job's explicit collection; otherwise derive it from owner ids.
        String collection = job.getCollectionName() != null && !job.getCollectionName().isBlank()
                ? job.getCollectionName()
                : memoryConfig.collectionName(job.getWorkstationId(), job.getUserId());
        String model = memoryConfig.getEmbedding().getModel();

        String text;
        String source = job.getSource();

        // Resolve the raw text: inline content, a remote NFS/OSS object, or a local file.
        if (job.getJobType() == JobType.SESSION_SUMMARY) {
            text = job.getContent();
        } else if (("NFS".equals(job.getStorageType()) || "OSS".equals(job.getStorageType())) && job.getObjectName() != null) {
            if (nfsStorageService == null) {
                log.warn("Skip NFS memory index job because NfsStorageService is unavailable: jobId={}", job.getJobId());
                return;
            }
            Path temp = null;
            try {
                // Download to a temp file, parse, then always clean the temp file up.
                temp = nfsStorageService.downloadToTempFile(job.getObjectName());
                text = documentParser.parseFile(temp);
                source = job.getObjectName();
            } catch (Exception e) {
                log.error("Failed to parse NFS object {}: {}", job.getObjectName(), e.getMessage(), e);
                return;
            } finally {
                if (temp != null) {
                    try {
                        java.nio.file.Files.deleteIfExists(temp);
                    } catch (Exception e) {
                        log.warn("Failed to cleanup temp file {}: {}", temp, e.getMessage());
                    }
                }
            }
        } else {
            try {
                text = documentParser.parseFile(Path.of(job.getFilePath()));
            } catch (Exception e) {
                log.error("Failed to parse file {}: {}", job.getFilePath(), e.getMessage(), e);
                return;
            }
        }

        // An empty chunking result means the source shrank to nothing: drop its data.
        List chunks = chunker.chunkMarkdown(text, source);
        if (chunks.isEmpty()) {
            milvusStore.deleteBySource(collection, source);
            log.info("No chunks from source {}, removed stale data", source);
            return;
        }

        // Chunk ids are content-addressed (source + line span + hash + model),
        // so unchanged chunks keep the same id across re-index runs.
        Map newChunkMap = new LinkedHashMap<>();
        for (TextChunkerService.Chunk c : chunks) {
            String id = chunker.computeChunkId(c.source(), c.startLine(), c.endLine(), c.contentHash(), model);
            newChunkMap.put(id, c);
        }

        Set existingHashes = milvusStore.hashesBySource(collection, source);

        // Stale = previously indexed for this source but absent from the new chunk set.
        Set staleHashes = new HashSet<>(existingHashes);
        staleHashes.removeAll(newChunkMap.keySet());
        if (!staleHashes.isEmpty()) {
            milvusStore.deleteByHashes(collection, new ArrayList<>(staleHashes));
            log.info("Removed {} stale chunks from source {}", staleHashes.size(), source);
        }

        // Only embed and upsert chunks not already present.
        List newIds = new ArrayList<>();
        List newChunks = new ArrayList<>();
        for (Map.Entry entry : newChunkMap.entrySet()) {
            if (!existingHashes.contains(entry.getKey())) {
                newIds.add(entry.getKey());
                newChunks.add(entry.getValue());
            }
        }

        if (newChunks.isEmpty()) {
            log.debug("All chunks already indexed for source {}", source);
            return;
        }

        List contents = newChunks.stream().map(TextChunkerService.Chunk::content).toList();
        List> embeddings = embeddingService.embed(contents);
        long now = System.currentTimeMillis();

        // Build one record per new chunk; field names match the Milvus schema.
        List> records = new ArrayList<>();
        for (int i = 0; i < newChunks.size(); i++) {
            TextChunkerService.Chunk c = newChunks.get(i);
            Map rec = new HashMap<>();
            rec.put("chunk_hash", newIds.get(i));
            rec.put("embedding", embeddings.get(i));
            rec.put("content", c.content());
            rec.put("source", c.source());
            rec.put("heading", c.heading());
            rec.put("heading_level", (long) c.headingLevel());
            rec.put("start_line", (long) c.startLine());
            rec.put("end_line", (long) c.endLine());
            rec.put("file_type", job.getFileType() != null ? job.getFileType() : "");
            rec.put("indexed_at", now);
            records.add(rec);
        }

        int upserted = milvusStore.upsert(collection, records);
        log.info("Indexed {} new chunks from source {} into {}", upserted, source, collection);

        // For NFS/OSS-backed files, mark the workspace DB row as indexed (best effort).
        if (workspaceFileMapper != null && job.getObjectName() != null
                && ("NFS".equals(job.getStorageType()) || "OSS".equals(job.getStorageType()))) {
            WorkspaceFile file = workspaceFileMapper.selectOne(new com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper()
                    .and(w -> w.eq(WorkspaceFile::getOssPath, job.getObjectName())
                            .or().eq(WorkspaceFile::getParsedOssPath, job.getObjectName()))
                    .isNull(WorkspaceFile::getDeletedAt)
                    .last("limit 1"));
            if (file != null) {
                file.setMemoryIndexStatus("INDEXED");
                file.setUpdatedAt(java.time.LocalDateTime.now());
                workspaceFileMapper.updateById(file);
            }
        }
    }

    // Serializes the job and LPUSHes it onto the Redis index queue;
    // serialization failures surface to the caller as RuntimeException.
    private void enqueueJob(MemoryIndexJob job) {
        try {
            String json = objectMapper.writeValueAsString(job);
            redisTemplate.opsForList().leftPush(memoryConfig.getIndex().getQueueKey(), json);
        } catch (Exception e) {
            log.error("Failed to enqueue index job: {}", e.getMessage(), e);
            throw new RuntimeException("Failed to enqueue memory index job", e);
        }
    }
}
diff --git a/back/src/main/java/com/linkwork/service/memory/MilvusStoreService.java b/back/src/main/java/com/linkwork/service/memory/MilvusStoreService.java new file mode 100644 index 0000000..512cf12 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/memory/MilvusStoreService.java @@ -0,0 +1,397 @@
package com.linkwork.service.memory;

import com.linkwork.config.MemoryConfig;
import io.milvus.common.clientenum.FunctionType;
import io.milvus.v2.client.ConnectConfig;
import io.milvus.v2.client.MilvusClientV2;
import io.milvus.v2.common.DataType;
import io.milvus.v2.common.IndexParam;
import io.milvus.v2.service.collection.request.AddFieldReq;
import io.milvus.v2.service.collection.request.CreateCollectionReq;
import
io.milvus.v2.service.collection.request.HasCollectionReq; +import io.milvus.v2.service.vector.request.*; +import io.milvus.v2.service.vector.request.data.EmbeddedText; +import io.milvus.v2.service.vector.request.data.FloatVec; +import io.milvus.v2.service.vector.request.ranker.RRFRanker; +import io.milvus.v2.service.vector.response.DeleteResp; +import io.milvus.v2.service.vector.response.QueryResp; +import io.milvus.v2.service.vector.response.SearchResp; +import io.milvus.v2.service.vector.response.UpsertResp; +import jakarta.annotation.PostConstruct; +import jakarta.annotation.PreDestroy; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Service; + +import java.util.*; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.ConcurrentHashMap; + +@Slf4j +@org.springframework.boot.autoconfigure.condition.ConditionalOnProperty(name = "memory.enabled", havingValue = "true", matchIfMissing = true) +@Service +@RequiredArgsConstructor +public class MilvusStoreService { + + private final MemoryConfig memoryConfig; + private MilvusClientV2 client; + private final Set knownCollections = ConcurrentHashMap.newKeySet(); + private volatile boolean available = false; + private final AtomicBoolean unavailableWarned = new AtomicBoolean(false); + + static final List QUERY_FIELDS = List.of( + "content", "source", "heading", "chunk_hash", + "heading_level", "start_line", "end_line", "file_type", "indexed_at" + ); + + @PostConstruct + public void init() { + if (!memoryConfig.isEnabled()) { + log.info("Memory service disabled, skipping Milvus connection"); + return; + } + try { + ConnectConfig.ConnectConfigBuilder builder = ConnectConfig.builder() + .uri(memoryConfig.getMilvus().getUri()); + String token = memoryConfig.getMilvus().getToken(); + if (token != null && !token.isBlank()) { + builder.token(token); + } + client = new MilvusClientV2(builder.build()); + available = true; + 
unavailableWarned.set(false); + log.info("Connected to Milvus at {}", memoryConfig.getMilvus().getUri()); + } catch (Exception e) { + available = false; + log.warn("Milvus unavailable at startup, memory features degraded: {}", e.getMessage()); + log.debug("Milvus init failure details", e); + } + } + + @PreDestroy + public void close() { + if (client != null) { + try { + client.close(); + } catch (Exception e) { + log.warn("Error closing Milvus client", e); + } + } + } + + public void ensureCollection(String collectionName) { + if (!isAvailable("ensureCollection")) { + return; + } + if (knownCollections.contains(collectionName)) { + return; + } + try { + boolean exists = client.hasCollection( + HasCollectionReq.builder().collectionName(collectionName).build()); + if (exists) { + knownCollections.add(collectionName); + return; + } + createCollection(collectionName); + knownCollections.add(collectionName); + } catch (Exception e) { + markUnavailable(); + log.error("Failed to ensure collection {}: {}", collectionName, e.getMessage(), e); + throw new RuntimeException("Milvus collection setup failed", e); + } + } + + private void createCollection(String collectionName) { + int dim = memoryConfig.getEmbedding().getDimension(); + + CreateCollectionReq.CollectionSchema schema = CreateCollectionReq.CollectionSchema.builder() + .enableDynamicField(true) + .build(); + + schema.addField(AddFieldReq.builder() + .fieldName("chunk_hash").dataType(DataType.VarChar).maxLength(64).isPrimaryKey(true).build()); + schema.addField(AddFieldReq.builder() + .fieldName("embedding").dataType(DataType.FloatVector).dimension(dim).build()); + schema.addField(AddFieldReq.builder() + .fieldName("content").dataType(DataType.VarChar).maxLength(65535) + .enableAnalyzer(true).build()); + schema.addField(AddFieldReq.builder() + .fieldName("sparse_vector").dataType(DataType.SparseFloatVector).build()); + schema.addField(AddFieldReq.builder() + 
.fieldName("source").dataType(DataType.VarChar).maxLength(1024).build()); + schema.addField(AddFieldReq.builder() + .fieldName("heading").dataType(DataType.VarChar).maxLength(1024).build()); + schema.addField(AddFieldReq.builder() + .fieldName("heading_level").dataType(DataType.Int64).build()); + schema.addField(AddFieldReq.builder() + .fieldName("start_line").dataType(DataType.Int64).build()); + schema.addField(AddFieldReq.builder() + .fieldName("end_line").dataType(DataType.Int64).build()); + schema.addField(AddFieldReq.builder() + .fieldName("file_type").dataType(DataType.VarChar).maxLength(32).build()); + schema.addField(AddFieldReq.builder() + .fieldName("indexed_at").dataType(DataType.Int64).build()); + + schema.addFunction(CreateCollectionReq.Function.builder() + .name("bm25_fn") + .functionType(FunctionType.BM25) + .inputFieldNames(Collections.singletonList("content")) + .outputFieldNames(Collections.singletonList("sparse_vector")) + .build()); + + List indexParams = new ArrayList<>(); + indexParams.add(IndexParam.builder() + .fieldName("embedding") + .indexType(IndexParam.IndexType.FLAT) + .metricType(IndexParam.MetricType.COSINE) + .build()); + indexParams.add(IndexParam.builder() + .fieldName("sparse_vector") + .indexType(IndexParam.IndexType.SPARSE_INVERTED_INDEX) + .metricType(IndexParam.MetricType.BM25) + .build()); + + client.createCollection(CreateCollectionReq.builder() + .collectionName(collectionName) + .collectionSchema(schema) + .indexParams(indexParams) + .build()); + + log.info("Created Milvus collection: {} (dim={})", collectionName, dim); + } + + public int upsert(String collectionName, List> records) { + if (records == null || records.isEmpty()) return 0; + if (!isAvailable("upsert")) return 0; + ensureCollection(collectionName); + if (!isAvailable("upsert")) return 0; + // Convert List to List for Milvus SDK + List jsonRecords = records.stream().map(record -> { + com.google.gson.JsonObject json = new com.google.gson.JsonObject(); + 
record.forEach((k, v) -> { + if (v instanceof String) json.addProperty(k, (String) v); + else if (v instanceof Number) json.addProperty(k, (Number) v); + else if (v instanceof Boolean) json.addProperty(k, (Boolean) v); + else if (v != null) json.addProperty(k, v.toString()); + }); + return json; + }).collect(java.util.stream.Collectors.toList()); + try { + UpsertResp resp = client.upsert(UpsertReq.builder() + .collectionName(collectionName) + .data(jsonRecords) + .build()); + return (int) resp.getUpsertCnt(); + } catch (Exception e) { + markUnavailable(); + log.warn("Milvus upsert degraded: collection={}, error={}", collectionName, e.getMessage()); + return 0; + } + } + + /** + * Hybrid search: dense cosine + BM25 sparse + RRF reranking. + */ + public List> search( + String collectionName, List queryEmbedding, String queryText, int topK) { + if (!isAvailable("search")) return Collections.emptyList(); + ensureCollection(collectionName); + if (!isAvailable("search")) return Collections.emptyList(); + + List searchRequests = new ArrayList<>(); + + searchRequests.add(AnnSearchReq.builder() + .vectorFieldName("embedding") + .vectors(Collections.singletonList(new FloatVec(queryEmbedding))) + .params("{\"metric_type\": \"COSINE\"}") + .topK(topK) + .build()); + + if (queryText != null && !queryText.isBlank()) { + searchRequests.add(AnnSearchReq.builder() + .vectorFieldName("sparse_vector") + .vectors(Collections.singletonList(new EmbeddedText(queryText))) + .topK(topK) + .build()); + } + + SearchResp resp; + try { + resp = client.hybridSearch(HybridSearchReq.builder() + .collectionName(collectionName) + .searchRequests(searchRequests) + .ranker(new RRFRanker(60)) + .topK(topK) + .outFields(QUERY_FIELDS) + .build()); + } catch (Exception e) { + markUnavailable(); + log.warn("Milvus search degraded: collection={}, error={}", collectionName, e.getMessage()); + return Collections.emptyList(); + } + + List> results = new ArrayList<>(); + if (resp.getSearchResults() != null && 
!resp.getSearchResults().isEmpty()) { + for (SearchResp.SearchResult hit : resp.getSearchResults().get(0)) { + Map row = new HashMap<>(hit.getEntity()); + row.put("score", hit.getScore()); + results.add(row); + } + } + return results; + } + + public List> query(String collectionName, String filterExpr) { + if (!isAvailable("query")) return Collections.emptyList(); + ensureCollection(collectionName); + if (!isAvailable("query")) return Collections.emptyList(); + String filter = (filterExpr != null && !filterExpr.isBlank()) ? filterExpr : "chunk_hash != \"\""; + QueryResp resp; + try { + resp = client.query(QueryReq.builder() + .collectionName(collectionName) + .filter(filter) + .outputFields(QUERY_FIELDS) + .build()); + } catch (Exception e) { + markUnavailable(); + log.warn("Milvus query degraded: collection={}, error={}", collectionName, e.getMessage()); + return Collections.emptyList(); + } + List> results = new ArrayList<>(); + if (resp.getQueryResults() != null) { + for (QueryResp.QueryResult r : resp.getQueryResults()) { + results.add(new HashMap<>(r.getEntity())); + } + } + return results; + } + + public Set hashesBySource(String collectionName, String source) { + if (!isAvailable("hashesBySource")) return Collections.emptySet(); + String filter = "source == \"" + source.replace("\"", "\\\"") + "\""; + QueryResp resp; + try { + resp = client.query(QueryReq.builder() + .collectionName(collectionName) + .filter(filter) + .outputFields(Collections.singletonList("chunk_hash")) + .build()); + } catch (Exception e) { + markUnavailable(); + log.warn("Milvus hashes query degraded: collection={}, error={}", collectionName, e.getMessage()); + return Collections.emptySet(); + } + Set hashes = new HashSet<>(); + if (resp.getQueryResults() != null) { + for (QueryResp.QueryResult r : resp.getQueryResults()) { + Object hash = r.getEntity().get("chunk_hash"); + if (hash != null) hashes.add(hash.toString()); + } + } + return hashes; + } + + public void deleteBySource(String 
collectionName, String source) { + if (!isAvailable("deleteBySource")) return; + String filter = "source == \"" + source.replace("\"", "\\\"") + "\""; + try { + client.delete(DeleteReq.builder() + .collectionName(collectionName) + .filter(filter) + .build()); + } catch (Exception e) { + markUnavailable(); + log.warn("Milvus deleteBySource degraded: collection={}, error={}", collectionName, e.getMessage()); + return; + } + log.info("Deleted chunks for source {} from {}", source, collectionName); + } + + public void deleteByHashes(String collectionName, List hashes) { + if (hashes == null || hashes.isEmpty()) return; + if (!isAvailable("deleteByHashes")) return; + try { + client.delete(DeleteReq.builder() + .collectionName(collectionName) + .ids(new ArrayList<>(hashes)) + .build()); + } catch (Exception e) { + markUnavailable(); + log.warn("Milvus deleteByHashes degraded: collection={}, error={}", collectionName, e.getMessage()); + } + } + + public long count(String collectionName) { + try { + if (!isAvailable("count")) return 0; + ensureCollection(collectionName); + if (!isAvailable("count")) return 0; + QueryResp resp = client.query(QueryReq.builder() + .collectionName(collectionName) + .filter("chunk_hash != \"\"") + .outputFields(Collections.singletonList("chunk_hash")) + .build()); + return resp.getQueryResults() != null ? resp.getQueryResults().size() : 0; + } catch (Exception e) { + markUnavailable(); + return 0; + } + } + + /** + * Query recent chunks ordered by indexed_at descending. 
+ */ + public List> recent(String collectionName, int limit) { + if (!isAvailable("recent")) return Collections.emptyList(); + ensureCollection(collectionName); + if (!isAvailable("recent")) return Collections.emptyList(); + QueryResp resp; + try { + resp = client.query(QueryReq.builder() + .collectionName(collectionName) + .filter("chunk_hash != \"\"") + .outputFields(QUERY_FIELDS) + .build()); + } catch (Exception e) { + markUnavailable(); + log.warn("Milvus recent degraded: collection={}, error={}", collectionName, e.getMessage()); + return Collections.emptyList(); + } + List> results = new ArrayList<>(); + if (resp.getQueryResults() != null) { + for (QueryResp.QueryResult r : resp.getQueryResults()) { + results.add(new HashMap<>(r.getEntity())); + } + } + results.sort((a, b) -> { + long ta = a.get("indexed_at") != null ? ((Number) a.get("indexed_at")).longValue() : 0; + long tb = b.get("indexed_at") != null ? ((Number) b.get("indexed_at")).longValue() : 0; + return Long.compare(tb, ta); + }); + return results.subList(0, Math.min(limit, results.size())); + } + + private boolean isAvailable(String operation) { + if (!memoryConfig.isEnabled()) { + return false; + } + if (client != null && available) { + return true; + } + if (unavailableWarned.compareAndSet(false, true)) { + log.warn( + "Milvus unavailable, memory operation degraded: op={}, uri={}", + operation, + memoryConfig.getMilvus().getUri() + ); + } + return false; + } + + private void markUnavailable() { + available = false; + unavailableWarned.set(false); + } +} diff --git a/back/src/main/java/com/linkwork/service/memory/TextChunkerService.java b/back/src/main/java/com/linkwork/service/memory/TextChunkerService.java new file mode 100644 index 0000000..712dd80 --- /dev/null +++ b/back/src/main/java/com/linkwork/service/memory/TextChunkerService.java @@ -0,0 +1,148 @@ +package com.linkwork.service.memory; + +import com.linkwork.config.MemoryConfig; +import lombok.RequiredArgsConstructor; +import 
lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Service; + +import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.HexFormat; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Markdown/text chunking by headings, with SHA-256 content dedup. + * Ported from memsearch chunker.py. + */ +@Slf4j +@org.springframework.boot.autoconfigure.condition.ConditionalOnProperty(name = "memory.enabled", havingValue = "true", matchIfMissing = true) +@Service +@RequiredArgsConstructor +public class TextChunkerService { + + private final MemoryConfig memoryConfig; + private static final Pattern HEADING_RE = Pattern.compile("^(#{1,6})\\s+(.+)$", Pattern.MULTILINE); + + public record Chunk( + String content, + String source, + String heading, + int headingLevel, + int startLine, + int endLine, + String contentHash + ) {} + + public List chunkMarkdown(String text, String source) { + int maxChunkSize = memoryConfig.getIndex().getMaxChunkSize(); + int overlapLines = memoryConfig.getIndex().getOverlapLines(); + return chunkMarkdown(text, source, maxChunkSize, overlapLines); + } + + public List chunkMarkdown(String text, String source, int maxChunkSize, int overlapLines) { + String[] lines = text.split("\n", -1); + + List headingPositions = new ArrayList<>(); // [lineIdx, level] + List headingTitles = new ArrayList<>(); + Matcher m = HEADING_RE.matcher(text); + int lineStart = 0; + for (int i = 0; i < lines.length; i++) { + Matcher lineMatcher = HEADING_RE.matcher(lines[i]); + if (lineMatcher.matches()) { + headingPositions.add(new int[]{i, lineMatcher.group(1).length()}); + headingTitles.add(lineMatcher.group(2).trim()); + } + } + + // Build sections between headings + List sections = new ArrayList<>(); // [start, end, headingIdx] + if (headingPositions.isEmpty() || headingPositions.get(0)[0] > 0) { + int 
sectionEnd = headingPositions.isEmpty() ? lines.length : headingPositions.get(0)[0]; + sections.add(new int[]{0, sectionEnd, -1}); // preamble + } + for (int idx = 0; idx < headingPositions.size(); idx++) { + int lineIdx = headingPositions.get(idx)[0]; + int nextStart = (idx + 1 < headingPositions.size()) + ? headingPositions.get(idx + 1)[0] + : lines.length; + sections.add(new int[]{lineIdx, nextStart, idx}); + } + + List chunks = new ArrayList<>(); + for (int[] sec : sections) { + int start = sec[0], end = sec[1], hIdx = sec[2]; + String heading = hIdx >= 0 ? headingTitles.get(hIdx) : ""; + int level = hIdx >= 0 ? headingPositions.get(hIdx)[1] : 0; + + String sectionText = joinLines(lines, start, end).strip(); + if (sectionText.isEmpty()) continue; + + if (sectionText.length() <= maxChunkSize) { + chunks.add(new Chunk(sectionText, source, heading, level, + start + 1, end, sha256Short(sectionText))); + } else { + chunks.addAll(splitLargeSection(lines, start, end, source, + heading, level, maxChunkSize, overlapLines)); + } + } + return chunks; + } + + /** + * Compute composite chunk ID matching memsearch/OpenClaw format. 
+ */ + public String computeChunkId(String source, int startLine, int endLine, String contentHash, String model) { + String raw = "markdown:" + source + ":" + startLine + ":" + endLine + ":" + contentHash + ":" + model; + return sha256Short(raw); + } + + private List splitLargeSection(String[] lines, int start, int end, String source, + String heading, int headingLevel, int maxSize, int overlap) { + List chunks = new ArrayList<>(); + List currentLines = new ArrayList<>(); + int currentStart = 0; + + for (int i = start; i < end; i++) { + currentLines.add(lines[i]); + String text = String.join("\n", currentLines); + boolean isParagraphBreak = lines[i].strip().isEmpty() && (i + 1 < end); + boolean isLastLine = (i == end - 1); + + if ((text.length() >= maxSize && isParagraphBreak) || isLastLine) { + String content = text.strip(); + if (!content.isEmpty()) { + chunks.add(new Chunk(content, source, heading, headingLevel, + start + currentStart + 1, start + i + 1, sha256Short(content))); + } + int overlapStart = Math.max(0, currentLines.size() - overlap); + currentLines = isLastLine ? 
new ArrayList<>() + : new ArrayList<>(currentLines.subList(overlapStart, currentLines.size())); + currentStart = i + 1 - currentLines.size() - start; + } + } + return chunks; + } + + private static String joinLines(String[] lines, int start, int end) { + StringBuilder sb = new StringBuilder(); + for (int i = start; i < end; i++) { + if (i > start) sb.append('\n'); + sb.append(lines[i]); + } + return sb.toString(); + } + + static String sha256Short(String input) { + try { + MessageDigest digest = MessageDigest.getInstance("SHA-256"); + byte[] hash = digest.digest(input.getBytes(StandardCharsets.UTF_8)); + return HexFormat.of().formatHex(hash).substring(0, 16); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException("SHA-256 not available", e); + } + } +} diff --git a/back/src/main/java/com/linkwork/websocket/TaskWebSocketHandler.java b/back/src/main/java/com/linkwork/websocket/TaskWebSocketHandler.java new file mode 100644 index 0000000..6d7eb4b --- /dev/null +++ b/back/src/main/java/com/linkwork/websocket/TaskWebSocketHandler.java @@ -0,0 +1,461 @@ +package com.linkwork.websocket; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.linkwork.config.DispatchConfig; +import com.linkwork.service.NfsStorageService; +import com.linkwork.service.TaskEventBroadcaster; +import com.linkwork.service.TaskOutputWorkspaceSyncService; +import com.linkwork.service.TaskPathlistSyncService; +import com.linkwork.service.TaskService; +import com.linkwork.service.TaskStatusSyncService; +import jakarta.annotation.PostConstruct; +import jakarta.annotation.PreDestroy; +import lombok.extern.slf4j.Slf4j; +import org.springframework.data.redis.connection.stream.MapRecord; +import org.springframework.data.redis.connection.stream.StreamOffset; +import org.springframework.data.redis.core.StringRedisTemplate; +import org.springframework.stereotype.Component; +import org.springframework.util.StringUtils; +import org.springframework.web.socket.CloseStatus; +import 
org.springframework.web.socket.TextMessage; +import org.springframework.web.socket.WebSocketSession; +import org.springframework.web.socket.handler.TextWebSocketHandler; + +import java.io.IOException; +import java.net.URI; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.regex.Pattern; + +/** + * 任务 WebSocket 事件推送。 + * + * 说明: + * - Redis Stream 的实时消费职责已下沉到 TaskEventConsumerService + * - 本 Handler 仅负责:会话绑定、历史回放、实时广播推送 + */ +@Slf4j +@Component +public class TaskWebSocketHandler extends TextWebSocketHandler { + private static final Pattern NUMERIC_ID_PATTERN = Pattern.compile("^\\d+$"); + + private final ObjectMapper objectMapper = new ObjectMapper(); + private final StringRedisTemplate redisTemplate; + private final TaskStatusSyncService taskStatusSyncService; + private final TaskService taskService; + private final DispatchConfig dispatchConfig; + private final NfsStorageService nfsStorageService; + private final TaskOutputWorkspaceSyncService taskOutputWorkspaceSyncService; + private final TaskPathlistSyncService taskPathlistSyncService; + private final TaskEventBroadcaster taskEventBroadcaster; + + private final Map sessions = new ConcurrentHashMap<>(); + private final Map sessionTaskMap = new ConcurrentHashMap<>(); + private final Map taskWorkstationCache = new ConcurrentHashMap<>(); + + private String broadcasterListenerId; + + public TaskWebSocketHandler(StringRedisTemplate redisTemplate, + TaskStatusSyncService taskStatusSyncService, + TaskService taskService, + DispatchConfig dispatchConfig, + NfsStorageService nfsStorageService, + TaskOutputWorkspaceSyncService taskOutputWorkspaceSyncService, + TaskPathlistSyncService taskPathlistSyncService, + TaskEventBroadcaster taskEventBroadcaster) { + this.redisTemplate = redisTemplate; 
+ this.taskStatusSyncService = taskStatusSyncService; + this.taskService = taskService; + this.dispatchConfig = dispatchConfig; + this.nfsStorageService = nfsStorageService; + this.taskOutputWorkspaceSyncService = taskOutputWorkspaceSyncService; + this.taskPathlistSyncService = taskPathlistSyncService; + this.taskEventBroadcaster = taskEventBroadcaster; + } + + @PostConstruct + public void registerBroadcaster() { + broadcasterListenerId = taskEventBroadcaster.register(this::broadcastToTask); + } + + @PreDestroy + public void unregisterBroadcaster() { + taskEventBroadcaster.unregister(broadcasterListenerId); + } + + @Override + public void afterConnectionEstablished(WebSocketSession session) { + sessions.put(session.getId(), session); + String taskId = extractTaskId(session); + log.info("WebSocket connected: {}, taskId: {}", session.getId(), taskId); + if (taskId != null) { + bindTask(session, taskId); + } + } + + private String extractTaskId(WebSocketSession session) { + URI uri = session.getUri(); + if (uri != null && uri.getQuery() != null) { + for (String param : uri.getQuery().split("&")) { + String[] kv = param.split("="); + if (kv.length == 2 && "taskId".equals(kv[0])) { + return kv[1]; + } + } + } + return null; + } + + @Override + protected void handleTextMessage(WebSocketSession session, TextMessage message) throws Exception { + Map request = objectMapper.readValue(message.getPayload(), Map.class); + String action = (String) request.get("action"); + String taskId = firstNonBlank(request, "taskId", "taskNo", "task_id"); + log.info("Received message: action={}, taskId={}", action, taskId); + if ("bind".equals(action) && taskId != null) { + bindTask(session, taskId); + } + } + + private void bindTask(WebSocketSession session, String taskId) { + String taskNo = normalizeTaskNo(taskId); + sessionTaskMap.put(session.getId(), taskNo); + if (!taskNo.equals(taskId)) { + log.info("Session {} bound task normalized: rawTaskId={}, taskNo={}", session.getId(), taskId, 
taskNo); + } else { + log.info("Session {} bound to task {}", session.getId(), taskNo); + } + pushHistoryEvents(session, taskNo); + } + + private List buildStreamKeys(String taskId) { + List keys = new ArrayList<>(); + Long roleId = resolveRoleId(taskId); + keys.add(dispatchConfig.getLogStreamKey(roleId, taskId)); + keys.add("stream:task:" + taskId); + keys.add("stream:task:" + taskId + ":events"); + keys.add("stream:build:" + taskId); + return keys; + } + + private Long resolveRoleId(String taskId) { + try { + return taskService.getTaskByNo(taskId).getRoleId(); + } catch (Exception e) { + log.debug("WebSocket resolve roleId failed, fallback null: taskId={}", taskId); + return null; + } + } + + private String normalizeTaskNo(String rawTaskId) { + if (!StringUtils.hasText(rawTaskId)) { + return rawTaskId; + } + String candidate = rawTaskId.trim(); + try { + return taskService.getTaskByNo(candidate).getTaskNo(); + } catch (Exception ignored) { + // ignore and continue resolving by numeric id + } + + if (NUMERIC_ID_PATTERN.matcher(candidate).matches()) { + try { + return taskService.getTask(Long.parseLong(candidate)).getTaskNo(); + } catch (Exception e) { + log.debug("WebSocket normalize taskId by numeric id failed: taskId={}, err={}", + candidate, e.getMessage()); + } + } + return candidate; + } + + private void pushHistoryEvents(WebSocketSession session, String taskId) { + try { + List streamKeys = buildStreamKeys(taskId); + Set sentIds = new HashSet<>(); + + for (String streamKey : streamKeys) { + try { + List> records = redisTemplate.opsForStream() + .read(StreamOffset.fromStart(streamKey)); + if (records == null || records.isEmpty()) { + continue; + } + log.info("Push {} history events for task {} from {}", records.size(), taskId, streamKey); + for (MapRecord record : records) { + String recordId = record.getId().getValue(); + if (sentIds.contains(recordId)) { + continue; + } + sentIds.add(recordId); + syncTaskStatus(taskId, record); + sendEvent(session, record); 
+ } + } catch (Exception e) { + log.debug("history stream read skipped: streamKey={}, err={}", streamKey, e.getMessage()); + } + } + } catch (Exception e) { + log.error("push history events failed: taskId={}", taskId, e); + } + } + + private void broadcastToTask(String taskId, MapRecord record) { + sessionTaskMap.forEach((sessionId, tid) -> { + if (!taskId.equals(tid)) { + return; + } + WebSocketSession session = sessions.get(sessionId); + if (session != null && session.isOpen()) { + sendEvent(session, record); + } + }); + } + + private void syncTaskStatus(String taskId, MapRecord record) { + try { + Map eventData = extractEventData(record); + taskStatusSyncService.onEvent(taskId, eventData); + } catch (Exception e) { + log.debug("sync task status failed: taskId={}, err={}", taskId, e.getMessage()); + } + } + + private void sendEvent(WebSocketSession session, MapRecord record) { + try { + Map event = extractEventData(record); + String taskNo = firstNonBlank(event, "task_no", "task_id"); + if (!StringUtils.hasText(taskNo)) { + taskNo = sessionTaskMap.get(session.getId()); + } + enrichOutputReadyEvent(event); + taskPathlistSyncService.enrichEventForDisplay(taskNo, event); + event.put("_id", record.getId().getValue()); + String jsonMessage = objectMapper.writeValueAsString(event); + log.debug("Sending WebSocket event: {}", jsonMessage); + session.sendMessage(new TextMessage(jsonMessage)); + } catch (IOException e) { + log.error("send websocket event failed", e); + } + } + + @SuppressWarnings("unchecked") + private Map extractEventData(MapRecord record) { + Map rawValues = record.getValue(); + + Object payloadObj = rawValues.get("payload"); + if (payloadObj instanceof String payloadStr && payloadStr.startsWith("{")) { + try { + Map parsed = objectMapper.readValue(payloadStr, Map.class); + Object innerData = parsed.get("data"); + if (innerData instanceof String dataStr && (dataStr.startsWith("{") || dataStr.startsWith("["))) { + try { + parsed.put("data", 
objectMapper.readValue(dataStr, Object.class)); + } catch (Exception ignored) { + } + } + Object data = parsed.get("data"); + if (data instanceof Map dataMap) { + dataMap.forEach((k, v) -> parsed.putIfAbsent(k.toString(), v)); + } + return parsed; + } catch (Exception e) { + log.warn("parse payload json failed, fallback flat event: {}", e.getMessage()); + } + } + + Map event = new HashMap<>(); + rawValues.forEach((k, v) -> { + String key = k.toString(); + Object value = v; + if ("data".equals(key) && v instanceof String strVal) { + if (strVal.startsWith("{") || strVal.startsWith("[")) { + try { + value = objectMapper.readValue(strVal, Object.class); + } catch (Exception ignored) { + } + } + } + event.put(key, value); + }); + + Object data = event.get("data"); + if (data instanceof Map dataMap) { + dataMap.forEach((k, v) -> event.putIfAbsent(k.toString(), v)); + } + return event; + } + + @SuppressWarnings("unchecked") + private void enrichOutputReadyEvent(Map eventData) { + try { + String eventType = (String) eventData.get("event_type"); + if (!"TASK_OUTPUT_READY".equals(eventType)) { + return; + } + + Object dataObj = eventData.get("data"); + if (!(dataObj instanceof Map)) { + return; + } + + Map data = (Map) dataObj; + String outputType = (String) data.get("output_type"); + if (!"oss".equals(outputType)) { + return; + } + + String ossPath = normalizeOssPath((String) data.get("oss_path")); + if (!StringUtils.hasText(ossPath)) { + return; + } + String taskNo = firstNonBlank(eventData, "task_no", "task_id"); + + taskOutputWorkspaceSyncService.syncTaskOutput(taskNo, data).ifPresent(context -> { + data.put("workspace_space_type", "WORKSTATION"); + data.put("workspace_workstation_id", context.workstationId()); + data.put("workspace_parent_node_id", context.parentNodeId()); + data.put("workspace_node_id", context.taskNodeId()); + }); + + List candidatePrefixes = buildOssCandidatePrefixes(eventData, ossPath); + String resolvedOssPath = 
choosePreferredOssPath(candidatePrefixes); + data.put("oss_path", ossPath); + data.put("oss_path_resolved", resolvedOssPath); + data.put("nfs_path", "/data/oss/robot/" + ossPath); + data.put("artifacts_pending", Boolean.TRUE); + + if (!nfsStorageService.isConfigured()) { + log.debug("TASK_OUTPUT_READY: NFS not configured, only keep path contract: {}", resolvedOssPath); + return; + } + + List objectNames = List.of(); + String matchedPrefix = null; + for (String prefix : candidatePrefixes) { + objectNames = nfsStorageService.listObjects(prefix); + if (!objectNames.isEmpty()) { + matchedPrefix = prefix; + break; + } + } + + if (objectNames.isEmpty()) { + log.info("TASK_OUTPUT_READY path contract emitted but directory empty: candidates={}, oss_path={}", + candidatePrefixes, ossPath); + return; + } + + if (matchedPrefix != null && !matchedPrefix.equals(data.get("oss_path_resolved"))) { + data.put("oss_path_resolved", matchedPrefix); + } + + List> artifacts = new ArrayList<>(); + for (String objectName : objectNames) { + String fileName = objectName; + int lastSlash = objectName.lastIndexOf('/'); + if (lastSlash >= 0 && lastSlash < objectName.length() - 1) { + fileName = objectName.substring(lastSlash + 1); + } + String downloadUrl = nfsStorageService.buildTaskOutputDownloadUrl(objectName); + Map artifact = new LinkedHashMap<>(); + artifact.put("name", fileName); + artifact.put("download_url", downloadUrl); + artifacts.add(artifact); + } + + if (!artifacts.isEmpty()) { + data.put("artifacts", artifacts); + data.put("artifacts_pending", Boolean.FALSE); + log.info("TASK_OUTPUT_READY enriched: oss_path={}, resolved={}, artifacts={}", + ossPath, data.get("oss_path_resolved"), artifacts.size()); + } + } catch (Exception e) { + log.warn("enrich TASK_OUTPUT_READY failed (non-blocking): {}", e.getMessage()); + } + } + + private List buildOssCandidatePrefixes(Map eventData, String rawOssPath) { + String ossPath = normalizeOssPath(rawOssPath); + LinkedHashSet candidates = new 
LinkedHashSet<>(); + candidates.add(ossPath); + + if (!ossPath.startsWith("system/")) { + String workstationId = resolveWorkstationId(eventData); + if (StringUtils.hasText(workstationId)) { + candidates.add("system/" + workstationId + "/" + ossPath); + } + } + return new ArrayList<>(candidates); + } + + private String normalizeOssPath(String rawOssPath) { + String normalized = rawOssPath == null ? "" : rawOssPath.trim(); + while (normalized.startsWith("/")) { + normalized = normalized.substring(1); + } + return normalized; + } + + private String choosePreferredOssPath(List candidates) { + for (String candidate : candidates) { + if (candidate.startsWith("system/")) { + return candidate; + } + } + return candidates.isEmpty() ? "" : candidates.get(0); + } + + private String resolveWorkstationId(Map eventData) { + String taskNo = firstNonBlank(eventData, "task_no", "task_id"); + if (!StringUtils.hasText(taskNo)) { + return null; + } + + String cached = taskWorkstationCache.get(taskNo); + if (StringUtils.hasText(cached)) { + return cached; + } + + try { + Long roleId = taskService.getTaskByNo(taskNo).getRoleId(); + if (roleId == null) { + return null; + } + String workstationId = String.valueOf(roleId); + taskWorkstationCache.put(taskNo, workstationId); + return workstationId; + } catch (Exception e) { + log.debug("resolve workstationId failed: taskNo={}, err={}", taskNo, e.getMessage()); + return null; + } + } + + private String firstNonBlank(Map source, String... 
keys) { + for (String key : keys) { + Object value = source.get(key); + if (value == null) { + continue; + } + String text = String.valueOf(value).trim(); + if (!text.isEmpty()) { + return text; + } + } + return null; + } + + @Override + public void afterConnectionClosed(WebSocketSession session, CloseStatus status) { + String taskId = sessionTaskMap.remove(session.getId()); + sessions.remove(session.getId()); + log.info("WebSocket closed: {}, taskId: {}", session.getId(), taskId); + } +} diff --git a/back/src/main/resources/application-gg.yml b/back/src/main/resources/application-gg.yml new file mode 100644 index 0000000..e429872 --- /dev/null +++ b/back/src/main/resources/application-gg.yml @@ -0,0 +1,19 @@ +schedule: + images: + agent: ${SCHEDULE_AGENT_IMAGE:} + runner: ${RUNNER_IMAGE:} + +image-build: + enabled: ${IMAGE_BUILD_ENABLED:true} + push-enabled: ${IMAGE_PUSH_ENABLED:false} + default-agent-base-image: ${DEFAULT_AGENT_BASE_IMAGE:} + registry: ${IMAGE_REGISTRY:} + registry-username: ${IMAGE_REGISTRY_USERNAME:} + registry-password: ${IMAGE_REGISTRY_PASSWORD:} + auto-load-to-kind: ${LINKWORK_BUILD_LOCAL_LOAD_ENABLED:true} + kind-cluster-name: ${LINKWORK_BUILD_KIND_CLUSTER_NAME:} + kind-load-timeout: ${KIND_LOAD_TIMEOUT:600} + local-cleanup-enabled: ${IMAGE_LOCAL_CLEANUP_ENABLED:true} + local-image-retention-hours: ${IMAGE_LOCAL_RETENTION_HOURS:24} + local-cleanup-cron: ${IMAGE_LOCAL_CLEANUP_CRON:0 40 * * * *} + kind-prune-enabled: ${IMAGE_KIND_PRUNE_ENABLED:true} diff --git a/back/src/main/resources/application.yml b/back/src/main/resources/application.yml new file mode 100644 index 0000000..1dc4b4d --- /dev/null +++ b/back/src/main/resources/application.yml @@ -0,0 +1,365 @@ +server: + port: 8081 + +spring: + application: + name: ${SPRING_APPLICATION_NAME:linkwork-backend-service} + config: + import: optional:file:.env[.properties],optional:file:../.env[.properties],optional:file:../../.env[.properties] + + # 文件上传配置 + servlet: + multipart: + 
max-file-size: 100MB + max-request-size: 100MB + + # MySQL 数据库配置 + datasource: + url: ${LINKWORK_DB_URL:jdbc:mysql://localhost:3306/linkwork?useSSL=false&allowPublicKeyRetrieval=true&serverTimezone=Asia/Shanghai&characterEncoding=utf8} + username: ${LINKWORK_DB_USERNAME:linkwork} + password: ${LINKWORK_DB_PASSWORD:${MYSQL_PASSWORD:}} + driver-class-name: com.mysql.cj.jdbc.Driver + hikari: + minimum-idle: 5 + maximum-pool-size: 20 + idle-timeout: 30000 + connection-timeout: 30000 + + # Redis 配置(优先读取环境变量,默认使用 redis 服务名) + data: + redis: + host: ${LINKWORK_REDIS_HOST:${REDIS_HOST:redis}} + port: ${LINKWORK_REDIS_PORT:6379} + password: ${LINKWORK_REDIS_PASSWORD:} + +# MyBatis Plus 配置 +mybatis-plus: + mapper-locations: classpath:/mapper/**/*.xml + configuration: + map-underscore-to-camel-case: true + log-impl: org.apache.ibatis.logging.stdout.StdOutImpl + global-config: + db-config: + id-type: assign_id + logic-delete-field: isDeleted + logic-delete-value: true + logic-not-delete-value: false + +logging: + level: + com.linkwork: DEBUG + org.springframework.web: INFO + com.baomidou.mybatisplus: DEBUG + +# 认证配置 +robot: + litellm: + base-url: ${LITELLM_BASE_URL:${ROBOT_LITELLM_BASE_URL:http://localhost:4000}} + api-key: ${LITELLM_API_KEY:${ROBOT_LITELLM_API_KEY:}} + default-chat-model: openrouter/anthropic/claude-sonnet-4.5 + auth: + # 访问密码(BCrypt 哈希) + password: ${AUTH_PASSWORD:${ROBOT_AUTH_PASSWORD:}} + # JWT 密钥(至少 32 字符) + jwt-secret: ${AUTH_JWT_SECRET:${ROBOT_AUTH_JWT_SECRET:}} + # JWT 过期时间(毫秒),默认 24 小时 + jwt-expiration: 86400000 + # 任务调度配置(与 worker 联调对齐) + dispatch: + workstation-id: ${DISPATCH_WORKSTATION_ID:test-post-001} + log-stream-prefix: logs + approval-key-prefix: approval + cron: + enabled: ${CRON_ENABLED:true} + scan-interval-ms: ${CRON_SCAN_INTERVAL_MS:60000} + dispatch-lead-ms: ${CRON_DISPATCH_LEAD_MS:180000} + max-jobs-per-user: ${CRON_MAX_JOBS_PER_USER:50} + max-jobs-per-role: ${CRON_MAX_JOBS_PER_ROLE:100} + max-runs-per-job: ${CRON_MAX_RUNS_PER_JOB:100} 
+ lock-key: ${CRON_LOCK_KEY:lock:cron:scanner} + lock-ttl-seconds: ${CRON_LOCK_TTL_SECONDS:55} + gitlab: + base-url: ${GITLAB_BASE_URL:https://gitlab.example.com} + auth-base-url: ${GITLAB_AUTH_BASE_URL:${robot.gitlab.base-url}} + client-id: ${GITLAB_CLIENT_ID:} + client-secret: ${GITLAB_CLIENT_SECRET:} + redirect-uri: ${GITLAB_REDIRECT_URI:http://localhost:3003/auth/callback/gitlab} + zzd: + # zzd 服务调用 /api/v1/tasks/{taskId}/git-token 时使用的服务身份令牌 + api-server-token: ${ZZD_API_SERVER_TOKEN:} + output-estimator: + llm: + enabled: ${TASK_OUTPUT_ESTIMATOR_ENABLED:true} + gateway-url: ${TASK_OUTPUT_ESTIMATOR_GATEWAY_URL:${robot.litellm.base-url}/v1/chat/completions} + model: ${TASK_OUTPUT_ESTIMATOR_MODEL:${robot.litellm.default-chat-model}} + max-tokens: ${TASK_OUTPUT_ESTIMATOR_MAX_TOKENS:256} + stream: ${TASK_OUTPUT_ESTIMATOR_STREAM:true} + connect-timeout-ms: ${TASK_OUTPUT_ESTIMATOR_CONNECT_TIMEOUT_MS:3000} + read-timeout-ms: ${TASK_OUTPUT_ESTIMATOR_READ_TIMEOUT_MS:12000} + # 固定 LiteLLM Virtual Key(sk- 开头) + auth-token: ${TASK_OUTPUT_ESTIMATOR_AUTH_TOKEN:${robot.litellm.api-key}} + x-litellm-api-key: ${TASK_OUTPUT_ESTIMATOR_X_LITELLM_API_KEY:${robot.litellm.api-key}} + model-registry: + gateway-url: ${MODEL_REGISTRY_GATEWAY_URL:${robot.litellm.base-url}/v1/models} + timeout-ms: ${MODEL_REGISTRY_TIMEOUT_MS:5000} + auth-token: ${MODEL_REGISTRY_AUTH_TOKEN:${robot.litellm.api-key}} + x-litellm-api-key: ${MODEL_REGISTRY_X_LITELLM_API_KEY:${robot.litellm.api-key}} + billing: + gateway-url-template: ${BILLING_GATEWAY_URL_TEMPLATE:http://localhost:8080/v1/billing/tasks/{taskId}} + timeout-ms: ${BILLING_TIMEOUT_MS:3000} + sync-retries: ${BILLING_SYNC_RETRIES:3} + retry-interval-ms: ${BILLING_RETRY_INTERVAL_MS:200} + task-share: + secret: ${TASK_SHARE_SECRET:${robot.auth.jwt-secret}} + base-url: ${TASK_SHARE_BASE_URL:http://localhost:3003} + default-expire-hours: ${TASK_SHARE_DEFAULT_EXPIRE_HOURS:24} + max-expire-hours: ${TASK_SHARE_MAX_EXPIRE_HOURS:168} + skills: + repo-url: 
${SKILLS_REPO_URL:} + deploy-token: ${SKILLS_DEPLOY_TOKEN:} + project-id: ${SKILLS_PROJECT_ID:100001744} + weekly-report: + repo-path: ${WEEKLY_REPORT_REPO_PATH:} + llm: + enabled: ${WEEKLY_REPORT_LLM_ENABLED:true} + gateway-url: ${WEEKLY_REPORT_LLM_GATEWAY_URL:${robot.litellm.base-url}/v1/chat/completions} + model: ${WEEKLY_REPORT_LLM_MODEL:${robot.litellm.default-chat-model}} + max-tokens: ${WEEKLY_REPORT_LLM_MAX_TOKENS:900} + stream: ${WEEKLY_REPORT_LLM_STREAM:true} + connect-timeout-ms: ${WEEKLY_REPORT_LLM_CONNECT_TIMEOUT_MS:3000} + read-timeout-ms: ${WEEKLY_REPORT_LLM_READ_TIMEOUT_MS:12000} + auth-token: ${WEEKLY_REPORT_LLM_AUTH_TOKEN:${robot.litellm.api-key}} + x-litellm-api-key: ${WEEKLY_REPORT_LLM_X_LITELLM_API_KEY:${robot.litellm.api-key}} + +# 调度服务配置 +schedule: + cluster: + namespace: linkwork-dev + scheduler-name: volcano + kubeconfig-path: /root/.kube/config + + queue: + prefix: ai-worker + + images: + registry: ${SCHEDULE_IMAGE_REGISTRY:} + # Agent 基础镜像(本地构建用,固定 digest 保证一致性) + agent: ${SCHEDULE_AGENT_IMAGE:} + # Runner 默认镜像(K8s 运行时使用内部仓库镜像) + runner: ${RUNNER_IMAGE:} + + # Agent 启动脚本配置(main.py 从链接下载,启动 agent+executor) + agent-bootstrap: + main-py-url: ${MAIN_PY_URL:} + + # 文件放置配置 + file-placement: + # token 文件(仅 executor 可访问) + token-path: /workspace/.credentials/token + token-dir-mode: "700" + token-file-mode: "600" + token-owner: executor:executor + # SSH 密钥(agent 和 executor 都可访问) + ssh-path: /workspace/.ssh + ssh-dir-mode: "755" + ssh-key-mode: "600" + ssh-pub-mode: "644" + + network: + api-base-url: ${API_BASE_URL:http://localhost:8080} + ws-base-url: ${WS_BASE_URL:ws://localhost:8080} + llm-gateway-url: ${LLM_GATEWAY_URL:http://localhost:4000} + # 默认与 spring.data.redis 同源,避免后端入队和 worker 消费落到不同 Redis + redis-url: ${REDIS_URL:redis://${LINKWORK_REDIS_HOST:${REDIS_HOST:redis}}:${LINKWORK_REDIS_PORT:6379}} + + ssh: + port: 22 + key-type: ed25519 + + default-resources: + agent: + cpu-request: "1" + cpu-limit: "2" + memory-request: 2Gi + memory-limit: 
4Gi + runner: + cpu-request: "1" + cpu-limit: "4" + memory-request: 2Gi + memory-limit: 8Gi + + # OSS 挂载配置(需配合节点级 ossfs 三路挂载) + # 节点级 ossfs 挂载: + # oss://agent-files/system/ → {host-path}/system + # oss://agent-files/user-files/ → {host-path}/user-files + # oss://agent-files/workstation/ → {host-path}/workstation + oss-mount: + enabled: ${OSS_MOUNT_ENABLED:true} + host-path: ${OSS_MOUNT_HOST_PATH:/mnt/oss/agent-files} # 三个 ossfs 挂载点的公共父目录 + mount-path: ${OSS_MOUNT_PATH:/data/oss/agent} # 容器内产出物挂载根 (system/{wsId}) + read-only: false # 产出物挂载是否只读 + user-files-mount-path: /mnt/user-files # 容器内个人空间挂载根 + workstation-mount-path: /mnt/workstation # 容器内岗位空间挂载根 + + pod-mode-rules: + default-mode: SIDECAR + overrides: + data_analysis: ALONE + ops: SIDECAR + development: SIDECAR + +# 镜像构建配置 +# 注意:仅构建 Agent 镜像,Runner 由运行时 agent 启动 +image-build: + # 是否启用镜像构建阶段(true: 构建新镜像, false: 直接使用基础镜像) + enabled: true + + # 是否推送镜像到仓库(true: 构建后推送, false: 只构建不推送) + push-enabled: true + + # 镜像拉取策略 + # - Always: 总是从仓库拉取 + # - IfNotPresent: 本地有则不拉取 + # - Never: 从不拉取,要求镜像必须已在节点上 + image-pull-policy: ${IMAGE_PULL_POLICY:IfNotPresent} + + # Docker 连接配置 + docker-host: ${DOCKER_HOST:unix:///var/run/docker.sock} + + # 默认 Agent 基础镜像(K8s 模式,固定 digest 保证一致性) + default-agent-base-image: ${DEFAULT_AGENT_BASE_IMAGE:} + + # Compose 模式基础镜像(用户本地构建使用) + compose-base-image: ${COMPOSE_BASE_IMAGE:} + + # 镜像仓库配置 + # K8s 模式下构建的镜像会推送到此仓库 + # 注意:直接配置,不依赖环境变量(避免环境变量为空时覆盖默认值) + registry: ${IMAGE_REGISTRY:} + registry-username: ${IMAGE_REGISTRY_USERNAME:} + registry-password: ${IMAGE_REGISTRY_PASSWORD:} + + # K8s 拉取私有镜像的 Secret 名称 + image-pull-secret: ${IMAGE_PULL_SECRET:linkwork-registry-secret} + + # 构建脚本路径(用户补充内容) + build-script-path: ${BUILD_SCRIPT_PATH:/opt/scripts/build.sh} + + # 入口点脚本 + entrypoint-script: /entrypoint.sh + + # 构建超时时间(秒)- build.sh 含 dnf 安装,首次构建需要较长时间 + build-timeout: ${BUILD_TIMEOUT:900} + + # 构建上下文临时目录 + build-context-dir: ${BUILD_CONTEXT_DIR:/tmp/docker-build} + + # 本地镜像同步(K8s + imageRegistry 
为空时,将镜像导入 Kind 节点) + auto-load-to-kind: ${LINKWORK_BUILD_LOCAL_LOAD_ENABLED:${IMAGE_AUTO_LOAD_TO_KIND:true}} + # Kind 集群名(为空则自动发现) + kind-cluster-name: ${LINKWORK_BUILD_KIND_CLUSTER_NAME:${KIND_CLUSTER_NAME:}} + # Kind 导入超时(秒) + kind-load-timeout: ${KIND_LOAD_TIMEOUT:600} + + # 定期清理 + local-cleanup-enabled: ${IMAGE_LOCAL_CLEANUP_ENABLED:true} + local-image-retention-hours: ${IMAGE_LOCAL_RETENTION_HOURS:24} + local-cleanup-cron: ${IMAGE_LOCAL_CLEANUP_CRON:0 40 * * * *} + kind-prune-enabled: ${IMAGE_KIND_PRUNE_ENABLED:true} + + # 构建资产路径(来自项目内置 build-assets) + sdk-source-path: ${SDK_SOURCE_PATH:/opt/linkwork-agent-build/sdk-source} + zzd-binaries-path: ${ZZD_BINARIES_PATH:/opt/linkwork-agent-build/zzd-binaries} + +# NFS 存储配置(替代原 OSS) +# 后续更换 NFS 服务器地址只需重新 mount + 修改 NFS_STORAGE_BASE_PATH 环境变量,无需改代码 +nfs: + storage: + base-path: ${NFS_STORAGE_BASE_PATH:/mnt/oss/agent-files} + download-base-url: /api/v1/files + task-output-base-url: /api/v1/task-outputs + +# Memory 服务配置 +memory: + enabled: ${MEMORY_ENABLED:true} + milvus: + uri: ${MILVUS_URI:http://localhost:19530} + token: ${MILVUS_TOKEN:} + embedding: + model: ${MEMORY_EMBEDDING_MODEL:text-embedding-3-small} + dimension: ${MEMORY_EMBEDDING_DIMENSION:1536} + index: + max-chunk-size: 1500 + overlap-lines: 2 + queue-key: memory:index:jobs + oss-mount-path: ${MEMORY_OSS_MOUNT_PATH:/data/oss} + +# 构建队列配置 +build-queue: + # 是否启用队列功能(false 时直接异步执行,兼容旧行为) + enabled: true + # CPU 使用率阈值 (0.0-1.0),低于此值才允许新构建 + cpu-threshold: ${BUILD_QUEUE_CPU_THRESHOLD:0.7} + # 内存使用率阈值 (0.0-1.0),低于此值才允许新构建 + memory-threshold: ${BUILD_QUEUE_MEMORY_THRESHOLD:0.7} + # 硬性并发上限(即使资源充足也不超过此值) + max-concurrent: ${BUILD_QUEUE_MAX_CONCURRENT:2} + # 队列最大容量 + max-queue-size: ${BUILD_QUEUE_MAX_SIZE:50} + # 构建超时时间(秒) + task-timeout: ${BUILD_QUEUE_TASK_TIMEOUT:1200} + # 资源检查间隔(毫秒) + check-interval: ${BUILD_QUEUE_CHECK_INTERVAL:1000} + # 残留文件清理阈值(小时) + stale-context-hours: ${BUILD_QUEUE_STALE_CONTEXT_HOURS:1} + +# LinkWork Starter 配置(统一 linkwork.agent.* 前缀) 
+linkwork: + agent: + storage: + enabled: ${LINKWORK_AGENT_STORAGE_ENABLED:true} + provider: ${LINKWORK_AGENT_STORAGE_PROVIDER:nfs} + nfs: + base-path: ${LINKWORK_AGENT_STORAGE_NFS_BASE_PATH:${nfs.storage.base-path:/mnt/oss/agent-files}} + mount-path: ${LINKWORK_AGENT_STORAGE_NFS_MOUNT_PATH:/workspace} + read-only: ${LINKWORK_AGENT_STORAGE_NFS_READ_ONLY:false} + uid: ${LINKWORK_AGENT_STORAGE_NFS_UID:1000} + gid: ${LINKWORK_AGENT_STORAGE_NFS_GID:1000} + skill: + enabled: ${LINKWORK_AGENT_SKILL_ENABLED:true} + provider: ${LINKWORK_AGENT_SKILL_PROVIDER:gitlab} + retry-times: ${LINKWORK_AGENT_SKILL_RETRY_TIMES:2} + retry-backoff-ms: ${LINKWORK_AGENT_SKILL_RETRY_BACKOFF_MS:200} + cache-ttl-ms: ${LINKWORK_AGENT_SKILL_CACHE_TTL_MS:10000} + gitlab: + url: ${LINKWORK_AGENT_SKILL_GITLAB_URL:${robot.gitlab.base-url:}} + token: ${LINKWORK_AGENT_SKILL_GITLAB_TOKEN:} + repo-url: ${LINKWORK_AGENT_SKILL_GITLAB_REPO_URL:${robot.skills.repo-url:}} + deploy-token: ${LINKWORK_AGENT_SKILL_GITLAB_DEPLOY_TOKEN:${robot.skills.deploy-token:}} + project-id: ${LINKWORK_AGENT_SKILL_GITLAB_PROJECT_ID:${robot.skills.project-id:}} + branch: ${LINKWORK_AGENT_SKILL_GITLAB_BRANCH:main} + root-path: ${LINKWORK_AGENT_SKILL_GITLAB_ROOT_PATH:skills} + mcp: + enabled: ${LINKWORK_AGENT_MCP_ENABLED:true} + mode: ${LINKWORK_AGENT_MCP_MODE:gateway} + gateway: + agent-base-url: ${LINKWORK_AGENT_MCP_GATEWAY_AGENT_BASE_URL:http://localhost:9080} + proxy-base-url: ${LINKWORK_AGENT_MCP_GATEWAY_PROXY_BASE_URL:http://localhost:38090} + client: + connect-timeout-ms: ${LINKWORK_AGENT_MCP_CONNECT_TIMEOUT_MS:5000} + read-timeout-ms: ${LINKWORK_AGENT_MCP_READ_TIMEOUT_MS:35000} + security: + encryption-key: ${LINKWORK_AGENT_MCP_ENCRYPTION_KEY:${robot.mcp.encryption-key:}} + sandbox: + provider: ${LINKWORK_AGENT_SANDBOX_PROVIDER:${LINKWORK_SANDBOX_PROVIDER:k8s-volcano}} + k8s: + namespace: ${LINKWORK_AGENT_SANDBOX_K8S_NAMESPACE:${K8S_NAMESPACE:linkwork-dev}} + scheduler-name: 
${LINKWORK_AGENT_SANDBOX_K8S_SCHEDULER_NAME:${schedule.cluster.scheduler-name:volcano}} + queue-name: ${LINKWORK_AGENT_SANDBOX_K8S_QUEUE_NAME:default} + kubeconfig-path: ${LINKWORK_AGENT_SANDBOX_K8S_KUBECONFIG_PATH:${KUBECONFIG_PATH:/root/.kube/config}} + default-agent-image: ${LINKWORK_AGENT_SANDBOX_K8S_DEFAULT_AGENT_IMAGE:${schedule.images.agent:}} + default-runner-image: ${LINKWORK_AGENT_SANDBOX_K8S_DEFAULT_RUNNER_IMAGE:${schedule.images.runner:}} + default-image-pull-policy: ${LINKWORK_AGENT_SANDBOX_K8S_IMAGE_PULL_POLICY:${image-build.image-pull-policy:IfNotPresent}} + registry: ${LINKWORK_AGENT_SANDBOX_K8S_REGISTRY:${image-build.registry:}} + registry-username: ${LINKWORK_AGENT_SANDBOX_K8S_REGISTRY_USERNAME:${image-build.registry-username:}} + registry-password: ${LINKWORK_AGENT_SANDBOX_K8S_REGISTRY_PASSWORD:${image-build.registry-password:}} diff --git a/back/src/main/resources/scripts/00-platform.cedar b/back/src/main/resources/scripts/00-platform.cedar new file mode 100644 index 0000000..fd82fbf --- /dev/null +++ b/back/src/main/resources/scripts/00-platform.cedar @@ -0,0 +1,104 @@ +// ============================================================ +// 00-platform.cedar — 平台级安全策略(最高优先级) +// ============================================================ +// Cedar 原生语义:forbid 覆盖 permit,无匹配 permit → 隐式拒绝 +// 评估模型:所有 .cedar 文件合并为单个 PolicySet,一次评估 +// ============================================================ + +// ---- FORBID: 高危操作(覆盖所有 permit)---- + +// 文件系统破坏(精确匹配根目录,不误拦工作区路径) +forbid ( + principal, + action == Action::"execute", + resource +) +when { + context.command like "*rm -rf /" || + context.command like "*rm -rf / *" || + context.command like "*rm -rf ~*" || + context.command like "*mkfs*" || + context.command like "*dd if=*of=/dev/*" || + context.command like "*chmod -R 777 /" || + context.command like "*chmod -R 777 / *" || + context.command like "*chown -R * /" || + context.command like "*chown -R * / *" +}; + +// [暂时禁用] 网络服务启动 / 反弹 shell +// forbid ( 
+// principal, +// action == Action::"execute", +// resource +// ) +// when { +// context.command like "*python* -m http.server*" || +// context.command like "*python* -m SimpleHTTPServer*" || +// context.command like "*nc -l*" || +// context.command like "*ncat *" || +// context.command like "*socat *" || +// context.command like "*/bin/bash -i*" || +// context.command like "*/bin/sh -i*" +// }; + +// [暂时禁用] 外发网络请求 +// forbid ( +// principal, +// action == Action::"execute", +// resource +// ) +// when { +// context.command like "curl *" || +// context.command like "wget *" +// }; + +// 提权命令(危险 sudo 模式 + su) +forbid ( + principal, + action == Action::"execute", + resource +) +when { + context.command like "sudo *su*" || + context.command like "sudo */bin/bash*" || + context.command like "sudo *-i*" || + context.command like "su *" || + context.command == "su" +}; + +// Docker / K8s 操作(需要额外岗位授权) +forbid ( + principal, + action == Action::"execute", + resource +) +when { + context.command_type == "docker" || + context.command_type == "k8s" +}; + +// ---- PERMIT: 允许的命令类型 ---- + +// Shell 命令(ls, cat, grep, echo, whoami, mkdir, cp, mv, etc.) +permit ( + principal, + action == Action::"execute", + resource +) +when { context.command_type == "shell" }; + +// Git 操作(git, gh, gitlab) +permit ( + principal, + action == Action::"execute", + resource +) +when { context.command_type == "git" }; + +// 包管理器(npm, pip, go, cargo, etc.) +permit ( + principal, + action == Action::"execute", + resource +) +when { context.command_type == "package" }; diff --git a/back/src/main/resources/scripts/build.sh b/back/src/main/resources/scripts/build.sh new file mode 100755 index 0000000..2fd962c --- /dev/null +++ b/back/src/main/resources/scripts/build.sh @@ -0,0 +1,1680 @@ +#!/bin/bash +# +# K8s Agent 镜像构建脚本 +# +# 功能:在镜像构建阶段执行,用于初始化工作环境 +# 1. 克隆 Git 项目到 /workspace +# 2. 从 Git 仓库部署 Skills 到 /opt/agent/skills +# 3. 校验 Skills 依赖 (Python/Node.js/Go) +# 4. 
部署 MCP 配置到 /opt/agent/mcp.json +# +# 构建时步骤: +# 1. 校验基础镜像内置依赖 (Python 3.12, Node.js, npm, git, Claude CLI, uv, uvx) +# 2. 创建基础目录 +# 3. 安装 zzd 二进制 (zzd, zz, gen-key, encrypt-key) +# 4. 安装 linkwork-agent-sdk (源码) +# 5. 创建 agent 用户 + passwordless sudo +# 6. 创建 workspace 目录结构 +# 6.1 部署 security.json/mcp.json/skills 到 /opt/agent/ (root:agent 只读) +# 7. 下载 Cedar 策略文件 (从 URL) +# 8. 创建 zzd 运行时目录 +# 9. 校验 git 可用性 +# 11. 克隆 Git 项目 (clone 后清除 token) +# 12. 下载 MCP 配置 (从 URL) +# 13. 下载 Skills (从 URL) +# 14. 校验 Skills 依赖 +# 15. 部署 start.sh 和 ai_employee.py 到 /opt/agent/ (root:root) +# 16. 最终权限设置 (/opt/agent/ config 0440 root:agent, /etc/zzd 0700 root:root) +# 17. 清理构建临时文件 +# +# 构建时环境变量(由构建系统通过 Dockerfile ARG 传入): +# GIT_TOKEN - Git 认证 Token +# GIT_REPOS - 待克隆的 Git 仓库列表 (JSON 数组格式) +# CEDAR_POLICIES_URL - Cedar 策略文件压缩包 URL +# MCP_CONFIG_URL - MCP 配置文件 URL (JSON) +# CONFIG_URL - [预留,未启用] Agent config.json URL(后续可支持构建期远端下载) +# SKILLS_URL - Skills 压缩包的 OSS 链接 +# 环境变量(由构建系统导出): +# GIT_TOKEN - Git 认证 Token +# GIT_REPOS - 待克隆的 Git 仓库列表 (JSON 数组格式) +# 示例: '[{"url":"https://git.example.com/repo1.git","branch":"main"}]' +# SKILLS_CONFIG - Skills Git 配置 JSON(优先级最高) +# 格式: {"repoUrl":"...","token":"...","skills":[{"name":"...","branch":"...","commit":"..."}]} +# SKILLS_URL - Skills 压缩包的 OSS 链接(SKILLS_CONFIG 为空时回退使用) +# MCP_CONFIG - MCP 配置 JSON 字符串(由后端从岗位 configJson.mcp 生成,优先级最高) +# MCP_CONFIG_URL - MCP 配置文件 URL(MCP_CONFIG 为空时回退使用) +# +# 构建输入目录(支持两种来源): +# 1) 固定目录(推荐):$BUILD_ASSETS_ROOT +# - zzd-binaries/ - zzd, zz, gen-key, encrypt-key +# - sdk-source/ - linkwork-agent-sdk 源码 (含 pyproject.toml + src/) +# - start-scripts/ - start-single.sh, start-dual.sh, ai_employee.py +# 2) 兼容旧路径(若固定目录不存在则回退):/tmp/* +# + +set -o pipefail + +# ============================================================================= +# 常量定义 +# ============================================================================= +readonly WORKSPACE_DIR="/workspace" +WORKSPACE_GROUP="${WORKSPACE_GROUP:-workspace}" 
+WORKSPACE_GID="${WORKSPACE_GID:-2000}" +readonly AGENT_CONFIG_DIR="/opt/agent" +readonly SKILLS_DIR="${AGENT_CONFIG_DIR}/skills" +readonly MCP_CONFIG_FILE="${AGENT_CONFIG_DIR}/mcp.json" +readonly SECURITY_FILE="${AGENT_CONFIG_DIR}/security.json" +readonly SKILLS_ARCHIVE="/tmp/skills.tar.gz" +readonly CEDAR_ARCHIVE="/tmp/cedar-policies.tar.gz" + +# zzd 相关路径 +readonly ZZD_CONFIG_DIR="/etc/zzd" +readonly ZZD_POLICY_DIR="${ZZD_CONFIG_DIR}/policies" +readonly ZZD_SOCKET_DIR="/var/run/zzd" +readonly ZZD_AUDIT_DIR="/var/log/zzd/audit" + +# 构建输入根目录(固定目录,可通过环境变量覆盖) +readonly BUILD_ASSETS_ROOT="${BUILD_ASSETS_ROOT:-/opt/linkwork-agent-build}" +readonly LEGACY_TMP_ROOT="/tmp" + +# 输入路径(优先固定目录,不存在时回退旧 /tmp 路径) +ZZD_BIN_SRC="${ZZD_BIN_SRC:-${BUILD_ASSETS_ROOT}/zzd-binaries}" +SDK_SRC="${SDK_SRC:-${BUILD_ASSETS_ROOT}/sdk-source}" +START_SCRIPTS_SRC="${START_SCRIPTS_SRC:-${BUILD_ASSETS_ROOT}/start-scripts}" + +if [[ ! -d "${ZZD_BIN_SRC}" && -d "${LEGACY_TMP_ROOT}/zzd-binaries" ]]; then + ZZD_BIN_SRC="${LEGACY_TMP_ROOT}/zzd-binaries" +fi +if [[ ! -d "${SDK_SRC}" && -d "${LEGACY_TMP_ROOT}/sdk-build" ]]; then + SDK_SRC="${LEGACY_TMP_ROOT}/sdk-build" +fi +if [[ ! -d "${SDK_SRC}" && -d "${LEGACY_TMP_ROOT}/linkwork-agent-sdk" ]]; then + SDK_SRC="${LEGACY_TMP_ROOT}/linkwork-agent-sdk" +fi +if [[ ! 
-d "${START_SCRIPTS_SRC}" && -d "${LEGACY_TMP_ROOT}/start-scripts" ]]; then + START_SCRIPTS_SRC="${LEGACY_TMP_ROOT}/start-scripts" +fi + +readonly ZZD_BIN_SRC SDK_SRC START_SCRIPTS_SRC + +# 颜色输出 +readonly RED='\033[0;31m' +readonly GREEN='\033[0;32m' +readonly YELLOW='\033[1;33m' +readonly BLUE='\033[0;34m' +readonly NC='\033[0m' # No Color + +# ============================================================================= +# 日志函数 +# ============================================================================= +log_info() { + echo -e "${BLUE}[INFO]${NC} $(date '+%Y-%m-%d %H:%M:%S') $*" +} + +log_success() { + echo -e "${GREEN}[SUCCESS]${NC} $(date '+%Y-%m-%d %H:%M:%S') $*" +} + +log_warn() { + echo -e "${YELLOW}[WARN]${NC} $(date '+%Y-%m-%d %H:%M:%S') $*" +} + +log_error() { + echo -e "${RED}[ERROR]${NC} $(date '+%Y-%m-%d %H:%M:%S') $*" >&2 +} + +# ============================================================================= +# 工具函数 +# ============================================================================= + +# URL 脱敏 — 剥离 query string、fragment 和 userinfo,仅保留 scheme://host/path +# 用途: 日志输出 URL 时防止泄露签名、token 等敏感参数 +redact_url() { + local url="$1" + # 去掉 query string (?...) 和 fragment (#...) + url="${url%%\?*}" + url="${url%%#*}" + # 去掉 userinfo (oauth2:TOKEN@ 等) + url=$(echo "${url}" | sed -E 's|(https?://)([^@]+@)|\1|') + echo "${url}" +} + +# 检查命令是否存在(静默模式) +command_exists() { + command -v "$1" &> /dev/null +} + +# 安装系统依赖(禁用:基础镜像已预装) +install_system_dependencies() { + log_error "检测到缺失依赖,但当前模式禁用在线安装。请在基础镜像中预装依赖后重试。" + return 1 +} + +# 检查 Claude Code CLI(禁用在线安装) +install_claude_cli() { + if command_exists claude; then + log_success "Claude Code CLI 已安装" + return 0 + fi + log_error "claude 命令缺失。当前模式禁用在线安装,请在基础镜像中预装 Claude CLI。" + return 1 +} + +# 检查基础镜像内置依赖(不安装) +check_prerequisites() { + log_info "检查基础镜像内置依赖(不执行在线安装)..." 
+ + # pi CLI 在部分基础镜像中未内置,当前构建流程不依赖其作为硬门槛。 + local required_cmds=("curl" "jq" "python3.12" "node" "npm" "git" "claude" "uv" "uvx") + local missing=() + + for cmd in "${required_cmds[@]}"; do + if ! command_exists "$cmd"; then + missing+=("$cmd") + fi + done + + if [[ ${#missing[@]} -gt 0 ]]; then + log_error "缺少必需命令: ${missing[*]}" + log_error "请确保基础镜像已预装: Python 3.12 / Node.js v24.13.0 / npm 11.6.2 / git 2.43.5 / Claude CLI / uv / uvx" + return 1 + fi + + if ! python3.12 -m pip --version >/dev/null 2>&1; then + log_error "python3.12 -m pip 不可用,请确保基础镜像内置 pip" + return 1 + fi + + log_info "依赖版本信息:" + log_info " $(python3.12 --version 2>&1)" + log_info " $(node --version 2>&1)" + log_info " npm $(npm --version 2>&1)" + log_info " $(git --version 2>&1)" + log_info " $(claude --version 2>&1 | head -1)" + if command_exists pi; then + log_info " $(pi --version 2>&1 | head -1)" + else + log_warn "pi 命令未安装,按可选依赖处理" + fi + log_info " $(uv --version 2>&1)" + log_info " $(uvx --version 2>&1 | head -1)" + + # 版本提示(不强制中断) + local py_ver node_ver npm_ver git_ver + py_ver=$(python3.12 --version 2>&1 | awk '{print $2}') + node_ver=$(node --version 2>&1) + npm_ver=$(npm --version 2>&1) + git_ver=$(git --version 2>&1 | awk '{print $3}') + + [[ "${py_ver}" == 3.12* ]] || log_warn "Python 版本不是 3.12.x: ${py_ver}" + [[ "${node_ver}" == "v24.13.0" ]] || log_warn "Node.js 版本不是 v24.13.0: ${node_ver}" + [[ "${npm_ver}" == "11.6.2" ]] || log_warn "npm 版本不是 11.6.2: ${npm_ver}" + [[ "${git_ver}" == "2.43.5" ]] || log_warn "git 版本不是 2.43.5: ${git_ver}" + + # ★ 修复 python3 符号链接:确保 /usr/bin/python3 → python3.12 + # Rocky Linux alternatives 可能残留 python3 → python3.11(不存在), + # start-dual.sh 用 "sudo -u agent python3 ..." 需要 python3 可用 + if ! python3 --version &>/dev/null || ! python3 -c "import sys" &>/dev/null; then + log_warn "python3 不可用或指向错误目标,修复符号链接..." 
+ alternatives --remove-all python3 2>/dev/null || true + rm -f /etc/alternatives/python3 /usr/bin/python3 /usr/bin/python + ln -s /usr/bin/python3.12 /usr/bin/python3 + ln -s /usr/bin/python3.12 /usr/bin/python + log_info "python3 -> python3.12 ($(python3 --version 2>&1))" + elif [[ "$(readlink -f /usr/bin/python3 2>/dev/null)" != "/usr/bin/python3.12" ]]; then + log_warn "python3 未指向 python3.12,修复..." + alternatives --remove-all python3 2>/dev/null || true + rm -f /etc/alternatives/python3 /usr/bin/python3 /usr/bin/python + ln -s /usr/bin/python3.12 /usr/bin/python3 + ln -s /usr/bin/python3.12 /usr/bin/python + log_info "python3 -> python3.12 ($(python3 --version 2>&1))" + else + log_success "python3 -> $(readlink -f /usr/bin/python3) (OK)" + fi + + log_success "依赖检查完成" + return 0 +} + +# 创建必要目录 +create_directories() { + log_info "创建工作目录..." + + mkdir -p "${WORKSPACE_DIR}" || { + log_error "创建 ${WORKSPACE_DIR} 失败" + return 1 + } + + mkdir -p "${AGENT_CONFIG_DIR}" || { + log_error "创建 ${AGENT_CONFIG_DIR} 失败" + return 1 + } + + mkdir -p "${SKILLS_DIR}" || { + log_error "创建 ${SKILLS_DIR} 失败" + return 1 + } + + # 确保 MCP 配置文件父目录存在 + mkdir -p "$(dirname "${MCP_CONFIG_FILE}")" || { + log_error "创建 $(dirname "${MCP_CONFIG_FILE}") 失败" + return 1 + } + + log_success "目录结构已创建" + return 0 +} + +# ============================================================================= +# Phase 2: zzd / SDK / 用户 +# ============================================================================= + +# 安装 zzd 二进制文件 +install_zzd_binaries() { + log_info "安装 zzd 二进制文件..." + + local binaries=("zzd" "zz" "gen-key" "encrypt-key") + + if [[ ! -d "${ZZD_BIN_SRC}" ]]; then + local missing=() + for bin in "${binaries[@]}"; do + if ! 
command_exists "${bin}"; then + missing+=("${bin}") + fi + done + if [[ ${#missing[@]} -eq 0 ]]; then + log_warn "未找到 ${ZZD_BIN_SRC},但基础镜像已内置 zzd 二进制,跳过复制" + return 0 + fi + + log_error "zzd 二进制目录不存在: ${ZZD_BIN_SRC}" + log_error "请将 zzd/zz/gen-key/encrypt-key 放置到固定目录,或通过 ZZD_BIN_SRC 指定路径" + return 1 + fi + + for bin in "${binaries[@]}"; do + if [[ ! -f "${ZZD_BIN_SRC}/${bin}" ]]; then + log_error "二进制文件不存在: ${ZZD_BIN_SRC}/${bin}" + return 1 + fi + done + + for bin in "${binaries[@]}"; do + cp "${ZZD_BIN_SRC}/${bin}" "/usr/local/bin/${bin}" + chmod +x "/usr/local/bin/${bin}" + # 创建 /usr/bin/ 符号链接,确保 agent 用户 PATH 可达 + ln -sf "/usr/local/bin/${bin}" "/usr/bin/${bin}" + done + + log_success "zzd 二进制安装完成 (${#binaries[@]}/${#binaries[@]})" + return 0 +} + +# 安装 linkwork-agent-sdk +install_sdk() { + log_info "安装 linkwork-agent-sdk..." + + # 固定使用 Python 3.12(基础镜像内置) + local python_cmd="python3.12" + if ! command_exists "${python_cmd}"; then + log_error "未找到 ${python_cmd}" + return 1 + fi + + if [[ ! -d "${SDK_SRC}" ]]; then + if ${python_cmd} -c "import linkwork_agent_sdk" >/dev/null 2>&1; then + log_warn "未找到 SDK 源码目录,检测到基础镜像已安装 linkwork_agent_sdk,跳过安装" + return 0 + fi + + log_error "SDK 源码目录不存在: ${SDK_SRC}" + log_error "请将 linkwork-agent-sdk 源码放置到固定目录,或通过 SDK_SRC 指定路径" + return 1 + fi + + if [[ ! -f "${SDK_SRC}/pyproject.toml" ]]; then + if ${python_cmd} -c "import linkwork_agent_sdk" >/dev/null 2>&1; then + log_warn "${SDK_SRC}/pyproject.toml 不存在,但基础镜像已安装 linkwork_agent_sdk,跳过安装" + return 0 + fi + + log_error "${SDK_SRC}/pyproject.toml 不存在" + return 1 + fi + + log_info "使用 ${python_cmd} 安装 SDK..." 
+ # --no-deps: 运行时依赖已在基础镜像中预装,不再下载 + # --no-build-isolation: 使用系统已装的 setuptools/wheel 构建,不联网 + ${python_cmd} -m pip install --no-cache-dir --no-deps --no-build-isolation "${SDK_SRC}" || { + log_error "SDK 安装失败" + return 1 + } + + log_success "linkwork-agent-sdk 安装完成" + return 0 +} + +# 修补 linkwork_agent_sdk 运行时模型选择: +# 当 config.json/env 提供 ANTHROPIC_MODEL 时,优先使用该完整模型名, +# 避免 claude-sdk 将 "sonnet" 展开为不被 LiteLLM 识别的别名。 +patch_sdk_runtime_model_override() { + local python_cmd="python3.12" + + if ! command_exists "${python_cmd}"; then + log_warn "未找到 ${python_cmd},跳过 SDK runtime model 补丁" + return 0 + fi + + local engine_file + engine_file="$(${python_cmd} - <<'PY' +import importlib +try: + m = importlib.import_module("linkwork_agent_sdk.engine.agent_engine") + print(getattr(m, "__file__", "")) +except Exception: + print("") +PY +)" + + if [[ -z "${engine_file}" || ! -f "${engine_file}" ]]; then + log_warn "未定位到 linkwork_agent_sdk.engine.agent_engine,跳过补丁" + return 0 + fi + + if grep -q 'resolved_env.get("ANTHROPIC_MODEL")' "${engine_file}"; then + log_info "SDK runtime model 补丁已存在,跳过" + return 0 + fi + + if ! grep -q '"model": self._config.claude_settings.model,' "${engine_file}"; then + log_warn "未匹配到预期模型配置行,跳过 SDK runtime model 补丁: ${engine_file}" + return 0 + fi + + sed -i \ + 's/"model": self._config.claude_settings.model,/"model": (resolved_env.get("ANTHROPIC_MODEL") if resolved_env and resolved_env.get("ANTHROPIC_MODEL") else self._config.claude_settings.model),/' \ + "${engine_file}" || { + log_error "应用 SDK runtime model 补丁失败: ${engine_file}" + return 1 + } + + log_success "SDK runtime model 补丁已应用: ${engine_file}" + return 0 +} + +# 创建 agent 用户 + 配置 sudoers +setup_agent_user() { + log_info "创建 agent 用户..." 
+ + # 创建用户(如果不存在) + if id agent &>/dev/null; then + log_info "agent 用户已存在,跳过创建" + else + groupadd -g 1001 agent || { + log_error "创建 agent 用户组失败" + return 1 + } + + useradd -u 1001 -g agent -m -s /bin/bash agent || { + log_error "创建 agent 用户失败" + return 1 + } + + log_success "agent 用户创建完成 (uid=1001)" + fi + + # 创建 monitor 用户(专用于运行 zzd 守护进程,降低 zzd 运行权限) + if id monitor &>/dev/null; then + log_info "monitor 用户已存在,跳过创建" + else + groupadd -g 1002 monitor || { + log_error "创建 monitor 用户组失败" + return 1 + } + + useradd -u 1002 -g monitor -M -r -s /sbin/nologin monitor || { + log_error "创建 monitor 用户失败" + return 1 + } + + log_success "monitor 用户创建完成 (uid=1002, nologin)" + fi + + # 创建/复用 workspace 协作组(用于 agent/runner 跨容器共享写入) + if getent group "${WORKSPACE_GID}" >/dev/null 2>&1; then + local gid_group + gid_group=$(getent group "${WORKSPACE_GID}" | cut -d: -f1) + if [[ -n "${gid_group}" && "${gid_group}" != "${WORKSPACE_GROUP}" ]]; then + log_warn "WORKSPACE_GID=${WORKSPACE_GID} 已被组 ${gid_group} 占用,复用该组" + WORKSPACE_GROUP="${gid_group}" + fi + elif getent group "${WORKSPACE_GROUP}" >/dev/null 2>&1; then + local group_gid + group_gid=$(getent group "${WORKSPACE_GROUP}" | cut -d: -f3) + if [[ -n "${group_gid}" && "${group_gid}" != "${WORKSPACE_GID}" ]]; then + log_warn "组 ${WORKSPACE_GROUP} 已存在且 gid=${group_gid},将使用该 gid" + WORKSPACE_GID="${group_gid}" + fi + else + groupadd -g "${WORKSPACE_GID}" "${WORKSPACE_GROUP}" || { + log_error "创建 workspace 协作组失败 (${WORKSPACE_GROUP}:${WORKSPACE_GID})" + return 1 + } + fi + usermod -aG "${WORKSPACE_GROUP}" agent || { + log_error "将 agent 加入 workspace 协作组失败 (${WORKSPACE_GROUP})" + return 1 + } + log_info " -> workspace 协作组: ${WORKSPACE_GROUP}(gid=${WORKSPACE_GID}), agent 已加入" + + # zzd sudoers: 只允许 monitor(zzd) 以 agent 身份执行命令 + # 参考: docs/zzd/zzd.md §安全机制 — agent 用户无 sudo 权限, zzd 以 monitor 身份运行 + rm -f /etc/sudoers.d/agent 2>/dev/null || true # 清除可能存在的错误配置 + echo 'monitor ALL=(agent) NOPASSWD: ALL' > /etc/sudoers.d/zzd + chmod 0440 
/etc/sudoers.d/zzd + chown root:root /etc/sudoers.d/zzd + log_info " -> sudoers 配置已更新 (/etc/sudoers.d/zzd: root -> agent)" + + # 写入 Claude CLI 配置,跳过 onboarding 认证流程 + local agent_home="/home/agent" + mkdir -p "${agent_home}/.claude" + echo '{"hasCompletedOnboarding": true}' > "${agent_home}/.claude.json" + chown agent:agent "${agent_home}/.claude.json" + chown -R agent:agent "${agent_home}/.claude" + log_info " -> 写入 .claude.json (hasCompletedOnboarding: true)" + + return 0 +} + +# ============================================================================= +# Phase 3: workspace + zzd 配置 +# ============================================================================= + +# 创建 workspace 目录结构 +setup_workspace() { + log_info "创建 workspace 目录结构..." + + mkdir -p "${WORKSPACE_DIR}/logs" \ + "${WORKSPACE_DIR}/user" \ + "${WORKSPACE_DIR}/workstation" \ + "${WORKSPACE_DIR}/task-logs" \ + "${WORKSPACE_DIR}/worker-logs" || { + log_error "workspace 目录创建失败" + return 1 + } + + log_success "workspace 目录结构创建完成" + return 0 +} + +# 部署 agent 配置文件到 /opt/agent/ (root:agent 只读) +setup_agent_config() { + log_info "部署 agent 配置到 ${AGENT_CONFIG_DIR}..." + + mkdir -p "${SKILLS_DIR}/default" || { + log_error "${SKILLS_DIR}/default 目录创建失败" + return 1 + } + + # 默认 security.json (SDK 需要此文件存在) + if [[ ! -f "${SECURITY_FILE}" ]]; then + echo '{"rules": []}' > "${SECURITY_FILE}" + log_info " -> 创建默认 security.json" + fi + + # 默认 mcp.json (SDK 需要 mcpServers 字段) + if [[ ! -f "${MCP_CONFIG_FILE}" ]]; then + echo '{"mcpServers": {}}' > "${MCP_CONFIG_FILE}" + log_info " -> 创建默认 mcp.json" + fi + + # 占位 SKILL.md (SDK 需要至少一个有效 skill,含 YAML frontmatter) + local default_skill="${SKILLS_DIR}/default/SKILL.md" + if [[ ! -f "${default_skill}" ]]; then + printf '%s\n' '---' 'name: default' 'description: Default placeholder skill' '---' '' '# Default Skill' '' 'Placeholder skill.' 
> "${default_skill}" + log_info " -> 创建占位 SKILL.md" + fi + + log_success "agent 配置部署完成" + return 0 +} + +# 下载 Cedar 策略文件(从 URL 下载,回退到 /tmp/cedar-policies/ COPY 方式) +download_cedar_policies() { + log_info "下载/部署 Cedar 策略文件..." + + mkdir -p "${ZZD_POLICY_DIR}" + + local policy_tmp="/tmp/cedar-policies-download" + + if [[ -n "${CEDAR_POLICIES_URL}" ]]; then + # 方式 1: 从 URL 下载 + log_info "从 $(redact_url "${CEDAR_POLICIES_URL}") 下载 Cedar 策略..." + + if ! curl -fsSL -o "${CEDAR_ARCHIVE}" "${CEDAR_POLICIES_URL}"; then + log_error "Cedar 策略下载失败" + return 1 + fi + + mkdir -p "${policy_tmp}" + + # 解压(支持 tar.gz 和 zip) + if file "${CEDAR_ARCHIVE}" | grep -q "gzip"; then + tar -xzf "${CEDAR_ARCHIVE}" -C "${policy_tmp}" --strip-components=0 || { + log_error "Cedar 策略解压失败 (tar.gz)" + return 1 + } + elif file "${CEDAR_ARCHIVE}" | grep -q "Zip"; then + unzip -o "${CEDAR_ARCHIVE}" -d "${policy_tmp}" || { + log_error "Cedar 策略解压失败 (zip)" + return 1 + } + else + tar -xzf "${CEDAR_ARCHIVE}" -C "${policy_tmp}" --strip-components=0 || { + log_error "Cedar 策略解压失败 (未知格式)" + return 1 + } + fi + + rm -f "${CEDAR_ARCHIVE}" + + # 部署 .cedar 文件 + local count=0 + for f in "${policy_tmp}"/*.cedar; do + [[ -f "$f" ]] || continue + cp "$f" "${ZZD_POLICY_DIR}/" + ((count++)) + done + + rm -rf "${policy_tmp}" + + if [[ ${count} -eq 0 ]]; then + log_warn "下载的压缩包中无 .cedar 文件" + return 0 + fi + + elif [[ -d "/tmp/cedar-policies" ]]; then + # 方式 2: 回退到 Dockerfile COPY 的文件 + log_info "使用 /tmp/cedar-policies/ 中的策略文件 (COPY 方式)..." 
+ + local count=0 + for f in /tmp/cedar-policies/*.cedar; do + [[ -f "$f" ]] || continue + cp "$f" "${ZZD_POLICY_DIR}/" + ((count++)) + done + + if [[ ${count} -eq 0 ]]; then + log_warn "/tmp/cedar-policies/ 中无 .cedar 文件" + return 0 + fi + else + log_warn "CEDAR_POLICIES_URL 为空且 /tmp/cedar-policies/ 不存在,跳过 Cedar 策略部署" + return 0 + fi + + # 清理 macOS 资源 fork 文件 + find "${ZZD_POLICY_DIR}" -name '._*' -delete 2>/dev/null || true + + # 策略文件 root only, agent 不可读 + if ls "${ZZD_POLICY_DIR}"/*.cedar &>/dev/null; then + chmod 0400 "${ZZD_POLICY_DIR}"/*.cedar + chown root:root "${ZZD_POLICY_DIR}"/*.cedar + local final_count + final_count=$(ls -1 "${ZZD_POLICY_DIR}"/*.cedar 2>/dev/null | wc -l) + log_success "Cedar 策略部署完成 (${final_count} 个文件, 0400 root:root)" + fi + + return 0 +} + +# 创建 zzd 运行时目录 +setup_zzd_directories() { + log_info "创建 zzd 运行时目录..." + + mkdir -p "${ZZD_CONFIG_DIR}" \ + "${ZZD_POLICY_DIR}" \ + "${ZZD_SOCKET_DIR}" \ + "${ZZD_AUDIT_DIR}" || { + log_error "zzd 目录创建失败" + return 1 + } + + # /etc/zzd 及子目录 — root:monitor, monitor(zzd) 可读, agent 不可读不可改 + chmod 0750 "${ZZD_CONFIG_DIR}" + chmod 0750 "${ZZD_POLICY_DIR}" + chown -R root:monitor "${ZZD_CONFIG_DIR}" + + # socket 和 audit 目录归 monitor 用户(zzd 进程以 monitor 身份写入) + chown monitor:monitor "${ZZD_SOCKET_DIR}" + chmod 0755 "${ZZD_SOCKET_DIR}" + chown monitor:monitor "${ZZD_AUDIT_DIR}" + chmod 0700 "${ZZD_AUDIT_DIR}" + + log_success "zzd 运行时目录创建完成 (config: 0750 root:monitor, socket/audit: monitor:monitor)" + return 0 +} + +# ============================================================================= +# Phase 4: Git +# ============================================================================= + +# 校验 git(基础镜像应已预装) +install_git() { + log_info "校验 git..." + + if ! command_exists git; then + log_error "git 未安装。当前模式禁用在线安装,请在基础镜像中预装 git 2.43.5" + return 1 + fi + + log_success "git 已安装: $(git --version)" + return 0 +} + +# 克隆 Git 项目 +clone_git_repos() { + log_info "开始克隆 Git 仓库..." 
+ + if [[ -z "${GIT_REPOS}" ]]; then + log_warn "GIT_REPOS 环境变量为空,跳过 Git 克隆" + return 0 + fi + + if [[ -z "${GIT_TOKEN}" ]]; then + log_warn "GIT_TOKEN 环境变量为空,将使用无认证方式克隆" + fi + + # 解析 JSON 数组 + local repo_count + repo_count=$(echo "${GIT_REPOS}" | jq -r 'length') + + if [[ "${repo_count}" == "null" ]] || [[ "${repo_count}" -eq 0 ]]; then + log_warn "GIT_REPOS 为空或格式错误,跳过 Git 克隆" + return 0 + fi + + log_info "共有 ${repo_count} 个仓库需要克隆" + + local success_count=0 + local fail_count=0 + + for ((i=0; i/dev/null 2>&1; then + # 安全: 清除 .git/config 中的 token URL,防止 agent 读取 + if [[ -n "${GIT_TOKEN}" ]]; then + git -C "${target_dir}" remote set-url origin "${repo_url}" 2>/dev/null || true + fi + log_success "仓库 ${repo_name} 克隆成功" + ((success_count++)) + else + log_error "仓库 ${repo_name} 克隆失败 (url=$(redact_url "${repo_url}"))" + ((fail_count++)) + fi + done + + log_info "Git 克隆完成: 成功 ${success_count},失败 ${fail_count}" + + if [[ ${fail_count} -gt 0 ]]; then + return 1 + fi + + return 0 +} + +# ============================================================================= +# Phase 5: MCP + Skills +# ============================================================================= +# wangfenghe 20260226 该代码未知定义 暂时注释 +# 下载 MCP 配置(从 URL 下载) +#download_mcp_config() { +# log_info "下载 MCP 配置..." +# +# if [[ -z "${MCP_CONFIG_URL}" ]]; then +# log_warn "MCP_CONFIG_URL 为空,使用默认 MCP 配置" +# return 0 +# fi +# +# log_info "从 $(redact_url "${MCP_CONFIG_URL}") 下载 MCP 配置..." +# +# local tmp_mcp="/tmp/mcp_download.json" +# +# if ! curl -fsSL -o "${tmp_mcp}" "${MCP_CONFIG_URL}"; then +# log_error "MCP 配置下载失败" +# return 1 +# fi +# +# # 验证 JSON 格式 +# if ! jq empty "${tmp_mcp}" 2>/dev/null; then +# log_error "下载的 MCP 配置不是有效的 JSON 格式" +# rm -f "${tmp_mcp}" +# return 1 +# fi +# +# # 写入配置文件(格式化) +# jq '.' 
"${tmp_mcp}" > "${MCP_CONFIG_FILE}" || { +# log_error "写入 MCP 配置文件失败" +# rm -f "${tmp_mcp}" +# return 1 +# } +# +# rm -f "${tmp_mcp}" +# +# # 设置权限 +# chmod 600 "${MCP_CONFIG_FILE}" +# +# log_success "MCP 配置已部署到 ${MCP_CONFIG_FILE}" +# return 0 +#} + +# 下载并部署 Skills +download_skills() { + log_info "开始下载 Skills..." + + if [[ -z "${SKILLS_URL}" ]]; then + log_warn "SKILLS_URL 环境变量为空,跳过 Skills 下载" + return 0 + fi + + log_info "从 $(redact_url "${SKILLS_URL}") 下载 Skills..." + + if ! curl -fsSL -o "${SKILLS_ARCHIVE}" "${SKILLS_URL}"; then + log_error "Skills 下载失败" + return 1 + fi + + log_success "Skills 下载完成" + + # 解压到 skills 目录 + log_info "解压 Skills 到 ${SKILLS_DIR}..." + + # 清空目标目录(保留 default skill) + find "${SKILLS_DIR}" -mindepth 1 -maxdepth 1 -type d ! -name default -exec rm -rf {} \; 2>/dev/null || true + + # 解压(支持 tar.gz 和 zip 格式) + if file "${SKILLS_ARCHIVE}" | grep -q "gzip"; then + tar -xzf "${SKILLS_ARCHIVE}" -C "${SKILLS_DIR}" --strip-components=0 || { + log_error "Skills 解压失败 (tar.gz)" + return 1 + } + elif file "${SKILLS_ARCHIVE}" | grep -q "Zip"; then + unzip -o "${SKILLS_ARCHIVE}" -d "${SKILLS_DIR}" || { + log_error "Skills 解压失败 (zip)" + return 1 + } + else + # 尝试 tar.gz 解压 + tar -xzf "${SKILLS_ARCHIVE}" -C "${SKILLS_DIR}" --strip-components=0 || { + log_error "Skills 解压失败 (未知格式)" + return 1 + } + fi + + # 清理临时文件 + rm -f "${SKILLS_ARCHIVE}" + + log_success "Skills 部署完成" + + # 列出已安装的 Skills + log_info "已安装的 Skills:" + find "${SKILLS_DIR}" -mindepth 1 -maxdepth 1 -type d -exec basename {} \; | while read -r skill; do + echo " - ${skill}" + done + + return 0 +} + +# 校验 Skills 依赖(支持 Python、Node.js、Go) +# 3. 从 Git 仓库部署 Skills(优先于 SKILLS_URL) +deploy_skills_from_git() { + log_info "开始从 Git 仓库部署 Skills..." + + if [[ -z "${SKILLS_CONFIG}" ]] || [[ "${SKILLS_CONFIG}" == "[]" ]]; then + log_warn "SKILLS_CONFIG 环境变量为空,跳过 Git Skills 部署" + return 0 + fi + + # 校验 JSON 格式 + if ! 
echo "${SKILLS_CONFIG}" | jq empty 2>/dev/null; then + log_error "SKILLS_CONFIG 不是有效的 JSON 格式" + return 1 + fi + + local repo_url token skill_count + repo_url=$(echo "${SKILLS_CONFIG}" | jq -r '.repoUrl // empty') + token=$(echo "${SKILLS_CONFIG}" | jq -r '.token // empty') + skill_count=$(echo "${SKILLS_CONFIG}" | jq -r '.skills | length') + + if [[ -z "${repo_url}" ]]; then + log_error "SKILLS_CONFIG 中缺少 repoUrl" + return 1 + fi + + if [[ "${skill_count}" == "null" ]] || [[ "${skill_count}" -eq 0 ]]; then + log_warn "SKILLS_CONFIG 中 skills 列表为空" + return 0 + fi + + # 构建认证 URL + local clone_url="${repo_url}" + if [[ -n "${token}" ]] && [[ "${repo_url}" == https://* ]]; then + clone_url=$(echo "${repo_url}" | sed "s|https://|https://oauth2:${token}@|") + fi + + log_info "共有 ${skill_count} 个 Skills 需要从 Git 部署" + + local success_count=0 + local fail_count=0 + + for ((i=0; i&1; then + log_error "Skill ${skill_name} clone 失败" + ((fail_count++)) + continue + fi + + # 如果指定了精确 commit,checkout 到该 commit + if [[ -n "${skill_commit}" ]]; then + (cd "${tmp_dir}" && git fetch --depth 1 origin "${skill_commit}" && git checkout "${skill_commit}") 2>&1 || { + log_warn "Skill ${skill_name} checkout commit ${skill_commit} 失败,使用分支最新版本" + } + fi + + # 校验 SKILL.md 存在 + if [[ ! -f "${tmp_dir}/SKILL.md" ]]; then + log_error "Skill ${skill_name} 缺少 SKILL.md 文件" + rm -rf "${tmp_dir}" + ((fail_count++)) + continue + fi + + # 复制到目标目录(排除 .git) + rm -rf "${target_dir}" + mkdir -p "${target_dir}" + if command_exists rsync; then + rsync -a --exclude='.git' "${tmp_dir}/" "${target_dir}/" + else + cp -a "${tmp_dir}/." "${target_dir}/" + rm -rf "${target_dir}/.git" + fi + + # 清理临时目录 + rm -rf "${tmp_dir}" + + log_success "Skill ${skill_name} 部署成功" + ((success_count++)) + done + + log_info "Git Skills 部署完成: 成功 ${success_count},失败 ${fail_count}" + + if [[ ${fail_count} -gt 0 ]]; then + return 1 + fi + + return 0 +} + +# 4. 
校验 Skills 依赖(支持 Python、Node.js、Go) +check_skills_dependencies() { + log_info "开始检查 Skills 依赖(支持: Python, Node.js, Go)..." + + if [[ ! -d "${SKILLS_DIR}" ]]; then + log_warn "Skills 目录不存在,跳过依赖检查" + return 0 + fi + + local skill_count=0 + local dep_issues=0 + + for skill_dir in "${SKILLS_DIR}"/*/; do + [[ -d "${skill_dir}" ]] || continue + + local skill_name + skill_name=$(basename "${skill_dir}") + ((skill_count++)) + + local found_deps=false + + # === Python 依赖检查 === + local py_req_file="" + if [[ -f "${skill_dir}requirements.txt" ]]; then + py_req_file="${skill_dir}requirements.txt" + elif [[ -f "${skill_dir}requirement.txt" ]]; then + py_req_file="${skill_dir}requirement.txt" + fi + + if [[ -n "${py_req_file}" ]]; then + found_deps=true + log_info " [${skill_name}] Python 依赖: $(basename "${py_req_file}")" + local py_issues + py_issues=$(check_python_deps "${py_req_file}") + if [[ "${py_issues}" == "FATAL" ]]; then + return 1 + fi + dep_issues=$((dep_issues + py_issues)) + fi + + # === Node.js 依赖检查 === + if [[ -f "${skill_dir}package.json" ]]; then + found_deps=true + log_info " [${skill_name}] Node.js 依赖: package.json" + local node_issues + node_issues=$(check_nodejs_deps "${skill_dir}package.json") + if [[ "${node_issues}" == "FATAL" ]]; then + return 1 + fi + dep_issues=$((dep_issues + node_issues)) + fi + + # === Go 依赖检查 === + if [[ -f "${skill_dir}go.mod" ]]; then + found_deps=true + log_info " [${skill_name}] Go 依赖: go.mod" + local go_issues + go_issues=$(check_go_deps "${skill_dir}go.mod") + if [[ "${go_issues}" == "FATAL" ]]; then + return 1 + fi + dep_issues=$((dep_issues + go_issues)) + fi + + if [[ "${found_deps}" == "false" ]]; then + log_info " [${skill_name}] 无依赖文件,跳过检查" + fi + done + + log_info "依赖检查完成: 扫描 ${skill_count} 个 Skills" + + if [[ ${dep_issues} -gt 0 ]]; then + log_error "发现 ${dep_issues} 个依赖问题,构建失败" + return 1 + fi + + log_success "所有依赖校验通过" + return 0 +} + +# Python 依赖检查 +check_python_deps() { + local req_file="$1" + local issues=0 + + 
if ! python3.12 -m pip --version &> /dev/null; then + log_error " python3.12 -m pip 不可用,无法检查 Python 依赖" + echo "FATAL" + return + fi + + while IFS= read -r line || [[ -n "$line" ]]; do + # 跳过空行和注释 + [[ -z "${line}" ]] && continue + [[ "${line}" =~ ^# ]] && continue + [[ "${line}" =~ ^[[:space:]]*$ ]] && continue + # 跳过 -r、-e 等特殊行 + [[ "${line}" =~ ^- ]] && continue + + # 解析包名(支持 package==1.0, package>=1.0 等格式) + local pkg_name pkg_spec + pkg_name=$(echo "${line}" | sed -E 's/([a-zA-Z0-9_-]+).*/\1/' | tr '[:upper:]' '[:lower:]' | tr '_' '-') + pkg_spec="${line}" + + if python3.12 -m pip show "${pkg_name}" &> /dev/null; then + local installed_version + installed_version=$(python3.12 -m pip show "${pkg_name}" 2>/dev/null | grep "^Version:" | awk '{print $2}') + + if python3.12 -c "import pkg_resources; pkg_resources.require('${pkg_spec}')" 2>/dev/null; then + log_success " [Python][OK] ${pkg_name}==${installed_version}" + else + log_error " [Python][VERSION] ${pkg_name}==${installed_version} 不满足版本要求: ${pkg_spec}" + ((issues++)) + fi + else + log_error " [Python][MISSING] ${pkg_name} 未安装 (需要: ${pkg_spec})" + ((issues++)) + fi + done < "${req_file}" + + echo ${issues} +} + +# Node.js 依赖检查 +check_nodejs_deps() { + local pkg_json="$1" + local issues=0 + + # 检查 npm 是否可用 + if ! 
command -v npm &> /dev/null; then
+        log_error "  npm 未安装,无法检查 Node.js 依赖"
+        echo "FATAL"
+        return
+    fi
+
+    # Extract dependencies and devDependencies as "name|versionSpec" pairs
+    local deps
+    deps=$(jq -r '(.dependencies // {}) + (.devDependencies // {}) | to_entries[] | "\(.key)|\(.value)"' "${pkg_json}" 2>/dev/null)
+
+    if [[ -z "${deps}" ]]; then
+        log_info "  package.json 无依赖声明"
+        echo 0
+        return
+    fi
+
+    while IFS='|' read -r pkg_name version_spec; do
+        [[ -z "${pkg_name}" ]] && continue
+
+        # Look up the globally installed version, if any
+        local installed_version=""
+        installed_version=$(npm list -g "${pkg_name}" --depth=0 2>/dev/null | grep "${pkg_name}@" | sed -E 's/.*@([0-9.]+).*/\1/' | head -1)
+
+        if [[ -n "${installed_version}" ]]; then
+            log_success "  [Node][OK] ${pkg_name}@${installed_version} (全局)"
+        else
+            # Fall back to a substring match over the global module list
+            if npm list -g --depth=0 2>/dev/null | grep -q "${pkg_name}"; then
+                log_success "  [Node][OK] ${pkg_name} (全局已安装)"
+            else
+                log_error "  [Node][MISSING] ${pkg_name}@${version_spec} 未全局安装"
+                issues=$((issues + 1))
+            fi
+        fi
+    done <<< "${deps}"
+
+    echo ${issues}
+}
+
+# Go dependency check.
+# $1 is the path to a skill's go.mod file (callers pass "<skill_dir>go.mod");
+# a directory argument is also accepted and resolved to <dir>/go.mod.
+# Prints the number of issues on stdout, or FATAL when go is unavailable.
+check_go_deps() {
+    local go_mod="$1"
+    local issues=0
+
+    # go toolchain is required for version inspection
+    if ! command -v go &> /dev/null; then
+        log_error "  go 未安装,无法检查 Go 依赖"
+        echo "FATAL"
+        return
+    fi
+
+    # BUGFIX: the previous version treated $1 as a directory and re-derived
+    # "${1}/go.mod", yielding ".../go.mod/go.mod" (nonexistent) and silently
+    # skipping the version check. Use $1 as the go.mod path directly.
+    if [[ -d "${go_mod}" ]]; then
+        go_mod="${go_mod%/}/go.mod"
+    fi
+    if [[ ! -f "${go_mod}" ]]; then
+        log_warn "  go.mod 不存在: ${go_mod},跳过 Go 依赖检查"
+        echo 0
+        return
+    fi
+
+    # "go X.Y" directive declares the minimum toolchain version
+    local required_go_version
+    required_go_version=$(grep "^go " "${go_mod}" | awk '{print $2}')
+
+    if [[ -n "${required_go_version}" ]]; then
+        local current_go_version
+        current_go_version=$(go version | sed -E 's/go version go([0-9.]+).*/\1/')
+
+        log_info "  Go 版本要求: ${required_go_version}, 当前版本: ${current_go_version}"
+
+        # Simple major.minor comparison (patch level intentionally ignored)
+        local req_major req_minor cur_major cur_minor
+        req_major=$(echo "${required_go_version}" | cut -d. -f1)
+        req_minor=$(echo "${required_go_version}" | cut -d. -f2)
+        cur_major=$(echo "${current_go_version}" | cut -d. -f1)
+        cur_minor=$(echo "${current_go_version}" | cut -d. -f2)
+
+        if [[ "${cur_major}" -gt "${req_major}" ]] || \
+           [[ "${cur_major}" -eq "${req_major}" && "${cur_minor}" -ge "${req_minor}" ]]; then
+            log_success "  [Go][OK] Go 版本满足要求"
+        else
+            log_error "  [Go][VERSION] Go ${current_go_version} 不满足版本要求 (需要 >= ${required_go_version})"
+            issues=$((issues + 1))
+        fi
+    fi
+
+    # Module requirements are fetched at build time via `go mod download`,
+    # so individual require entries are not verified here.
+    log_info "  [Go] 依赖将在构建时通过 go mod download 自动获取"
+    echo ${issues}
+}
+
+# 4. Deploy MCP config (MCP_CONFIG env var takes priority, then MCP_CONFIG_URL download)
+deploy_mcp_config() {
+    log_info "开始部署 MCP 配置..."
+
+    # Priority 1: MCP_CONFIG env var (JSON string generated by the build system)
+    if [[ -n "${MCP_CONFIG}" ]] && [[ "${MCP_CONFIG}" != "[]" ]]; then
+        log_info "检测到 MCP_CONFIG 环境变量,直接写入配置..."
+
+        # Validate JSON before writing
+        if ! echo "${MCP_CONFIG}" | jq empty 2>/dev/null; then
+            log_error "MCP_CONFIG 不是有效的 JSON 格式"
+            return 1
+        fi
+
+        # Write the config file (pretty-printed)
+        echo "${MCP_CONFIG}" | jq '.' 
> "${MCP_CONFIG_FILE}" || { + log_error "写入 MCP 配置文件失败" + return 1 + } + + # 设置权限 + chmod 0440 "${MCP_CONFIG_FILE}" + chown root:agent "${MCP_CONFIG_FILE}" 2>/dev/null || true + + log_success "MCP 配置已从 MCP_CONFIG 环境变量部署到 ${MCP_CONFIG_FILE}" + return 0 + fi + + # 优先级 2: MCP_CONFIG_URL 下载 + if [[ -n "${MCP_CONFIG_URL}" ]]; then + log_info "从 ${MCP_CONFIG_URL} 下载 MCP 配置..." + + local tmp_mcp="/tmp/mcp_download.json" + + if ! curl -fsSL -o "${tmp_mcp}" "${MCP_CONFIG_URL}"; then + log_error "MCP 配置下载失败" + rm -f "${tmp_mcp}" + return 1 + fi + + # 验证 JSON 格式 + if ! jq empty "${tmp_mcp}" 2>/dev/null; then + log_error "下载的 MCP 配置不是有效的 JSON 格式" + rm -f "${tmp_mcp}" + return 1 + fi + + # 写入配置文件(格式化) + jq '.' "${tmp_mcp}" > "${MCP_CONFIG_FILE}" || { + log_error "写入 MCP 配置文件失败" + rm -f "${tmp_mcp}" + return 1 + } + + rm -f "${tmp_mcp}" + chmod 0440 "${MCP_CONFIG_FILE}" + chown root:agent "${MCP_CONFIG_FILE}" 2>/dev/null || true + + log_success "MCP 配置已从 URL 部署到 ${MCP_CONFIG_FILE}" + return 0 + fi + + log_warn "MCP_CONFIG 和 MCP_CONFIG_URL 均为空,跳过 MCP 配置部署" + return 0 +} +# 部署 start 脚本和 ai_employee.py 到 /opt/agent/ (root:root, agent 不可改) +# 安全: 放在 /workspace 外防止 agent 篡改后 root 执行被劫持的脚本 +deploy_start_scripts() { + log_info "部署启动脚本到 /opt/agent/..." + + local deploy_dir="/opt/agent" + mkdir -p "${deploy_dir}" + local scripts=("start-single.sh" "start-dual.sh" "ai_employee.py") + + if [[ ! 
-d "${START_SCRIPTS_SRC}" ]]; then + local existing=0 + for script in "${scripts[@]}"; do + if [[ -f "${deploy_dir}/${script}" ]]; then + chmod 0755 "${deploy_dir}/${script}" || true + chown root:root "${deploy_dir}/${script}" || true + ((existing++)) + fi + done + if [[ ${existing} -eq ${#scripts[@]} ]]; then + log_warn "未找到 ${START_SCRIPTS_SRC},使用基础镜像已存在的启动脚本" + return 0 + fi + + log_error "启动脚本目录不存在: ${START_SCRIPTS_SRC}" + log_error "请将 start-single.sh/start-dual.sh/ai_employee.py 放置到固定目录,或通过 START_SCRIPTS_SRC 指定路径" + return 1 + fi + + local deployed=0 + + for script in "${scripts[@]}"; do + if [[ ! -f "${START_SCRIPTS_SRC}/${script}" ]]; then + log_error "脚本不存在: ${START_SCRIPTS_SRC}/${script}" + return 1 + fi + + cp "${START_SCRIPTS_SRC}/${script}" "${deploy_dir}/${script}" + chmod 0755 "${deploy_dir}/${script}" + chown root:root "${deploy_dir}/${script}" + ((deployed++)) + log_info " -> ${deploy_dir}/${script}" + done + + log_success "启动脚本部署完成 (${deployed}/${#scripts[@]}, root:root 0755)" + return 0 +} + +# 最终权限设置 +finalize_permissions() { + log_info "设置最终权限..." 
+ + # workspace 归 agent + workspace 协作组,目录 setgid 保证跨容器共享组继承 + if id agent &>/dev/null; then + chown -R agent:"${WORKSPACE_GROUP}" "${WORKSPACE_DIR}" + chmod -R g+rwX "${WORKSPACE_DIR}" + for dir in "${WORKSPACE_DIR}" "${WORKSPACE_DIR}/logs" "${WORKSPACE_DIR}/user" "${WORKSPACE_DIR}/workstation" "${WORKSPACE_DIR}/task-logs" "${WORKSPACE_DIR}/worker-logs"; do + mkdir -p "${dir}" + chown agent:"${WORKSPACE_GROUP}" "${dir}" + chmod 2770 "${dir}" + done + log_info " -> ${WORKSPACE_DIR} owner=agent:${WORKSPACE_GROUP}, 共享目录=2770(setgid)" + + # /opt/agent/ 下配置文件:root:agent 0440(agent 只读不可改) + for cfg_file in "${SECURITY_FILE}" "${MCP_CONFIG_FILE}" "${AGENT_CONFIG_DIR}/config.json"; do + if [[ -f "${cfg_file}" ]]; then + chmod 0440 "${cfg_file}" + chown root:agent "${cfg_file}" + fi + done + log_info " -> config/security/mcp.json → 0440 root:agent" + + # skills 目录:root:agent 0750(agent 可读可进入,不可改) + if [[ -d "${SKILLS_DIR}" ]]; then + chown -R root:agent "${SKILLS_DIR}" + find "${SKILLS_DIR}" -type d -exec chmod 0750 {} \; + find "${SKILLS_DIR}" -type f -exec chmod 0440 {} \; + fi + log_info " -> skills/ → 0750/0440 root:agent" + + log_success "workspace + /opt/agent/ 权限设置完成" + else + log_warn "agent 用户不存在,跳过权限设置" + fi + + # /etc/zzd 加固校验 — monitor(zzd) 可读, agent 不可读不可改 + if [[ -d "${ZZD_CONFIG_DIR}" ]]; then + find "${ZZD_CONFIG_DIR}" -type d -exec chmod 0750 {} \; + find "${ZZD_CONFIG_DIR}" -type f -exec chmod 0440 {} \; + chown -R root:monitor "${ZZD_CONFIG_DIR}" + # socket 和 audit 目录归 monitor(zzd 进程需要写入) + if [[ -d "${ZZD_SOCKET_DIR}" ]]; then + chown monitor:monitor "${ZZD_SOCKET_DIR}" + chmod 0755 "${ZZD_SOCKET_DIR}" + fi + if [[ -d "${ZZD_AUDIT_DIR}" ]]; then + chown monitor:monitor "${ZZD_AUDIT_DIR}" + chmod 0700 "${ZZD_AUDIT_DIR}" + fi + log_success "/etc/zzd 权限加固完成 (config: 0750/0440 root:monitor; socket/audit: monitor:monitor)" + fi + + return 0 +} + +# ============================================================================= +# Phase 7: 网络隔离 (iptables) +# 
============================================================================= + +# 配置 iptables 网络白名单 +# Gateway 模式下: MCP 访问全部走 Gateway (ClusterIP),不需要逐个开放 MCP 端点 +# 仅放行: loopback / DNS / RFC1918 / 运行时环境变量端点 / Skills 端点 +# 策略: 使用自定义链 AGENT_OUTPUT,不影响其他用户/进程的网络访问 +setup_network_firewall() { + log_info "配置 iptables 网络白名单..." + + if ! command_exists iptables; then + log_warn "iptables 未安装,跳过网络隔离配置" + return 0 + fi + + local allowed_hosts=() + + # 从 Skills 中提取可能声明的外部端点 + if [[ -d "${SKILLS_DIR}" ]]; then + for skill_meta in "${SKILLS_DIR}"/*/SKILL.md; do + [[ -f "${skill_meta}" ]] || continue + while IFS= read -r endpoint_host; do + [[ -n "${endpoint_host}" ]] && allowed_hosts+=("${endpoint_host}") + done < <(grep -oP 'https?://\K[^/:]+' "${skill_meta}" 2>/dev/null | sort -u) + done + fi + + # 去重 + local unique_hosts=() + local seen_map="" + for h in "${allowed_hosts[@]}"; do + if [[ "${seen_map}" != *"|${h}|"* ]]; then + unique_hosts+=("${h}") + seen_map="${seen_map}|${h}|" + fi + done + + local agent_uid=1001 + + local fw_script="/opt/agent/setup-firewall.sh" + + cat > "${fw_script}" <<'FWEOF' +#!/bin/bash +# Agent 网络隔离防火墙规则 (自动生成,请勿手动编辑) +# MCP 访问已统一通过 Gateway (K8s ClusterIP),无需逐个开放 MCP 端点 +set -e + +AGENT_UID=1001 + +iptables -N AGENT_OUTPUT 2>/dev/null || iptables -F AGENT_OUTPUT + +iptables -D OUTPUT -m owner --uid-owner ${AGENT_UID} -j AGENT_OUTPUT 2>/dev/null || true +iptables -A OUTPUT -m owner --uid-owner ${AGENT_UID} -j AGENT_OUTPUT + +# ── 基础规则 ── +iptables -A AGENT_OUTPUT -o lo -j ACCEPT +iptables -A AGENT_OUTPUT -m state --state ESTABLISHED,RELATED -j ACCEPT + +# ── DNS (UDP/TCP 53) ── +iptables -A AGENT_OUTPUT -p udp --dport 53 -j ACCEPT +iptables -A AGENT_OUTPUT -p tcp --dport 53 -j ACCEPT + +# ── RFC1918 私有网络: 集群内部通信 (Redis, K8s Service, Pod CIDR, MCP Gateway) ── +iptables -A AGENT_OUTPUT -d 10.0.0.0/8 -j ACCEPT +iptables -A AGENT_OUTPUT -d 172.16.0.0/12 -j ACCEPT +iptables -A AGENT_OUTPUT -d 192.168.0.0/16 -j ACCEPT + +# ── 运行时服务端点 (从环境变量动态提取公网 host) 
── +extract_host() { + local url="$1" + echo "${url}" | sed -E 's|^[a-zA-Z]+://||; s|/.*||; s|:[0-9]+$||' +} + +for env_url in "${REDIS_URL:-}" "${API_BASE_URL:-}" "${WS_BASE_URL:-}" "${LLM_GATEWAY_URL:-}" "${MAIN_PY_URL:-}"; do + [ -z "${env_url}" ] && continue + local_host=$(extract_host "${env_url}") + [ -z "${local_host}" ] && continue + case "${local_host}" in + 10.*|172.1[6-9].*|172.2[0-9].*|172.3[0-1].*|192.168.*|*.svc|*.svc.*|localhost) continue ;; + esac + iptables -A AGENT_OUTPUT -d "${local_host}" -j ACCEPT +done + +FWEOF + + # 追加 Skills 白名单 + if [[ ${#unique_hosts[@]} -gt 0 ]]; then + echo "# ── Skills 白名单 (构建时静态) ──" >> "${fw_script}" + for host in "${unique_hosts[@]}"; do + echo "iptables -A AGENT_OUTPUT -d \"${host}\" -j ACCEPT" >> "${fw_script}" + done + echo "" >> "${fw_script}" + fi + + cat >> "${fw_script}" <<'FWEOF2' +# ── 默认拒绝: agent 用户的所有其他出站连接 ── +iptables -A AGENT_OUTPUT -j REJECT --reject-with icmp-port-unreachable + +echo "[INFO] Agent 网络隔离防火墙已生效 (agent uid=${AGENT_UID})" +FWEOF2 + + chmod 0700 "${fw_script}" + chown root:root "${fw_script}" + + if [[ ${#unique_hosts[@]} -gt 0 ]]; then + log_info "白名单主机 (${#unique_hosts[@]}): ${unique_hosts[*]}" + else + log_info "MCP 访问通过 Gateway (K8s ClusterIP),无需额外白名单" + fi + + log_info "防火墙规则脚本已生成: ${fw_script}" + + if bash "${fw_script}" 2>/dev/null; then + log_success "iptables 网络白名单已在构建环境生效" + else + log_warn "构建环境无法应用 iptables(可能缺少 NET_ADMIN 权限),规则已保存到 ${fw_script},需在运行时由 start.sh 加载" + fi + + return 0 +} + +# ============================================================================= +# 主函数 +# ============================================================================= +main() { + log_info "==========================================" + log_info "K8s Agent 镜像构建脚本启动(生产版本 v2)" + log_info "参考: docs/docker.md §3.2" + log_info "==========================================" + + local exit_code=0 + + log_info "构建输入目录解析结果:" + log_info " BUILD_ASSETS_ROOT=${BUILD_ASSETS_ROOT}" + log_info " 
ZZD_BIN_SRC=${ZZD_BIN_SRC}" + log_info " SDK_SRC=${SDK_SRC}" + log_info " START_SCRIPTS_SRC=${START_SCRIPTS_SRC}" + + # ── Phase 1: 系统依赖 ── + if ! check_prerequisites; then + log_error "前置条件检查失败" + exit 1 + fi + + # 创建目录结构 + if ! create_directories; then + log_error "目录创建失败" + exit 1 + fi + + # ── Phase 2: zzd + SDK + 用户 ── + if ! install_zzd_binaries; then + log_error "zzd 二进制安装失败" + exit_code=1 + fi + + if ! install_sdk; then + log_error "SDK 安装失败" + exit 1 + fi + + if ! patch_sdk_runtime_model_override; then + log_error "SDK runtime model 补丁失败" + exit 1 + fi + + if ! setup_agent_user; then + log_error "agent 用户创建失败" + exit 1 + fi + + # ── Phase 3: workspace + zzd 配置 ── + if ! setup_workspace; then + log_error "workspace 目录结构创建失败" + exit 1 + fi + + if ! setup_agent_config; then + log_error "agent 配置部署失败" + exit 1 + fi + + if ! download_cedar_policies; then + log_error "Cedar 策略下载/部署失败" + exit_code=1 + fi + + if ! setup_zzd_directories; then + log_error "zzd 目录创建失败" + exit 1 + fi + + # ── Phase 4: Git ── + if ! install_git; then + log_error "git 安装失败" + exit 1 + fi + + if ! clone_git_repos; then + log_error "Git 仓库克隆失败" + exit_code=1 + fi + + # 2 & 3. 部署 Skills(优先 SKILLS_CONFIG Git clone,其次 SKILLS_URL 下载) + if [[ -n "${SKILLS_CONFIG}" ]] && [[ "${SKILLS_CONFIG}" != "[]" ]]; then + if ! deploy_skills_from_git; then + log_error "Skills Git 部署失败" + exit_code=1 + fi + elif [[ -n "${SKILLS_URL}" ]]; then + if ! download_skills; then + log_error "Skills 下载失败" + exit_code=1 + fi + else + log_info "未配置 Skills(SKILLS_CONFIG 和 SKILLS_URL 均为空),跳过" + fi + + # 3. 检查依赖(失败则终止构建) + if ! check_skills_dependencies; then + log_error "Skills 依赖检查失败" + exit 1 + fi + + # ── Phase 6: 启动脚本 + 权限 ── + if ! deploy_start_scripts; then + log_error "启动脚本部署失败" + exit_code=1 + fi + + if ! finalize_permissions; then + log_error "权限设置失败" + exit_code=1 + fi + + # 5. 部署 MCP 配置 + if ! 
deploy_mcp_config; then + log_error "MCP 配置部署失败" + exit_code=1 + fi + + # ── Phase 7: 网络隔离 (生成防火墙脚本,运行时由 ENABLE_NETWORK_FIREWALL 控制是否加载) ── + if ! setup_network_firewall; then + log_error "网络隔离配置失败" + exit_code=1 + fi + + # ── 清理构建临时文件(避免策略/源码残留在镜像中被 agent 读取)── + log_info "清理构建临时文件..." + rm -rf /tmp/cedar-policies /tmp/cedar-policies-download + + # 仅清理 /tmp 回退路径,避免误删固定资产目录(如 /opt/linkwork-agent-build) + for cleanup_dir in "${ZZD_BIN_SRC}" "${SDK_SRC}" "${START_SCRIPTS_SRC}"; do + if [[ "${cleanup_dir}" == /tmp/* ]]; then + rm -rf "${cleanup_dir}" + fi + done + + log_success "构建临时文件已清理" + + # ── 输出结果 ── + log_info "==========================================" + if [[ ${exit_code} -eq 0 ]]; then + log_success "构建完成" + else + log_error "构建完成但存在错误" + fi + log_info "==========================================" + + # 输出目录结构 + log_info "工作目录结构:" + if command -v tree &> /dev/null; then + tree -L 3 "${WORKSPACE_DIR}" 2>/dev/null || ls -laR "${WORKSPACE_DIR}" + else + ls -laR "${WORKSPACE_DIR}" 2>/dev/null | head -40 + fi + + log_info "zzd 配置目录:" + ls -la "${ZZD_CONFIG_DIR}" 2>/dev/null || true + ls -la "${ZZD_POLICY_DIR}" 2>/dev/null || true + + log_info "Agent 组件目录:" + if command -v tree &> /dev/null; then + tree -L 2 /opt/agent/ 2>/dev/null || ls -la /opt/agent/ + else + ls -la /opt/agent/ 2>/dev/null || true + fi + + exit ${exit_code} +} + +# 执行主函数 +main "$@" diff --git a/back/src/main/resources/scripts/config.json b/back/src/main/resources/scripts/config.json new file mode 100644 index 0000000..ed37115 --- /dev/null +++ b/back/src/main/resources/scripts/config.json @@ -0,0 +1,31 @@ +{ + "claude_settings": { + "model": "sonnet", + "language": "Chinese", + "env": { + "ANTHROPIC_BASE_URL": "${ANTHROPIC_BASE_URL:http://localhost:4000}", + "ANTHROPIC_AUTH_TOKEN": "${ANTHROPIC_AUTH_TOKEN:}", + "ANTHROPIC_API_KEY": "${ANTHROPIC_API_KEY:}", + "ANTHROPIC_MODEL": "openrouter/anthropic/claude-sonnet-4.5", + "LITELLM_BASE_URL": "${LITELLM_BASE_URL:http://localhost:4000}", + 
"LITELLM_API_KEY": "${LITELLM_API_KEY:}", + "API_TIMEOUT_MS": "600000", + "CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC": "1" + } + }, + "agent": { + "name": "test-employee", + "max_turns": 50, + "max_thinking_tokens": 5000, + "permission_mode": "bypassPermissions", + "allowed_tools": [], + "disallowed_tools": [], + "can_use_tools": [], + "zz_enabled": true + }, + "system_prompt": { + "use_preset": true, + "preset": "claude_code", + "append": "强制规则(不可违反):\n1) 开始前必须先读取 `AGENTS.md`;未读取=任务失败。\n2) 目录与产出规则只以 `AGENTS.md` 为准。\n3) 最终交付只能写入 `/workspace/workstation`;`/workspace/logs` 仅中间文件。\n4) 禁止只回复分析不落地文件;至少产出 1 个最终文件。\n5) 若任一规则无法满足,立即报错并停止,禁止伪完成。" + } +} diff --git a/back/src/main/resources/scripts/start-runner.sh b/back/src/main/resources/scripts/start-runner.sh new file mode 100755 index 0000000..f9f3d2e --- /dev/null +++ b/back/src/main/resources/scripts/start-runner.sh @@ -0,0 +1,243 @@ +#!/bin/bash +# ============================================================================= +# Runner 容器入口脚本(基于 Rocky Linux 基础镜像) +# +# 职责: 准备 sshd 环境 → 等待 Agent 公钥 → 配置 SSH 认证 → 前台启动 sshd +# +# 适用场景: +# - K8s Pod sidecar: 镜像应已预装 sshd(避免运行时安装) +# - Docker Compose: 纯 Rocky 基础镜像,脚本自动兜底安装 +# +# 运行时依赖: +# - 共享卷 /shared-keys (Agent 写入 zzd_pubkey.pub) +# - 共享卷 /workspace (工作目录) +# +# 重点: +# - Runner 不负责 zzd/worker,只负责"被 SSH 执行" +# - 这些步骤每次 Pod 重建都要重复(共享卷通常是 emptyDir,会清空) +# - Agent 的 start-dual.sh 会等待 Runner SSH 就绪,然后再启动 zzd+worker +# ============================================================================= +set -e + +readonly SHARED_KEY_DIR="/shared-keys" +readonly PUBKEY_FILE="${SHARED_KEY_DIR}/zzd_pubkey.pub" +readonly PUBKEY_TIMEOUT="${PUBKEY_TIMEOUT:-120}" +readonly WORKSPACE_GROUP="${WORKSPACE_GROUP:-workspace}" +readonly WORKSPACE_GID="${WORKSPACE_GID:-2000}" +readonly RUNNER_USER="${RUNNER_USER:-runner}" +readonly RUNNER_UID="${RUNNER_UID:-1001}" +readonly RUNNER_HOME="${RUNNER_HOME:-/home/${RUNNER_USER}}" + +# 
============================================================================= +# 日志 +# ============================================================================= +log_info() { echo "[Runner][INFO] $(date '+%H:%M:%S') $*"; } +log_error() { echo "[Runner][ERROR] $(date '+%H:%M:%S') $*" >&2; } +log_warn() { echo "[Runner][WARN] $(date '+%H:%M:%S') $*"; } + +configure_runner_python_env() { + local bashrc="${RUNNER_HOME}/.bashrc" + local python_bin="" + + if [[ -x /usr/bin/python3.12 ]]; then + python_bin="/usr/bin/python3.12" + ln -sf /usr/bin/python3.12 /usr/local/bin/python3 2>/dev/null || true + ln -sf /usr/bin/python3.12 /usr/local/bin/python 2>/dev/null || true + elif command -v python3.12 >/dev/null 2>&1; then + python_bin="$(command -v python3.12)" + elif command -v python3 >/dev/null 2>&1; then + python_bin="$(command -v python3)" + fi + + if [[ -z "${python_bin}" ]]; then + log_warn "未找到 python3.12/python3,跳过 ${RUNNER_USER} Python 环境变量注入" + return 0 + fi + + sed -i '/# >>> workspace-python >>>/,/# <<< workspace-python <</dev/null || true + cat >> "${bashrc}" <>> workspace-python >>> +export PYTHON_BIN="${python_bin}" +export PYTHON="${python_bin}" +export UV_PYTHON="${python_bin}" +export PATH="/usr/bin:/usr/local/bin:\$PATH" +# <<< workspace-python <<< +EOF + chown "${RUNNER_USER}:${RUNNER_USER}" "${bashrc}" + log_info "${RUNNER_USER} python 默认解释器: ${python_bin} ($("${python_bin}" --version 2>&1))" +} + +setup_workspace_group_permissions() { + local resolved_group="${WORKSPACE_GROUP}" + + if getent group "${WORKSPACE_GID}" >/dev/null 2>&1; then + resolved_group=$(getent group "${WORKSPACE_GID}" | cut -d: -f1) + elif ! 
getent group "${WORKSPACE_GROUP}" >/dev/null 2>&1; then + groupadd -g "${WORKSPACE_GID}" "${WORKSPACE_GROUP}" || { + log_error "创建 workspace 协作组失败 (${WORKSPACE_GROUP}:${WORKSPACE_GID})" + return 1 + } + fi + + usermod -aG "${resolved_group}" "${RUNNER_USER}" || { + log_error "将 ${RUNNER_USER} 加入 workspace 协作组失败 (${resolved_group})" + return 1 + } + + for dir in /workspace /workspace/logs /workspace/user /workspace/workstation /workspace/task-logs /workspace/worker-logs; do + mkdir -p "${dir}" + chgrp -R "${resolved_group}" "${dir}" + chmod -R g+rwX "${dir}" + find "${dir}" -type d -exec chmod g+s {} + + chmod 2770 "${dir}" + done + + log_info "/workspace 权限已对齐 (group=${resolved_group}, dirs=workspace/logs/user/workstation/task-logs/worker-logs, umask=0002)" +} + +# ============================================================================= +# 1. 启动 sshd 所需环境(镜像内应已带好,兜底运行时安装) +# ============================================================================= +log_info "================================================" +log_info " Runner 容器启动" +log_info " PUBKEY_TIMEOUT: ${PUBKEY_TIMEOUT}s" +log_info "================================================" + +if [ ! -x /usr/sbin/sshd ]; then + log_warn "sshd 未预装,运行时安装(生产镜像应预装以加速启动)..." + dnf install -y openssh-server openssh-clients sudo && dnf clean all + if [ ! -x /usr/sbin/sshd ]; then + log_error "sshd 安装失败" + exit 1 + fi + log_info "sshd 运行时安装完成" +else + log_info "sshd 已预装" +fi + +# 生成 SSH host keys(如果不存在) +if [ ! -f /etc/ssh/ssh_host_rsa_key ] && [ ! -f /etc/ssh/ssh_host_ed25519_key ]; then + log_info "生成 SSH host keys..." + ssh-keygen -A +fi + +# SSH 配置(幂等写入,多次执行不会重复追加) +log_info "配置 sshd_config..." 
sed -i 's/^#*PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config
sed -i 's/^#*PubkeyAuthentication.*/PubkeyAuthentication yes/' /etc/ssh/sshd_config
# Key-only auth: password login is disabled entirely.
sed -i 's/^#*PasswordAuthentication.*/PasswordAuthentication no/' /etc/ssh/sshd_config
grep -q '^AuthorizedKeysFile' /etc/ssh/sshd_config \
    || echo 'AuthorizedKeysFile .ssh/authorized_keys' >> /etc/ssh/sshd_config

# =============================================================================
# 2. Ensure an execution user exists (default "runner", overridable via
#    RUNNER_USER; root is also acceptable when necessary)
# =============================================================================
if id "${RUNNER_USER}" &>/dev/null; then
    CURRENT_UID="$(id -u "${RUNNER_USER}")"
    if [ "$CURRENT_UID" != "$RUNNER_UID" ]; then
        # Existing user has a different UID — recalibrate so volume ownership matches.
        log_info "校准用户 ${RUNNER_USER} UID: ${CURRENT_UID} -> ${RUNNER_UID}"
        usermod -u "${RUNNER_UID}" "${RUNNER_USER}"
        chown -R "${RUNNER_USER}:${RUNNER_USER}" "${RUNNER_HOME}"
    else
        log_info "用户 ${RUNNER_USER} 已存在 (uid=${CURRENT_UID})"
    fi
else
    log_info "创建用户 ${RUNNER_USER} (uid=${RUNNER_UID})..."
    # Group shares the user's UID as its GID; failures tolerated (may pre-exist).
    groupadd -g "${RUNNER_UID}" "${RUNNER_USER}" 2>/dev/null || true
    useradd -u "${RUNNER_UID}" -g "${RUNNER_USER}" -m -s /bin/bash "${RUNNER_USER}" 2>/dev/null || true
    # Passwordless sudo for the runner user.
    echo "${RUNNER_USER} ALL=(ALL) NOPASSWD:ALL" > "/etc/sudoers.d/${RUNNER_USER}"
    chmod 0440 "/etc/sudoers.d/${RUNNER_USER}"
    log_info "用户 ${RUNNER_USER} 已创建"
fi
log_info "${RUNNER_USER} 最终 uid=$(id -u "${RUNNER_USER}")"

# Group-writable default umask so files created by the runner stay shareable
# with the workspace collaboration group. Appended only once (idempotent).
if ! grep -q '^umask 0002$' "${RUNNER_HOME}/.bashrc" 2>/dev/null; then
    echo 'umask 0002' >> "${RUNNER_HOME}/.bashrc"
    chown "${RUNNER_USER}:${RUNNER_USER}" "${RUNNER_HOME}/.bashrc"
fi
configure_runner_python_env

# =============================================================================
# 3. Wait for the Agent to drop its public key on the shared volume:
#    /shared-keys/zzd_pubkey.pub
# =============================================================================
log_info "等待 Agent 公钥: ${PUBKEY_FILE} ..."
WAIT=0
# Poll once per second up to PUBKEY_TIMEOUT; progress log every 10s.
while [ ! -f "$PUBKEY_FILE" ] && [ $WAIT -lt $PUBKEY_TIMEOUT ]; do
    sleep 1
    WAIT=$((WAIT + 1))
    if [ $((WAIT % 10)) -eq 0 ]; then
        log_info " 等待中... (${WAIT}/${PUBKEY_TIMEOUT}s)"
    fi
done

if [ ! -f "$PUBKEY_FILE" ]; then
    log_error "超时 (${PUBKEY_TIMEOUT}s): 未收到 Agent 公钥 ${PUBKEY_FILE}"
    log_error "请检查 Agent 容器是否正常启动并生成了密钥"
    exit 1
fi

log_info "检测到公钥: ${PUBKEY_FILE}"

# =============================================================================
# 4. Install the public key into authorized_keys (runner user + root)
# =============================================================================
log_info "配置 SSH authorized_keys..."

# runner user
mkdir -p "${RUNNER_HOME}/.ssh"
cp "$PUBKEY_FILE" "${RUNNER_HOME}/.ssh/authorized_keys"
chown -R "${RUNNER_USER}:${RUNNER_USER}" "${RUNNER_HOME}/.ssh"
# sshd refuses keys with loose permissions — 700/600 is mandatory.
chmod 700 "${RUNNER_HOME}/.ssh"
chmod 600 "${RUNNER_HOME}/.ssh/authorized_keys"
log_info " -> ${RUNNER_USER} authorized_keys 已配置"

# root user (optional, for debugging)
mkdir -p /root/.ssh
cp "$PUBKEY_FILE" /root/.ssh/authorized_keys
chmod 700 /root/.ssh
chmod 600 /root/.ssh/authorized_keys
log_info " -> root authorized_keys 已配置"

# =============================================================================
# 5. Prepare /workspace permissions
# =============================================================================
log_info "设置 /workspace 权限..."
mkdir -p /workspace
setup_workspace_group_permissions

# =============================================================================
# 6.
启动 SSH 服务并监控 Agent 退出信号
#
# K8s 1.18 has no native sidecar support, so the Runner must detect the
# Agent's exit on its own: the Agent writes a /shared-keys/shutdown marker
# when it exits; the Runner stops sshd on seeing the marker, the container
# exits, and the whole Pod terminates.
# =============================================================================
readonly SHUTDOWN_MARKER="${SHARED_KEY_DIR}/shutdown"
readonly SHUTDOWN_CHECK_INTERVAL=5

log_info "================================================"
log_info " sshd 启动 (后台模式 + shutdown 监控)"
log_info "================================================"

/usr/sbin/sshd -D -e &
SSHD_PID=$!

# Stop and reap sshd; idempotent, safe from both the signal and EXIT traps.
shutdown_runner() {
    log_info "正在停止 sshd (pid=$SSHD_PID)..."
    kill -TERM "$SSHD_PID" 2>/dev/null || true
    wait "$SSHD_PID" 2>/dev/null || true
    log_info "Runner 已退出"
}
trap shutdown_runner EXIT SIGTERM SIGINT

# Poll: exit cleanly when the Agent's shutdown marker appears; if sshd dies
# first, fall through to report its real exit status.
while kill -0 "$SSHD_PID" 2>/dev/null; do
    if [ -f "$SHUTDOWN_MARKER" ]; then
        log_info "检测到 Agent shutdown 标记: ${SHUTDOWN_MARKER}"
        exit 0
    fi
    sleep "$SHUTDOWN_CHECK_INTERVAL"
done

# FIX: the original read $? immediately after the while-loop, which captures
# the status of the failed `kill -0` liveness probe (always nonzero), not
# sshd's actual exit code. `wait` on the dead child returns its real status.
SSHD_EXIT=0
wait "$SSHD_PID" 2>/dev/null || SSHD_EXIT=$?
log_warn "sshd 意外退出 (code=$SSHD_EXIT)"
exit "$SSHD_EXIT"
diff --git a/docs/guides/deployment.md b/docs/guides/deployment.md
index ad558b1..ca67282 100644
--- a/docs/guides/deployment.md
+++ b/docs/guides/deployment.md
@@ -154,8 +154,10 @@ LinkWork uses a "One Role, One Image" mechanism. The role build process:
 1. **Admin configures the role** — Select Skills, MCP tools, security policies, resource quotas
 2. **Trigger image build** — Server dynamically generates a Dockerfile, executes `docker build`
-3. **Push to Harbor** — Auto-push to the configured image registry after build
-4. **K8s pulls and runs** — During task scheduling, K8s pulls the role image from Harbor to create Pods
+3. **Image distribution (choose one)**
+   - `imageRegistry` configured: push the built image to remote registry
+   - `imageRegistry` empty: keep image local and auto-sync only the current built image to Kind nodes (no bulk load of all host images)
+4.
**K8s pulls and runs** — During task scheduling, K8s pulls the role image from the configured source

Image naming convention: `{registry}/service-{serviceId}-agent:{serviceId}-{timestamp}`

@@ -165,6 +167,25 @@ Base image built on Rocky Linux 9, pre-installed with:
 - git, curl, jq, and other common tools
 - Claude CLI, uv/uvx, and other AI development tools
+### Local Image Ops (Kind)
+
+In local-image mode, backend can auto-run image sync and cleanup. Recommended env vars:
+
+```bash
+LINKWORK_BUILD_LOCAL_LOAD_ENABLED=true
+LINKWORK_BUILD_KIND_CLUSTER_NAME=shared-dev   # optional, auto-discover if empty
+IMAGE_LOCAL_CLEANUP_ENABLED=true
+IMAGE_LOCAL_RETENTION_HOURS=24
+IMAGE_LOCAL_CLEANUP_CRON="0 40 * * * *"       # minute 40 of every hour
+IMAGE_KIND_PRUNE_ENABLED=true
+```
+
+You can also trigger one maintenance run manually (without waiting for cron):
+
+```bash
+curl -X POST http://<backend-host>/api/v1/build/ops/local-image-maintenance
+```
+
 ---

 ## Horizontal Scaling
diff --git a/docs/guides/deployment_zh-CN.md b/docs/guides/deployment_zh-CN.md
index ec3e5e3..e540d54 100644
--- a/docs/guides/deployment_zh-CN.md
+++ b/docs/guides/deployment_zh-CN.md
@@ -154,8 +154,10 @@ LinkWork 采用「一岗位一镜像」机制,岗位构建流程:
 1. **管理员配置岗位** — 选择 Skills、MCP 工具、安全策略、资源配额
 2. **触发镜像构建** — server 动态生成 Dockerfile,执行 `docker build`
-3. **推送到 Harbor** — 构建完成后自动推送到配置的镜像仓库
-4. **K8s 拉取运行** — 任务调度时,K8s 从 Harbor 拉取对应岗位镜像创建 Pod
+3. **镜像分发(二选一)**
+   - 配置了 `imageRegistry`:构建完成后推送到远程仓库
+   - 未配置 `imageRegistry`:仅保留本地镜像,并自动把“当前构建镜像”同步到 Kind 节点(不会全量同步主机所有镜像)
+4.
**K8s 拉取运行** — 任务调度时,从配置的镜像来源拉取对应岗位镜像创建 Pod

镜像命名规则:`{registry}/service-{serviceId}-agent:{serviceId}-{timestamp}`

@@ -165,6 +167,25 @@ LinkWork 采用「一岗位一镜像」机制,岗位构建流程:
 - git、curl、jq 等常用工具
 - Claude CLI、uv/uvx 等 AI 开发工具
+### 本地镜像运维动作(Kind)
+
+后端在本地镜像模式下会自动执行镜像同步与清理,建议在环境变量中显式配置:
+
+```bash
+LINKWORK_BUILD_LOCAL_LOAD_ENABLED=true
+LINKWORK_BUILD_KIND_CLUSTER_NAME=shared-dev   # 可选,不填则自动发现
+IMAGE_LOCAL_CLEANUP_ENABLED=true
+IMAGE_LOCAL_RETENTION_HOURS=24
+IMAGE_LOCAL_CLEANUP_CRON="0 40 * * * *"       # 每小时第 40 分钟
+IMAGE_KIND_PRUNE_ENABLED=true
+```
+
+并支持手动触发一次运维动作(立即执行,不等 cron):
+
+```bash
+curl -X POST http://<backend-host>/api/v1/build/ops/local-image-maintenance
+```
+
 ---

 ## 横向扩展