diff --git a/bsp/qemu-vexpress-a9/.config b/bsp/qemu-vexpress-a9/.config index 67eb2636e1f..72ef97fe576 100644 --- a/bsp/qemu-vexpress-a9/.config +++ b/bsp/qemu-vexpress-a9/.config @@ -208,6 +208,8 @@ CONFIG_RT_USING_USER_MAIN=y CONFIG_RT_MAIN_THREAD_STACK_SIZE=8196 CONFIG_RT_MAIN_THREAD_PRIORITY=10 CONFIG_RT_USING_LEGACY=y +CONFIG_COMPONENT_USING_CUSTOM_FEE=y +CONFIG_CUSTOM_FEE_MOCK_FLASH_SIZE=0xA0000 CONFIG_RT_USING_MSH=y CONFIG_RT_USING_FINSH=y CONFIG_FINSH_USING_MSH=y @@ -387,6 +389,7 @@ CONFIG_RT_USING_POSIX_SELECT=y CONFIG_RT_USING_POSIX_TERMIOS=y CONFIG_RT_USING_POSIX_AIO=y # CONFIG_RT_USING_POSIX_MMAN is not set +CONFIG_RT_USING_CLOCK_TIME=y CONFIG_RT_USING_POSIX_DELAY=y CONFIG_RT_USING_POSIX_CLOCK=y CONFIG_RT_USING_POSIX_TIMER=y diff --git a/bsp/qemu-vexpress-a9/rtconfig.h b/bsp/qemu-vexpress-a9/rtconfig.h index 8125bcebd16..a6e626f40a2 100644 --- a/bsp/qemu-vexpress-a9/rtconfig.h +++ b/bsp/qemu-vexpress-a9/rtconfig.h @@ -131,6 +131,8 @@ #define RT_MAIN_THREAD_STACK_SIZE 8196 #define RT_MAIN_THREAD_PRIORITY 10 #define RT_USING_LEGACY +#define COMPONENT_USING_CUSTOM_FEE +#define CUSTOM_FEE_MOCK_FLASH_SIZE 0xA0000 #define RT_USING_MSH #define RT_USING_FINSH #define FINSH_USING_MSH @@ -243,6 +245,7 @@ #define RT_USING_POSIX_SELECT #define RT_USING_POSIX_TERMIOS #define RT_USING_POSIX_AIO +#define RT_USING_CLOCK_TIME #define RT_USING_POSIX_DELAY #define RT_USING_POSIX_CLOCK #define RT_USING_POSIX_TIMER diff --git a/components/Kconfig b/components/Kconfig index accc38c6743..b642d70f3ee 100644 --- a/components/Kconfig +++ b/components/Kconfig @@ -32,6 +32,7 @@ endif if !RT_USING_NANO rsource "dfs/Kconfig" rsource "fal/Kconfig" +rsource "custom_fee/Kconfig" rsource "drivers/Kconfig" rsource "libc/Kconfig" rsource "net/Kconfig" diff --git a/components/custom_fee/Kconfig b/components/custom_fee/Kconfig new file mode 100644 index 00000000000..ae867d264d0 --- /dev/null +++ b/components/custom_fee/Kconfig @@ -0,0 +1,11 @@ +config COMPONENT_USING_CUSTOM_FEE + bool 
"using component custom fee" + default n + +if COMPONENT_USING_CUSTOM_FEE + +config CUSTOM_FEE_MOCK_FLASH_SIZE + hex "RAM mock flash size" + default 0xA0000 + +endif diff --git a/components/custom_fee/SConscript b/components/custom_fee/SConscript new file mode 100644 index 00000000000..7d9ffa289cb --- /dev/null +++ b/components/custom_fee/SConscript @@ -0,0 +1,29 @@ +from building import * + +objs = [] +cwd = GetCurrentDir() + +if not GetDepend("COMPONENT_USING_CUSTOM_FEE"): + Return('objs') + +src = [ + 'fee_cfg.c', + 'fee_api.c', + 'fee_port.c', + 'fee_sched.c', + 'fee_core.c', + 'fee_gc.c', + 'fee_recovery.c', + 'fee_cache.c', + 'fee_ckpt.c', + 'fee_lane_fast.c', + 'fee_lane_log.c', + 'fee_lane_bulk.c', + 'fee_onflash.c', + 'fee_test.c', +] + +CPPPATH = [cwd] +group = DefineGroup('custom_fee', src, depend = ['COMPONENT_USING_CUSTOM_FEE'], CPPPATH = CPPPATH) + +Return('group') diff --git "a/components/custom_fee/doc/API\345\210\227\350\241\250.md" "b/components/custom_fee/doc/API\345\210\227\350\241\250.md" new file mode 100644 index 00000000000..7cb3d7894b7 --- /dev/null +++ "b/components/custom_fee/doc/API\345\210\227\350\241\250.md" @@ -0,0 +1,944 @@ +# custom_fee 对外 API 列表 + +## 1. 这份文档解决什么问题 + +这份文档面向 **使用 `custom_fee` 的工程师**,不是面向 FEE 内部维护者。 + +它回答四类问题: + +- 用户到底能调哪些接口 +- 每个接口是同步完成,还是异步排队 +- 调用后应该怎么看结果 +- 一个新项目如何把 FEE 接起来并跑起来 + +如果你想看 flash 中的 record / sector / checkpoint 具体长什么样,请再看 [fee_diag_test.md](C:/sourcedata/rt-thread/components/custom_fee/fee_diag_test.md)。 + +## 2. 
哪些是“对用户暴露”的接口 + +当前建议用户直接依赖的头文件只有: + +- [fee_api.h](C:/sourcedata/rt-thread/components/custom_fee/fee_api.h) + +不建议业务层直接调用的内部接口: + +- [fee_port.h](C:/sourcedata/rt-thread/components/custom_fee/fee_port.h) +- [fee_flash_drv.h](C:/sourcedata/rt-thread/components/custom_fee/fee_flash_drv.h) +- `fee_core_*` +- `fee_sched_*` +- `fee_gc_*` +- `fee_recovery_*` + +可以把边界理解成: + +```text +业务代码 + -> fee_api.h + +板级/驱动适配代码 + -> fee_flash_drv.h + +FEE 内部实现 + -> 其他 fee_*.c / *.h +``` + +### 2.1 功能特性 + +这一节不是重复列 API 名字,而是说明当前这套 `custom_fee` 机制本身做了什么,以及这些能力在代码里分别落在哪些模块。 + +#### 2.1.1 RAM cache + 快速 block 查找 + +- `fee_cache.c` 里不是简单数组顺序扫描,而是同时维护 `g_fee_cache[]` 和 `g_fee_cache_index[]` +- `fee_cache_lookup(block_id)` 先对 `block_id` 做 hash,再做线性探测定位 slot,平均查找复杂度接近 O(1) +- `fee_get_status()`、`fee_core_read()`、恢复流程、checkpoint 导入导出,都会先走这层 cache,而不是每次从 flash 头扫到尾 + +对使用者的直接价值: + +- block 数量增大后,查找最新版本地址的开销更稳定 +- 读路径先定位地址,再按地址读 payload,不需要把历史 record 全部重扫一遍 + +#### 2.1.2 同步读,异步写,语义明确 + +- `fee_read()` 是同步接口,路径是 `fee_read()` -> `fee_sched_submit_read()` -> `fee_core_read()`,函数返回时结果已经在用户 buffer 里,或者已经明确失败 +- `fee_write()`、`fee_invalidate()`、`fee_rollback()` 是异步提交接口,先进入调度队列,真正执行靠 `fee_mainfunction()` 逐步推进 +- `fee_sched.c` 在提交写请求时会立刻把模块状态置成 `BUSY_INTERNAL/PENDING`,这表示“请求已受理”,不是“已经落盘” + +这意味着: + +- **立即读**:是的,当前实现支持同步读,调用返回即可拿结果 +- **立即写完成**:不是。当前实现支持“立即提交写请求”,但真正写完并持久化到 flash,要看 `fee_get_job_result()` 最终是否变成 `FEE_JOB_OK` + +这种设计的价值是: + +- 读请求延迟更确定 +- 写请求不会把业务线程长期卡死在擦写动作里 + +#### 2.1.3 当前副本 + 上一副本,支持回滚和容错读 + +- `fee_cache_entry_t` 同时保存 `cur_addr/prev_addr` 和 `cur_valid/prev_valid` +- 新数据写入成功后,旧版本会下沉成 `prev_*` +- `fee_core_read()` 先读 `cur_addr`;如果当前副本校验失败,且 `prev_valid` 还在,就会再尝试读上一份 +- `fee_core_rollback()` 不是简单改指针,而是把 `prev_addr` 对应的数据重新写成一条新的 current record + +对使用者的价值: + +- 可以显式调用 `fee_rollback()` 回到上一版 +- 当前副本损坏时,读路径还有机会从上一份恢复结果 +- 对掉电中断、坏 record、异常校验失败更稳健 + +#### 2.1.4 checkpoint 快速启动,boot-critical block 可提前可读 + +- `fee_init()` 内部不是一次性把所有 lane 全扫完才返回,而是先通过 `fee_recovery_step()` 尝试恢复 checkpoint 
+- 如果 checkpoint 能恢复,且所有 `boot_critical` block 都已经在 cache 里有有效 current copy,就先进入 `FEE_INIT_CKPT_READY` +- 后续剩余 tail 记录再由 `fee_mainfunction()` 在后台继续补扫,最终进入 `FEE_INIT_FULL_READY` + +这带来的好处是: + +- 系统启动时可以先把关键配置读出来,不必等所有历史 record 全恢复完 +- “快启动”和“全量恢复”被拆成两个阶段,启动时间更可控 + +这里要特别说明: + +- `CKPT_READY` 不等于“全部 block 都能读” +- 在这个阶段,只有 `boot_critical` 且确实已经恢复到 RAM cache 的 block 才允许读 + +#### 2.1.5 lane 分级隔离,快慢业务不混跑 + +- `fee_cfg.c` 里的每个 block 都带 `lane_type`,当前分成 `FAST`、`NORMAL`、`BULK` +- `fee_sched.c` 会把 `FAST` block 的请求放进 urgent queue,把 `NORMAL/BULK` 放进 normal queue +- `fee_recovery.c` 和 GC 上下文里,每个 lane 都独立维护自己的地址范围、`active/dst/spare sector`、`free_offset`、GC 状态和代际信息 + +这意味着: + +- 高频小块和低频大块不会完全共享同一条写入通道 +- `FAST` lane 请求可以优先于普通 lane 请求推进 +- GC、恢复、容量压力也是按 lane 分开处理,不是整片 flash 一锅炖 + +#### 2.1.6 checkpoint 与 GC 都在后台推进 + +- 写入后,`fee_core.c` 会按剩余空间计算 `gc_requested/gc_force` +- 同时还会按每个 lane 的脏 record 数和脏字节数,决定是否请求 checkpoint,或者直接强制 checkpoint +- `fee_mainfunction()` 会依次推进 `port -> recovery -> scheduler -> gc -> core(checkpoint flush)`,把后台维护动作拆成小步执行 + +这种机制的价值: + +- 正常读写请求和后台维护动作被解耦 +- checkpoint 不需要每写一笔都同步刷 +- 空间接近阈值时,又能自动把 GC 和 checkpoint 拉起来做自保护 + +#### 2.1.7 读路径带完整性校验,不是盲读 + +- `fee_core_read_payload()` 读取时会检查 record header、commit tail +- 如果 block 配置启用了 CRC,还会把 payload 读回 RAM 做 CRC 校验 +- 只有 header、tail、CRC 都通过,才认为本次读取成功 + +这带来的价值: + +- 不是“地址存在就算有效” +- 用户读到的数据是经过格式和完整性校验的 +- 一旦当前版本损坏,配合上一副本机制,还能继续尝试降级读取 + +#### 2.1.8 底层驱动统一抽象,QEMU/RAM mock 和真 flash 共用一套 FEE + +- 对 FEE 上层来说,只依赖 `fee_flash_driver_*` 这一组统一接口 +- `fee_port.c` 只是薄封装,默认提供 weak 的 RAM mock 实现,板级工程可以给出 strong definition 覆盖掉它 +- 所以上层 `fee_api.c / fee_core.c / fee_sched.c / fee_recovery.c` 不需要知道底层接的是 QEMU mock、RAM mock,还是真实 FLS 驱动 + +对接入工程的价值: + +- 先在 QEMU 或 RAM mock 上把 block 配置、恢复逻辑、GC 行为跑通 +- 再切到真实 flash driver 时,业务层 API 和 FEE 核心逻辑都不用重写 +- 驱动适配边界清晰,板级代码只关心 `read/write/erase/get_caps/mainfunction/status/job_result` + +## 3. 
公开 API 总览 + +### 3.1 功能接口 + +| 接口 | 作用 | 同步/异步 | 何时算完成 | +| --- | --- | --- | --- | +| `fee_init()` | 初始化 FEE | 半同步 | 函数返回后已到 `CKPT_READY` 或 `FULL_READY`;如需全量 ready 还要继续跑 `fee_mainfunction()` | +| `fee_read()` | 读取 block 数据 | 同步 | 函数返回即完成 | +| `fee_write()` | 写入 block 数据 | 异步提交 | 要靠 `fee_mainfunction()` 推进,直到 `fee_get_job_result() != PENDING` | +| `fee_invalidate()` | 失效一个 block | 异步提交 | 同上 | +| `fee_rollback()` | 回滚到上一个版本 | 异步提交 | 同上 | +| `fee_get_status()` | 查询某个 block 的逻辑状态 | 同步 | 函数返回即完成 | +| `fee_mainfunction()` | 驱动内部状态机 | 轮询入口 | 需要周期调用 | + +### 3.2 状态查询接口 + +| 接口 | 返回什么 | +| --- | --- | +| `fee_get_memif_status()` | FEE 模块当前总体状态:未初始化 / 空闲 / 忙 | +| `fee_get_job_result()` | 最近一次异步 job 的全局结果 | +| `fee_get_init_state()` | 初始化阶段当前处于哪个状态 | + +## 4. 返回码和状态码 + +### 4.1 `fee_ret_t` + +| 值 | 含义 | 典型场景 | +| --- | --- | --- | +| `FEE_E_OK` | 成功 | 提交成功 / 读取成功 | +| `FEE_E_NOT_OK` | 失败 | block 不存在、数据无效、底层失败 | +| `FEE_E_BUSY` | 当前不能做 | 正在初始化、GC 阻塞、队列满、空间不足待回收 | +| `FEE_E_UNINIT` | 未初始化 | 没先调 `fee_init()` | +| `FEE_E_PARAM` | 参数错误 | 空指针、非法长度、非法 block id | + +### 4.2 `fee_status_t` + +| 值 | 含义 | +| --- | --- | +| `FEE_STATUS_UNINIT` | 模块未初始化 | +| `FEE_STATUS_IDLE` | 空闲,没有待处理工作 | +| `FEE_STATUS_BUSY` | 底层 driver busy | +| `FEE_STATUS_BUSY_INTERNAL` | FEE 内部仍有调度、恢复、checkpoint、GC 等工作 | + +### 4.3 `fee_job_result_t` + +| 值 | 含义 | +| --- | --- | +| `FEE_JOB_NONE` | 还没有 job | +| `FEE_JOB_OK` | 最近一次 job 成功结束 | +| `FEE_JOB_PENDING` | 最近一次 job 仍在处理中 | +| `FEE_JOB_FAILED` | 最近一次 job 失败 | +| `FEE_JOB_CANCELED` | 预留 | +| `FEE_JOB_INCONSISTENT` | 预留 | + +### 4.4 `fee_block_status_t` + +| 值 | 含义 | +| --- | --- | +| `FEE_BLOCK_STATUS_EMPTY` | 该 block 还没有有效数据 | +| `FEE_BLOCK_STATUS_VALID` | 当前 block 有有效数据 | +| `FEE_BLOCK_STATUS_INVALIDATED` | block 被失效 | +| `FEE_BLOCK_STATUS_INCONSISTENT` | 预留 | + +## 5. 每个 API 到底该怎么用 + +### 5.1 `fee_init` + +```c +fee_ret_t fee_init(void); +``` + +用途: + +- 初始化 FEE 内部上下文 +- 初始化底层 flash 适配层 +- 尝试恢复 checkpoint +- 扫描必要的 flash 结构 + +使用建议: + +1. 系统启动时调用一次 +2. 
如果返回不是 `FEE_E_OK`,初始化失败 +3. 如果返回 `FEE_E_OK`,再通过 `fee_get_init_state()` 判断是否已经 `FULL_READY` + +要点: + +- `fee_init()` 返回 `FEE_E_OK`,**不等于所有 block 都完全可读** +- 它只保证初始化至少已经推进到: + - `FEE_INIT_CKPT_READY`,或者 + - `FEE_INIT_FULL_READY` + +状态解释: + +| 状态 | 对用户意味着什么 | +| --- | --- | +| `FEE_INIT_CKPT_READY` | checkpoint 已恢复,部分 boot-critical block 可读,后台仍需继续扫描 | +| `FEE_INIT_FULL_READY` | 全部恢复完成,可正常读写所有 block | +| `FEE_INIT_FAILED` | 初始化失败 | + +推荐写法: + +```c +if (fee_init() != FEE_E_OK) +{ + /* init failed */ +} + +while (fee_get_init_state() != FEE_INIT_FULL_READY) +{ + fee_mainfunction(); +} +``` + +如果你的系统希望尽快启动,也可以在 `CKPT_READY` 先放行业务,再后台继续推进: + +```c +if (fee_init() == FEE_E_OK) +{ + if (fee_get_init_state() == FEE_INIT_CKPT_READY) + { + /* 先只使用 boot-critical block */ + } +} +``` + +### 5.2 `fee_read` + +```c +fee_ret_t fee_read(uint16_t block_id, uint16_t offset, uint8_t *dst, uint16_t len); +``` + +用途: + +- 从某个 block 读取数据 + +特点: + +- **同步接口** +- 函数返回时,`dst` 已经拿到结果,或者已经明确失败 + +参数说明: + +| 参数 | 含义 | +| --- | --- | +| `block_id` | 要读取的 block 号 | +| `offset` | block 内偏移 | +| `dst` | 用户缓冲区 | +| `len` | 要读的长度 | + +返回结果判断: + +- `FEE_E_OK`:读取成功 +- `FEE_E_BUSY`:当前还没恢复到可读阶段,或者 block 当前不可读 +- `FEE_E_NOT_OK`:block 没有有效数据,或数据校验失败 +- `FEE_E_PARAM`:参数错误 + +例子: + +```c +uint8_t data[32]; +fee_ret_t ret; + +ret = fee_read(1U, 0U, data, sizeof(data)); +if (ret == FEE_E_OK) +{ + /* data 有效 */ +} +``` + +注意: + +- `fee_read()` 不走异步队列 +- 但它仍依赖当前恢复状态 +- 如果 `fee_get_init_state()` 还没到允许读取该 block 的阶段,可能返回 `FEE_E_BUSY` + +### 5.3 `fee_write` + +```c +fee_ret_t fee_write(uint16_t block_id, const uint8_t *src, uint16_t len); +``` + +用途: + +- 把一份新的 block 数据提交给 FEE + +特点: + +- **异步提交接口** +- `fee_write()` 返回 `FEE_E_OK` 只代表“已成功入队或成功接收” +- 不代表数据已经真正落到 flash 并提交完成 + +正确用法: + +```c +fee_ret_t ret; + +ret = fee_write(1U, data, sizeof(data)); +if (ret != FEE_E_OK) +{ + /* 提交失败 */ +} + +while (fee_get_job_result() == FEE_JOB_PENDING) +{ + fee_mainfunction(); +} + +if (fee_get_job_result() == FEE_JOB_OK) +{ + /* 
本次写入真正完成 */ +} +``` + +写完成后,你可以再用 `fee_read()` 回读验证。 + +### 5.4 `fee_invalidate` + +```c +fee_ret_t fee_invalidate(uint16_t block_id); +``` + +用途: + +- 标记一个 block 为失效 + +特点: + +- 异步提交 +- 完成判断方式和 `fee_write()` 相同 + +适用场景: + +- 某 block 不再有效,需要让后续 `fee_read()` 返回无效 + +### 5.5 `fee_rollback` + +```c +fee_ret_t fee_rollback(uint16_t block_id); +``` + +用途: + +- 回滚到该 block 的上一个有效版本 + +特点: + +- 异步提交 +- 只有 block 配置了 `keep_prev_copy=1` 且 `allow_rollback=1` 才有意义 + +完成后建议: + +- 用 `fee_read()` 重新回读 +- 用业务层校验回滚后的内容是否正确 + +### 5.6 `fee_get_status` + +```c +fee_ret_t fee_get_status(uint16_t block_id, fee_block_status_t *status); +``` + +用途: + +- 查询某个 block 当前的逻辑状态 + +它回答的不是“模块忙不忙”,而是: + +- 这个 block 有没有数据 +- 这个 block 是否已失效 + +例子: + +```c +fee_block_status_t status; + +if (fee_get_status(1U, &status) == FEE_E_OK) +{ + if (status == FEE_BLOCK_STATUS_VALID) + { + /* block 有效 */ + } +} +``` + +### 5.7 `fee_mainfunction` + +```c +void fee_mainfunction(void); +``` + +这是最容易被忽略,但又最关键的接口。 + +作用: + +- 推进恢复流程 +- 推进写队列 +- 推进 rollback / invalidate +- 推进 GC +- 推进 checkpoint flush +- 推进底层 flash driver 状态机 + +如果你的系统只调了 `fee_write()`,但没有周期调 `fee_mainfunction()`,那么: + +- job 会一直停在 `FEE_JOB_PENDING` +- `fee_get_memif_status()` 可能一直是 busy + +推荐: + +- 放到周期任务中调用 +- 或者在阻塞等待循环里主动调用 + +例如: + +```c +void app_10ms_task(void) +{ + fee_mainfunction(); +} +``` + +## 6. 如何获取“最终结果” + +这是使用 FEE 时最重要的部分。 + +### 6.1 初始化结果怎么判断 + +判断顺序: + +1. 看 `fee_init()` 返回值 +2. 
再看 `fee_get_init_state()` + +推荐规则: + +- `fee_init() != FEE_E_OK`:初始化失败 +- `fee_init() == FEE_E_OK && fee_get_init_state() == FEE_INIT_FULL_READY`:完全可用 +- `fee_init() == FEE_E_OK && fee_get_init_state() == FEE_INIT_CKPT_READY`:部分可用,继续调用 `fee_mainfunction()` + +### 6.2 读取结果怎么判断 + +读取是同步的,直接看 `fee_read()` 的返回值。 + +规则: + +- `FEE_E_OK`:`dst` 有效 +- 其他值:本次读取失败 + +### 6.3 写入/失效/回滚结果怎么判断 + +这三个是异步接口,要分两步判断: + +第一步,看“提交成功没有”: + +```c +ret = fee_write(...); +if (ret != FEE_E_OK) +{ + /* 连提交都没成功 */ +} +``` + +第二步,看“后台执行最终成功没有”: + +```c +while (fee_get_job_result() == FEE_JOB_PENDING) +{ + fee_mainfunction(); +} + +if (fee_get_job_result() == FEE_JOB_OK) +{ + /* 真正完成 */ +} +else +{ + /* 真正失败 */ +} +``` + +### 6.4 `fee_get_job_result()` 是不是“按 block 分开”的 + +不是。 + +当前 `fee_get_job_result()` 是 **全局最近 job 结果**,不是 per-block 结果。 + +这意味着: + +- 对简单业务,建议串行使用:上一条 write 完成后,再提下一条 +- 对复杂业务,如果要并发排队多个请求,需要自己在业务层维护请求上下文 + +## 7. 推荐调用范式 + +## 7.1 最简单、最稳妥的串行模式 + +这是最推荐新项目采用的模式。 + +```c +static fee_ret_t app_fee_wait_job_done(void) +{ + while (fee_get_job_result() == FEE_JOB_PENDING) + { + fee_mainfunction(); + } + + return (fee_get_job_result() == FEE_JOB_OK) ? 
FEE_E_OK : FEE_E_NOT_OK; +} + +fee_ret_t app_fee_write_block_sync(uint16_t block_id, const uint8_t *src, uint16_t len) +{ + fee_ret_t ret; + + ret = fee_write(block_id, src, len); + if (ret != FEE_E_OK) + { + return ret; + } + + return app_fee_wait_job_done(); +} +``` + +优点: + +- 最容易理解 +- 调试最简单 +- 不容易误用全局 `job_result` + +### 7.2 周期任务模式 + +如果系统里已经有 10ms / 5ms 周期任务,可以这样用: + +```c +void app_periodic_10ms(void) +{ + fee_mainfunction(); +} +``` + +业务发请求时: + +```c +if (fee_write(1U, data, len) == FEE_E_OK) +{ + /* 返回后不阻塞,等待后台完成 */ +} +``` + +之后由其他逻辑轮询: + +```c +if (fee_get_job_result() == FEE_JOB_OK) +{ + /* 最近 job 完成 */ +} +``` + +### 7.3 恢复期读 boot-critical block + +如果你有“系统先启动关键配置,其他数据稍后恢复”的需求,可以利用: + +- `FEE_INIT_CKPT_READY` +- block 配置里的 `boot_critical` + +逻辑如下: + +```c +if (fee_init() == FEE_E_OK) +{ + if (fee_get_init_state() == FEE_INIT_CKPT_READY) + { + /* 先读取 boot-critical block */ + } +} +``` + +注意: + +- 不是所有 block 在 `CKPT_READY` 都可读 +- 非 boot-critical block 可能仍返回 `FEE_E_BUSY` + +## 8. 队列和并发语义 + +当前实现里: + +- fast lane 请求进入 urgent queue +- normal/bulk 请求进入 normal queue +- 每个队列容量是 `FEE_CFG_MAX_PENDING_REQUESTS = 8` + +这意味着: + +- `fee_write()` / `fee_invalidate()` / `fee_rollback()` 连续提交太多时,可能返回 `FEE_E_BUSY` +- 读取不走队列 + +虽然实现支持排队,但对业务使用者仍建议: + +- 优先采用“提交一个,等它完成,再提下一个”的串行模式 + +原因: + +- `fee_get_job_result()` 是全局结果 +- 串行模式更容易和业务动作一一对应 + +## 9. 
最小上手步骤 + +### 9.1 打开组件 + +启用: + +- `COMPONENT_USING_CUSTOM_FEE` + +见 [Kconfig](C:/sourcedata/rt-thread/components/custom_fee/Kconfig)。 + +### 9.2 包含头文件 + +```c +#include "fee_api.h" +``` + +### 9.3 系统启动时初始化 + +```c +void app_fee_init(void) +{ + if (fee_init() != FEE_E_OK) + { + /* error */ + return; + } + + while (fee_get_init_state() != FEE_INIT_FULL_READY) + { + fee_mainfunction(); + } +} +``` + +### 9.4 周期调用 `fee_mainfunction` + +```c +void app_periodic_10ms(void) +{ + fee_mainfunction(); +} +``` + +### 9.5 写入一个 block + +```c +uint8_t cfg[32]; + +if (fee_write(1U, cfg, sizeof(cfg)) == FEE_E_OK) +{ + while (fee_get_job_result() == FEE_JOB_PENDING) + { + fee_mainfunction(); + } +} +``` + +### 9.6 读取一个 block + +```c +uint8_t cfg[32]; + +if (fee_read(1U, 0U, cfg, sizeof(cfg)) == FEE_E_OK) +{ + /* cfg valid */ +} +``` + +## 10. 用户最常见的误区 + +### 10.1 误区一:`fee_write()` 返回成功,就等于已经写完 + +不是。 + +- `fee_write()` 成功,只是“请求已被接受” +- 真正完成,要等 `fee_get_job_result()` 变成 `FEE_JOB_OK` + +### 10.2 误区二:不调用 `fee_mainfunction()` + +如果不调用: + +- 初始化不会继续推进 +- 异步 job 不会完成 +- GC / checkpoint 不会推进 + +### 10.3 误区三:把 `fee_get_status()` 当成模块状态 + +不是。 + +- `fee_get_status(block_id, &status)` 查询的是 **某个 block** +- `fee_get_memif_status()` 查询的是 **整个 FEE 模块** + +### 10.4 误区四:把 `fee_get_job_result()` 当成每个 block 独立的结果 + +不是。 + +它是全局最近 job 的结果。 + +### 10.5 误区五:业务层直接调用 `fee_port_*` + +不建议。 + +业务层只应该通过 [fee_api.h](C:/sourcedata/rt-thread/components/custom_fee/fee_api.h) 使用 FEE。 + +## 11. 当前默认 block 配置示例 + +当前示例配置在 [fee_cfg.c](C:/sourcedata/rt-thread/components/custom_fee/fee_cfg.c): + +| block_id | max_len | lane | rollback | boot_critical | +| --- | --- | --- | --- | --- | +| `1` | `32` | fast | 是 | 是 | +| `2` | `128` | normal | 是 | 是 | +| `3` | `256` | normal | 是 | 否 | +| `4` | `768` | bulk | 否 | 否 | + +如果你要上项目,通常要做的不是改 API,而是改这张 block 表。 + +## 12. 
如何适配到自己的工程 + +适配分成两部分: + +- **业务块配置** +- **底层 flash 驱动适配** + +### 12.1 配置业务 block + +当前 block 表在 [fee_cfg.c](C:/sourcedata/rt-thread/components/custom_fee/fee_cfg.c) 的 `g_fee_block_table`。 + +你通常需要为每个业务块确定: + +- `block_id` +- `max_len` +- `block_class` +- `lane_type` +- `keep_prev_copy` +- `allow_rollback` +- `crc_mode` +- `record_align` +- `boot_critical` + +建议规则: + +- 高频小数据:优先放 `fast` +- 普通配置数据:优先放 `normal` +- 大块、低频数据:优先放 `bulk` + +### 12.2 适配底层 flash driver + +板级或驱动层要实现的是 [fee_flash_drv.h](C:/sourcedata/rt-thread/components/custom_fee/fee_flash_drv.h) 中这些符号: + +```c +fee_ret_t fee_flash_driver_init(void); +fee_ret_t fee_flash_driver_get_caps(fee_flash_caps_t *caps); +fee_ret_t fee_flash_driver_read(uint32_t addr, uint8_t *dst, uint32_t len); +fee_ret_t fee_flash_driver_write(uint32_t addr, const uint8_t *src, uint32_t len); +fee_ret_t fee_flash_driver_erase(uint32_t addr, uint32_t len); +void fee_flash_driver_mainfunction(void); +fee_status_t fee_flash_driver_get_status(void); +fee_job_result_t fee_flash_driver_get_job_result(void); +``` + +说明: + +- 这些接口应由板级提供 **strong definition** +- 当前 [fee_port.c](C:/sourcedata/rt-thread/components/custom_fee/fee_port.c) 里自带的是默认 `weak` RAM mock 实现 +- 在真机工程中,你应替换成实际 flash 驱动 + +### 12.3 底层能力参数要返回什么 + +`fee_flash_driver_get_caps()` 至少要正确返回: + +- `total_size` +- `read_unit` +- `program_unit` +- `erase_unit` +- `preferred_chunk` + +FEE 会根据这些参数做: + +- 参数校验 +- 对齐检查 +- sector 布局 +- record span 校验 + +### 12.4 周期调度如何接入 + +把 `fee_mainfunction()` 放进: + +- OS 周期任务 +- bare-metal 主循环 +- 后台 worker + +都可以。 + +但必须满足: + +- 初始化阶段能持续推进 +- 异步 job 能持续推进 + +### 12.5 Mock 模式如何上手 + +如果只是想在 QEMU 或 PC 仿真中先跑通,不需要先接真实 flash driver。 + +当前默认行为: + +- `fee_port.c` 已自带 RAM mock flash +- 只要打开组件,就能先跑基本逻辑和测试 + +这适合: + +- 先验证 block 表是否合理 +- 先验证上层使用方式 +- 先做恢复 / GC / checkpoint 联调 + +## 13. 推荐的接入顺序 + +对新项目建议按下面顺序推进: + +1. 先打开 `custom_fee` 组件,用默认 mock backend 跑通基础用例 +2. 按业务需求整理 block 表 +3. 在应用层写最小读写封装 +4. 确保 `fee_mainfunction()` 已有稳定周期调用点 +5. 再接入真实 flash driver +6. 
最后做真机掉电恢复、GC、耐久性验证 + +## 14. 一个完整的最小示例 + +```c +#include "fee_api.h" + +static fee_ret_t app_fee_wait_ready(void) +{ + while (fee_get_init_state() != FEE_INIT_FULL_READY) + { + if (fee_get_init_state() == FEE_INIT_FAILED) + { + return FEE_E_NOT_OK; + } + fee_mainfunction(); + } + + return FEE_E_OK; +} + +static fee_ret_t app_fee_wait_job_done(void) +{ + while (fee_get_job_result() == FEE_JOB_PENDING) + { + fee_mainfunction(); + } + + return (fee_get_job_result() == FEE_JOB_OK) ? FEE_E_OK : FEE_E_NOT_OK; +} + +fee_ret_t app_fee_startup(void) +{ + fee_ret_t ret; + + ret = fee_init(); + if (ret != FEE_E_OK) + { + return ret; + } + + return app_fee_wait_ready(); +} + +fee_ret_t app_fee_save_cfg(uint16_t block_id, const uint8_t *data, uint16_t len) +{ + fee_ret_t ret; + + ret = fee_write(block_id, data, len); + if (ret != FEE_E_OK) + { + return ret; + } + + return app_fee_wait_job_done(); +} + +fee_ret_t app_fee_load_cfg(uint16_t block_id, uint8_t *data, uint16_t len) +{ + return fee_read(block_id, 0U, data, len); +} +``` + +## 15. 一句话总结 + +如果你只记三件事,记这三条: + +1. 用户接口只看 [fee_api.h](C:/sourcedata/rt-thread/components/custom_fee/fee_api.h) +2. `fee_read()` 是同步的,`fee_write()/invalidate()/rollback()` 是异步的 +3. 想让 FEE 真正工作起来,必须持续调用 `fee_mainfunction()` diff --git a/components/custom_fee/doc/README.md b/components/custom_fee/doc/README.md new file mode 100644 index 00000000000..8be62a15313 --- /dev/null +++ b/components/custom_fee/doc/README.md @@ -0,0 +1,16 @@ +# custom_fee + +This directory contains the first code skeleton for the redesigned FEE module. + +Current scope: + +- public API and internal type definitions +- on-flash helper interfaces +- unified fee_port entry with overridable flash driver hooks +- scheduler / recovery / GC / lane module skeletons +- default RAM-backed flash backend for bring-up and QEMU tests +- public user API guide: API列表.md +- diagnostic test guide: fee_diag_test.md + +This is not a feature-complete implementation yet. 
It is intended to freeze module +boundaries and enable incremental bring-up. diff --git a/components/custom_fee/doc/fee_boot_recovery.md b/components/custom_fee/doc/fee_boot_recovery.md new file mode 100644 index 00000000000..e5ef1b06833 --- /dev/null +++ b/components/custom_fee/doc/fee_boot_recovery.md @@ -0,0 +1,197 @@ +# FEE Boot Recovery 设计 + +## 1. 目的 + +本文档冻结 FEE 的启动恢复策略,重点回答三个问题: + +1. 上电后如何尽快恢复 RAM cache +2. 何时允许首个 `read block` +3. checkpoint 失效、GC 中断或格式异常时如何降级 + +## 2. 设计目标 + +- 在 checkpoint 有效时,启动时间与 tail 大小相关,而不是与历史记录总量相关 +- `boot_critical` block 尽早可读 +- 恢复路径对 GC 中断和 checkpoint 部分损坏有确定裁决 +- 初始化状态对上层可见,不依赖隐式假设 + +## 3. 启动状态机 + +建议显式区分以下初始化状态: + +| 状态 | 说明 | 是否允许读 | +| --- | --- | --- | +| `INIT_RESET` | 模块刚上电,未访问 flash | 否 | +| `INIT_META_SCAN` | 正在读取 `META lane` 和双 checkpoint | 否 | +| `INIT_CKPT_READY` | checkpoint 已恢复到 RAM cache | 仅 `boot_critical` | +| `INIT_TAIL_SCAN` | 正在补扫 checkpoint 之后的 tail | `boot_critical` 与已验证 block | +| `INIT_FULL_READY` | 恢复完成 | 是 | +| `INIT_DEGRADED` | checkpoint 全损或恢复冲突,进入全量恢复 | 受限 | +| `INIT_FAILED` | 格式错误或介质错误,无法继续 | 否 | + +## 4. 首个可读时刻 + +### 4.1 设计结论 + +为了满足“上电快速读 block”,推荐把“可读”拆成两级: + +1. `CKPT-ready read` + 仅允许读取 `boot_critical = 1` 且 checkpoint 中存在有效映射的 block +2. `full-ready read` + 允许读取所有已配置 block + +### 4.2 规则 + +只有同时满足以下条件,模块才能进入 `INIT_CKPT_READY`: + +1. 至少 1 份 checkpoint 有效 +2. lane 角色表恢复成功 +3. `boot_critical` block 的 `cur_addr` 都已恢复到 RAM cache +4. 没有检测到需要立即中止的格式冲突 + +若某个 `boot_critical` block 在 checkpoint 中缺失,则不能宣称 `CKPT-ready`,必须继续进入 tail scan 或降级。 + +## 5. checkpoint 恢复流程 + +### 5.1 checkpoint 选择规则 + +双 checkpoint 的选择顺序如下: + +1. 过滤掉 CRC 错误或 `commit_marker` 缺失的副本 +2. 在剩余副本中选择 `ckpt_generation` 最大者 +3. 若代数相同,选择 `chunk_count` 完整者 +4. 若仍冲突,进入 `INIT_DEGRADED` + +### 5.2 从 checkpoint 恢复 RAM cache + +恢复内容至少包括: + +- 每个 lane 的 `active/dst/spare` +- 每个 lane 的 `free_offset` +- 每个 block 的 `cur_addr` +- 每个 block 的 `prev_addr` +- 每个 block 的 `seq` + +恢复后立即做以下校验: + +1. 地址必须落在对应 lane 范围内 +2. `cur_addr` 与 `prev_addr` 不得相同 +3. 
`seq(cur) >= seq(prev)` +4. `active/dst/spare` 不得重复指向同一扇区 + +## 6. tail scan 设计 + +### 6.1 目标 + +tail scan 只扫描 checkpoint 之后新增的数据,不回扫整个历史区域。 + +### 6.2 上界约束 + +为了让启动时间可证,建议增加以下配置: + +| 配置项 | 说明 | +| --- | --- | +| `max_uncheckpointed_records_per_lane` | 每个 lane 未 checkpoint 的最大 record 数 | +| `max_uncheckpointed_bytes_per_lane` | 每个 lane 未 checkpoint 的最大累计字节 | +| `max_tail_scan_pages_per_lane` | 每个 lane 启动阶段最多扫描页数 | +| `ckpt_force_flush_threshold` | 触发强制 checkpoint 的上界 | + +建议满足: + +```text +tail_scan_bytes(lane) <= max_uncheckpointed_bytes_per_lane +tail_scan_pages(lane) <= max_tail_scan_pages_per_lane +``` + +当任一 lane 即将突破上界时,运行期必须优先完成 checkpoint,而不是继续无限推迟。 + +### 6.3 tail scan 行为 + +1. 从 checkpoint 记录的 `free_offset` 开始向后扫描 +2. 遇到合法已提交 record 则更新 cache +3. 遇到未提交 record 则停止该连续区扫描 +4. 遇到 header 损坏则按 `align_unit` 前进并统计异常 +5. 达到 `data_end` 则结束该 lane 扫描 + +## 7. GC 中断恢复 + +### 7.1 裁决原则 + +GC 中断恢复不能只依赖 RAM,必须使用 `generation + sector_state + committed record` 裁决。 + +### 7.2 推荐裁决表 + +| 组合 | 结论 | +| --- | --- | +| `ACTIVE` 存在,`GC_DST` 不存在 | 继续使用旧 `ACTIVE` | +| `ACTIVE` 存在,`GC_DST` 存在但未提交 header | 丢弃 `GC_DST` | +| `ACTIVE` 存在,`GC_DST` 已提交且数据不完整 | 继续使用旧 `ACTIVE`,保留 `GC_DST` 待清理 | +| `ACTIVE` 存在,`GC_DST` 已提交且 live block 已复制完成 | 提升 `GC_DST` 为 `ACTIVE`,旧扇区转 `OLD_PENDING_ERASE` | +| 两个 `ACTIVE` 并存 | 选择 `generation` 更新且 live map 更完整者,另一方转降级恢复 | + +### 7.3 stale checkpoint + +checkpoint 可能落后于真实 flash 状态,因此恢复必须允许: + +1. 先按 checkpoint 建立初始 cache +2. 再通过 tail scan 和 sector state 纠偏 +3. 若发现 checkpoint 的 `active/dst/spare` 与 flash header 冲突,以合法 sector header 为准 + +## 8. 
读接口语义 + +### 8.1 `fee_read` 在初始化阶段的行为 + +| 状态 | 行为 | +| --- | --- | +| `INIT_RESET/INIT_META_SCAN` | 返回 `BUSY` | +| `INIT_CKPT_READY` | 仅允许读取 `boot_critical` | +| `INIT_TAIL_SCAN` | 允许读取 `boot_critical` 和已验证 block | +| `INIT_FULL_READY` | 正常读 | +| `INIT_DEGRADED` | 按降级策略决定,默认只读已验证 block | +| `INIT_FAILED` | 返回 `E_NOT_OK` | + +### 8.2 状态查询接口 + +建议提供: + +```c +typedef enum +{ + FEE_INIT_RESET, + FEE_INIT_META_SCAN, + FEE_INIT_CKPT_READY, + FEE_INIT_TAIL_SCAN, + FEE_INIT_FULL_READY, + FEE_INIT_DEGRADED, + FEE_INIT_FAILED +} fee_init_state_t; + +fee_init_state_t fee_get_init_state(void); +``` + +## 9. 恢复实现建议 + +### 9.1 建议模块分工 + +- `fee_recovery.c`: 启动状态机、checkpoint 选择、tail scan、GC 恢复 +- `fee_ckpt.c`: checkpoint 解析和重建 +- `fee_cache.c`: cache 导入与冲突裁决 + +### 9.2 推荐接口 + +```c +Std_ReturnType fee_recovery_start(void); +Std_ReturnType fee_recovery_step(void); +boolean fee_recovery_can_read_block(uint16 block_id); +boolean fee_recovery_is_full_ready(void); +``` + +## 10. 测试关注点 + +- checkpoint A 正确、checkpoint B 损坏 +- checkpoint A/B 都正确,但 B 更新 +- checkpoint A/B 都损坏,进入 `INIT_DEGRADED` +- `GC_SWITCH_HEADER` 期间掉电 +- stale checkpoint 指向旧 `ACTIVE` +- `boot_critical` block 在 `INIT_CKPT_READY` 即可读 +- tail 超过上界时强制 checkpoint 的行为 diff --git a/components/custom_fee/doc/fee_cache_checkpoint.md b/components/custom_fee/doc/fee_cache_checkpoint.md new file mode 100644 index 00000000000..c5ae81b3df5 --- /dev/null +++ b/components/custom_fee/doc/fee_cache_checkpoint.md @@ -0,0 +1,194 @@ +# FEE Cache And Checkpoint 设计 + +## 1. 目的 + +本文档冻结 RAM cache 与 checkpoint 的数据结构、更新时机和容量边界。 + +关注点: + +- RAM cache 如何支持 O(1) block 查找 +- checkpoint 如何降低启动扫描成本 +- 如何限制未 checkpoint tail 的上界 + +## 2. 
RAM Cache 模型 + +### 2.1 block cache + +建议延续总设中的双副本模型: + +```c +typedef struct +{ + uint8 lane; + uint32 cur_addr; + uint32 prev_addr; + uint32 seq; + uint16 len; + uint8 cur_valid; + uint8 prev_valid; + uint8 cur_sector; + uint8 prev_sector; +} fee_cache_entry_t; +``` + +### 2.2 lane context + +```c +typedef struct +{ + uint8 active_sector; + uint8 dst_sector; + uint8 spare_sector; + uint16 gc_cursor; + uint8 gc_state; + uint8 gc_requested; + uint32 free_offset; + uint32 gc_start_threshold; + uint32 gc_force_threshold; + uint32 dirty_record_count; + uint32 dirty_bytes; +} fee_lane_ctx_t; +``` + +### 2.3 super context + +```c +typedef struct +{ + fee_lane_ctx_t fast; + fee_lane_ctx_t normal; + fee_lane_ctx_t bulk; + uint32 checkpoint_generation; + uint8 checkpoint_dirty; + uint8 init_state; +} fee_super_ctx_t; +``` + +## 3. cache 更新规则 + +1. 新 record 未提交前,不更新 cache +2. `CommitTail` 提交成功后: + - `prev = cur` + - `cur = new_record` +3. `TOMBSTONE` 提交后: + - `cur_valid = 0` + - `prev_valid` 按配置保留或清除 +4. `rollback` 成功后: + - `cur = copied_prev_record` + - `prev = old_cur` + +## 4. checkpoint 内容 + +### 4.1 必选内容 + +- `checkpoint_generation` +- 每个 lane 的 `active/dst/spare` +- 每个 lane 的 `free_offset` +- 每个 lane 的 `dirty_record_count` +- 每个 block 的 `cur_addr` +- 每个 block 的 `prev_addr` +- 每个 block 的 `seq` + +### 4.2 可选内容 + +- `boot_critical` bitmap +- 统计信息摘要 +- checkpoint 创建时间戳 + +## 5. checkpoint 刷写时机 + +### 5.1 背景触发 + +满足任一条件时请求后台 checkpoint: + +1. lane 完成一次 GC 切换 +2. `dirty_record_count >= ckpt_bg_records` +3. `dirty_bytes >= ckpt_bg_bytes` +4. `boot_critical` block 更新 + +### 5.2 强制触发 + +满足任一条件时必须优先完成 checkpoint: + +1. `dirty_record_count >= ckpt_force_records` +2. `dirty_bytes >= ckpt_force_bytes` +3. 即将超过 `max_uncheckpointed_records_per_lane` +4. 即将超过 `max_uncheckpointed_bytes_per_lane` + +## 6. 
tail 上界 + +### 6.1 设计目标 + +checkpoint 的价值不只是“平均更快”,而是要把启动 tail 扫描控制在明确上界内。 + +### 6.2 约束公式 + +对每个 lane 建议维护: + +```text +dirty_record_count(lane) <= max_uncheckpointed_records_per_lane +dirty_bytes(lane) <= max_uncheckpointed_bytes_per_lane +tail_scan_pages(lane) <= max_tail_scan_pages_per_lane +``` + +推荐关系: + +```text +ckpt_force_records <= max_uncheckpointed_records_per_lane +ckpt_force_bytes <= max_uncheckpointed_bytes_per_lane +``` + +### 6.3 设计含义 + +这样启动时就可以给出明确预算: + +```text +T_boot_tail_scan <= Σ lane_scan_time(max_tail_scan_pages_per_lane) +``` + +## 7. checkpoint 写入流程 + +### 7.1 chunk 化 + +当 `BLOCK_COUNT` 较大时,允许拆成多个 `CKPT_CHUNK` record。 + +### 7.2 提交流程 + +1. 写 checkpoint header / chunk metadata +2. 分块写 `BlockMapEntry[]` +3. 每个 chunk 独立提交 +4. 最后一个 chunk 提交后,视为新 checkpoint 生效 + +### 7.3 生效语义 + +新 checkpoint 生效后: + +- 旧 checkpoint 仍保留,直到下一次成功轮换 +- RAM 中 `checkpoint_generation` 递增 +- `dirty_record_count` / `dirty_bytes` 清零 + +## 8. 建议实现 + +### 8.1 建议模块 + +- `fee_cache.c`: cache 查找、更新、冲突解决 +- `fee_ckpt.c`: checkpoint 组包、写入、读取 + +### 8.2 建议接口 + +```c +void fee_cache_commit_write(uint16 block_id, uint32 new_addr, uint16 len); +void fee_cache_commit_tombstone(uint16 block_id, uint32 new_addr); +void fee_cache_commit_rollback(uint16 block_id, uint32 new_addr); +boolean fee_ckpt_due_background(void); +boolean fee_ckpt_due_force(void); +Std_ReturnType fee_ckpt_flush_step(void); +``` + +## 9. 测试关注点 + +- 多次写入后 `prev` 指针正确轮转 +- `rollback` 后 `cur/prev` 关系正确 +- checkpoint chunk 化写入中断 +- `dirty_bytes` 达到上界时强制 checkpoint +- 启动时 tail 扫描页数不超过配置预算 diff --git a/components/custom_fee/doc/fee_cfg_rules.md b/components/custom_fee/doc/fee_cfg_rules.md new file mode 100644 index 00000000000..7ef9192f40b --- /dev/null +++ b/components/custom_fee/doc/fee_cfg_rules.md @@ -0,0 +1,147 @@ +# FEE Configuration Rules 设计 + +## 1. 目的 + +本文档冻结 block 配置表、lane 映射、容量规划和编译期校验规则。 + +编码目标是: + +- 把错误配置挡在编译期 +- 让 `FAST/NORMAL/BULK/META` 的职责边界稳定 +- 让容量与启动时间预算可计算 + +## 2. 
block 配置模型 + +建议配置结构如下: + +```c +typedef struct +{ + uint16 block_id; + uint16 max_len; + uint8 block_class; + uint8 lane_type; + uint8 endurance_class; + uint8 keep_prev_copy; + uint8 allow_rollback; + uint8 crc_mode; + uint16 record_align; + uint8 boot_critical; +} fee_block_cfg_t; +``` + +## 3. block class 规则 + +| class | 规则 | +| --- | --- | +| `FAST` | immediate 语义;优先单 record / 单 wordline | +| `NORMAL` | 常规配置和状态数据 | +| `BULK` | 大块或低频大块;不得污染 `FAST/NORMAL` | +| `META` | 仅给 checkpoint / super state 使用 | + +## 4. lane 映射规则 + +### 4.1 必选规则 + +1. `FAST` 块只能映射到 `FAST lane` +2. `NORMAL` 块只能映射到 `NORMAL lane` +3. `BULK` 块只能映射到 `BULK lane` +4. 业务 block 不能映射到 `META lane` + +### 4.2 immediate 映射 + +若上层配置 `FeeImmediateData = TRUE`,内部应自动映射为: + +- `block_class = FAST` +- `lane_type = FAST` + +## 5. 大小与对齐规则 + +### 5.1 记录跨度 + +```text +record_span(i) = align(record_header + max_len(i) + commit_tail, record_align(i)) +``` + +### 5.2 推荐分类 + +| 类别 | payload 大小建议 | 规则 | +| --- | --- | --- | +| `small` | `<= 448 B` | 尽量单 wordline 提交 | +| `medium` | `449 B ~ 2048 B` | 允许多 wordline | +| `large` | `> 2048 B` | 默认归 `BULK lane` | + +### 5.3 约束 + +1. `FAST` 块必须满足 `record_span <= fast_single_record_limit` +2. `FAST` 块禁止跨 wordline 提交 +3. `BULK` 块不得配置为 immediate +4. `allow_rollback = 1` 时必须 `keep_prev_copy = 1` + +## 6. 容量规划 + +### 6.1 基础公式 + +```text +live_span(i) = record_span(i) * (keep_prev_copy(i) ? 2 : 1) +lane_live = Σ live_span(i), i ∈ lane +lane_payload = sector_count(lane) * sector_usable_bytes - lane_mgmt_bytes +lane_headroom = max_burst_records(lane) * max(record_span(i), i ∈ lane) + switch_guard(lane) +``` + +必须满足: + +```text +lane_payload >= lane_live + lane_headroom + gc_fragment_guard +``` + +### 6.2 建议经验值 + +- `FAST lane`: `max_burst_records = 2 ~ 4` +- `NORMAL lane`: `max_burst_records = 1 ~ 2` +- `BULK lane`: `max_burst_records = 1` +- `gc_fragment_guard >= 2 * max_record_span(lane)` + +## 7. 
启动时间相关配置 + +为了保证上电快速读 block,配置中还应引入: + +| 配置项 | 说明 | +| --- | --- | +| `boot_critical` | 启动早期必须可读 | +| `ckpt_bg_records` / `ckpt_bg_bytes` | 背景 checkpoint 阈值 | +| `ckpt_force_records` / `ckpt_force_bytes` | 强制 checkpoint 阈值 | +| `max_uncheckpointed_records_per_lane` | 启动 tail 上界 | +| `max_uncheckpointed_bytes_per_lane` | 启动 tail 上界 | +| `max_tail_scan_pages_per_lane` | 启动时间预算输入 | + +## 8. 编译期校验清单 + +建议配置工具或静态断言至少检查: + +1. `block_id` 全局唯一 +2. `max_len > 0` +3. `record_align >= program_unit` +4. `FAST` 块满足 `fast_single_record_limit` +5. `allow_rollback -> keep_prev_copy` +6. `lane_payload >= lane_live + lane_headroom + gc_fragment_guard` +7. `checkpoint_size <= meta_lane_capacity` +8. `boot_critical` 块必须可进入 checkpoint + +## 9. 示例 + +```c +static const fee_block_cfg_t fee_block_table[] = +{ + { .block_id = 1, .max_len = 32, .block_class = FEE_BLOCK_FAST, .lane_type = FEE_LANE_FAST, .endurance_class = FEE_END_HOT, .keep_prev_copy = 1, .allow_rollback = 1, .crc_mode = FEE_CRC16, .record_align = 512, .boot_critical = 1 }, + { .block_id = 2, .max_len = 256, .block_class = FEE_BLOCK_NORMAL, .lane_type = FEE_LANE_NORMAL, .endurance_class = FEE_END_WARM, .keep_prev_copy = 1, .allow_rollback = 1, .crc_mode = FEE_CRC32, .record_align = 8, .boot_critical = 0 }, + { .block_id = 3, .max_len = 4096, .block_class = FEE_BLOCK_BULK, .lane_type = FEE_LANE_BULK, .endurance_class = FEE_END_COLD, .keep_prev_copy = 0, .allow_rollback = 0, .crc_mode = FEE_CRC32, .record_align = 512, .boot_critical = 0 }, +}; +``` + +## 10. 测试关注点 + +- 非法 immediate 大块被编译期拒绝 +- lane 容量不足被编译期拒绝 +- `boot_critical` 缺失 checkpoint 空间时报错 +- `record_align` 与 `program_unit` 不匹配时报错 diff --git a/components/custom_fee/doc/fee_diag_test.md b/components/custom_fee/doc/fee_diag_test.md new file mode 100644 index 00000000000..55eb18fa1f8 --- /dev/null +++ b/components/custom_fee/doc/fee_diag_test.md @@ -0,0 +1,710 @@ +# custom_fee 诊断测试用例说明 + +## 1. 
目的 + +`custom_fee_diag_test` 用于在 QEMU + RAM mock flash 环境下验证 `custom_fee` 的以下行为: + +- block 数据写入、读回、一致性校验 +- rollback / invalidate 功能 +- 重新初始化后的恢复能力 +- fast lane GC 触发时的耗时和底层驱动访问次数 +- checkpoint / sector / record 在 mock flash 中的实际落盘格式 + +和 `custom_fee_test` 的区别: + +- `custom_fee_test` 只做通过/失败判定 +- `custom_fee_diag_test` 额外打印数据内容、耗时、驱动访问统计、flash 布局和原始内存 dump + +## 2. 执行命令 + +在 `bsp/qemu-vexpress-a9` 目录下执行: + +```powershell +cmd /c "call C:\Work\InstallTools\env-windows\tools\bin\env-init.bat && scons -j8" +powershell -NoProfile -ExecutionPolicy Bypass -File .\codex_qemu_ps.ps1 -MshCommand custom_fee_test +powershell -NoProfile -ExecutionPolicy Bypass -File .\codex_qemu_ps.ps1 -MshCommand custom_fee_diag_test +``` + +## 3. 用例覆盖范围 + +当前诊断用例按下面的顺序执行: + +| 步骤 | 内容 | 目的 | +| --- | --- | --- | +| 1 | backend 全擦除 + `fee_init()` | 构造 cold boot 场景 | +| 2 | block 1 写入 `fast_a` | 验证 fast lane 写入 | +| 3 | block 1 读回 `fast_a` | 验证读回一致性 | +| 4 | block 1 改写 `fast_b` | 验证覆盖写 | +| 5 | block 1 读回 `fast_b` | 验证最新版本生效 | +| 6 | `fee_rollback(1)` | 验证保留上一个版本 | +| 7 | block 1 再读 | 验证回滚到 `fast_a` | +| 8 | block 2 写入 `normal_a` | 验证 normal lane 写入 | +| 9 | block 2 读回 | 验证 normal lane 读回 | +| 10 | 再次 `fee_init()` | 验证重新初始化恢复 | +| 11 | block 1 / block 2 读回 | 验证恢复后数据仍可用 | +| 12 | block 1 连续写 160 次 | 触发 fast lane GC | +| 13 | block 1 再读 | 验证 GC 后最新数据仍正确 | +| 14 | `fee_invalidate(1)` | 验证失效流程 | +| 15 | 再读 block 1 | 期望返回 `FEE_E_NOT_OK` | +| 16 | dump 最终 flash 布局 | 验证 checkpoint、lane、record 的落盘结构 | + +## 4. 
诊断输出字段 + +### 4.1 通用性能字段 + +每个步骤会打印: + +```text +custom_fee_diag_test: write block1 fast_a time=0 ms ticks=0 wait_loops=1 driver[init=0 read=0/0B write=4/1208B erase=1/57344B poll=1] +``` + +字段含义: + +- `time`: 墙钟毫秒,来自 `rt_tick_get_millisecond()` +- `ticks`: RT-Thread tick 差值 +- `wait_loops`: 该请求从提交到 idle 的轮询次数 +- `driver[read=X/YB write=X/YB erase=X/YB poll=X]` + - `read` / `write` / `erase`: 底层驱动真实调用次数 + - `YB`: 同类操作累计字节数 + - `poll`: `fee_port_mainfunction()` 被轮询的次数 + +注意:这里的驱动统计是端到端 flash 访问量,不仅包含 block record 本身,也包含 checkpoint、GC 搬迁等内部元数据流量。 + +### 4.2 GC 识别字段 + +GC 压测阶段会额外打印 fast lane 状态变化: + +```text +custom_fee_diag_test: gc_write[106] time=10 ms ticks=1 wait_loops=7 fast_lane[sector=0->2 gen=1->2 free=0x00029a40->0x00038440 gc=1] driver[read=4/1088B write=8/2360B erase=3/172032B poll=7] +``` + +字段含义: + +- `sector=A->B`: fast lane 活动 sector 的切换 +- `gen=A->B`: fast lane generation 变化 +- `free=old->new`: `free_offset` 变化 +- `gc=1`: 本次写入发生了真实 GC 扇区切换 + +当前样例中: + +- 160 次 fast write 里只有 `gc_write[106]` 触发了真实 GC +- 该次 GC 从 `sector 0` 切换到 `sector 2` +- GC 总结输出为: + +```text +custom_fee_diag_test: gc summary writes=160 total_time=1070 ms total_ticks=116 gc_events=1 gc_time=10 ms gc_ticks=1 gc_max_ticks=1 +``` + +## 5. 
Mock Flash 总体布局 + +QEMU 默认 RAM mock flash 参数: + +- 总容量:`0x000A0000` = 640 KiB +- 擦除粒度:`0x0000E000` = 56 KiB +- 读粒度:`1` +- 写粒度:`8` + +当前 `custom_fee` 实际使用 `10` 个 sector,总占用 `0x0008C000`,尾部 `0x00014000` 未使用。 + +### 5.1 地址布局图 + +```text +0x00000000 +-------------------------------+ + | Meta sector 0 (checkpoint A) | +0x0000E000 +-------------------------------+ + | Meta sector 1 (checkpoint B) | +0x0001C000 +-------------------------------+ + | Fast lane sector 0 | +0x0002A000 +-------------------------------+ + | Fast lane sector 1 | +0x00038000 +-------------------------------+ + | Fast lane sector 2 | +0x00046000 +-------------------------------+ + | Normal lane sector 0 | +0x00054000 +-------------------------------+ + | Normal lane sector 1 | +0x00062000 +-------------------------------+ + | Normal lane sector 2 | +0x00070000 +-------------------------------+ + | Bulk lane sector 0 | +0x0007E000 +-------------------------------+ + | Bulk lane sector 1 | +0x0008C000 +-------------------------------+ + | Unused tail | +0x000A0000 +-------------------------------+ +``` + +### 5.2 末态布局摘要 + +来自一次 `custom_fee_diag_test` 样例: + +```text +custom_fee_diag_test: meta[0] base=0x00000000 valid=1 generation=165 entries=2 commit=0x434f4d4d +custom_fee_diag_test: meta[1] base=0x0000e000 valid=1 generation=164 entries=2 commit=0x434f4d4d +custom_fee_diag_test: lane=fast range=[0x0001c000,0x00046000) active=2 dst=0 spare=0 free=0x0003f040 +custom_fee_diag_test: lane=normal range=[0x00046000,0x00070000) active=0 dst=1 spare=2 free=0x000460b0 +custom_fee_diag_test: lane=bulk range=[0x00070000,0x0008c000) active=0 dst=1 spare=1 free=0x00070040 +``` + +说明: + +- 最终活动 checkpoint 镜像在 `meta[0]` +- fast lane 的有效活动 sector 已经切换到 `sector 2` +- normal lane 只写入了一个 record +- bulk lane 仅格式化了 sector header,没有业务 record + +### 5.3 先建立一个简单心智模型 + +对新工程师来说,可以先把 `custom_fee` 想成三层: + +```text +应用 block 读写 + | + v +FEE 逻辑层 +- 根据 block_id 找到 lane +- 维护 cache +- 决定是否做 checkpoint / GC + | + v +flash 落盘层 +- 
sector header +- record header +- payload +- commit tail +``` + +再进一步,把一个 lane 想成“顺序追加日志”的空间: + +```text +lane sector + -> 先写一个 sector header + -> 后面不断顺序追加 record + -> free_offset 始终指向下一个可写位置 + -> 空间快用完时触发 GC,把仍然有效的 record 搬到新 sector +``` + +对业务 block 来说,有两个最关键的事实: + +- **写 block 不是覆盖旧地址,而是追加一个新 record** +- **读 block 不是扫描整个 flash,而是先查 cache,再去读当前 record** + +### 5.4 一次 block 写入,到底在 flash 里放了什么 + +以 `block 1` 写入 `fast_a` 为例,真正落盘的对象不是“只有 32 字节 payload”,而是一个完整 record: + +```text ++---------------------------+ +| fee_record_header_t 0x20 | ++---------------------------+ +| payload | +| len = block data len | ++---------------------------+ +| fee_commit_tail_t 0x10 | ++---------------------------+ +| erased padding | +| 对齐到 record_align | ++---------------------------+ +``` + +这个 record 的组成如下: + +| 部分 | 内容 | 作用 | +| --- | --- | --- | +| `record header` | block id、record type、data_len、seq、hdr_crc | 标识“这是哪个 block 的第几个版本” | +| `payload` | 业务数据本身 | 真正需要保存的 block 内容 | +| `commit tail` | `data_crc`、`tail_crc`、`commit_marker` | 标识“这条 record 已提交完成,可读” | +| `padding` | 仍然保持 `0xFF` 的空白区 | 保证下一条 record 从对齐地址开始 | + +#### 5.4.1 block 1 的写入排布 + +`block 1` 配置: + +- `max_len = 32` +- `lane = fast` +- `record_align = 512` +- `crc_mode = FEE_CRC16` + +因此它的 record span 不是 80B,而是被对齐到 `0x200 = 512B`: + +```text +header = 0x20 = 32B +payload = 0x20 = 32B +tail = 0x10 = 16B +raw sum = 0x50 = 80B +aligned span = 0x200 = 512B +padding = 0x200 - 0x50 = 0x1B0 = 432B +``` + +样例里的第一条 `block 1` record 地址形态可以理解成: + +```text +0x0001C040 +-------------------------------+ + | record header (0x20) | +0x0001C060 +-------------------------------+ + | payload 32B | +0x0001C080 +-------------------------------+ + | commit tail (0x10) | +0x0001C090 +-------------------------------+ + | padding / erased bytes | + | ... 
| +0x0001C240 +-------------------------------+ + | next record starts here | +``` + +所以: + +- `block 1` 每写一次,逻辑上只写了 32B 数据 +- 但在 flash 地址空间里,会消耗一个 `512B` 的 record 槽位 + +#### 5.4.2 block 2 的写入排布 + +`block 2` 配置: + +- `max_len = 128` +- `lane = normal` +- `record_align = 8` +- `crc_mode = FEE_CRC32` + +因此它的 record 更紧凑: + +```text +header = 0x20 = 32B +payload = 0x40 = 64B +tail = 0x10 = 16B +raw sum = 0x70 = 112B +aligned span = 0x70 = 112B +padding = 0 +``` + +样例里的 `block 2` record: + +```text +0x00046040 +-------------------------------+ + | record header (0x20) | +0x00046060 +-------------------------------+ + | payload 64B | +0x000460A0 +-------------------------------+ + | commit tail (0x10) | +0x000460B0 +-------------------------------+ + | next free_offset | +``` + +这说明: + +- fast lane 更偏向“热数据 + 大对齐 + 快速追加” +- normal lane 更偏向“普通数据 + 紧凑存储” + +### 5.5 一次 block 写入,除了 record 还会发生什么 + +很多新工程师第一次看这条统计会困惑: + +```text +custom_fee_diag_test: write block1 fast_a ... driver[write=4/1208B erase=1/57344B] +``` + +为什么明明只写了一个 block,却不是 `write=3`? + +原因是 **一次业务写入通常不只包含“record 本身”**,还可能包含 checkpoint 元数据更新。 + +以当前样例的 `write block1 fast_a` 为例,可按下面理解: + +```text +1. 写 record header +2. 写 payload +3. 写 commit tail +4. 写 checkpoint image +5. 
擦除一个 meta sector,给 checkpoint image 轮换使用 +``` + +图示如下: + +```text +fee_write(block1) + | + +--> lane append record + | +--> write record header + | +--> write payload + | +--> write commit tail + | + +--> checkpoint flush + +--> erase inactive meta sector + +--> write new checkpoint image +``` + +因此性能统计里常见的现象是: + +- `write_calls` 比“header/payload/tail 三次写”更多 +- `erase_calls` 不一定代表 GC,也可能只是 checkpoint 轮换 + +### 5.6 一次 block 读取,到底读了什么 + +读路径不是“直接拿 payload 地址 memcpy 给用户”,而是更谨慎: + +```text +fee_read(block_id, offset, len) + | + +--> cache lookup + 找到当前有效 record 地址 + | + +--> read record header + 确认 block_id / len / type + | + +--> read commit tail + 确认 commit marker / tail crc + | + +--> read payload + 如 block 配置了 CRC,则读出 payload 做校验 + | + +--> copy 用户请求的 offset/len 到 dst +``` + +图示如下: + +```text +用户 read block + | + v +cache 告诉 FEE: +"当前 block 2 的最新 record 在 0x00046040" + | + v +读 0x00046040 处的 record header + | + v +读 0x000460A0 处的 commit tail + | + v +读 0x00046060 处的 payload + | + v +CRC 校验通过后,把结果返回给用户 +``` + +这就是为什么 `block 1` / `block 2` 的读统计常见是: + +```text +driver[read=3/...] +``` + +因为当前两个测试 block 都启用了 CRC,所以一次成功读取通常包含: + +1. 读 header +2. 读 tail +3. 读完整 payload + +注意: + +- 即使用户只读其中一部分 `offset/len` +- 在 `CRC16/CRC32` 模式下,FEE 仍会先把完整 payload 读出来做校验 + +只有未来某个 block 配置成 `FEE_CRC_NONE`,读路径才可能退化成更少的底层读取。 + +### 5.7 rollback 为什么比普通读写更重 + +`rollback block1` 样例里常看到: + +```text +driver[read=4/... write=4/... erase=1/...] +``` + +它比单次 `write` 更重,是因为 rollback 不是简单改一个指针,而是: + +```text +1. 找到上一版本 record +2. 读上一版本 header +3. 读上一版本 tail +4. 读上一版本 payload +5. 再把这份旧数据当成“新版本”重新追加写入 +6. 再触发 checkpoint 刷新 +``` + +也就是说,rollback 的本质是: + +- **先读旧版本** +- **再追加一条新 record 表示“我回到了旧内容”** + +### 5.8 GC 时内存里发生了什么 + +GC 不是“压缩整个 flash”,而是“当当前 active sector 快写满时,把仍然有效的记录搬到新的 sector”。 + +样例中真正的 GC 发生在: + +```text +gc_write[106] ... fast_lane[sector=0->2 gen=1->2 ... gc=1] +``` + +可以把它理解成: + +```text +旧状态: + fast lane active sector = 0 + free_offset 已接近 sector 尾部 + +GC 动作: + 1. 
选择 sector 2 作为新的目标 sector + 2. 给 sector 2 写新的 sector header + 3. 把 sector 0 中仍然有效的 block record 搬过去 + 4. 再把这次用户新写入的 record 追加到 sector 2 + 5. 更新 active sector = 2, generation = 2 + 6. 擦除旧 sector,释放为空闲区 + 7. 刷新 checkpoint +``` + +图示如下: + +```text +GC 前: + fast sector 0 [old active] --> 有效 record + 历史 record + 即将写满 + fast sector 1 [spare] + fast sector 2 [erased] + +GC 中: + fast sector 2 <- 写新 sector header + fast sector 2 <- 搬迁有效 record + fast sector 2 <- 追加最新用户 record + +GC 后: + fast sector 2 [new active] + fast sector 0 [to be erased / erased] + fast sector 1 [spare] +``` + +所以样例里那次 GC 统计: + +```text +driver[read=4/1088B write=8/2360B erase=3/172032B poll=7] +``` + +可以粗略理解为: + +- 有效 record 搬迁需要额外读取旧 sector 内容 +- 新 sector header + 搬迁 record + 本次新写入 + checkpoint,导致写次数显著增加 +- 目标 sector 预处理、旧 sector 回收、meta sector checkpoint 轮换,会带来多次擦除 + +### 5.9 怎么看一条性能数据是不是“正常” + +对新工程师,建议先按下面的经验判断: + +| 场景 | 正常现象 | +| --- | --- | +| 普通 write | `write_calls` 通常大于 3,因为包含 checkpoint | +| 普通 read | 有 CRC 的 block 往往是 `read_calls = 3` | +| rollback | `read_calls` 和 `write_calls` 都会比普通写更大 | +| invalidate | 通常比 data write 少一次 payload 写 | +| GC write | `wait_loops`、`read_calls`、`write_calls`、`erase_calls` 会明显抬升 | + +可以把一条数据先拆成两部分看: + +```text +业务 record 成本 ++ 元数据成本(checkpoint / GC) += 总 driver 访问次数 +``` + +## 6. 
落盘结构细节 + +### 6.1 Sector Header + +`fee_sector_header_t` 大小为 `0x40` 字节。 + +| 偏移 | 大小 | 字段 | 说明 | +| --- | --- | --- | --- | +| `0x00` | 4 | `magic` | `FEE_SECTOR_MAGIC = 0x46454553` | +| `0x04` | 2 | `format_version` | 当前为 `0x0100` | +| `0x06` | 1 | `lane_id` | meta/fast/normal/bulk | +| `0x07` | 1 | `state` | `ACTIVE` / `GC_DST` 等 | +| `0x08` | 4 | `generation` | 当前活动代次 | +| `0x0C` | 4 | `erase_count` | 当前 mock 驱动未累计 | +| `0x10` | 4 | `data_start` | record 区起始地址 | +| `0x14` | 4 | `data_end` | record 区结束地址 | +| `0x18` | 4 | `hdr_seq` | 当前等于 generation | +| `0x1C` | 4 | `hdr_crc` | header CRC | +| `0x20` | 4 | `commit_marker` | `0x434F4D4D` | +| `0x24` | 28 | `reserved[7]` | 预留 | + +### 6.2 Record Header + +`fee_record_header_t` 大小为 `0x20` 字节。 + +| 偏移 | 大小 | 字段 | 说明 | +| --- | --- | --- | --- | +| `0x00` | 4 | `magic` | `FEE_RECORD_MAGIC = 0x46454552` | +| `0x04` | 2 | `block_id` | 业务 block 号 | +| `0x06` | 1 | `record_type` | `DATA` / `TOMBSTONE` | +| `0x07` | 1 | `flags` | 当前固定 `0x01` | +| `0x08` | 2 | `data_len` | 业务 payload 长度 | +| `0x0A` | 2 | `header_len` | 当前固定 `0x20` | +| `0x0C` | 4 | `seq` | block 内递增版本号 | +| `0x10` | 4 | `generation` | 当前实现未单独填写 | +| `0x14` | 4 | `prev_addr_hint` | 当前实现未填写 | +| `0x18` | 4 | `hdr_crc` | record header CRC | +| `0x1C` | 4 | `reserved` | 预留 | + +### 6.3 Commit Tail + +`fee_commit_tail_t` 大小为 `0x10` 字节。 + +| 偏移 | 大小 | 字段 | 说明 | +| --- | --- | --- | --- | +| `0x00` | 4 | `data_crc` | payload CRC | +| `0x04` | 4 | `tail_crc` | tail CRC | +| `0x08` | 4 | `tail_flags` | 当前为 `0` | +| `0x0C` | 4 | `commit_marker` | `0x434F4D4D` | + +### 6.4 Record 占用公式 + +```text +record_span = + align_up( + sizeof(fee_record_header_t) + + align_up(data_len, program_unit) + + sizeof(fee_commit_tail_t), + record_align) +``` + +当前样例: + +- block 1 + - payload = `32B` + - program unit = `8B` + - record align = `512B` + - span = `0x200` +- block 2 + - payload = `64B` + - program unit = `8B` + - record align = `8B` + - span = `0x70` + +## 7. 
样例 dump 与字段解读 + +以下 dump 为 little-endian 原始内存内容。 + +### 7.1 活动 checkpoint 镜像 + +```text +custom_fee_diag_test: meta active raw addr=0x00000000 len=64 +0x00000000: 4b 43 45 46 00 01 00 00 a5 00 00 00 00 00 00 00 +0x00000010: 00 00 00 00 00 00 00 00 00 00 00 00 40 f0 03 00 +0x00000020: 02 00 00 00 02 00 00 03 00 00 00 00 b0 60 04 00 +0x00000030: 01 00 00 00 00 01 02 03 00 00 00 00 40 00 07 00 +``` + +解读: + +- `0x00000000 = 0x4645434B`,即 checkpoint magic +- `0x00000008 = 0x000000A5`,即 generation `165` +- `0x0000001C = 0x0003F040`,是 fast lane `free_offset` +- `0x00000020 = 0x00000002`,是 fast lane `active_generation` +- `0x00000024 = 02 00 00 03` + - `active_sector = 2` + - `dst_sector = 0` + - `spare_sector = 0` + - `sector_count = 3` +- `0x0000002C = 0x000460B0`,是 normal lane `free_offset` + +### 7.2 Fast lane 活动 sector header + +```text +custom_fee_diag_test: sector raw addr=0x00038000 len=64 +0x00038000: 53 45 45 46 00 01 01 22 02 00 00 00 00 00 00 00 +0x00038010: 40 80 03 00 00 60 04 00 02 00 00 00 6a 80 24 00 +0x00038020: 4d 4d 4f 43 00 00 00 00 00 00 00 00 00 00 00 00 +0x00038030: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 +``` + +解读: + +- `magic = 0x46454553` +- `lane_id = 1`,对应 fast lane +- `state = 0x22`,对应 `ACTIVE` +- `generation = 2` +- `data_start = 0x00038040` +- `data_end = 0x00046000` +- `hdr_seq = 2` +- `commit_marker = 0x434F4D4D` + +样例输出里该 header 被标记为 `header-valid=0`,但原始 bytes 仍然完整打印,便于继续排查 header 校验路径。 + +### 7.3 Normal lane record 样例 + +record header: + +```text +custom_fee_diag_test: record header raw addr=0x00046040 len=32 +0x00046040: 52 45 45 46 02 00 d1 01 40 00 20 00 01 00 00 00 +0x00046050: 00 00 00 00 00 00 00 00 21 f7 a5 ae 00 00 00 00 +``` + +payload: + +```text +custom_fee_diag_test: record payload raw addr=0x00046060 len=64 +0x00046060: 21 24 27 2a 2d 30 33 36 39 3c 3f 42 45 48 4b 4e +0x00046070: 51 54 57 5a 5d 60 63 66 69 6c 6f 72 75 78 7b 7e +0x00046080: 81 84 87 8a 8d 90 93 96 99 9c 9f a2 a5 a8 ab ae +0x00046090: b1 b4 b7 ba bd 
c0 c3 c6 c9 cc cf d2 d5 d8 db de +``` + +tail: + +```text +custom_fee_diag_test: record tail raw addr=0x000460a0 len=16 +0x000460a0: 60 44 b0 0a 3e 69 7a 2e 00 00 00 00 4d 4d 4f 43 +``` + +解读: + +- `block_id = 2` +- `record_type = 0xD1`,即 `DATA` +- `data_len = 0x40`,即 `64B` +- `header_len = 0x20` +- `seq = 1` +- `hdr_crc = 0xAEA5F721` +- tail 中: + - `data_crc = 0x0AB04460` + - `tail_crc = 0x2E7A693E` + - `tail_flags = 0` + - `commit_marker = 0x434F4D4D` + +### 7.4 sector 内部排布图 + +以 normal lane 的 sector 0 为例: + +```text +0x00046000 +-------------------------------+ + | fee_sector_header_t (0x40) | +0x00046040 +-------------------------------+ + | fee_record_header_t (0x20) | +0x00046060 +-------------------------------+ + | payload (0x40) | +0x000460A0 +-------------------------------+ + | fee_commit_tail_t (0x10) | +0x000460B0 +-------------------------------+ + | next free_offset | + | erased area ... | +0x00054000 +-------------------------------+ +``` + +## 8. 结果解读建议 + +看 `custom_fee_diag_test` 输出时,优先关注下面几点: + +- block write/read 的数据内容是否和测试模式一致 +- `driver[...]` 是否符合预期 + - fast block 单次写入统计明显大于纯 payload,是因为包含 checkpoint traffic +- `gc_write[*]` 中 `gc=1` 的那一条是否存在 +- `gc summary` 是否能反映真实 sector 切换次数 +- 最终 `flash layout [final]` 中 lane 地址、active sector、free_offset 是否合理 +- `record header raw` / `record tail raw` 的 magic 和 commit marker 是否正确 + +## 9. 当前样例结论 + +基于当前 QEMU 样例: + +- `custom_fee_test` / `custom_fee_diag_test` 均通过 +- block 1 / block 2 的写入、读回、回滚、重启恢复、失效场景均正确 +- fast lane 在 `gc_write[106]` 发生一次真实 GC +- GC 后 fast lane 从 `sector 0` 切换到 `sector 2` +- checkpoint generation 最终增长到 `165` +- 诊断输出已经可以直接对应到 checkpoint、sector header、record header、payload、tail 的原始内存布局 diff --git a/components/custom_fee/doc/fee_onflash_format.md b/components/custom_fee/doc/fee_onflash_format.md new file mode 100644 index 00000000000..7eb630d7227 --- /dev/null +++ b/components/custom_fee/doc/fee_onflash_format.md @@ -0,0 +1,244 @@ +# FEE On-Flash Format 设计 + +## 1. 
目的 + +本文档冻结 FEE 在 flash 上的持久化格式,包括: + +- lane / sector / record 的物理组织 +- `SectorHeader` / `RecordHeader` / `CommitTail` / checkpoint 的字段定义 +- 提交点、CRC 和掉电恢复判定规则 + +本文档是编码 `fee_port_fls.c`、`fee_ckpt.c`、`fee_recovery.c`、`fee_lane_*` 的直接输入。 + +## 2. 设计原则 + +1. 所有持久化结构必须满足底层 `program_unit` 对齐。 +2. 业务可见一致性只依赖持久化提交标记,不依赖 RAM 中间状态。 +3. `commit_marker` 是唯一提交点,必须最后落盘。 +4. old copy 在 new copy 提交前必须保持可读。 +5. 除 `META lane` 外,不允许在 flash 上原地修改 record 内容。 + +## 3. 基本约定 + +### 3.1 地址与对齐 + +- 最小地址单位:byte address +- 最小对齐单位:`align_unit = max(8, program_unit)` +- `FAST` record 推荐按 `1 wordline` 对齐 +- `NORMAL/BULK` record 至少按 `align_unit` 对齐 + +统一定义: + +```text +align_up(x, a) = ((x + a - 1) / a) * a +record_span = align_up(header_len + payload_len + tail_len, record_align) +``` + +### 3.2 lane 编码 + +| lane | 值 | 说明 | +| --- | --- | --- | +| `META` | `0x01` | checkpoint / super state | +| `FAST` | `0x02` | immediate 语义 | +| `NORMAL` | `0x03` | 主数据路径 | +| `BULK` | `0x04` | 大块或低频大块 | + +### 3.3 sector state 编码 + +| state | 值 | 说明 | +| --- | --- | --- | +| `ERASED` | `0xFF` | 空白扇区 | +| `PREPARE` | `0x11` | header 已写,尚未投入使用 | +| `ACTIVE` | `0x22` | 当前追加写扇区 | +| `GC_DST` | `0x33` | 当前 GC 复制目标扇区 | +| `OLD_PENDING_ERASE` | `0x44` | 已切换完成,等待擦除 | +| `BAD` | `0x55` | 检测到 header 损坏或非法状态 | + +`PREPARE -> ACTIVE / GC_DST -> OLD_PENDING_ERASE -> ERASED` 是唯一合法迁移方向。 + +## 4. 
SectorHeader + +### 4.1 布局基线 + +建议固定 `SectorHeader` 长度为 `64 B`。 + +| 偏移 | 字段 | 类型 | 说明 | +| --- | --- | --- | --- | +| `0x00` | `magic` | `uint32` | 固定扇区签名 | +| `0x04` | `format_version` | `uint16` | 版本号 | +| `0x06` | `lane_id` | `uint8` | lane 编码 | +| `0x07` | `state` | `uint8` | sector state | +| `0x08` | `generation` | `uint32` | lane 内单调递增代数 | +| `0x0C` | `erase_count` | `uint32` | 可选,产品化阶段启用 | +| `0x10` | `data_start` | `uint32` | record 区起始偏移 | +| `0x14` | `data_end` | `uint32` | record 区终止偏移 | +| `0x18` | `hdr_seq` | `uint32` | header 刷写序号 | +| `0x1C` | `hdr_crc` | `uint32` | header CRC | +| `0x20` | `commit_marker` | `uint32` | header 提交标记 | +| `0x24` | `reserved[7]` | `uint32[7]` | 预留 | + +### 4.2 判定规则 + +sector header 只有同时满足以下条件才视为有效: + +1. `magic` 正确 +2. `format_version` 在支持范围内 +3. `hdr_crc` 正确 +4. `commit_marker` 正确 +5. `lane_id` / `state` 组合合法 + +若 `hdr_crc` 正确但 `commit_marker` 缺失,则该 sector 视为未提交 header,不参与恢复。 + +## 5. RecordHeader + +### 5.1 布局基线 + +建议固定 `RecordHeader` 长度为 `32 B`。 + +| 偏移 | 字段 | 类型 | 说明 | +| --- | --- | --- | --- | +| `0x00` | `magic` | `uint32` | 固定 record 签名 | +| `0x04` | `block_id` | `uint16` | 逻辑块号 | +| `0x06` | `record_type` | `uint8` | `DATA/TOMBSTONE/CKPT` | +| `0x07` | `flags` | `uint8` | `PREPARE/COPIED/ROLLBACK` | +| `0x08` | `data_len` | `uint16` | payload 长度 | +| `0x0A` | `header_len` | `uint16` | header 长度 | +| `0x0C` | `seq` | `uint32` | block 内单调递增序号 | +| `0x10` | `generation` | `uint32` | 所在 sector generation 镜像 | +| `0x14` | `prev_addr_hint` | `uint32` | 可选,调试/恢复辅助 | +| `0x18` | `hdr_crc` | `uint32` | header CRC | +| `0x1C` | `reserved` | `uint32` | 预留 | + +### 5.2 record type 编码 + +| type | 值 | 说明 | +| --- | --- | --- | +| `DATA` | `0xD1` | 普通数据 | +| `TOMBSTONE` | `0xD2` | 失效记录 | +| `CKPT_CHUNK` | `0xC1` | checkpoint 片段 | + +### 5.3 flags 编码 + +按 bit 使用: + +- `bit0`: `PREPARE` +- `bit1`: `COPIED` +- `bit2`: `ROLLBACK` +- 其他 bit 保留 + +约束: + +1. 未提交 record 可以带 `PREPARE` +2. 已提交后 `PREPARE` 语义仍可保留为“曾经是正常写入”,不要求原地清零 +3. 
`COPIED` 仅用于 GC 迁移生成的记录 +4. `ROLLBACK` 仅用于显式回滚生成的记录 + +## 6. CommitTail + +### 6.1 布局基线 + +建议固定 `CommitTail` 长度为 `16 B`。 + +| 偏移 | 字段 | 类型 | 说明 | +| --- | --- | --- | --- | +| `0x00` | `data_crc` | `uint32` | payload CRC | +| `0x04` | `tail_crc` | `uint32` | tail 前部 CRC | +| `0x08` | `tail_flags` | `uint32` | 预留或扩展 | +| `0x0C` | `commit_marker` | `uint32` | 唯一提交点 | + +### 6.2 提交规则 + +写入顺序固定为: + +1. `RecordHeader` +2. `Payload` +3. `CommitTail` 的前 `12 B` +4. `commit_marker` + +恢复时只有满足下列条件的 record 才可接受: + +1. `RecordHeader` 合法 +2. `Payload` 可读 +3. `CommitTail` 的 `tail_crc` 正确 +4. `commit_marker` 正确 + +若 `commit_marker` 缺失,则整个 record 视为不存在。 + +## 7. 特殊记录 + +### 7.1 TOMBSTONE + +约束如下: + +- `record_type = TOMBSTONE` +- `data_len = 0` +- `payload` 为空 +- `seq = old_seq + 1` + +### 7.2 checkpoint 记录 + +checkpoint 只允许出现在 `META lane`。 + +建议结构为: + +```text +RecordHeader(type = CKPT_CHUNK) +CheckpointChunkPayload +CommitTail +``` + +`CheckpointChunkPayload` 内部再封装: + +- `ckpt_generation` +- `chunk_index` +- `chunk_count` +- `chunk_bytes` +- `payload_crc` +- `chunk_data` + +完整 checkpoint 以最后一个 chunk 的 `commit_marker` 和 `chunk_index == chunk_count - 1` 为完成标志。 + +## 8. Version 与升级策略 + +### 8.1 format version + +- `major` 变化表示不兼容变更 +- `minor` 变化表示向前兼容扩展 + +建议编码: + +```text +format_version = (major << 8) | minor +``` + +### 8.2 升级策略 + +1. 启动时若发现旧版本且支持迁移,进入只读恢复 + 后台重写流程 +2. 若版本不兼容且不支持迁移,必须显式返回格式错误 +3. 不允许不同 `major` 版本的 record 混写在同一 lane + +## 9. 编码实现建议 + +### 9.1 模块职责 + +- `fee_onflash.h`: 常量、magic、结构体和 encode/decode API +- `fee_onflash.c`: header/tail/checkpoint 编码与校验 +- `fee_recovery.c`: 遍历 flash 并调用 decode API + +### 9.2 建议接口 + +```c +uint32 fee_onflash_calc_record_span(const fee_block_cfg_t *cfg, uint16 data_len); +Std_ReturnType fee_onflash_encode_record_header(...); +Std_ReturnType fee_onflash_encode_commit_tail(...); +boolean fee_onflash_is_record_committed(const uint8 *buf); +boolean fee_onflash_validate_sector_header(const uint8 *buf); +``` + +## 10. 
测试关注点 + +- `SectorHeader` CRC 正确但 `commit_marker` 缺失 +- `RecordHeader` 正确但 `CommitTail` 未完成 +- `TOMBSTONE` 与 `DATA` 的 `seq` 比较 +- `CKPT_CHUNK` 丢失最后一块时的恢复行为 +- 不同 `format_version` 的兼容与拒绝路径 diff --git a/components/custom_fee/doc/fee_port_adapter.md b/components/custom_fee/doc/fee_port_adapter.md new file mode 100644 index 00000000000..988abce9dd5 --- /dev/null +++ b/components/custom_fee/doc/fee_port_adapter.md @@ -0,0 +1,156 @@ +# FEE Port Adapter 设计 + +## 1. 目的 + +本文档定义 FEE 与底层 flash 驱动之间的适配边界,目标是: + +- 让 FEE 核心不直接依赖具体器件驱动细节 +- 支持 `Fls_17_Dmu` 与 RAM mock 两类后端 +- 冻结 `read/write/erase/status/job_result` 的最小接口集 + +## 2. 设计原则 + +1. FEE 不能假设底层支持任意编程粒度。 +2. FEE 的 header/payload/tail 切片必须满足驱动暴露的能力参数。 +3. 端口层负责把驱动语义转换为 FEE 可消费的统一语义。 +4. 同一时刻只允许 1 个底层 flash job 在飞行。 + +## 3. 能力参数 + +建议底层能力通过以下结构暴露: + +```c +typedef struct +{ + uint16 read_unit; + uint16 program_unit; + uint32 erase_unit; + uint16 preferred_chunk; + uint8 supports_suspend; + uint8 supports_compare; +} fee_flash_caps_t; +``` + +字段说明: + +- `read_unit`: 最小读粒度 +- `program_unit`: 最小编程粒度 +- `erase_unit`: 最小擦除单位 +- `preferred_chunk`: 推荐单次读写长度 +- `supports_suspend`: 是否支持擦除挂起 +- `supports_compare`: 是否支持驱动层 compare + +## 4. 统一接口 + +### 4.1 必选接口 + +```c +Std_ReturnType fee_port_init(void); +Std_ReturnType fee_port_get_caps(fee_flash_caps_t *caps); +Std_ReturnType fee_port_read(uint32 addr, uint8 *dst, uint32 len); +Std_ReturnType fee_port_write(uint32 addr, const uint8 *src, uint32 len); +Std_ReturnType fee_port_erase(uint32 addr, uint32 len); +MemIf_StatusType fee_port_get_status(void); +MemIf_JobResultType fee_port_get_job_result(void); +``` + +### 4.2 可选接口 + +```c +void fee_port_mainfunction(void); +Std_ReturnType fee_port_cancel(void); +Std_ReturnType fee_port_compare(uint32 addr, const uint8 *src, uint32 len); +``` + +## 5. 接口语义 + +### 5.1 参数约束 + +1. `addr` 必须满足对应操作的对齐要求 +2. `len` 必须是 `read_unit/program_unit/erase_unit` 的整数倍,或由上层先补齐 +3. 
端口层不得默默修正非法参数,必须返回错误 + +### 5.2 job 语义 + +推荐按异步 job 语义设计: + +- `fee_port_write/erase` 仅发起 job +- 完成状态通过 `fee_port_get_status()` / `fee_port_get_job_result()` 查询 +- 上层 `fee_mainfunction()` 负责 poll + +如果底层驱动本身是同步接口,也建议在 port 层模拟出统一的 job 状态机,而不是把同步/异步差异泄露给 FEE 核心。 + +### 5.3 错误语义 + +建议至少区分: + +- 参数错误 +- 驱动忙 +- 介质错误 +- ECC / compare 错误 +- 未初始化 + +## 6. 对齐与切片规则 + +### 6.1 通用规则 + +```text +header_len % program_unit == 0 +tail_len % program_unit == 0 +payload_chunk % preferred_chunk == 0 or payload_chunk % program_unit == 0 +erase_len % erase_unit == 0 +``` + +### 6.2 `FAST` / `NORMAL` / `BULK` + +- `FAST`: 优先使用 `preferred_chunk`,目标是最少 job 数 +- `NORMAL`: 优先保证稳态吞吐和可恢复性 +- `BULK`: 优先按 `preferred_chunk` 或 wordline 连续切片 + +## 7. 与 `Fls_17_Dmu` 的映射 + +建议在 `fee_port_fls.c` 中做一层薄适配: + +- `fee_port_read` -> `Fls_17_Dmu_Read` +- `fee_port_write` -> `Fls_17_Dmu_Write` +- `fee_port_erase` -> `Fls_17_Dmu_Erase` +- `fee_port_get_status` -> `Fls_17_Dmu_GetStatus` +- `fee_port_get_job_result` -> `Fls_17_Dmu_GetJobResult` + +并在适配层内集中处理: + +- 地址偏移与物理基地址映射 +- 返回码翻译 +- 对齐断言 +- 可选的统计信息采集 + +## 8. RAM Mock 后端 + +### 8.1 目的 + +在真实 flash 驱动接入前,用 RAM mock 快速验证: + +- on-flash format 编解码 +- checkpoint / recovery +- scheduler / GC 状态机 + +### 8.2 建议实现 + +```c +static uint8 fee_mock_flash[0x10000]; +``` + +RAM mock 也应遵守与真实驱动一致的: + +- `program_unit` +- `erase_unit` +- 单 job in-flight 约束 + +避免 mock 过于宽松,导致切换到真驱动时暴露问题。 + +## 9. 建议测试 + +- 非法对齐写入被拒绝 +- 驱动 busy 时上层正确排队 +- job result 从 `PENDING -> OK/FAILED` +- RAM mock 与 `Fls_17_Dmu` 的能力参数差异对上层透明 diff --git a/components/custom_fee/doc/fee_redesign.md b/components/custom_fee/doc/fee_redesign.md new file mode 100644 index 00000000000..b96a966c2e9 --- /dev/null +++ b/components/custom_fee/doc/fee_redesign.md @@ -0,0 +1,1367 @@ +# TC397 DFlash FEE 重构设计方案 + +## 1. 
目的 + +本文档用于指导在 TC397 DFlash 上重新实现一套面向固定逻辑块的 FEE 机制,目标是替代通用 KV 模型在 GC 阶段带来的长尾延迟问题,并保留掉电恢复、块级回滚和异步后台整理能力。 + +本文档基于以下现状: + +- 当前项目使用 Infineon MCAL `Fee/Fls_17_Dmu/NvM` 组合完成持久化。 +- 参考对比对象为 `FlashDB` 的 `KVDB` 代码路径。 +- 底层介质为 TC397 DFlash,当前配置为 8 B page、512 B wordline、单个 NVM sector 56 KiB。 + +说明: + +- 文档前半部分会使用当前工程和 AUTOSAR 规范做对比,目的是解释问题来源。 +- 文档后半部分的“最终方案”按 greenfield 前提设计,可以推翻当前的 immediate block、QS、双 sector 划分和现有地址布局。 + +配套子文档: + +- `fee_onflash_format.md`: 约束 on-flash 数据结构、字段编码和提交规则 +- `fee_boot_recovery.md`: 约束启动恢复、checkpoint 恢复和首个可读时刻 +- `fee_port_adapter.md`: 约束底层 flash 驱动适配接口和能力参数 +- `fee_scheduler_gc.md`: 约束调度、排队、抢占和 GC 步进 +- `fee_cache_checkpoint.md`: 约束 RAM cache、checkpoint 刷写和 tail 上界 +- `fee_cfg_rules.md`: 约束 block 配置表、lane 映射和编译期校验 + +## 2. 背景与问题 + +### 2.1 当前工程中的 FEE 机制 + +当前工程的持久化调用链为: + +```text +App + -> NvM + -> MemIf + -> Fee + -> Fls_17_Dmu + -> DFlash +``` + +当前 MCAL FEE 的关键特征: + +- 逻辑单元是固定 `block_id`,不是通用 KV。 +- 写入采用追加写,不对旧数据原地覆盖。 +- Cache 中保留当前副本与前一副本状态。 +- 上电初始化通过扫描 sector 重建块缓存。 +- GC 由 `Fee_MainFunction()` 驱动,以状态机方式异步推进。 + +当前代码里可以看到的回滚相关状态包括: + +- `Valid` +- `Consistent` +- `PrevCopyValid` +- `PrevCopyConsistent` +- `PrevCopyAddress` + +这说明当前工程里的“数据回滚”本质上已经不是业务层版本回退,而是“回退到最近一次写成功的完整块副本”。 + +### 2.2 FlashDB GC 慢的根因 + +以 `FlashDB` 的 `KVDB` 为例,GC 的典型路径是: + +```text +alloc/new kv 失败 + -> gc_collect_by_free_size() + -> sector_iterator() + -> do_gc() + -> read_kv() + -> 检查 crc/status + -> move_kv() + -> format_sector() +``` + +这类机制的问题是: + +- GC 常在空间不够时被动触发,容易卡在业务写路径上。 +- GC 需要枚举 sector 内所有物理记录,复杂度与脏扇区内容直接相关。 +- 每条记录都要校验和搬运,尾时延不稳定。 +- 通用 KV 模型对固定块场景有额外元数据和查找成本。 + +因此,如果目标是实现一个更稳的 automotive 风格 FEE,就不应继续按通用 KVDB 的思路设计。 + +## 3. 重构目标 + +### 3.1 目标 + +- 面向固定逻辑块 `block_id`,而不是任意字符串 key。 +- 写入路径保持追加写,避免原地修改。 +- 读路径在初始化后保持 O(1)。 +- 支持掉电恢复。 +- 支持块级回滚到上一份完整数据。 +- GC 改为提前触发、增量执行、每周期工作量可控。 +- DFlash 的写入粒度需要配置可选。 + +### 3.2 非目标 + +- 不实现通用 KV 数据库。 +- 第一阶段不实现复杂查询、范围遍历和字符串 key。 +- 第一阶段不复刻 AUTOSAR Fee 的所有扩展能力,例如 QS、uncfg block、完整诊断钩子。 + +## 4. 
底层约束 + +当前配置可从现有工程抽取出以下约束: + +| 项目 | 当前值 | +| --- | --- | +| DFlash base | `0xAF000000` | +| DFlash total size | `0x80000` | +| NVM Sector0 | `0xAF000000 ~ 0xAF00DFFF` | +| NVM Sector1 | `0xAF00E000 ~ 0xAF01BFFF` | +| 单个 NVM sector 大小 | `0xE000` = 56 KiB | +| QS area | `0xAF01C000 ~ 0xAF01FFFF` | +| page size | `8 B` | +| wordline size | `512 B` | +| erase suspend | `OFF` | + +这些约束直接影响设计: + +- 所有头部、尾部和状态位必须按 8 B 对齐,需保留配置项可变。 +- 大块写入应尽量按 512 B 切片,需保留配置项可变,降低底层驱动适配复杂度。 +- 擦除无法安全抢占时,GC 必须更早开始。 +- 推荐优先考虑 3-sector ring,而不是极限压缩到 2-sector。 + +### 4.1 AUTOSAR 4.4 约束抽取 + +结合 `AUTOSAR_SWS_FlashEEPROMEmulation.pdf`,对性能最有影响的规范约束主要有以下几条: + +| 规范项 | 含义 | 对新设计的影响 | +| --- | --- | --- | +| `SWS_Fee_00022` | `Fee_Read` 在 `MEMIF_IDLE` 或 `MEMIF_BUSY_INTERNAL` 时应接受请求 | 内部 GC 进行中时,读请求不能一律拒绝 | +| `SWS_Fee_00025` | `Fee_Write` 在 `MEMIF_IDLE` 或 `MEMIF_BUSY_INTERNAL` 时应接受请求 | 内部整理进行中时,写请求应支持排队或抢占策略 | +| `SWS_Fee_00192` | `Fee_InvalidateBlock` 在 `MEMIF_BUSY_INTERNAL` 时也应接受 | 失效操作不能因 GC 一刀切拒绝 | +| `SWS_Fee_00009` | immediate block 必须可立即写,不能等待内部管理操作,也不能等待先擦除目标区 | 必须存在预擦除保留区或独立 immediate lane | +| `SWS_Fee_00067` | `Fee_EraseImmediateBlock` 的职责是保证 immediate 可写 | 设计中必须明确 immediate 容量保证机制 | +| `SWS_Fee_00153/00154` | 写开始时块应标记为 corrupted,写成功后恢复为 not corrupted | 必须有明确的提交点,不能靠 RAM 状态判断一致性 | +| `SWS_Fee_00102/00103` | 每个块应可配置期望擦写次数,FEE 应分散写入 | 应按热度或擦写寿命等级分层布局,而不是所有块同等待遇 | + +由此可以得到一个重要结论: + +- AUTOSAR 并没有要求 FEE 必须做成“复杂通用数据库”。 +- AUTOSAR 真正要求的是:异步接口语义、内部状态可恢复、immediate 可快速完成、块一致性可判定、擦写寿命可配置。 +- 因此,针对固定块场景完全可以用更强的布局约束换取更快的实现。 + +### 4.2 当前工程实现中的性能相关事实 + +结合当前 `Fee.c` 和配置,已经能看到几个直接影响读写时延的点: + +| 项目 | 当前实现 | 对性能的影响 | +| --- | --- | --- | +| 数据类型 | `FEE_DOUBLE_SECTOR_AND_QUASI_STATIC_DATA` | 同一套状态机同时处理 normal data 和 QS,代码路径更重 | +| immediate block | Block 16,大小 `4096 B` | immediate 语义和低尾时延之间存在明显冲突 | +| GC 阈值 | `2048 B` | 若把该阈值当 immediate 预留区,则小于当前 `4096 B` immediate block 大小 | +| 单次写/比较长度 | `FEE_MAX_BYTES_PER_CYCLE = 64` | 在 512 B wordline 上会放大状态切换次数和同步 compare 成本 | +| 每轮缓存扫描页数 | `FEE_PAGES_PER_FEEMAIN = 
65535` | 初始化和 GC 扫描粒度几乎不受控 | +| 擦除挂起 | `OFF` | 擦除期间的读写插队能力受限 | +| unconfigured block | `FEE_UNCONFIG_BLOCK_IGNORE` | 配置上已偏向简化,这一点可以继续强化 | +| previous copy API | `STD_OFF` | 业务侧没有公开的“读旧版本”接口,但内部仍维护 previous copy | + +除此之外,当前实现还有三个很重的成本来源: + +1. 写入粒度偏小 + 当前 `Fee.c` 将写和比较的单次长度都限制为 `64 B`。对于 `4096 B` 这类大块,请求完成需要跨很多状态机周期推进。 + +2. wordline 共享处理复杂 + 当前实现包含大量“最后写入 wordline 受影响”的处理逻辑,例如: + - 同一 wordline 内多块共享 + - 跨 wordline 尾块修复 + - 写后整 wordline compare + + 这说明现有布局为了容量利用率,接受了较复杂的恢复路径。 + +3. 4.2.2 分支对 `BUSY_INTERNAL` 更保守 + AUTOSAR 4.4 允许 `Read/Write/Invalidate` 在 `MEMIF_BUSY_INTERNAL` 时接受请求并异步执行;而当前实现里至少 `Invalidate` 在 4.2.2 分支下仍倾向于拒绝内部忙状态下的请求。 + +### 4.3 为快速读写收紧的设计约束 + +如果目标明确是“更快的读写”和“更短的 GC 长尾”,建议把 FEE 的支持范围主动收紧为下列约束。 + +#### 4.3.1 接口和功能约束 + +| 约束 | 目的 | 代价 | +| --- | --- | --- | +| 只支持 `native` block,不在 FEE 层实现 dataset/redundant | 降低地址换算和状态管理复杂度 | dataset/redundant 交给 NvM 或上层处理 | +| 不在 fast path 中支持 QS block | 拆掉额外状态机和分支 | QS 需单独模块或单独区域实现 | +| 不支持 unconfigured block copy | 避免 GC 复制未知对象 | 迁移数据需显式版本管理 | +| `Read/Write/Invalidate` 在 `BUSY_INTERNAL` 时允许排队 | 符合 AUTOSAR 4.4,并减少 GC 期间业务抖动 | 需要 1 级或小队列 pending buffer | +| 公开接口只保留 current copy 读 | 保持业务接口简单 | previous copy 仅内部用于回滚 | + +#### 4.3.2 immediate 数据约束 + +immediate 数据不应只靠“块类型”命名,而应配套容量约束。 + +建议新增如下条件: + +1. `max_immediate_record_span <= immediate_reserve_bytes` +2. `gc_start_threshold >= immediate_reserve_bytes + gc_switch_guard` +3. `immediate_reserve_bytes` 只允许 immediate block 使用,normal block 不得侵占 +4. immediate block 的推荐上限为 `<= 1 wordline`,即优先控制在 `<= 512 B` +5. 
若业务块大于该上限,不允许定义为 immediate,而应拆成: + - 小 immediate 索引/标志块 + - 大 normal 数据块 + +推荐公式: + +```text +record_span(block) = align8(record_header + payload + commit_tail) +immediate_reserve_bytes >= max(record_span(all_immediate_blocks)) * burst_factor +gc_start_threshold >= immediate_reserve_bytes + one_record_guard + sector_switch_guard +``` + +其中: + +- `burst_factor = 1` 适用于“任一时刻只保证一次 immediate 写入” +- `burst_factor = N` 适用于“连续 N 次 immediate 写入不等待 GC” + +对当前工程,`4096 B` immediate block 是最值得先调整的点。若保留该定义,则: + +- immediate reserve 会被迫做得很大 +- 单次请求本身也会很长 +- 与“内部管理操作不应延迟 immediate 写入”的目标相冲突 + +#### 4.3.3 记录布局约束 + +为了降低恢复和 compare 成本,建议增加以下布局规则: + +1. 一个 logical record 不能与其他 block 共享同一个 wordline。 +2. record 起始地址必须按 `wordline` 对齐,或者至少 header 按 `wordline` 对齐。 +3. 若 `record_span(block) > 1 wordline`,该块自动归类为 `large block`。 +4. `large block` 不与小块混写在同一条追加日志内。 + +这会损失一部分容量利用率,但可以显著减少: + +- WL 共享恢复 +- 最后写入 WL 污染判定 +- WL 全量 compare +- 中断恢复状态数 + +#### 4.3.4 块大小分类约束 + +建议在配置阶段按大小把块分成三类: + +| 类型 | 推荐大小 | 策略 | +| --- | --- | --- | +| `small` | `<= 448 B` | 单条 record 尽量控制在一个 wordline 内完成 | +| `medium` | `449 B ~ 2048 B` | 允许多 wordline,仍走普通 append log | +| `large` | `> 2048 B` | 单独放入 large-block lane,避免拖慢小块 | + +这里将 `448 B` 作为 small 上限,是为了给 header/tail 留出空间,尽量保持“单 wordline 提交”。 + +对 large block,建议至少满足以下约束之一: + +- 不允许定义为 immediate +- 单独使用一组 sector +- 独立 GC,不与小块混搬 + +#### 4.3.5 写入粒度约束 + +当前实现的 `64 B` 单次写/比较粒度偏保守。若目标是更快完成一次业务写,应优先改成与底层介质更一致的粒度: + +| 方案 | 特点 | +| --- | --- | +| `64 B` | 状态切换多,吞吐偏低,但单步工作量小 | +| `256 B` | 折中 | +| `512 B = 1 wordline` | 吞吐最高,最利于简化恢复和 compare 路径 | + +建议新实现按下述规则: + +1. 小块优先单 wordline 提交 +2. 大块按 wordline 切片写入 +3. 不再做“任意 64 B 软件 compare”作为主路径 +4. 
一致性判定优先依赖: + - `commit tail` + - `CRC` + - 底层 FLS/ECC 错误反馈 + +如果项目安全要求必须做 compare,也建议按完整 wordline 或完整 record 做,而不是 64 B 小片比较。 + +#### 4.3.6 热冷分离约束 + +AUTOSAR 要求可配置擦写次数,因此配置里应明确块热度,而不是默认所有块同策略。 + +建议为每个块新增: + +- `write_cycle_class` +- `temperature_class` +- `lane_type` + +例如: + +| 类别 | 说明 | 存放策略 | +| --- | --- | --- | +| `HOT_IMM` | 高频且 immediate | 独立 immediate reserve / hot lane | +| `HOT_NORM` | 高频普通块 | 独立 hot sector ring | +| `COLD_NORM` | 低频普通块 | 冷数据 sector | +| `LARGE_COLD` | 低频大块 | large-block lane | + +这样做的收益是: + +- hot block 不会拖累 cold block 的 GC +- hot block 可单独做更高磨损均衡 +- cold block 的有效空间利用率更高 + +#### 4.3.7 启动时间约束 + +如果后续也关心“上电后尽快可读”,建议增加: + +1. `checkpoint` 或 `mini index page` +2. 限定一次初始化扫描的最大页数 +3. 上电先恢复索引,再后台补全深度校验 + +否则当前“全量顺序扫 sector 重建 cache”的路径,在块数和历史记录增多后会逐步变慢。 + +### 4.4 推倒重来的完整设计基线 + +从这一节开始,后续章节默认采用完整重构方案,而不是在当前配置上打补丁。 + +#### 4.4.1 总体思想 + +完整方案不再把所有块混在一条追加日志里,而是按业务特性拆成多个 lane: + +- `META lane`: 保存 checkpoint / superblock / 全局代数 +- `FAST lane`: 只放高优先级小块,保证最短写入时延 +- `NORMAL lane`: 放绝大多数普通块 +- `BULK lane`: 放大块或低频大块,避免拖慢小块 + +每个 lane 独立维护: + +- active sector +- gc destination sector +- spare sector +- lane 级 free pointer +- lane 级 GC 状态机 + +这样做的核心收益是: + +- FAST 写不会被 NORMAL/BULK GC 拖住 +- 大块搬迁不会影响小块尾时延 +- 不同热度块可以独立做磨损均衡 +- 启动恢复和容量规划都更清晰 + +#### 4.4.2 逻辑块分类 + +建议 block 配置不再只区分 `normal/immediate`,而是至少包含以下四类: + +| 类别 | 适用场景 | 设计规则 | +| --- | --- | --- | +| `FAST` | 故障标志、状态字、短配置项 | 必须单次快速写入,优先限制到单 record/单 wordline | +| `NORMAL` | 常规配置和状态块 | 走主日志和增量 GC | +| `BULK` | 大块、镜像、长报文、trace | 独立 lane,独立 GC | +| `META` | checkpoint、superblock | 双副本或双页轮换 | + +其中: + +- 对 AUTOSAR 语义,`FAST` 可视为 `immediate block` 的实现映射。 +- 对外接口仍可保留 `immediate` 配置项,但内部统一落到 `FAST lane`。 +- 不再推荐单独设计“immediate reserve 区”;保留能力由 `FAST lane` 的独立 `SPARE` 和 headroom 保证。 + +建议配置阶段做强校验: + +1. `FAST` 块的 `record_span` 必须小于等于配置上限。 +2. `BULK` 块不得标记为 `FAST`。 +3. `FAST/NORMAL/BULK` 的 lane 不允许混用。 +4. 
同一 lane 内不得混入不同一致性策略的块。 + +#### 4.4.2.1 block 配置模型 + +建议把 block 配置做成强约束描述,而不是只保留 `block_id + size`。 + +| 字段 | 说明 | 约束建议 | +| --- | --- | --- | +| `block_id` | 逻辑块号 | 全局唯一 | +| `block_class` | `FAST/NORMAL/BULK/META` | 决定 lane 和调度优先级 | +| `max_len` | 最大有效载荷长度 | 编译期固定 | +| `lane_type` | 目标 lane | 必须与 `block_class` 一致 | +| `endurance_class` | 擦写寿命等级 | `HOT/WARM/COLD` 等 | +| `keep_prev_copy` | 是否保留上一份完整副本 | 推荐 `FAST/NORMAL` 打开 | +| `allow_rollback` | 是否允许显式回滚 | 安全关键块可关闭 | +| `crc_mode` | `NONE/CRC16/CRC32` | 建议按块大小选择 | +| `record_align` | `8B` 或 `1 wordline` | `FAST` 建议固定为 `1 wordline` | +| `boot_critical` | 是否要求启动后优先可读 | 可影响 checkpoint 粒度 | + +推荐配置结构示例: + +```c +typedef struct +{ + uint16 block_id; + uint16 max_len; + uint8 block_class; + uint8 lane_type; + uint8 endurance_class; + uint8 keep_prev_copy; + uint8 allow_rollback; + uint8 crc_mode; + uint16 record_align; +} fee_block_cfg_t; +``` + +建议补充编译期检查: + +1. `FAST` 块必须满足 `max_len + header + tail <= fast_single_record_limit` +2. `BULK` 块必须满足 `record_align == wordline` +3. `keep_prev_copy = 1` 的块要计入 lane live 空间预算 +4. `allow_rollback = 1` 但 `keep_prev_copy = 0` 时直接报配置错误 + +#### 4.4.3 推荐物理拓扑 + +完整方案建议至少具备以下物理拓扑: + +| 区域 | 推荐扇区数 | 说明 | +| --- | --- | --- | +| `META lane` | 2 个最小管理单元 | checkpoint A/B 轮换 | +| `FAST lane` | 3 个 erase sector | `ACTIVE + GC_DST + SPARE` | +| `NORMAL lane` | 3 个 erase sector | `ACTIVE + GC_DST + SPARE` | +| `BULK lane` | 2 或 3 个 erase sector | 视容量和业务需求决定 | + +其中: + +- `FAST lane` 即使空间利用率偏低,也要优先保证可写性。 +- `NORMAL lane` 是容量主力。 +- `BULK lane` 的目标不是最低时延,而是避免污染小块路径。 + +如果总容量有限,最先不能省掉的是: + +1. `META lane` +2. `FAST lane` 的独立性 +3. `NORMAL lane` 的 `ACTIVE + GC_DST + SPARE` + +#### 4.4.3.1 容量规划公式 + +完整方案建议先按 lane 做容量预算,再决定 sector 数量。 + +定义: + +```text +record_span(i) = align(record_header + max_len(i) + commit_tail, record_align(i)) +live_span(i) = record_span(i) * (keep_prev_copy(i) ? 
2 : 1) +lane_live = Σ live_span(i), i ∈ lane +lane_payload = sector_count(lane) * sector_usable_bytes - lane_mgmt_bytes +lane_headroom = max_burst_records(lane) * max(record_span(i), i ∈ lane) + switch_guard(lane) +``` + +建议满足: + +```text +lane_payload >= lane_live + lane_headroom + gc_fragment_guard +gc_force_threshold[lane] >= max(record_span(i), i ∈ lane) + switch_guard(lane) +gc_start_threshold[lane] >= gc_force_threshold[lane] + gc_step_slack(lane) +``` + +其中: + +- `lane_live` 反映“当前副本 + 保留上一副本”的最坏常驻空间 +- `lane_headroom` 反映 lane 在开始 GC 后仍需接纳的突发写入 +- `gc_fragment_guard` 用于吸收对齐和 tombstone 带来的碎片损耗 + +建议经验值: + +- `FAST lane`: `max_burst_records = 2 ~ 4` +- `NORMAL lane`: `max_burst_records = 1 ~ 2` +- `BULK lane`: `max_burst_records = 1` +- `gc_fragment_guard`: 至少 `2 * max_record_span(lane)` + +#### 4.4.4 Checkpoint 基线 + +完整方案建议引入显式 checkpoint,而不是每次上电都全量扫全区域。 + +checkpoint 至少包含: + +- `global_generation` +- 每个 lane 的 `active/dst/spare` +- 每个 lane 的 `free_offset` +- 每个 block 的最新地址 +- 每个 block 的 `seq` +- checkpoint 自身 CRC + +建议使用: + +- 双 checkpoint 副本 +- 代数递增 +- 最后提交标记 + +推荐 checkpoint 结构: + +```text ++-----------------------------+ +| CkptHeader | ++-----------------------------+ +| LaneState[LANE_COUNT] | ++-----------------------------+ +| BlockMapEntry[BLOCK_COUNT] | ++-----------------------------+ +| CkptTail(COMMIT) | ++-----------------------------+ +``` + +其中 `BlockMapEntry` 建议至少包含: + +- `block_id` +- `lane` +- `cur_addr` +- `prev_addr` +- `cur_seq` +- `flags` + +如果 `BLOCK_COUNT` 较大,可把 `BlockMapEntry[]` 拆成多个 checkpoint page 分步写入,但仍必须以统一的 `CkptTail(COMMIT)` 作为唯一提交点。 + +checkpoint 刷写原则建议为: + +1. 不在每次 block 写成功后立即刷 checkpoint +2. 在以下事件后触发 checkpoint: + - lane GC 完成 + - active sector 切换 + - 连续 `N` 次业务写成功 + - 关键 `boot_critical` 块更新 +3. checkpoint 自身也遵循 `header -> payload -> tail commit` 提交规则 + +启动时流程变为: + +1. 读取双 checkpoint,选最新有效副本 +2. 按 checkpoint 恢复 RAM cache +3. 只扫描 checkpoint 之后的 tail 区域 +4. 
如发现中断 GC,则进入 lane 级修复 + +这样启动时间从“与所有历史记录数量相关”收敛为“与 checkpoint 间隔和尾部脏区相关”。 + +#### 4.4.5 编译期约束 + +建议在配置工具或静态检查里直接校验以下规则: + +1. `FAST` 块必须满足 `record_span <= fast_record_limit` +2. `FAST` 块禁止跨 wordline 提交 +3. `BULK` 块必须映射到 `BULK lane` +4. `lane_reserved_bytes >= max_burst_bytes` +5. `gc_start_threshold[lane] > gc_force_threshold[lane]` +6. `checkpoint_size <= meta_lane_capacity` +7. `sum(block_reserved_bytes) <= lane_usable_capacity` + +推荐公式: + +```text +record_span = align8(header + payload + tail) +lane_usable_capacity = sector_count * sector_payload - reserved_management_bytes +gc_force_threshold[lane] = max_record_span[lane] + switch_guard[lane] +gc_start_threshold[lane] = gc_force_threshold[lane] + gc_step_budget[lane] +checkpoint_period = f(block_count, write_rate, boot_time_target) +``` + +#### 4.4.6 一致性策略 + +完整方案建议把一致性策略定成以下固定规则: + +1. `header` 先写 +2. `payload` 后写 +3. `tail` 最后写 +4. `tail.commit_marker` 是唯一提交点 +5. cache 只在 `tail` 成功后更新 +6. old copy 在 new copy 提交前必须保持可读 + +因此: + +- 业务回滚永远通过 old copy/new copy 关系实现 +- 不允许依赖“中间 RAM 状态”判断块有效性 +- 不允许在 record 中途原地修正状态 + +#### 4.4.7 调度策略 + +完整方案建议区分两级请求队列: + +- `urgent queue`: 仅给 `FAST` 请求使用 +- `normal queue`: 给 `NORMAL/BULK/read/invalidate` 使用 + +调度原则: + +1. 正在执行的硬件页写不可打断 +2. 未开始的普通请求可被 `FAST` 请求抢占 +3. `BUSY_INTERNAL` 时允许受理用户请求,进入队列 +4. lane 级 GC 只在没有更高优先级用户请求时推进 + +这比“只要 GC 在跑就拒绝请求”的策略更符合 AUTOSAR 4.4 语义,也更利于系统实时性。 + +推荐主调度伪代码: + +```c +void fee_mainfunction(void) +{ + fee_hw_poll(); + + if (fee_hw_is_busy()) + { + return; + } + + if (urgent_queue_not_empty()) + { + dispatch_fast_request(); + return; + } + + if (checkpoint_due() && !normal_queue_overloaded()) + { + dispatch_checkpoint(); + return; + } + + if (normal_queue_not_empty()) + { + dispatch_normal_request(); + return; + } + + if (any_lane_force_gc()) + { + dispatch_force_gc_lane(); + return; + } + + if (any_lane_requested_gc()) + { + dispatch_gc_round_robin(); + return; + } +} +``` + +建议补充两条公平性规则: + +1. `NORMAL/BULK` 用户请求连续等待超过阈值时,禁止 `FAST` 之外的后台任务继续插队 +2. 
多个 lane 都请求 GC 时,按 round-robin 推进,避免某个 lane 长期饿死 + +## 5. 总体架构 + +建议重写为专用 FEE,而不是“另一个 FlashDB”。 + +```text +App / NvM adapter / fee device + -> fee_api.c + -> fee_sched.c + -> fee_core.c + -> fee_gc.c + -> fee_recovery.c + -> fee_cache.c + -> fee_ckpt.c + -> fee_lane_fast.c + -> fee_lane_log.c + -> fee_lane_bulk.c + -> fee_port_fls.c + -> Fls_17_Dmu + -> DFlash +``` + +模块职责建议如下: + +- `fee_api.c`: 对外 API,参数检查,请求排队。 +- `fee_sched.c`: 请求分级、队列管理、lane 调度。 +- `fee_core.c`: 读写/失效主流程。 +- `fee_gc.c`: GC 状态机和 sector 切换。 +- `fee_recovery.c`: 上电扫描和中断恢复。 +- `fee_cache.c`: RAM 索引维护。 +- `fee_ckpt.c`: checkpoint 刷写和恢复。 +- `fee_lane_fast.c`: FAST lane 分配、提交和回滚。 +- `fee_lane_log.c`: NORMAL lane 追加日志和 GC。 +- `fee_lane_bulk.c`: BULK lane 大块写入和 GC。 +- `fee_port_fls.c`: 与 `Fls_17_Dmu` 适配,封装 page/wordline 读写。 +- `fee_cfg.h`: block 表、sector 数量、阈值等静态配置。 + +### 5.1 驱动适配约束 + +`fee_port_fls.c` 建议作为唯一的底层驱动适配层,FEE 核心逻辑不直接依赖具体 flash 驱动实现。 + +设计约束建议如下: + +1. FEE 不应假设底层支持任意软件切片粒度,实际写入对齐和单次编程长度必须受驱动能力约束。 +2. 底层驱动应显式暴露以下能力参数: + - `read_unit` + - `program_unit` + - `erase_unit` + - `preferred_chunk` +3. record header、payload 切片、commit tail 的对齐规则都必须满足 `program_unit`。 +4. 大块搬运和 GC 单步长度优先参考 `preferred_chunk`,但不得破坏上层定义的单周期工作量上界。 +5. 若底层驱动切换到其他器件或仿真后端,FEE 核心不需要修改,只调整 `fee_port_fls.c` 和能力参数。 + +建议适配层至少提供以下统一接口: + +```c +typedef struct +{ + uint16 read_unit; + uint16 program_unit; + uint32 erase_unit; + uint16 preferred_chunk; +} fee_flash_caps_t; + +Std_ReturnType fee_port_init(void); +Std_ReturnType fee_port_get_caps(fee_flash_caps_t *caps); +Std_ReturnType fee_port_read(uint32 addr, uint8 *dst, uint32 len); +Std_ReturnType fee_port_write(uint32 addr, const uint8 *src, uint32 len); +Std_ReturnType fee_port_erase(uint32 addr, uint32 len); +MemIf_StatusType fee_port_get_status(void); +MemIf_JobResultType fee_port_get_job_result(void); +``` + +如果底层驱动是异步 job 模型,还应补充: + +- `fee_port_mainfunction()` 或等价轮询接口 +- 可选的 job end / job error 回调桥接 + +## 6. 
存储布局设计 + +### 6.1 扇区角色 + +完整设计按 lane 维护扇区角色,而不是全局只维护一对 sector。 + +每个 lane 推荐至少包含以下角色: + +- `ACTIVE`: 当前追加写扇区 +- `GC_DST`: 本 lane 的 GC 复制目标扇区 +- `SPARE`: 本 lane 的预擦除备用扇区 + +推荐部署方式: + +1. `FAST lane`: 3-sector ring +2. `NORMAL lane`: 3-sector ring +3. `BULK lane`: 2 或 3-sector ring +4. `META lane`: 双 checkpoint 副本 + +如果容量极端受限,也可以把 `BULK lane` 退化成双扇区,但 `FAST/NORMAL` 的 3-sector 独立性不建议省掉。 + +### 6.2 sector 布局 + +```text ++-----------------------------+ +| SectorHeader | ++-----------------------------+ +| Record 0 | +| Record 1 | +| Record 2 | +| ... | +| free area | ++-----------------------------+ +``` + +#### SectorHeader 建议字段 + +| 字段 | 说明 | +| --- | --- | +| `magic` | 扇区签名 | +| `format_version` | 布局版本 | +| `generation` | 扇区代数,切换时递增 | +| `state` | `ERASED/ACTIVE/GC_DST/FULL` | +| `hdr_crc` | sector header 校验 | + +### 6.3 record 布局 + +建议按“头 + 数据 + 提交尾”设计: + +```text ++-----------------------------+ +| RecordHeader | ++-----------------------------+ +| Payload | ++-----------------------------+ +| CommitTail | ++-----------------------------+ +``` + +#### RecordHeader 建议字段 + +| 字段 | 说明 | +| --- | --- | +| `magic` | 记录签名 | +| `block_id` | 逻辑块号 | +| `record_type` | `DATA` 或 `TOMBSTONE` | +| `data_len` | 数据长度 | +| `seq` | 单块递增序号 | +| `flags` | `PREPARE/COPIED/ROLLBACK` 等 | +| `hdr_crc` | 头部校验 | + +#### CommitTail 建议字段 + +| 字段 | 说明 | +| --- | --- | +| `data_crc` | 数据区 CRC | +| `commit_marker` | 最终提交标记 | +| `tail_crc` | 尾部校验 | + +关键要求: + +- `commit_marker` 必须最后写入。 +- 没有 `commit_marker` 的记录一律视为未提交。 +- 启动恢复时只接受 “头合法 + 尾合法 + 提交完成” 的记录。 + +建议把 `flags` 收紧为极少数状态,避免运行期状态爆炸: + +- `PREPARE`: 已开始写但未提交 +- `COPIED`: 该记录由 GC 迁移生成 +- `ROLLBACK`: 该记录由显式回滚生成 + +不建议再保留与旧实现强绑定的 `IMMEDIATE` 标记,lane 已经承担优先级语义。 + +### 6.4 推荐分区布局 + +如果按完整方案重构,建议使用 lane 级分区,而不是只在单个 active sector 内预留 immediate 区。 + +```text +DFlash + |- META lane + | |- checkpoint A + | |- checkpoint B + | + |- FAST lane + | |- sector F0 ACTIVE + | |- sector F1 GC_DST + | |- sector F2 SPARE + | + |- NORMAL lane + | |- sector N0 
ACTIVE + | |- sector N1 GC_DST + | |- sector N2 SPARE + | + |- BULK lane + |- sector B0 ACTIVE + |- sector B1 GC_DST + |- sector B2 SPARE(optional) +``` + +推荐规则: + +1. `FAST` 块只写入 `FAST lane`。 +2. `NORMAL` 块只写入 `NORMAL lane`。 +3. `BULK` 块只写入 `BULK lane`。 +4. `META lane` 不承载业务数据,只用于恢复加速和全局角色持久化。 + +如果后续还需要更强的热冷隔离,可以在 `NORMAL lane` 内继续细分为: + +- `HOT_NORM lane` +- `COLD_NORM lane` + +但这属于第二阶段增强,不是完整方案的必要前提。 + +## 7. RAM 索引设计 + +每个逻辑块只保留当前副本和前一副本索引: + +```c +typedef struct +{ + uint8 lane; + uint32 cur_addr; + uint32 prev_addr; + uint32 seq; + uint16 len; + uint8 cur_valid; + uint8 prev_valid; + uint8 cur_sector; + uint8 prev_sector; +} fee_cache_entry_t; +``` + +单个 lane 的上下文建议如下: + +```c +typedef struct +{ + uint8 active_sector; + uint8 dst_sector; + uint8 spare_sector; + uint16 gc_cursor; + uint8 gc_state; + uint8 gc_requested; + uint32 free_offset; + uint32 gc_start_threshold; + uint32 gc_force_threshold; +} fee_lane_ctx_t; +``` + +全局上下文建议如下: + +```c +typedef struct +{ + fee_lane_ctx_t fast; + fee_lane_ctx_t normal; + fee_lane_ctx_t bulk; + uint32 checkpoint_generation; + uint8 urgent_req_pending; + uint8 normal_req_pending; +} fee_super_ctx_t; +``` + +这个设计与 FlashDB 的关键区别是: + +- FlashDB 按物理记录遍历。 +- 新 FEE 按逻辑块 cache 遍历。 + +因此 GC 复制复杂度从“sector 内全部记录数”收缩为“当前 live block 数”。 + +## 8. 关键流程设计 + +### 8.1 初始化恢复 + +启动流程建议如下: + +1. 先读取 `META lane` 的双 checkpoint。 +2. 若 checkpoint 有效,则恢复各 lane 的角色和 free pointer。 +3. 只扫描各 lane checkpoint 之后的 tail 区域。 +4. 对每个已提交记录按 `block_id + seq` 更新 cache。 +5. 如果 checkpoint 无效,再回退到全量 sector 扫描。 +6. 如果发现某个 lane 上次 GC 中断,则只修复该 lane。 + +初始化扫描只做一次,后续读路径全部走 RAM cache。 + +### 8.2 写流程 + +写流程建议如下: + +1. 查询 block 配置,检查长度是否合法,并确定所属 lane。 +2. 若 lane 处于 `BUSY_INTERNAL`,请求进入相应队列。 +3. 若 lane 剩余空间低于 `gc_start_threshold[lane]`,提前置该 lane 的 `gc_requested`。 +4. 若 lane 当前空间不足以放下完整记录,等待本 lane GC 或返回 `BUSY`。 +5. 生成新记录 header,状态视为 `PREPARE`。 +6. 按 lane 策略写入 payload: + - `FAST`: 尽量单 record/单 wordline 完成 + - `NORMAL`: 普通分段写入 + - `BULK`: 按 wordline 连续切片 +7. 
最后写 `CommitTail`。 +8. 写成功后更新 RAM cache: + - `prev = cur` + - `cur = new_record` + +写路径的核心原则是: + +- 新副本提交前不破坏旧副本。 +- cache 只在新副本提交后更新。 + +建议补充一条请求调度规则: + +- 当模块处于 `BUSY_INTERNAL` 时,允许接受新的 `Read/Write/Invalidate` 请求,但仅进入 pending queue,不立即抢占正在执行的硬件写操作。 + +这条规则与 AUTOSAR 4.4 的语义一致,也能避免“GC 期间业务接口大量返回 `E_NOT_OK`”。 + +建议再补充一条 lane 级 admission 规则: + +- `FAST lane` 空间不足时,不允许借用 `NORMAL/BULK lane`;必须通过本 lane 的 `SPARE` 和 `GC_DST` 自给自足。 + +### 8.3 读流程 + +读流程建议如下: + +1. 根据 `block_id` 直接命中 cache。 +2. 若 `cur_valid = 1`,读取当前副本。 +3. 如当前副本校验失败且 `prev_valid = 1`,回退读取前一副本。 +4. 若两者都不可用,返回 `INVALID/INCONSISTENT`。 + +### 8.4 失效流程 + +块失效不要原地写标记,直接在所属 lane 追加一条 `TOMBSTONE` 记录: + +- `record_type = TOMBSTONE` +- `data_len = 0` +- `seq = old_seq + 1` + +cache 在提交后更新为: + +- `cur_valid = 0` +- `prev_valid` 仍可按策略保留 + +如果上层需要“逻辑删除后仍可人工恢复”,则可以保留 `prev_valid = 1`;如果上层要求强失效,则可以在 GC 后彻底清除 previous copy。 + +### 8.5 数据回滚 + +本文档里的“回滚”定义为: + +- 新写入未提交或提交校验失败时,自动回到上一次完整副本。 +- 业务层显式请求回滚时,可把 `prev` 重新复制为新 `cur`。 + +建议支持两种回滚模式: + +1. 隐式回滚 + - 用于掉电恢复和写失败恢复。 + - 读路径自动使用 `prev`。 + +2. 显式回滚 + - 提供 `fee_rollback(block_id)`。 + - 将 `prev` 追加复制成一条新的当前记录。 + - 不建议直接把 cache 指针回拨到旧地址,避免状态不可追踪。 + +建议增加显式回滚的准入约束: + +1. 仅 `keep_prev_copy = 1` 的块允许显式回滚 +2. `BULK` 块默认关闭显式回滚,除非业务强需求 +3. 同一块在 `rollback` 未完成前,新的 `write` 请求进入串行队列 + +## 9. GC 设计 + +### 9.1 GC 触发条件 + +不要等写失败再启动 GC,建议至少有两个阈值: + +- `gc_start_threshold` +- `gc_force_threshold` + +建议定义: + +- 当 `free_bytes < gc_start_threshold` 时,后台进入 `GC_REQUESTED` +- 当 `free_bytes < gc_force_threshold` 时,该 lane 只允许已经入队的更高优先级请求继续推进,低优先级请求返回 `BUSY` + +也就是说: + +- 每个 lane 维护自己的阈值 +- `FAST` lane 的阈值最保守,优先保证接纳能力 +- `NORMAL/BULK` lane 的阈值可按吞吐和容量折中 +- `FAST` 的“立即可写”由独立 lane headroom 保证,而不是由共享 reserve 区保证 + +### 9.2 GC 核心策略 + +GC 在每个 lane 内独立运行,不扫描所有物理记录,而是扫描 RAM cache: + +1. 选择该 lane 的 `ACTIVE` 为源扇区,`GC_DST` 为目标扇区。 +2. 从该 lane 的 `gc_cursor = 0` 开始遍历属于该 lane 的配置块。 +3. 如果某块当前有效副本位于源扇区,则复制其当前副本到目标扇区。 +4. 每次 `MainFunction()` 每个 lane 最多搬 1 个块,或者最多写 1 个 wordline。 +5. 
所有 live block 搬完后,更新该 lane 的 sector state。 +6. 把旧 `ACTIVE` 擦除并转为该 lane 的 `SPARE`。 + +### 9.3 GC 状态机 + +建议状态如下: + +```text +GC_IDLE + -> GC_REQUESTED + -> GC_PREPARE_DST + -> GC_COPY_ONE + -> GC_SWITCH_HEADER + -> GC_ERASE_OLD + -> GC_FINISH + -> GC_IDLE +``` + +建议说明: + +- `GC_PREPARE_DST`: 擦除并写好目标扇区 header +- `GC_COPY_ONE`: 每周期搬一块 +- `GC_SWITCH_HEADER`: 先标记新扇区为 ACTIVE,再把旧扇区标记 FULL/OLD +- `GC_ERASE_OLD`: 擦旧扇区,可后台延后 + +### 9.4 为什么这种 GC 会更快 + +相对 FlashDB,新的 GC 长尾更短,原因有四点: + +- 不在分配失败时整扇区扫描。 +- 不遍历无效物理记录,只遍历 live block。 +- 每个周期工作量固定,可配置。 +- 复制路径是“按逻辑块”而不是“按所有历史记录”。 + +### 9.5 有界工作量与实时性声明 + +为了把“快速读写”从经验判断变成设计承诺,建议在实现中显式固定每个周期的最大工作量。 + +建议约束如下: + +1. `fee_mainfunction()` 每次最多启动 1 个底层 flash job +2. 单次 GC 步进最多复制 1 个 block,或者最多推进 1 个 wordline +3. `FAST` 写入必须满足单条 record 在有限步数内提交完成 +4. `NORMAL/BULK` 大块写入必须切片,禁止一次性长时间占用调度器 +5. checkpoint 写入必须可分步推进,不得阻塞 `FAST` 请求接纳 + +可以把可验证的上界写成: + +```text +T_read(block) <= T_cache_lookup + T_flash_read(record_span(block)) +T_fast_write <= T_hdr + T_payload_fast + T_tail +T_gc_step(lane) <= max(T_copy_one_record, T_prepare_one_sector_poll) +T_mainfunction <= T_dispatch + one_flash_job_start_or_poll +``` + +其中最关键的是: + +- `T_fast_write` 必须通过 `FAST` 块大小上限来保证 +- `T_gc_step` 必须通过“每周期只搬 1 块或 1 wordline”来保证 +- `T_mainfunction` 必须与 lane 中累计脏数据量解耦 + +## 10. 掉电安全设计 + +掉电保护是该设计的核心要求。 + +### 10.1 单块写入中断 + +如果掉电发生在: + +- 头部写完之前:记录无效。 +- 数据写到一半:记录无效。 +- 尾部未写 `commit_marker`:记录无效。 +- `commit_marker` 写完但 cache 未更新:启动扫描仍能识别新记录,cache 可重建。 + +因此单块写入恢复不依赖 RAM。 + +### 10.2 GC 中断 + +GC 中断可能出现在: + +- 新 sector 擦除完成前 +- 已搬迁部分 block 后 +- 切换 header 过程中 +- 旧 sector 擦除前 + +恢复策略: + +- 通过 `generation + sector_state` 判断新旧扇区关系。 +- 两个扇区都存在有效记录时,以“提交完整记录 + 最新 generation”重建 cache。 +- 若检测到未完成 GC,则继续完成,而不是回退到初始状态。 + +## 11. 
存储管理图 + +### 11.1 逻辑结构图 + +```mermaid +flowchart LR + APP["App / NvM / fee device"] --> API["fee_api"] + API --> SCHED["fee_sched"] + SCHED --> CORE["fee_core"] + SCHED --> GC["fee_gc"] + API --> REC["fee_recovery"] + SCHED --> CKPT["fee_ckpt"] + CORE --> CACHE["fee_cache"] + CORE --> LFAST["fee_lane_fast"] + CORE --> LNORM["fee_lane_log"] + CORE --> LBULK["fee_lane_bulk"] + GC --> CACHE + GC --> LFAST + GC --> LNORM + GC --> LBULK + CKPT --> CACHE + CKPT --> META["META lane state"] + REC --> CACHE + REC --> META + LFAST --> PORT["fee_port_fls"] + LNORM --> PORT + LBULK --> PORT + REC --> PORT + PORT --> FLS["Fls_17_Dmu"] + FLS --> DFLASH["TC397 DFlash"] +``` + +### 11.2 Flash 布局图 + +```mermaid +flowchart LR + subgraph META["META lane"] + M1["checkpoint A"] + M2["checkpoint B"] + end + + subgraph FAST["FAST lane"] + F0["F0 ACTIVE"] + F1["F1 GC_DST"] + F2["F2 SPARE"] + end + + subgraph NORMAL["NORMAL lane"] + N0["N0 ACTIVE: blk1 seq21 / blk7 seq03"] + N1["N1 GC_DST"] + N2["N2 SPARE"] + end + + subgraph BULK["BULK lane"] + B0["B0 ACTIVE"] + B1["B1 GC_DST"] + B2["B2 SPARE(optional)"] + end + + subgraph RAM["RAM cache"] + C1["blk1 lane=NORMAL cur=seq21 prev=seq20"] + C2["blk7 lane=NORMAL cur=seq03"] + C3["blk_fast lane=FAST cur=seq8"] + C4["blk_bulk lane=BULK cur=seq2"] + end + + META --> RAM + RAM --> FAST + RAM --> NORMAL + RAM --> BULK +``` + +### 11.3 写入与回滚时序图 + +```mermaid +sequenceDiagram + participant App + participant FEE + participant Flash + + App->>FEE: fee_write(block_id, data) + FEE->>Flash: write RecordHeader(PREPARE) + FEE->>Flash: write Payload + FEE->>Flash: write CommitTail(COMMIT) + FEE->>FEE: cache.prev = cache.cur + FEE->>FEE: cache.cur = new_record + + Note over Flash: 若中途掉电,新记录无 COMMIT\n启动后忽略该记录 + Note over FEE: 读取时回退到 prev +``` + +## 12. 
对外接口建议 + +建议最小接口集如下: + +```c +Std_ReturnType fee_init(void); +Std_ReturnType fee_read(uint16 block_id, uint16 offset, uint8 *dst, uint16 len); +Std_ReturnType fee_write(uint16 block_id, const uint8 *src, uint16 len); +Std_ReturnType fee_invalidate(uint16 block_id); +Std_ReturnType fee_get_status(uint16 block_id, fee_block_status_t *status); +Std_ReturnType fee_rollback(uint16 block_id); +void fee_mainfunction(void); +``` + +如果需要兼容 AUTOSAR immediate 语义,建议做下面这层映射: + +- `FeeImmediateData = TRUE` 的块在配置阶段自动映射为 `FAST` +- `Fee_EraseImmediateBlock()` 不必再针对单块做特殊擦除,而是转化为一次 `FAST lane` 可写性检查或预擦除触发 + +建议同时定义内部配置接口: + +```c +const fee_block_cfg_t *fee_get_block_cfg(uint16 block_id); +fee_lane_t fee_block_to_lane(uint16 block_id); +bool fee_block_supports_rollback(uint16 block_id); +``` + +如果需要与 NvM 对接,可再提供: + +```c +Std_ReturnType fee_job_queue_push(...); +MemIf_StatusType fee_get_memif_status(void); +MemIf_JobResultType fee_get_job_result(void); +``` + +对内部端口层,建议明确: + +- 外部 `fee_read/fee_write` 是 FEE 语义接口 +- `fee_port_read/fee_port_write/fee_port_erase` 是底层驱动语义接口 +- 两层之间通过 `fee_flash_caps_t` 解耦,而不是把具体器件粒度散落到各个 lane 模块里 + +## 13. 代码组织建议 + +建议新实现放在独立目录,避免直接侵入现有 MCAL Fee: + +```text +software/rt-thread/components/custom_fee/ + fee_api.c + fee_sched.c + fee_core.c + fee_gc.c + fee_recovery.c + fee_cache.c + fee_ckpt.c + fee_lane_fast.c + fee_lane_log.c + fee_lane_bulk.c + fee_onflash.c + fee_port_fls.c + fee_port_mock_ram.c + fee_api.h + fee_internal.h + fee_cfg.h + fee_onflash.h + fee_port.h + Kconfig + SConscript + README.md +``` + +这样做的好处: + +- 便于和现有 MCAL Fee 并行验证。 +- 便于做 A/B 测试和启动切换。 +- 便于后续只替换 `MemIf/Fee` 接口层。 + +## 14. 
实现阶段建议 + +### 阶段 1:最小可运行版本 + +- 支持 `init/read/write/invalidate/mainfunction` +- 只支持固定 block 表 +- 只实现 `META + FAST + NORMAL` +- 每个 lane 独立 3-sector +- 支持掉电恢复 +- 支持 checkpoint +- GC 每次每个 lane 搬 1 个 block + +### 阶段 2:增强版本 + +- 增加 `fee_rollback(block_id)` +- 增加 `BULK lane` +- 增加 hot/cold normal lane +- 增加 lane 级寿命均衡策略 + +### 阶段 3:产品化版本 + +- 对接 NvM/MemIf +- 增加统计信息和诊断钩子 +- 增加擦写寿命监控 +- 增加异常注入测试 + +## 15. 测试建议 + +必须覆盖以下场景: + +- 空白 flash 初始化 +- 单块重复写入 +- 多块交叉写入 +- 写 header 后掉电 +- 写 payload 中途掉电 +- 写 commit tail 前掉电 +- GC 复制一半掉电 +- sector 切换时掉电 +- 旧 sector 擦除前掉电 +- 当前副本 CRC 错误,自动回退到上一副本 +- `FAST` 写入与 `NORMAL` lane GC 并发 +- `FAST` 连续突发写入直到触发 lane 内部 GC +- `BULK` 写入过程中插入 `FAST` 请求 +- checkpoint A 损坏、checkpoint B 有效 +- checkpoint A/B 都损坏,回退全量扫描 +- 某一 lane 的 `GC_DST` header 损坏后恢复 +- 显式 `rollback` 与后续 `write` 串行化 +- `BUSY_INTERNAL` 状态下接受 `Read/Write/Invalidate` 并排队 +- 每次 `MainFunction()` 的工作量不超过配置上限 + +建议至少记录以下指标: + +- 写平均时延 +- 写尾时延 +- GC 单步时延 +- GC 完成总周期数 +- 启动扫描时长 +- 有效空间利用率 +- 擦写放大系数 +- lane 间请求抢占延迟 +- checkpoint 周期对启动时间的影响 + +## 16. 结论 + +如果目标是解决 FlashDB 在 GC 上的长尾问题,正确方向不是继续优化通用 KV 的 sector 扫描,而是实现一套面向固定逻辑块的专用 FEE: + +- 写路径使用追加写和提交尾标记。 +- 读路径依赖 RAM cache,实现 O(1) 访问。 +- 回滚依赖 `current + previous` 双副本索引。 +- immediate 语义内部映射为 `FAST lane`,不再依赖共享预留区。 +- GC 依赖 cache 驱动的 lane 级增量搬迁,而不是全扇区物理记录扫描。 + + +## 17. 检验 + +- 修改代码后,在 `C:\Work\Code\weride\tc397_release\software\bsp\app_kit_tc397` 目录执行: + +```powershell +cmd /c "call C:\Work\InstallTools\env-windows\tools\bin\env-init.bat && scons -j8" +``` + +- 验证阶段可先用 RAM mock 替代真实 flash 驱动,例如: + +```c +static uint8 fee_mock_flash[0x10000]; +``` diff --git a/components/custom_fee/doc/fee_scheduler_gc.md b/components/custom_fee/doc/fee_scheduler_gc.md new file mode 100644 index 00000000000..bc83491b5b4 --- /dev/null +++ b/components/custom_fee/doc/fee_scheduler_gc.md @@ -0,0 +1,216 @@ +# FEE Scheduler And GC 设计 + +## 1. 
目的 + +本文档冻结 FEE 的请求调度、后台 GC 和 `BUSY_INTERNAL` 语义。 + +目标是同时满足: + +- `FAST` 请求低尾时延 +- `Read/Write/Invalidate` 在 `BUSY_INTERNAL` 下可受理 +- checkpoint 与 GC 都是后台任务,不抢走读请求的确定性 + +## 2. 调度原则 + +1. 同一时刻只允许 1 个底层 flash job 在执行。 +2. `FAST` 用户请求优先于后台任务。 +3. cache 已命中的 `read` 优先于 checkpoint 与 GC。 +4. checkpoint 与 GC 都属于背景流量,只能在没有更高优先级用户请求时推进。 +5. 多 lane GC 采用 round-robin,避免饥饿。 + +## 3. 请求分级 + +建议把请求分成四类,而不是把所有内容塞进一个 normal queue。 + +| 级别 | 类型 | 说明 | +| --- | --- | --- | +| `READ_FAST_PATH` | cache 已命中读 | 优先于 checkpoint / GC | +| `URGENT_QUEUE` | `FAST` write / invalidate / rollback | immediate 语义 | +| `NORMAL_QUEUE` | `NORMAL/BULK` write / invalidate / rollback | 普通用户请求 | +| `BACKGROUND` | checkpoint / GC / 擦除清理 | 仅空闲时推进 | + +## 4. `BUSY_INTERNAL` 语义 + +### 4.1 设计结论 + +`BUSY_INTERNAL` 表示模块内部正在做 checkpoint / GC / recovery 的后台工作,不等价于拒绝新请求。 + +### 4.2 受理规则 + +| 请求 | `MEMIF_BUSY_INTERNAL` 下行为 | +| --- | --- | +| `Read` | 允许受理;cache 命中时优先调度 | +| `FAST Write` | 允许受理;进入 `URGENT_QUEUE` | +| `NORMAL/BULK Write` | 允许受理;进入 `NORMAL_QUEUE` | +| `Invalidate` | 允许受理;按所属 lane 入队 | +| `Rollback` | 允许受理;与同块写请求串行 | + +不允许做的事: + +- 后台 GC 运行时一刀切拒绝所有 API +- checkpoint 长时间压住 cache-hit read + +## 5. 主调度流程 + +推荐主调度伪代码如下: + +```c +void fee_mainfunction(void) +{ + fee_port_mainfunction(); + + if (fee_port_get_status() == MEMIF_BUSY) + { + return; + } + + if (read_fast_pending()) + { + dispatch_read_fast(); + return; + } + + if (urgent_queue_not_empty()) + { + dispatch_urgent_request(); + return; + } + + if (normal_queue_not_empty()) + { + dispatch_normal_request(); + return; + } + + if (checkpoint_force_due()) + { + dispatch_checkpoint(); + return; + } + + if (gc_force_pending()) + { + dispatch_force_gc_lane(); + return; + } + + if (checkpoint_bg_due()) + { + dispatch_checkpoint(); + return; + } + + if (gc_bg_pending()) + { + dispatch_gc_round_robin(); + return; + } +} +``` + +## 6. Read 路径 + +### 6.1 cache 命中读 + +如果 `block_id` 在 RAM cache 中命中,且模块初始化状态允许读: + +1. 直接解析物理地址 +2. 若底层硬件空闲,则优先发起 read +3. 
若底层硬件忙,则挂入 `read_fast_pending` +4. 下一个空闲点优先于 checkpoint / GC 调度 + +### 6.2 cache 未命中读 + +cache 未命中通常说明: + +- 模块未 full-ready +- block 从未写入 +- cache 损坏或配置错误 + +此时不允许走“临时全盘扫描”补救,避免读时长尾。应返回: + +- `BUSY`,若尚未完成恢复 +- `INVALID/INCONSISTENT`,若恢复已完成但无有效副本 + +## 7. GC 调度 + +### 7.1 触发级别 + +每个 lane 独立维护: + +- `gc_requested` +- `gc_force` +- `gc_in_progress` + +### 7.2 步进约束 + +每次 `fee_mainfunction()` 对单个 lane 最多推进一项工作: + +1. 搬运 1 个 live block +2. 或写 1 个 sector header +3. 或轮询 1 个 erase job + +不允许在一个周期内完成“整扇区扫描 + 多块搬迁”。 + +### 7.3 lane 优先级 + +建议背景 GC 的优先级为: + +1. `FAST lane` 的 `gc_force` +2. `NORMAL lane` 的 `gc_force` +3. `BULK lane` 的 `gc_force` +4. 其余 `gc_requested` round-robin + +## 8. 公平性规则 + +1. `NORMAL/BULK` 用户请求等待超过阈值时,后台任务停止插队 +2. `FAST` 请求可以抢占未开始的后台任务,但不能打断已发起的 flash job +3. 同一 block 的 `write/invalidate/rollback` 必须串行 +4. checkpoint 连续占用周期数达到阈值后,必须让出执行权 + +## 9. GC 状态机 + +建议状态: + +```text +GC_IDLE + -> GC_REQUESTED + -> GC_PREPARE_DST + -> GC_COPY_ONE + -> GC_SWITCH_COMMIT + -> GC_OLD_PENDING_ERASE + -> GC_FINISH + -> GC_IDLE +``` + +说明: + +- `GC_PREPARE_DST`: 擦除并提交目标扇区 header +- `GC_COPY_ONE`: 搬运单块 live data +- `GC_SWITCH_COMMIT`: 提升新扇区、降级旧扇区 +- `GC_OLD_PENDING_ERASE`: 后台擦除旧扇区 + +## 10. checkpoint 与 GC 的关系 + +1. lane 角色切换完成后应尽快触发 checkpoint +2. checkpoint 未完成前,不影响新 `ACTIVE` 扇区继续写入 +3. 若 checkpoint 长期落后,运行期必须通过 `checkpoint_force_due()` 纠偏 + +## 11. 建议接口 + +```c +void fee_sched_mainfunction(void); +Std_ReturnType fee_sched_submit_read(...); +Std_ReturnType fee_sched_submit_write(...); +Std_ReturnType fee_sched_submit_invalidate(...); +Std_ReturnType fee_sched_submit_rollback(...); +boolean fee_sched_has_background_work(void); +``` + +## 12. 
测试关注点 + +- checkpoint 到期时 cache-hit read 仍优先 +- `FAST` 写入插队 `NORMAL` lane GC +- `NORMAL` 长时间积压时后台任务让路 +- `gc_force` 与 `gc_requested` 的优先级 +- 同一 block 的 `write + rollback` 串行化 diff --git a/components/custom_fee/fee_api.c b/components/custom_fee/fee_api.c new file mode 100644 index 00000000000..2fc05ae6391 --- /dev/null +++ b/components/custom_fee/fee_api.c @@ -0,0 +1,224 @@ +#include "fee_internal.h" + +fee_super_ctx_t g_fee_ctx; + +static fee_ret_t fee_validate_rw_params(uint8_t *dst, const uint8_t *src, uint16_t len) +{ + if ((len > 0U) && (dst == RT_NULL) && (src == RT_NULL)) + { + return FEE_E_PARAM; + } + + return FEE_E_OK; +} + +fee_ret_t fee_init(void) +{ + fee_ret_t ret; + + fee_core_reset_context(); + + ret = fee_port_init(); + if (ret != FEE_E_OK) + { + g_fee_ctx.init_state = FEE_INIT_FAILED; + g_fee_ctx.status = FEE_STATUS_UNINIT; + g_fee_ctx.job_result = FEE_JOB_FAILED; + return ret; + } + + ret = fee_core_init(); + if (ret != FEE_E_OK) + { + g_fee_ctx.init_state = FEE_INIT_FAILED; + g_fee_ctx.status = FEE_STATUS_UNINIT; + g_fee_ctx.job_result = FEE_JOB_FAILED; + return ret; + } + + while ((g_fee_ctx.init_state != FEE_INIT_CKPT_READY) && + (g_fee_ctx.init_state != FEE_INIT_FULL_READY) && + (g_fee_ctx.init_state != FEE_INIT_FAILED)) + { + ret = fee_recovery_step(); + if (ret != FEE_E_OK) + { + g_fee_ctx.init_state = FEE_INIT_FAILED; + g_fee_ctx.status = FEE_STATUS_UNINIT; + g_fee_ctx.job_result = FEE_JOB_FAILED; + return ret; + } + } + + g_fee_ctx.job_result = FEE_JOB_OK; + return FEE_E_OK; +} + +fee_ret_t fee_read(uint16_t block_id, uint16_t offset, uint8_t *dst, uint16_t len) +{ + fee_ret_t ret; + + ret = fee_validate_rw_params(dst, RT_NULL, len); + if (ret != FEE_E_OK) + { + return ret; + } + + if (g_fee_ctx.status == FEE_STATUS_UNINIT) + { + return FEE_E_UNINIT; + } + + return fee_sched_submit_read(block_id, offset, dst, len); +} + +fee_ret_t fee_write(uint16_t block_id, const uint8_t *src, uint16_t len) +{ + fee_ret_t ret; + + ret = 
fee_validate_rw_params(RT_NULL, src, len); + if (ret != FEE_E_OK) + { + return ret; + } + + if (g_fee_ctx.status == FEE_STATUS_UNINIT) + { + return FEE_E_UNINIT; + } + + return fee_sched_submit_write(block_id, src, len); +} + +fee_ret_t fee_invalidate(uint16_t block_id) +{ + if (g_fee_ctx.status == FEE_STATUS_UNINIT) + { + return FEE_E_UNINIT; + } + + return fee_sched_submit_invalidate(block_id); +} + +fee_ret_t fee_get_status(uint16_t block_id, fee_block_status_t *status) +{ + const fee_block_cfg_t *cfg; + fee_cache_entry_t *entry; + + if (status == RT_NULL) + { + return FEE_E_PARAM; + } + + if (g_fee_ctx.status == FEE_STATUS_UNINIT) + { + return FEE_E_UNINIT; + } + + cfg = fee_cfg_find_block(block_id); + if (cfg == RT_NULL) + { + return FEE_E_PARAM; + } + + if (!fee_recovery_can_read_block(block_id)) + { + return FEE_E_BUSY; + } + + entry = fee_cache_lookup(block_id); + if (entry == RT_NULL) + { + *status = FEE_BLOCK_STATUS_EMPTY; + return FEE_E_OK; + } + + if (entry->cur_valid != 0U) + { + *status = FEE_BLOCK_STATUS_VALID; + } + else if (entry->prev_valid != 0U) + { + *status = FEE_BLOCK_STATUS_INVALIDATED; + } + else + { + *status = FEE_BLOCK_STATUS_EMPTY; + } + + return FEE_E_OK; +} + +fee_ret_t fee_rollback(uint16_t block_id) +{ + if (g_fee_ctx.status == FEE_STATUS_UNINIT) + { + return FEE_E_UNINIT; + } + + return fee_sched_submit_rollback(block_id); +} + +void fee_mainfunction(void) +{ + rt_bool_t has_pending_work; + rt_bool_t has_background_work; + + if (g_fee_ctx.status == FEE_STATUS_UNINIT) + { + return; + } + + fee_port_mainfunction(); + + if (!fee_recovery_is_full_ready()) + { + (void)fee_recovery_step(); + } + + fee_sched_mainfunction(); + fee_gc_mainfunction(); + fee_core_mainfunction(); + + has_pending_work = fee_sched_has_pending_work(); + has_background_work = fee_gc_has_background_work(); + if ((g_fee_ctx.checkpoint_requested != 0U) || (g_fee_ctx.checkpoint_force != 0U)) + { + has_background_work = RT_TRUE; + } + + if (g_fee_ctx.init_state == 
FEE_INIT_FULL_READY) + { + if ((has_pending_work != RT_FALSE) || (has_background_work != RT_FALSE)) + { + g_fee_ctx.status = FEE_STATUS_BUSY_INTERNAL; + if (g_fee_ctx.job_result != FEE_JOB_FAILED) + { + g_fee_ctx.job_result = FEE_JOB_PENDING; + } + } + else + { + g_fee_ctx.status = FEE_STATUS_IDLE; + if (g_fee_ctx.job_result != FEE_JOB_FAILED) + { + g_fee_ctx.job_result = FEE_JOB_OK; + } + } + } +} + +fee_status_t fee_get_memif_status(void) +{ + return g_fee_ctx.status; +} + +fee_job_result_t fee_get_job_result(void) +{ + return g_fee_ctx.job_result; +} + +fee_init_state_t fee_get_init_state(void) +{ + return g_fee_ctx.init_state; +} diff --git a/components/custom_fee/fee_api.h b/components/custom_fee/fee_api.h new file mode 100644 index 00000000000..73edd23af5e --- /dev/null +++ b/components/custom_fee/fee_api.h @@ -0,0 +1,72 @@ +#ifndef CUSTOM_FEE_API_H +#define CUSTOM_FEE_API_H + +#include +#include "rtdef.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef uint8_t fee_ret_t; + +#define FEE_E_OK ((fee_ret_t)0U) +#define FEE_E_NOT_OK ((fee_ret_t)1U) +#define FEE_E_BUSY ((fee_ret_t)2U) +#define FEE_E_UNINIT ((fee_ret_t)3U) +#define FEE_E_PARAM ((fee_ret_t)4U) + +typedef enum +{ + FEE_STATUS_UNINIT = 0, + FEE_STATUS_IDLE, + FEE_STATUS_BUSY, + FEE_STATUS_BUSY_INTERNAL +} fee_status_t; + +typedef enum +{ + FEE_JOB_NONE = 0, + FEE_JOB_OK, + FEE_JOB_PENDING, + FEE_JOB_FAILED, + FEE_JOB_CANCELED, + FEE_JOB_INCONSISTENT +} fee_job_result_t; + +typedef enum +{ + FEE_INIT_RESET = 0, + FEE_INIT_META_SCAN, + FEE_INIT_CKPT_READY, + FEE_INIT_TAIL_SCAN, + FEE_INIT_FULL_READY, + FEE_INIT_DEGRADED, + FEE_INIT_FAILED +} fee_init_state_t; + +typedef enum +{ + FEE_BLOCK_STATUS_EMPTY = 0, + FEE_BLOCK_STATUS_VALID, + FEE_BLOCK_STATUS_INVALIDATED, + FEE_BLOCK_STATUS_INCONSISTENT +} fee_block_status_t; + +fee_ret_t fee_init(void); +fee_ret_t fee_read(uint16_t block_id, uint16_t offset, uint8_t *dst, uint16_t len); +fee_ret_t fee_write(uint16_t block_id, const uint8_t *src, 
uint16_t len); +fee_ret_t fee_invalidate(uint16_t block_id); +fee_ret_t fee_get_status(uint16_t block_id, fee_block_status_t *status); +fee_ret_t fee_rollback(uint16_t block_id); +void fee_mainfunction(void); + +fee_status_t fee_get_memif_status(void); +fee_job_result_t fee_get_job_result(void); +fee_init_state_t fee_get_init_state(void); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/components/custom_fee/fee_cache.c b/components/custom_fee/fee_cache.c new file mode 100644 index 00000000000..b6e75d680c0 --- /dev/null +++ b/components/custom_fee/fee_cache.c @@ -0,0 +1,303 @@ +#include "fee_internal.h" + +#define FEE_CKPT_FLAG_CUR_VALID (0x01UL) +#define FEE_CKPT_FLAG_PREV_VALID (0x02UL) +#define FEE_CACHE_INDEX_SIZE (64U) +#define FEE_CACHE_INDEX_EMPTY (0xFFFFU) + +typedef struct +{ + uint8_t used; + uint16_t block_id; + fee_cache_entry_t entry; +} fee_cache_slot_t; + +static fee_cache_slot_t g_fee_cache[FEE_CACHE_MAX_ENTRIES]; +static uint16_t g_fee_cache_index[FEE_CACHE_INDEX_SIZE]; + +static rt_bool_t fee_cache_addr_in_lane(uint8_t lane, uint32_t addr) +{ + const fee_lane_ctx_t *lane_ctx; + + if ((lane >= (uint8_t)FEE_LANE_COUNT) || (addr == FEE_INVALID_ADDR)) + { + return RT_FALSE; + } + + lane_ctx = &g_fee_ctx.lane[lane]; + if ((addr < lane_ctx->range_base) || (addr >= lane_ctx->range_limit)) + { + return RT_FALSE; + } + + return RT_TRUE; +} + +static uint32_t fee_cache_hash_block_id(uint16_t block_id) +{ + return ((uint32_t)block_id % FEE_CACHE_INDEX_SIZE); +} + +static rt_bool_t fee_cache_bind_slot(uint16_t block_id, uint16_t slot_idx) +{ + uint32_t probe; + uint32_t start; + + probe = fee_cache_hash_block_id(block_id); + start = probe; + + do + { + if ((g_fee_cache_index[probe] == FEE_CACHE_INDEX_EMPTY) || + (g_fee_cache_index[probe] == slot_idx)) + { + g_fee_cache_index[probe] = slot_idx; + return RT_TRUE; + } + + probe = (probe + 1U) % FEE_CACHE_INDEX_SIZE; + } while (probe != start); + + return RT_FALSE; +} + +static fee_cache_slot_t 
*fee_cache_find_slot(uint16_t block_id)
{
    uint32_t probe;
    uint32_t start;

    /* Linear-probe the open-addressing index starting at the hash bucket.
     * An EMPTY bucket terminates the probe chain: the block was never bound. */
    probe = fee_cache_hash_block_id(block_id);
    start = probe;

    do
    {
        uint16_t slot_idx = g_fee_cache_index[probe];

        if (slot_idx == FEE_CACHE_INDEX_EMPTY)
        {
            return RT_NULL;
        }

        /* Bucket is occupied: accept it only if it points at a live slot
         * that actually holds this block_id; otherwise keep probing. */
        if ((slot_idx < FEE_CACHE_MAX_ENTRIES) &&
            (g_fee_cache[slot_idx].used != 0U) &&
            (g_fee_cache[slot_idx].block_id == block_id))
        {
            return &g_fee_cache[slot_idx];
        }

        probe = (probe + 1U) % FEE_CACHE_INDEX_SIZE;
    } while (probe != start);

    /* Wrapped around the whole index without a match. */
    return RT_NULL;
}

/* Claim the first unused slot for block_id and bind it into the probe index.
 * Returns RT_NULL when no slot is free or the index cannot take another
 * binding. A fresh slot starts with both generation addresses invalid. */
static fee_cache_slot_t *fee_cache_alloc_slot(uint16_t block_id)
{
    uint32_t idx;

    for (idx = 0U; idx < FEE_CACHE_MAX_ENTRIES; ++idx)
    {
        if (g_fee_cache[idx].used == 0U)
        {
            if (fee_cache_bind_slot(block_id, (uint16_t)idx) == RT_FALSE)
            {
                return RT_NULL;
            }

            g_fee_cache[idx].used = 1U;
            g_fee_cache[idx].block_id = block_id;
            g_fee_cache[idx].entry.cur_addr = FEE_INVALID_ADDR;
            g_fee_cache[idx].entry.prev_addr = FEE_INVALID_ADDR;
            return &g_fee_cache[idx];
        }
    }

    return RT_NULL;
}

/* Reset the cache: clear all slots and mark every index bucket EMPTY
 * (0xFF bytes produce FEE_CACHE_INDEX_EMPTY == 0xFFFF per bucket). */
void fee_cache_init(void)
{
    (void)memset(g_fee_cache, 0, sizeof(g_fee_cache));
    (void)memset(g_fee_cache_index, 0xFF, sizeof(g_fee_cache_index));
}

/* Return the cached entry for block_id, or RT_NULL when not cached.
 * The pointer aliases internal storage; callers must not hold it across
 * cache mutations. */
fee_cache_entry_t *fee_cache_lookup(uint16_t block_id)
{
    fee_cache_slot_t *slot = fee_cache_find_slot(block_id);

    if (slot == RT_NULL)
    {
        return RT_NULL;
    }

    return &slot->entry;
}

/* Record a freshly written DATA record for block_id.
 * If the block already had a valid current copy, that copy is demoted to
 * the "prev" generation (enabling rollback) before the new address/len/seq
 * become current. Allocation failure is silent: the cache is best-effort
 * and the on-flash record is still the source of truth. */
void fee_cache_update_data(uint16_t block_id, uint8_t lane, uint32_t addr, uint16_t len, uint32_t seq)
{
    fee_cache_slot_t *slot = fee_cache_find_slot(block_id);

    if (slot == RT_NULL)
    {
        slot = fee_cache_alloc_slot(block_id);
    }

    if (slot == RT_NULL)
    {
        return;
    }

    /* Demote current -> previous only when current was valid. */
    if (slot->entry.cur_valid != 0U)
    {
        slot->entry.prev_addr = slot->entry.cur_addr;
        slot->entry.prev_valid = slot->entry.cur_valid;
        slot->entry.prev_sector = slot->entry.cur_sector;
    }

    slot->entry.lane = lane;
    slot->entry.cur_addr = addr;
    slot->entry.cur_valid = 1U;
    slot->entry.cur_sector = 0U; /* sector tracking is not refined here */
    slot->entry.len = len;
    slot->entry.seq = seq;
}

/* Record a TOMBSTONE (invalidate) for block_id: same demotion of the old
 * current copy as fee_cache_update_data, but the new current generation is
 * marked invalid (cur_valid = 0) while still pointing at the tombstone
 * record's address. len is intentionally left untouched. */
void fee_cache_update_tombstone(uint16_t block_id, uint8_t lane, uint32_t addr, uint32_t seq)
{
    fee_cache_slot_t *slot = fee_cache_find_slot(block_id);

    if (slot == RT_NULL)
    {
        slot = fee_cache_alloc_slot(block_id);
    }

    if (slot == RT_NULL)
    {
        return;
    }

    if (slot->entry.cur_valid != 0U)
    {
        slot->entry.prev_addr = slot->entry.cur_addr;
        slot->entry.prev_valid = slot->entry.cur_valid;
        slot->entry.prev_sector = slot->entry.cur_sector;
    }

    slot->entry.lane = lane;
    slot->entry.cur_addr = addr;
    slot->entry.cur_valid = 0U; /* tombstone: current generation invalid */
    slot->entry.cur_sector = 0U;
    slot->entry.seq = seq;
}

/* Rewrite any cached reference to old_addr with new_addr (used after a
 * record is copied during garbage collection). Both generations are
 * checked because either copy may have moved. */
void fee_cache_relocate_address(uint16_t block_id, uint32_t old_addr, uint32_t new_addr)
{
    fee_cache_slot_t *slot = fee_cache_find_slot(block_id);

    if ((slot == RT_NULL) || (old_addr == FEE_INVALID_ADDR) || (new_addr == FEE_INVALID_ADDR))
    {
        return;
    }

    if (slot->entry.cur_addr == old_addr)
    {
        slot->entry.cur_addr = new_addr;
    }

    if (slot->entry.prev_addr == old_addr)
    {
        slot->entry.prev_addr = new_addr;
    }
}

/* Serialize all used cache slots into checkpoint entries (at most
 * max_entries). Returns the number of entries written. Validity of each
 * generation is encoded into the flags word. */
uint16_t fee_cache_export_ckpt(fee_ckpt_cache_entry_t *entries, uint16_t max_entries)
{
    uint16_t count = 0U;
    uint32_t idx;

    if (entries == RT_NULL)
    {
        return 0U;
    }

    for (idx = 0U; idx < FEE_CACHE_MAX_ENTRIES; ++idx)
    {
        uint32_t flags = 0U;

        /* Skip unused slots; once the output is full the remaining
         * iterations also fall through here. */
        if ((g_fee_cache[idx].used == 0U) || (count >= max_entries))
        {
            continue;
        }

        if (g_fee_cache[idx].entry.cur_valid != 0U)
        {
            flags |= FEE_CKPT_FLAG_CUR_VALID;
        }

        if (g_fee_cache[idx].entry.prev_valid != 0U)
        {
            flags |= FEE_CKPT_FLAG_PREV_VALID;
        }

        entries[count].block_id = g_fee_cache[idx].block_id;
        entries[count].lane = g_fee_cache[idx].entry.lane;
        entries[count].flags = flags;
        entries[count].len = g_fee_cache[idx].entry.len;
        entries[count].cur_addr = g_fee_cache[idx].entry.cur_addr;
        entries[count].prev_addr = g_fee_cache[idx].entry.prev_addr;
entries[count].seq = g_fee_cache[idx].entry.seq; + entries[count].reserved = 0U; + ++count; + } + + return count; +} + +void fee_cache_import_ckpt(const fee_ckpt_cache_entry_t *entries, uint16_t entry_count) +{ + uint16_t idx; + + fee_cache_init(); + + if (entries == RT_NULL) + { + return; + } + + for (idx = 0U; (idx < entry_count) && (idx < FEE_CACHE_MAX_ENTRIES); ++idx) + { + const fee_block_cfg_t *cfg = fee_cfg_find_block((uint16_t)entries[idx].block_id); + fee_cache_slot_t *slot; + uint8_t lane = (uint8_t)entries[idx].lane; + + if ((cfg == RT_NULL) || (cfg->lane_type != lane) || + ((entries[idx].cur_addr != FEE_INVALID_ADDR) && + (fee_cache_addr_in_lane(lane, entries[idx].cur_addr) == RT_FALSE)) || + ((entries[idx].prev_addr != FEE_INVALID_ADDR) && + (fee_cache_addr_in_lane(lane, entries[idx].prev_addr) == RT_FALSE)) || + (((entries[idx].flags & FEE_CKPT_FLAG_PREV_VALID) != 0UL) && + (entries[idx].prev_addr == FEE_INVALID_ADDR)) || + (((entries[idx].flags & FEE_CKPT_FLAG_CUR_VALID) != 0UL) && + (entries[idx].cur_addr == FEE_INVALID_ADDR)) || + ((entries[idx].prev_addr != FEE_INVALID_ADDR) && (entries[idx].prev_addr == entries[idx].cur_addr))) + { + continue; + } + + slot = fee_cache_alloc_slot((uint16_t)entries[idx].block_id); + if (slot == RT_NULL) + { + break; + } + + slot->entry.lane = (uint8_t)entries[idx].lane; + slot->entry.cur_addr = entries[idx].cur_addr; + slot->entry.prev_addr = entries[idx].prev_addr; + slot->entry.seq = entries[idx].seq; + slot->entry.len = (uint16_t)entries[idx].len; + slot->entry.cur_valid = ((entries[idx].flags & FEE_CKPT_FLAG_CUR_VALID) != 0UL) ? 1U : 0U; + slot->entry.prev_valid = ((entries[idx].flags & FEE_CKPT_FLAG_PREV_VALID) != 0UL) ? 
1U : 0U; + slot->entry.cur_sector = 0U; + slot->entry.prev_sector = 0U; + } +} diff --git a/components/custom_fee/fee_cfg.c b/components/custom_fee/fee_cfg.c new file mode 100644 index 00000000000..3bebd116d3d --- /dev/null +++ b/components/custom_fee/fee_cfg.c @@ -0,0 +1,199 @@ +#include "fee_cfg.h" +#include "fee_onflash.h" +#include "fee_port.h" + +static const fee_block_cfg_t g_fee_block_table[] = +{ + { 1U, 32U, FEE_BLOCK_FAST, FEE_LANE_FAST, FEE_ENDURANCE_HOT, 1U, 1U, FEE_CRC16, 512U, 1U }, + { 2U, 128U, FEE_BLOCK_NORMAL, FEE_LANE_NORMAL, FEE_ENDURANCE_WARM, 1U, 1U, FEE_CRC32, 8U, 1U }, + { 3U, 256U, FEE_BLOCK_NORMAL, FEE_LANE_NORMAL, FEE_ENDURANCE_WARM, 1U, 1U, FEE_CRC32, 8U, 0U }, + { 4U, 768U, FEE_BLOCK_BULK, FEE_LANE_BULK, FEE_ENDURANCE_COLD, 0U, 0U, FEE_CRC32, 512U, 0U }, +}; + +static rt_bool_t fee_cfg_lane_matches_class(const fee_block_cfg_t *cfg) +{ + if (cfg == RT_NULL) + { + return RT_FALSE; + } + + if ((cfg->block_class == FEE_BLOCK_FAST) && (cfg->lane_type == FEE_LANE_FAST)) + { + return RT_TRUE; + } + + if ((cfg->block_class == FEE_BLOCK_NORMAL) && (cfg->lane_type == FEE_LANE_NORMAL)) + { + return RT_TRUE; + } + + if ((cfg->block_class == FEE_BLOCK_BULK) && (cfg->lane_type == FEE_LANE_BULK)) + { + return RT_TRUE; + } + + if ((cfg->block_class == FEE_BLOCK_META) && (cfg->lane_type == FEE_LANE_META)) + { + return RT_TRUE; + } + + return RT_FALSE; +} + +const fee_block_cfg_t *fee_cfg_find_block(uint16_t block_id) +{ + uint16_t idx; + + for (idx = 0U; idx < fee_cfg_get_block_count(); ++idx) + { + if (g_fee_block_table[idx].block_id == block_id) + { + return &g_fee_block_table[idx]; + } + } + + return RT_NULL; +} + +const fee_block_cfg_t *fee_cfg_get_block_table(void) +{ + return &g_fee_block_table[0]; +} + +uint16_t fee_cfg_get_block_count(void) +{ + return (uint16_t)(sizeof(g_fee_block_table) / sizeof(g_fee_block_table[0])); +} + +fee_ret_t fee_cfg_validate_table(void) +{ + fee_flash_caps_t caps; + uint16_t count; + uint16_t idx; + uint16_t jdx; + + 
if (fee_port_get_caps(&caps) != FEE_E_OK) + { + return FEE_E_NOT_OK; + } + + count = fee_cfg_get_block_count(); + if (count == 0U) + { + return FEE_E_NOT_OK; + } + + for (idx = 0U; idx < count; ++idx) + { + const fee_block_cfg_t *cfg = &g_fee_block_table[idx]; + + if ((cfg->block_id == 0U) || (cfg->max_len == 0U) || (cfg->max_len > FEE_CFG_MAX_BLOCK_LEN)) + { + return FEE_E_NOT_OK; + } + + if ((cfg->record_align == 0U) || (cfg->record_align < caps.program_unit) || + ((cfg->record_align % caps.program_unit) != 0U)) + { + return FEE_E_NOT_OK; + } + + if (fee_cfg_lane_matches_class(cfg) == RT_FALSE) + { + return FEE_E_NOT_OK; + } + + if ((cfg->allow_rollback != 0U) && (cfg->keep_prev_copy == 0U)) + { + return FEE_E_NOT_OK; + } + + if ((cfg->block_class == FEE_BLOCK_FAST) && + (fee_onflash_calc_record_span(cfg, cfg->max_len) > FEE_CFG_FAST_SINGLE_RECORD_LIMIT)) + { + return FEE_E_NOT_OK; + } + + if ((cfg->block_class == FEE_BLOCK_BULK) && (cfg->boot_critical != 0U)) + { + return FEE_E_NOT_OK; + } + + for (jdx = (uint16_t)(idx + 1U); jdx < count; ++jdx) + { + if (cfg->block_id == g_fee_block_table[jdx].block_id) + { + return FEE_E_NOT_OK; + } + } + } + + return FEE_E_OK; +} + +rt_bool_t fee_cfg_is_boot_critical(uint16_t block_id) +{ + const fee_block_cfg_t *cfg = fee_cfg_find_block(block_id); + + if (cfg == RT_NULL) + { + return RT_FALSE; + } + + return (cfg->boot_critical != 0U) ? 
RT_TRUE : RT_FALSE; +} + +uint32_t fee_cfg_get_lane_max_span(uint8_t lane) +{ + uint32_t max_span = 0U; + uint16_t idx; + + for (idx = 0U; idx < fee_cfg_get_block_count(); ++idx) + { + const fee_block_cfg_t *cfg = &g_fee_block_table[idx]; + uint32_t span; + + if (cfg->lane_type != lane) + { + continue; + } + + span = fee_onflash_calc_record_span(cfg, cfg->max_len); + if (span > max_span) + { + max_span = span; + } + } + + return max_span; +} + +uint8_t fee_cfg_get_lane_sector_count(uint8_t lane) +{ + if (lane == (uint8_t)FEE_LANE_META) + { + return FEE_CFG_META_SECTOR_COUNT; + } + + if (lane == (uint8_t)FEE_LANE_FAST) + { + return FEE_CFG_FAST_SECTOR_COUNT; + } + + if (lane == (uint8_t)FEE_LANE_NORMAL) + { + return FEE_CFG_NORMAL_SECTOR_COUNT; + } + + if (lane == (uint8_t)FEE_LANE_BULK) + { + return FEE_CFG_BULK_SECTOR_COUNT; + } + + return 0U; +} + +uint32_t fee_cfg_get_total_sector_count(void) +{ + return FEE_CFG_TOTAL_SECTOR_COUNT; +} diff --git a/components/custom_fee/fee_cfg.h b/components/custom_fee/fee_cfg.h new file mode 100644 index 00000000000..7df0252fdb9 --- /dev/null +++ b/components/custom_fee/fee_cfg.h @@ -0,0 +1,89 @@ +#ifndef CUSTOM_FEE_CFG_H +#define CUSTOM_FEE_CFG_H + +#include +#include "fee_api.h" + +typedef enum +{ + FEE_LANE_META = 0, + FEE_LANE_FAST, + FEE_LANE_NORMAL, + FEE_LANE_BULK, + FEE_LANE_COUNT +} fee_lane_t; + +typedef enum +{ + FEE_BLOCK_META = 0, + FEE_BLOCK_FAST, + FEE_BLOCK_NORMAL, + FEE_BLOCK_BULK +} fee_block_class_t; + +typedef enum +{ + FEE_ENDURANCE_HOT = 0, + FEE_ENDURANCE_WARM, + FEE_ENDURANCE_COLD +} fee_endurance_class_t; + +typedef enum +{ + FEE_CRC_NONE = 0, + FEE_CRC16, + FEE_CRC32 +} fee_crc_mode_t; + +typedef struct +{ + uint16_t block_id; + uint16_t max_len; + uint8_t block_class; + uint8_t lane_type; + uint8_t endurance_class; + uint8_t keep_prev_copy; + uint8_t allow_rollback; + uint8_t crc_mode; + uint16_t record_align; + uint8_t boot_critical; +} fee_block_cfg_t; + +#define FEE_CFG_FORMAT_VERSION (0x0100U) 
+#define FEE_CFG_ALIGN_UNIT (8U) +#define FEE_CFG_WORDLINE_SIZE (512U) +#define FEE_CFG_FAST_SINGLE_RECORD_LIMIT (512U) +#define FEE_CFG_MAX_PENDING_REQUESTS (8U) +#define FEE_CFG_MAX_BLOCK_LEN (1024U) +#define FEE_CFG_META_SECTOR_COUNT (2U) +#define FEE_CFG_FAST_SECTOR_COUNT (3U) +#define FEE_CFG_NORMAL_SECTOR_COUNT (3U) +#define FEE_CFG_BULK_SECTOR_COUNT (2U) +#define FEE_CFG_MAX_LANE_SECTOR_COUNT (3U) +#define FEE_CFG_TOTAL_SECTOR_COUNT \ + (FEE_CFG_META_SECTOR_COUNT + FEE_CFG_FAST_SECTOR_COUNT + \ + FEE_CFG_NORMAL_SECTOR_COUNT + FEE_CFG_BULK_SECTOR_COUNT) +#define FEE_CFG_CKPT_BG_RECORDS_FAST (1U) +#define FEE_CFG_CKPT_BG_RECORDS_NORMAL (2U) +#define FEE_CFG_CKPT_BG_RECORDS_BULK (1U) +#define FEE_CFG_CKPT_FORCE_RECORDS_FAST (1U) +#define FEE_CFG_CKPT_FORCE_RECORDS_NORMAL (8U) +#define FEE_CFG_CKPT_FORCE_RECORDS_BULK (2U) +#define FEE_CFG_CKPT_BG_BYTES_FAST (FEE_CFG_FAST_SINGLE_RECORD_LIMIT) +#define FEE_CFG_CKPT_BG_BYTES_NORMAL (512U) +#define FEE_CFG_CKPT_BG_BYTES_BULK (1024U) +#define FEE_CFG_CKPT_FORCE_BYTES_FAST (FEE_CFG_FAST_SINGLE_RECORD_LIMIT) +#define FEE_CFG_CKPT_FORCE_BYTES_NORMAL (2048U) +#define FEE_CFG_CKPT_FORCE_BYTES_BULK (2048U) +#define FEE_CFG_RECOVERY_TAIL_RECORDS_PER_STEP (4U) + +const fee_block_cfg_t *fee_cfg_find_block(uint16_t block_id); +const fee_block_cfg_t *fee_cfg_get_block_table(void); +uint16_t fee_cfg_get_block_count(void); +fee_ret_t fee_cfg_validate_table(void); +rt_bool_t fee_cfg_is_boot_critical(uint16_t block_id); +uint32_t fee_cfg_get_lane_max_span(uint8_t lane); +uint8_t fee_cfg_get_lane_sector_count(uint8_t lane); +uint32_t fee_cfg_get_total_sector_count(void); + +#endif diff --git a/components/custom_fee/fee_ckpt.c b/components/custom_fee/fee_ckpt.c new file mode 100644 index 00000000000..2a34d9626ff --- /dev/null +++ b/components/custom_fee/fee_ckpt.c @@ -0,0 +1,361 @@ +#include "fee_internal.h" + +#define FEE_CKPT_MAGIC (0x4645434BUL) +#define FEE_CKPT_ENTRY_MAX (FEE_CACHE_MAX_ENTRIES) + +typedef struct +{ + uint32_t 
free_offset;                 /* next append address inside the lane        */
    uint32_t active_generation;      /* erase-generation counter of active sector  */
    uint8_t active_sector;           /* sector currently receiving appends         */
    uint8_t dst_sector;              /* GC destination sector                      */
    uint8_t spare_sector;            /* erased standby sector                      */
    uint8_t sector_count;            /* number of sectors in this lane             */
    uint32_t reserved;
} fee_ckpt_lane_state_t;

/* Checkpoint payload: per-lane allocator state plus a snapshot of the
 * block cache. This is the portion covered by payload_crc. */
typedef struct
{
    fee_ckpt_lane_state_t lane[FEE_LANE_COUNT];
    uint32_t entry_count;
    fee_ckpt_cache_entry_t entries[FEE_CKPT_ENTRY_MAX];
} fee_ckpt_payload_t;

/* On-flash checkpoint image. commit_marker is written as part of the same
 * image; fee_ckpt_is_valid() requires magic, version, marker and CRC to
 * all match before trusting the payload. */
typedef struct
{
    uint32_t magic;
    uint32_t format_version;
    uint32_t generation;             /* monotonically increasing per flush */
    fee_ckpt_payload_t payload;
    uint32_t payload_crc;
    uint32_t commit_marker;
    uint32_t reserved[4];
} fee_ckpt_image_t;

/* Base address of the checkpoint copy that was last restored/flushed;
 * FEE_INVALID_ADDR until the first successful restore or flush. */
static uint32_t g_fee_ckpt_active_base = FEE_INVALID_ADDR;
static uint8_t g_fee_ckpt_restored = 0U;

/* NOTE(review): despite the name this is NOT CRC-32 — it is a simple
 * multiplicative hash (h = h*31 + byte, via (h<<5)-h). It is consistent
 * between writer (fee_ckpt_flush) and reader (fee_ckpt_is_valid), so it
 * works as an integrity check, but it has weaker error-detection
 * properties than a real CRC. */
static uint32_t fee_ckpt_crc32_accumulate(const uint8_t *data, uint32_t len)
{
    uint32_t crc = 0U;
    uint32_t idx;

    if (data == RT_NULL)
    {
        return 0U;
    }

    for (idx = 0U; idx < len; ++idx)
    {
        crc = (crc << 5) - crc + data[idx];
    }

    return crc;
}

/* Compute the base addresses of the two alternating checkpoint sectors:
 * copy 0 at flash offset 0, copy 1 one erase-unit above it. Fails when the
 * flash is too small to host all configured sectors. Either out pointer
 * may be RT_NULL if the caller only needs one address. */
static fee_ret_t fee_ckpt_get_layout(uint32_t *meta0_base, uint32_t *meta1_base)
{
    fee_flash_caps_t caps;
    fee_ret_t ret;

    ret = fee_port_get_caps(&caps);
    if (ret != FEE_E_OK)
    {
        return ret;
    }

    if (caps.total_size < (caps.erase_unit * fee_cfg_get_total_sector_count()))
    {
        return FEE_E_NOT_OK;
    }

    if (meta0_base != RT_NULL)
    {
        *meta0_base = 0U;
    }

    if (meta1_base != RT_NULL)
    {
        *meta1_base = caps.erase_unit;
    }

    return FEE_E_OK;
}

/* Hash the entire payload struct (including padding bytes, which flush
 * zeroes via memset of the whole image before filling it in). */
static uint32_t fee_ckpt_payload_crc(const fee_ckpt_image_t *image)
{
    return fee_ckpt_crc32_accumulate((const uint8_t *)&image->payload,
                                     (uint32_t)sizeof(image->payload));
}

/* A checkpoint image is accepted only when magic, format version and the
 * end-of-write commit marker all match, the entry count is in range, and
 * the payload hash verifies. */
static rt_bool_t fee_ckpt_is_valid(const fee_ckpt_image_t *image)
{
    if (image == RT_NULL)
    {
        return RT_FALSE;
    }

    if ((image->magic != FEE_CKPT_MAGIC) ||
        (image->format_version != FEE_CFG_FORMAT_VERSION) ||
        (image->commit_marker != FEE_COMMIT_MARKER))
    {
        return RT_FALSE;
    }

    if (image->payload.entry_count > FEE_CKPT_ENTRY_MAX)
    {
        return RT_FALSE;
    }

    return (image->payload_crc == fee_ckpt_payload_crc(image)) ? RT_TRUE : RT_FALSE;
}

/* Snapshot one lane's live allocator state from g_fee_ctx into the
 * checkpoint structure. Unknown/padding fields are zeroed first. */
static void fee_ckpt_capture_lane_state(uint8_t lane, fee_ckpt_lane_state_t *state)
{
    const fee_lane_ctx_t *lane_ctx;

    if ((state == RT_NULL) || (lane >= (uint8_t)FEE_LANE_COUNT))
    {
        return;
    }

    lane_ctx = &g_fee_ctx.lane[lane];
    (void)memset(state, 0, sizeof(*state));
    state->free_offset = lane_ctx->free_offset;
    state->active_generation = lane_ctx->active_generation;
    state->active_sector = lane_ctx->active_sector;
    state->dst_sector = lane_ctx->dst_sector;
    state->spare_sector = lane_ctx->spare_sector;
    state->sector_count = lane_ctx->sector_count;
}

/* Sanity-check a restored lane state against the lane geometry already set
 * up in g_fee_ctx (sector count, address range). Rejects the META lane
 * (lane <= FEE_LANE_META, i.e. lane 0) since checkpoints only describe
 * data lanes. For multi-sector lanes the active/dst/spare roles must be
 * distinct where the sector count allows it; free_offset must lie inside
 * the lane's address range. */
static rt_bool_t fee_ckpt_validate_lane_state(uint8_t lane, const fee_ckpt_lane_state_t *state)
{
    const fee_lane_ctx_t *lane_ctx;
    uint8_t expected_sector_count;

    if ((state == RT_NULL) || (lane <= (uint8_t)FEE_LANE_META) || (lane >= (uint8_t)FEE_LANE_COUNT))
    {
        return RT_FALSE;
    }

    lane_ctx = &g_fee_ctx.lane[lane];
    expected_sector_count = lane_ctx->sector_count;
    if ((expected_sector_count == 0U) || (state->sector_count != expected_sector_count))
    {
        return RT_FALSE;
    }

    if ((state->active_generation == 0U) ||
        (state->active_sector >= expected_sector_count) ||
        (state->dst_sector >= expected_sector_count) ||
        (state->spare_sector >= expected_sector_count))
    {
        return RT_FALSE;
    }

    if (expected_sector_count == 1U)
    {
        /* Single-sector lane: all roles collapse onto sector 0. */
        if ((state->active_sector != 0U) || (state->dst_sector != 0U) || (state->spare_sector != 0U))
        {
            return RT_FALSE;
        }
    }
    else
    {
        if (state->active_sector == state->dst_sector)
        {
            return RT_FALSE;
        }

        if (state->spare_sector == state->active_sector)
        {
            return RT_FALSE;
        }

        /* With only 2 sectors, spare may legitimately equal dst. */
        if ((expected_sector_count > 2U) && (state->spare_sector == state->dst_sector))
        {
            return RT_FALSE;
        }
    }

    if ((state->free_offset < lane_ctx->range_base) || (state->free_offset > lane_ctx->range_limit))
    {
        return RT_FALSE;
    }

    return RT_TRUE;
}

static void
fee_ckpt_apply_lane_state(uint8_t lane, const fee_ckpt_lane_state_t *state) +{ + fee_lane_ctx_t *lane_ctx; + + if ((state == RT_NULL) || (lane <= (uint8_t)FEE_LANE_META) || (lane >= (uint8_t)FEE_LANE_COUNT)) + { + return; + } + + lane_ctx = &g_fee_ctx.lane[lane]; + lane_ctx->active_sector = state->active_sector; + lane_ctx->dst_sector = state->dst_sector; + lane_ctx->spare_sector = state->spare_sector; + lane_ctx->gc_old_sector = state->active_sector; + lane_ctx->active_generation = state->active_generation; + lane_ctx->free_offset = state->free_offset; + lane_ctx->scan_start = state->free_offset; +} + +void fee_ckpt_init(void) +{ + g_fee_ckpt_active_base = FEE_INVALID_ADDR; + g_fee_ckpt_restored = 0U; +} + +static void fee_ckpt_reset_dirty_accounting(void) +{ + uint8_t lane; + + for (lane = (uint8_t)FEE_LANE_FAST; lane <= (uint8_t)FEE_LANE_BULK; ++lane) + { + g_fee_ctx.lane[lane].dirty_record_count = 0U; + g_fee_ctx.lane[lane].dirty_bytes = 0U; + } +} + +fee_ret_t fee_ckpt_restore(void) +{ + fee_ckpt_image_t image0; + fee_ckpt_image_t image1; + const fee_ckpt_image_t *selected = RT_NULL; + uint32_t meta0_base; + uint32_t meta1_base; + uint8_t lane; + fee_ret_t ret; + + g_fee_ckpt_restored = 0U; + + ret = fee_ckpt_get_layout(&meta0_base, &meta1_base); + if (ret != FEE_E_OK) + { + return ret; + } + + ret = fee_port_read(meta0_base, (uint8_t *)&image0, (uint32_t)sizeof(image0)); + if (ret != FEE_E_OK) + { + return ret; + } + + ret = fee_port_read(meta1_base, (uint8_t *)&image1, (uint32_t)sizeof(image1)); + if (ret != FEE_E_OK) + { + return ret; + } + + if (fee_ckpt_is_valid(&image0) && fee_ckpt_is_valid(&image1)) + { + if (image0.generation >= image1.generation) + { + selected = &image0; + g_fee_ckpt_active_base = meta0_base; + } + else + { + selected = &image1; + g_fee_ckpt_active_base = meta1_base; + } + } + else if (fee_ckpt_is_valid(&image0)) + { + selected = &image0; + g_fee_ckpt_active_base = meta0_base; + } + else if (fee_ckpt_is_valid(&image1)) + { + selected 
= &image1; + g_fee_ckpt_active_base = meta1_base; + } + + if (selected == RT_NULL) + { + return FEE_E_NOT_OK; + } + + for (lane = (uint8_t)FEE_LANE_FAST; lane <= (uint8_t)FEE_LANE_BULK; ++lane) + { + if (fee_ckpt_validate_lane_state(lane, &selected->payload.lane[lane]) == RT_FALSE) + { + return FEE_E_NOT_OK; + } + } + + for (lane = (uint8_t)FEE_LANE_FAST; lane <= (uint8_t)FEE_LANE_BULK; ++lane) + { + fee_ckpt_apply_lane_state(lane, &selected->payload.lane[lane]); + } + + fee_cache_import_ckpt(&selected->payload.entries[0], (uint16_t)selected->payload.entry_count); + g_fee_ctx.checkpoint_generation = selected->generation; + g_fee_ctx.checkpoint_dirty = 0U; + g_fee_ctx.checkpoint_requested = 0U; + g_fee_ctx.checkpoint_force = 0U; + g_fee_ckpt_restored = 1U; + + return FEE_E_OK; +} + +fee_ret_t fee_ckpt_flush(void) +{ + fee_ckpt_image_t image; + uint32_t meta0_base; + uint32_t meta1_base; + uint32_t target_base; + fee_ret_t ret; + + ret = fee_ckpt_get_layout(&meta0_base, &meta1_base); + if (ret != FEE_E_OK) + { + return ret; + } + + target_base = (g_fee_ckpt_active_base == meta0_base) ? 
meta1_base : meta0_base; + + (void)memset(&image, 0, sizeof(image)); + image.magic = FEE_CKPT_MAGIC; + image.format_version = FEE_CFG_FORMAT_VERSION; + image.generation = g_fee_ctx.checkpoint_generation + 1U; + fee_ckpt_capture_lane_state((uint8_t)FEE_LANE_FAST, &image.payload.lane[FEE_LANE_FAST]); + fee_ckpt_capture_lane_state((uint8_t)FEE_LANE_NORMAL, &image.payload.lane[FEE_LANE_NORMAL]); + fee_ckpt_capture_lane_state((uint8_t)FEE_LANE_BULK, &image.payload.lane[FEE_LANE_BULK]); + image.payload.entry_count = fee_cache_export_ckpt(&image.payload.entries[0], FEE_CKPT_ENTRY_MAX); + image.payload_crc = fee_ckpt_payload_crc(&image); + image.commit_marker = FEE_COMMIT_MARKER; + + ret = fee_port_erase(target_base, meta1_base - meta0_base); + if (ret != FEE_E_OK) + { + return ret; + } + + ret = fee_port_write(target_base, (const uint8_t *)&image, (uint32_t)sizeof(image)); + if (ret != FEE_E_OK) + { + return ret; + } + + g_fee_ckpt_active_base = target_base; + g_fee_ctx.checkpoint_generation = image.generation; + g_fee_ctx.checkpoint_dirty = 0U; + g_fee_ctx.checkpoint_requested = 0U; + g_fee_ctx.checkpoint_force = 0U; + g_fee_ckpt_restored = 1U; + fee_ckpt_reset_dirty_accounting(); + + return FEE_E_OK; +} + +rt_bool_t fee_ckpt_has_restored_image(void) +{ + return (g_fee_ckpt_restored != 0U) ? 
RT_TRUE : RT_FALSE; +} diff --git a/components/custom_fee/fee_core.c b/components/custom_fee/fee_core.c new file mode 100644 index 00000000000..1a54523b6ce --- /dev/null +++ b/components/custom_fee/fee_core.c @@ -0,0 +1,580 @@ +#include "fee_internal.h" + +static fee_ret_t fee_core_read_header(uint32_t addr, fee_record_header_t *header) +{ + return fee_port_read(addr, (uint8_t *)header, (uint32_t)sizeof(*header)); +} + +static fee_ret_t fee_core_read_tail(uint32_t addr, uint16_t data_len, fee_commit_tail_t *tail) +{ + fee_flash_caps_t caps; + uint32_t stored_len; + uint32_t tail_addr; + + if (fee_port_get_caps(&caps) != FEE_E_OK) + { + return FEE_E_NOT_OK; + } + + stored_len = fee_onflash_align_up((uint32_t)data_len, caps.program_unit); + tail_addr = addr + (uint32_t)sizeof(fee_record_header_t) + stored_len; + + return fee_port_read(tail_addr, (uint8_t *)tail, (uint32_t)sizeof(*tail)); +} + +static uint32_t fee_core_record_next_addr(const fee_block_cfg_t *cfg, uint16_t data_len) +{ + return fee_onflash_calc_record_span(cfg, data_len); +} + +static void fee_core_update_gc_request(uint8_t lane) +{ + fee_lane_ctx_t *lane_ctx; + uint32_t remaining_bytes; + + if ((lane <= (uint8_t)FEE_LANE_META) || (lane >= (uint8_t)FEE_LANE_COUNT)) + { + return; + } + + lane_ctx = &g_fee_ctx.lane[lane]; + if ((lane_ctx->sector_count < 2U) || (lane_ctx->limit_addr <= lane_ctx->free_offset)) + { + return; + } + + remaining_bytes = lane_ctx->limit_addr - lane_ctx->free_offset; + + if ((lane_ctx->gc_start_threshold != 0U) && (remaining_bytes <= lane_ctx->gc_start_threshold)) + { + lane_ctx->gc_requested = 1U; + } + + if ((lane_ctx->gc_force_threshold != 0U) && (remaining_bytes <= lane_ctx->gc_force_threshold)) + { + lane_ctx->gc_force = 1U; + } +} + +static uint32_t fee_core_ckpt_record_threshold(uint8_t lane, rt_bool_t force_flush) +{ + if (lane == (uint8_t)FEE_LANE_FAST) + { + return force_flush ? 
FEE_CFG_CKPT_FORCE_RECORDS_FAST : FEE_CFG_CKPT_BG_RECORDS_FAST;
    }

    if (lane == (uint8_t)FEE_LANE_NORMAL)
    {
        return force_flush ? FEE_CFG_CKPT_FORCE_RECORDS_NORMAL : FEE_CFG_CKPT_BG_RECORDS_NORMAL;
    }

    if (lane == (uint8_t)FEE_LANE_BULK)
    {
        return force_flush ? FEE_CFG_CKPT_FORCE_RECORDS_BULK : FEE_CFG_CKPT_BG_RECORDS_BULK;
    }

    /* META lane and out-of-range lanes have no checkpoint threshold. */
    return 0U;
}

/* Per-lane dirty-bytes threshold that triggers a checkpoint; the force
 * variant selects the (typically larger) must-flush limit. Returns 0 for
 * lanes without a byte budget, which callers treat as "disabled". */
static uint32_t fee_core_ckpt_byte_threshold(uint8_t lane, rt_bool_t force_flush)
{
    if (lane == (uint8_t)FEE_LANE_FAST)
    {
        return force_flush ? FEE_CFG_CKPT_FORCE_BYTES_FAST : FEE_CFG_CKPT_BG_BYTES_FAST;
    }

    if (lane == (uint8_t)FEE_LANE_NORMAL)
    {
        return force_flush ? FEE_CFG_CKPT_FORCE_BYTES_NORMAL : FEE_CFG_CKPT_BG_BYTES_NORMAL;
    }

    if (lane == (uint8_t)FEE_LANE_BULK)
    {
        return force_flush ? FEE_CFG_CKPT_FORCE_BYTES_BULK : FEE_CFG_CKPT_BG_BYTES_BULK;
    }

    return 0U;
}

/* True when the lane's accumulated dirty records or dirty bytes since the
 * last checkpoint reach the configured threshold. A threshold of 0 means
 * that criterion is disabled for the lane. */
static rt_bool_t fee_core_lane_hits_ckpt_threshold(uint8_t lane, rt_bool_t force_flush)
{
    fee_lane_ctx_t *lane_ctx;
    uint32_t record_threshold;
    uint32_t byte_threshold;

    if ((lane >= FEE_LANE_COUNT) || (lane == (uint8_t)FEE_LANE_META))
    {
        return RT_FALSE;
    }

    lane_ctx = &g_fee_ctx.lane[lane];
    record_threshold = fee_core_ckpt_record_threshold(lane, force_flush);
    byte_threshold = fee_core_ckpt_byte_threshold(lane, force_flush);

    if ((record_threshold != 0U) && (lane_ctx->dirty_record_count >= record_threshold))
    {
        return RT_TRUE;
    }

    if ((byte_threshold != 0U) && (lane_ctx->dirty_bytes >= byte_threshold))
    {
        return RT_TRUE;
    }

    return RT_FALSE;
}

/* Should the lane's dirtiness force an immediate checkpoint (force-level
 * thresholds)? META and invalid lanes never force. */
static rt_bool_t fee_core_should_force_ckpt(uint8_t lane)
{
    if ((lane <= (uint8_t)FEE_LANE_META) || (lane >= (uint8_t)FEE_LANE_COUNT))
    {
        return RT_FALSE;
    }

    return fee_core_lane_hits_ckpt_threshold(lane, RT_TRUE);
}

/* Decide whether a write/invalidate to this block should request a
 * background checkpoint: always for boot-critical blocks and the FAST
 * lane, otherwise when the lane crosses a force or background threshold. */
static rt_bool_t fee_core_should_request_ckpt(const fee_block_cfg_t *cfg)
{
    if (cfg == RT_NULL)
    {
        return RT_FALSE;
    }

    if ((cfg->boot_critical != 0U) || (cfg->lane_type == (uint8_t)FEE_LANE_FAST))
    {
        return RT_TRUE;
    }

    if (fee_core_should_force_ckpt(cfg->lane_type) != RT_FALSE)
    {
        return RT_TRUE;
    }

    return fee_core_lane_hits_ckpt_threshold(cfg->lane_type, RT_FALSE);
}

/* After a successful append, raise the checkpoint request flags in
 * g_fee_ctx. No-op unless something has actually been dirtied since the
 * last checkpoint. Force-level thresholds set both force and request. */
static void fee_core_request_checkpoint(const fee_block_cfg_t *cfg)
{
    if ((cfg == RT_NULL) || (g_fee_ctx.checkpoint_dirty == 0U))
    {
        return;
    }

    if (fee_core_should_force_ckpt(cfg->lane_type) != RT_FALSE)
    {
        g_fee_ctx.checkpoint_force = 1U;
        g_fee_ctx.checkpoint_requested = 1U;
        return;
    }

    if (fee_core_should_request_ckpt(cfg) != RT_FALSE)
    {
        g_fee_ctx.checkpoint_requested = 1U;
    }
}

/* Append one record (DATA or TOMBSTONE) for block_id to its lane's log:
 * header, then the payload padded up to the flash program unit, then the
 * commit tail. On success advances the lane's free_offset, bumps the
 * dirty accounting, and returns the record address and sequence number via
 * out_addr/out_seq (either may be RT_NULL).
 * Returns FEE_E_BUSY when GC blocks I/O on the lane or the lane is full
 * (in which case GC is requested/forced), FEE_E_PARAM on bad arguments,
 * FEE_E_NOT_OK on uninitialized lane state or port failures. */
static fee_ret_t fee_core_append_record(uint16_t block_id, const uint8_t *src, uint16_t len,
                                        uint8_t record_type, uint8_t lane, uint32_t *out_addr, uint32_t *out_seq)
{
    const fee_block_cfg_t *cfg;
    fee_cache_entry_t *entry;
    fee_flash_caps_t caps;
    fee_record_header_t header;
    fee_commit_tail_t tail;
    uint8_t padded_buf[FEE_CFG_MAX_BLOCK_LEN + FEE_CFG_ALIGN_UNIT];
    fee_lane_ctx_t *lane_ctx;
    uint32_t addr;
    uint32_t next_addr;
    uint32_t seq;
    uint32_t stored_len;
    fee_ret_t ret;

    cfg = fee_cfg_find_block(block_id);
    if (cfg == RT_NULL)
    {
        return FEE_E_PARAM;
    }

    /* DATA records must carry a non-empty payload within the configured
     * maximum; TOMBSTONEs are allowed len == 0. */
    if ((record_type == FEE_RECORD_DATA) && ((src == RT_NULL) || (len == 0U) || (len > cfg->max_len)))
    {
        return FEE_E_PARAM;
    }

    if (fee_port_get_caps(&caps) != FEE_E_OK)
    {
        return FEE_E_NOT_OK;
    }

    if (lane >= FEE_LANE_COUNT)
    {
        return FEE_E_PARAM;
    }

    /* Lane must have been brought up by recovery/init. */
    lane_ctx = &g_fee_ctx.lane[lane];
    if ((lane_ctx->free_offset == FEE_INVALID_ADDR) ||
        (lane_ctx->data_start == 0U) || (lane_ctx->limit_addr <= lane_ctx->data_start))
    {
        return FEE_E_NOT_OK;
    }

    /* While GC is copying this lane, appends must wait. */
    if (fee_gc_lane_blocks_io(lane) != RT_FALSE)
    {
        return FEE_E_BUSY;
    }

    addr = lane_ctx->free_offset;
    stored_len = fee_onflash_align_up((uint32_t)len, caps.program_unit);

    if (stored_len > sizeof(padded_buf))
    {
        return FEE_E_PARAM;
    }

    next_addr = addr +
fee_core_record_next_addr(cfg, len); + if (next_addr > lane_ctx->limit_addr) + { + if (lane_ctx->sector_count > 1U) + { + lane_ctx->gc_requested = 1U; + lane_ctx->gc_force = 1U; + } + return FEE_E_BUSY; + } + + entry = fee_cache_lookup(block_id); + seq = (entry == RT_NULL) ? 1U : (entry->seq + 1U); + + ret = fee_onflash_encode_record_header(&header, block_id, record_type, len, seq); + if (ret != FEE_E_OK) + { + return ret; + } + + ret = fee_onflash_encode_commit_tail(&tail, src, len); + if (ret != FEE_E_OK) + { + return ret; + } + + ret = fee_port_write(addr, (const uint8_t *)&header, (uint32_t)sizeof(header)); + if (ret != FEE_E_OK) + { + return ret; + } + + if (len > 0U) + { + (void)memset(padded_buf, 0, sizeof(padded_buf)); + (void)memcpy(padded_buf, src, len); + ret = fee_port_write(addr + (uint32_t)sizeof(header), padded_buf, stored_len); + if (ret != FEE_E_OK) + { + return ret; + } + } + + ret = fee_port_write(addr + (uint32_t)sizeof(header) + stored_len, + (const uint8_t *)&tail, (uint32_t)sizeof(tail)); + if (ret != FEE_E_OK) + { + return ret; + } + + lane_ctx->free_offset = next_addr; + lane_ctx->dirty_record_count++; + lane_ctx->dirty_bytes += (next_addr - addr); + g_fee_ctx.checkpoint_dirty = 1U; + fee_core_update_gc_request(lane); + + if (out_addr != RT_NULL) + { + *out_addr = addr; + } + + if (out_seq != RT_NULL) + { + *out_seq = seq; + } + + return FEE_E_OK; +} + +static fee_ret_t fee_core_read_payload(uint32_t addr, uint16_t offset, uint8_t *dst, uint16_t len) +{ + fee_record_header_t header; + fee_commit_tail_t tail; + const fee_block_cfg_t *cfg; + uint8_t verify_buf[FEE_CFG_MAX_BLOCK_LEN]; + fee_ret_t ret; + + ret = fee_core_read_header(addr, &header); + if (ret != FEE_E_OK) + { + return ret; + } + + cfg = fee_cfg_find_block(header.block_id); + if ((cfg == RT_NULL) || + (fee_onflash_validate_record_header(&header, cfg) == RT_FALSE) || + (header.record_type != (uint8_t)FEE_RECORD_DATA)) + { + return FEE_E_NOT_OK; + } + + ret = 
fee_core_read_tail(addr, header.data_len, &tail); + if (ret != FEE_E_OK) + { + return ret; + } + + if (fee_onflash_validate_commit_tail(&tail) == RT_FALSE) + { + return FEE_E_NOT_OK; + } + + if (((uint32_t)offset + (uint32_t)len) > (uint32_t)header.data_len) + { + return FEE_E_PARAM; + } + + if ((cfg->crc_mode != (uint8_t)FEE_CRC_NONE) && (header.data_len > 0U)) + { + if (header.data_len > (uint16_t)sizeof(verify_buf)) + { + return FEE_E_NOT_OK; + } + + ret = fee_port_read(addr + (uint32_t)sizeof(header), &verify_buf[0], header.data_len); + if (ret != FEE_E_OK) + { + return ret; + } + + if (fee_onflash_validate_payload_crc(&tail, &verify_buf[0], header.data_len) == RT_FALSE) + { + return FEE_E_NOT_OK; + } + + if (len > 0U) + { + (void)memcpy(dst, &verify_buf[offset], len); + } + + return FEE_E_OK; + } + + if (len == 0U) + { + return FEE_E_OK; + } + + return fee_port_read(addr + (uint32_t)sizeof(header) + (uint32_t)offset, dst, len); +} + +void fee_core_reset_context(void) +{ + (void)memset(&g_fee_ctx, 0, sizeof(g_fee_ctx)); + g_fee_ctx.status = FEE_STATUS_UNINIT; + g_fee_ctx.job_result = FEE_JOB_NONE; + g_fee_ctx.init_state = FEE_INIT_RESET; +} + +fee_ret_t fee_core_init(void) +{ + fee_ret_t ret; + + ret = fee_cfg_validate_table(); + if (ret != FEE_E_OK) + { + return ret; + } + + fee_cache_init(); + fee_ckpt_init(); + fee_lane_fast_init(); + fee_lane_log_init(); + fee_lane_bulk_init(); + + g_fee_ctx.status = FEE_STATUS_BUSY_INTERNAL; + g_fee_ctx.job_result = FEE_JOB_PENDING; + g_fee_ctx.lane[FEE_LANE_FAST].free_offset = FEE_INVALID_ADDR; + g_fee_ctx.lane[FEE_LANE_NORMAL].free_offset = FEE_INVALID_ADDR; + g_fee_ctx.lane[FEE_LANE_BULK].free_offset = FEE_INVALID_ADDR; + g_fee_ctx.lane[FEE_LANE_FAST].scan_start = FEE_INVALID_ADDR; + g_fee_ctx.lane[FEE_LANE_NORMAL].scan_start = FEE_INVALID_ADDR; + g_fee_ctx.lane[FEE_LANE_BULK].scan_start = FEE_INVALID_ADDR; + + return fee_recovery_start(); +} + +fee_ret_t fee_core_read(uint16_t block_id, uint16_t offset, uint8_t *dst, 
uint16_t len) +{ + fee_cache_entry_t *entry; + const fee_block_cfg_t *cfg; + fee_ret_t ret; + + cfg = fee_cfg_find_block(block_id); + if (cfg == RT_NULL) + { + return FEE_E_PARAM; + } + + if (!fee_recovery_can_read_block(block_id)) + { + return FEE_E_BUSY; + } + + entry = fee_cache_lookup(block_id); + if ((entry == RT_NULL) || (entry->cur_valid == 0U)) + { + return FEE_E_NOT_OK; + } + + ret = fee_core_read_payload(entry->cur_addr, offset, dst, len); + if ((ret != FEE_E_OK) && (entry->prev_valid != 0U)) + { + ret = fee_core_read_payload(entry->prev_addr, offset, dst, len); + } + + return ret; +} + +fee_ret_t fee_core_write(uint16_t block_id, const uint8_t *src, uint16_t len) +{ + const fee_block_cfg_t *cfg; + uint32_t addr; + uint32_t seq; + fee_ret_t ret; + + if (g_fee_ctx.init_state != FEE_INIT_FULL_READY) + { + return FEE_E_BUSY; + } + + cfg = fee_cfg_find_block(block_id); + if ((cfg == RT_NULL) || (len > cfg->max_len)) + { + return FEE_E_PARAM; + } + + ret = fee_core_append_record(block_id, src, len, FEE_RECORD_DATA, cfg->lane_type, &addr, &seq); + if (ret != FEE_E_OK) + { + return ret; + } + + fee_cache_update_data(block_id, cfg->lane_type, addr, len, seq); + fee_core_request_checkpoint(cfg); + return FEE_E_OK; +} + +fee_ret_t fee_core_invalidate(uint16_t block_id) +{ + const fee_block_cfg_t *cfg; + uint32_t addr; + uint32_t seq; + fee_ret_t ret; + + if (g_fee_ctx.init_state != FEE_INIT_FULL_READY) + { + return FEE_E_BUSY; + } + + cfg = fee_cfg_find_block(block_id); + if (cfg == RT_NULL) + { + return FEE_E_PARAM; + } + + ret = fee_core_append_record(block_id, (const uint8_t *)"", 0U, FEE_RECORD_TOMBSTONE, + cfg->lane_type, &addr, &seq); + if (ret != FEE_E_OK) + { + return ret; + } + + fee_cache_update_tombstone(block_id, cfg->lane_type, addr, seq); + fee_core_request_checkpoint(cfg); + return FEE_E_OK; +} + +fee_ret_t fee_core_rollback(uint16_t block_id) +{ + fee_cache_entry_t *entry; + const fee_block_cfg_t *cfg; + uint8_t buffer[FEE_CFG_MAX_BLOCK_LEN]; + 
fee_record_header_t prev_header; + fee_ret_t ret; + + if (g_fee_ctx.init_state != FEE_INIT_FULL_READY) + { + return FEE_E_BUSY; + } + + entry = fee_cache_lookup(block_id); + if ((entry == RT_NULL) || (entry->prev_valid == 0U)) + { + return FEE_E_NOT_OK; + } + + cfg = fee_cfg_find_block(block_id); + if (cfg == RT_NULL) + { + return FEE_E_PARAM; + } + + ret = fee_core_read_header(entry->prev_addr, &prev_header); + if (ret != FEE_E_OK) + { + return ret; + } + + if ((fee_onflash_validate_record_header(&prev_header, cfg) == RT_FALSE) || + (prev_header.record_type != (uint8_t)FEE_RECORD_DATA) || + (prev_header.data_len > (uint16_t)sizeof(buffer))) + { + return FEE_E_NOT_OK; + } + + ret = fee_core_read_payload(entry->prev_addr, 0U, buffer, prev_header.data_len); + if (ret != FEE_E_OK) + { + return ret; + } + + return fee_core_write(block_id, buffer, prev_header.data_len); +} + +void fee_core_mainfunction(void) +{ + if (g_fee_ctx.init_state != FEE_INIT_FULL_READY) + { + return; + } + + if ((g_fee_ctx.checkpoint_requested == 0U) || (g_fee_ctx.checkpoint_dirty == 0U)) + { + return; + } + + if (fee_gc_allows_checkpoint() == RT_FALSE) + { + return; + } + + if ((g_fee_ctx.checkpoint_force == 0U) && (fee_sched_has_pending_work() != RT_FALSE)) + { + return; + } + + if (fee_ckpt_flush() == FEE_E_OK) + { + g_fee_ctx.checkpoint_requested = 0U; + g_fee_ctx.checkpoint_force = 0U; + } + else + { + g_fee_ctx.job_result = FEE_JOB_FAILED; + } +} diff --git a/components/custom_fee/fee_flash_drv.h b/components/custom_fee/fee_flash_drv.h new file mode 100644 index 00000000000..7577540a62a --- /dev/null +++ b/components/custom_fee/fee_flash_drv.h @@ -0,0 +1,23 @@ +#ifndef CUSTOM_FEE_FLASH_DRV_H +#define CUSTOM_FEE_FLASH_DRV_H + +#include "fee_port.h" + +/* + * Board or driver-specific flash adapters may provide strong definitions for + * these symbols. The default weak implementation lives in fee_port.c and uses + * a RAM-backed mock flash for bring-up and QEMU testing. 
+ */ +fee_ret_t fee_flash_driver_init(void); +fee_ret_t fee_flash_driver_get_caps(fee_flash_caps_t *caps); +fee_ret_t fee_flash_driver_read(uint32_t addr, uint8_t *dst, uint32_t len); +fee_ret_t fee_flash_driver_write(uint32_t addr, const uint8_t *src, uint32_t len); +fee_ret_t fee_flash_driver_erase(uint32_t addr, uint32_t len); +void fee_flash_driver_mainfunction(void); +fee_status_t fee_flash_driver_get_status(void); +fee_job_result_t fee_flash_driver_get_job_result(void); +fee_ret_t fee_flash_driver_debug_reset_stats(void); +fee_ret_t fee_flash_driver_debug_get_stats(fee_port_debug_stats_t *stats); +fee_ret_t fee_flash_driver_debug_get_storage(const uint8_t **storage, uint32_t *size); + +#endif diff --git a/components/custom_fee/fee_gc.c b/components/custom_fee/fee_gc.c new file mode 100644 index 00000000000..cf347e43ad3 --- /dev/null +++ b/components/custom_fee/fee_gc.c @@ -0,0 +1,537 @@ +#include "fee_internal.h" + +#define FEE_GC_PREV_VALID_FLAG (0x02UL) +#define FEE_GC_COPY_BUFFER_SIZE (2048U) + +static uint8_t g_fee_gc_rr_next = (uint8_t)FEE_LANE_FAST; + +static rt_bool_t fee_gc_lane_supports_reclaim(uint8_t lane) +{ + if ((lane > (uint8_t)FEE_LANE_META) && + (lane < (uint8_t)FEE_LANE_COUNT) && + (g_fee_ctx.lane[lane].sector_count > 1U)) + { + return RT_TRUE; + } + + return RT_FALSE; +} + +static uint8_t fee_gc_next_business_lane(uint8_t lane) +{ + if ((lane < (uint8_t)FEE_LANE_FAST) || (lane >= (uint8_t)FEE_LANE_BULK)) + { + return (uint8_t)FEE_LANE_FAST; + } + + return (uint8_t)(lane + 1U); +} + +static uint8_t fee_gc_find_active_lane_rr(uint8_t start_lane) +{ + uint8_t lane = start_lane; + uint8_t count; + + for (count = 0U; count < 3U; ++count) + { + if ((lane >= (uint8_t)FEE_LANE_FAST) && + (lane <= (uint8_t)FEE_LANE_BULK) && + (g_fee_ctx.lane[lane].gc_state != (uint8_t)FEE_GC_IDLE)) + { + return lane; + } + + lane = fee_gc_next_business_lane(lane); + } + + return (uint8_t)FEE_LANE_COUNT; +} + +static uint8_t fee_gc_find_requested_lane_rr(uint8_t 
start_lane) +{ + uint8_t lane = start_lane; + uint8_t count; + + for (count = 0U; count < 3U; ++count) + { + if ((fee_gc_lane_supports_reclaim(lane) != RT_FALSE) && + (g_fee_ctx.lane[lane].gc_requested != 0U)) + { + return lane; + } + + lane = fee_gc_next_business_lane(lane); + } + + return (uint8_t)FEE_LANE_COUNT; +} + +static rt_bool_t fee_gc_addr_in_sector(uint32_t sector_base, uint32_t sector_limit, uint32_t addr) +{ + if ((addr >= sector_base) && (addr < sector_limit)) + { + return RT_TRUE; + } + + return RT_FALSE; +} + +static uint32_t fee_gc_sector_base(const fee_lane_ctx_t *lane_ctx, const fee_flash_caps_t *caps, uint8_t sector_idx) +{ + return lane_ctx->range_base + ((uint32_t)sector_idx * caps->erase_unit); +} + +static uint32_t fee_gc_sector_data_start(uint32_t sector_base, const fee_flash_caps_t *caps) +{ + return sector_base + fee_onflash_align_up((uint32_t)sizeof(fee_sector_header_t), caps->program_unit); +} + +static fee_ret_t fee_gc_write_sector_header(uint8_t lane, uint8_t sector_idx, uint8_t state, + uint32_t generation, const fee_flash_caps_t *caps, rt_bool_t erase_first) +{ + fee_sector_header_t header; + fee_lane_ctx_t *lane_ctx = &g_fee_ctx.lane[lane]; + uint32_t sector_base = fee_gc_sector_base(lane_ctx, caps, sector_idx); + fee_ret_t ret; + + ret = fee_onflash_encode_sector_header(&header, lane, state, generation, + fee_gc_sector_data_start(sector_base, caps), sector_base + caps->erase_unit); + if (ret != FEE_E_OK) + { + return ret; + } + + if ((erase_first != RT_FALSE) && (fee_port_erase(sector_base, caps->erase_unit) != FEE_E_OK)) + { + return FEE_E_NOT_OK; + } + + if (fee_port_write(sector_base, (const uint8_t *)&header, (uint32_t)sizeof(header)) != FEE_E_OK) + { + return FEE_E_NOT_OK; + } + + return FEE_E_OK; +} + +static fee_ret_t fee_gc_copy_record(uint32_t src_addr, uint32_t dst_addr, uint32_t dst_limit, + const fee_block_cfg_t *cfg, const fee_flash_caps_t *caps, uint32_t *out_span) +{ + fee_record_header_t header; + fee_commit_tail_t 
tail; + uint8_t buffer[FEE_GC_COPY_BUFFER_SIZE]; + uint32_t stored_len; + uint32_t tail_addr; + uint32_t span; + fee_ret_t ret; + + ret = fee_port_read(src_addr, (uint8_t *)&header, (uint32_t)sizeof(header)); + if (ret != FEE_E_OK) + { + return ret; + } + + if ((cfg == RT_NULL) || (fee_onflash_validate_record_header(&header, cfg) == RT_FALSE)) + { + return FEE_E_NOT_OK; + } + + span = fee_onflash_calc_record_span(cfg, header.data_len); + if (span > (uint32_t)sizeof(buffer)) + { + return FEE_E_NOT_OK; + } + + if ((dst_addr + span) > dst_limit) + { + return FEE_E_NOT_OK; + } + + ret = fee_port_read(src_addr, &buffer[0], span); + if (ret != FEE_E_OK) + { + return ret; + } + + stored_len = fee_onflash_align_up((uint32_t)header.data_len, caps->program_unit); + tail_addr = (uint32_t)sizeof(header) + stored_len; + if ((tail_addr + (uint32_t)sizeof(tail)) > span) + { + return FEE_E_NOT_OK; + } + + (void)memcpy(&tail, &buffer[tail_addr], sizeof(tail)); + if ((fee_onflash_validate_commit_tail(&tail) == RT_FALSE) || + (fee_onflash_validate_payload_crc(&tail, &buffer[sizeof(header)], header.data_len) == RT_FALSE)) + { + return FEE_E_NOT_OK; + } + + ret = fee_port_write(dst_addr, &buffer[0], span); + if (ret != FEE_E_OK) + { + return ret; + } + + if (out_span != RT_NULL) + { + *out_span = span; + } + + return FEE_E_OK; +} + +static fee_ret_t fee_gc_copy_one_cache_record(uint16_t block_id, uint32_t src_addr, uint32_t dst_limit, + uint32_t *dst_cursor, const fee_flash_caps_t *caps) +{ + const fee_block_cfg_t *cfg = fee_cfg_find_block(block_id); + uint32_t span; + fee_ret_t ret; + + if ((cfg == RT_NULL) || (dst_cursor == RT_NULL)) + { + return FEE_E_NOT_OK; + } + + ret = fee_gc_copy_record(src_addr, *dst_cursor, dst_limit, cfg, caps, &span); + if (ret != FEE_E_OK) + { + return ret; + } + + fee_cache_relocate_address(block_id, src_addr, *dst_cursor); + *dst_cursor += span; + return FEE_E_OK; +} + +static fee_ret_t fee_gc_copy_one_live_record(uint8_t lane, const fee_flash_caps_t 
*caps, rt_bool_t *copied) +{ + fee_lane_ctx_t *lane_ctx = &g_fee_ctx.lane[lane]; + fee_ckpt_cache_entry_t entries[FEE_CACHE_MAX_ENTRIES]; + uint16_t entry_count; + uint32_t cursor; + uint32_t total_cursor; + uint32_t src_base; + uint32_t src_limit; + uint32_t dst_limit; + + if (copied == RT_NULL) + { + return FEE_E_PARAM; + } + + src_base = fee_gc_sector_base(lane_ctx, caps, lane_ctx->gc_old_sector); + src_limit = src_base + caps->erase_unit; + dst_limit = fee_gc_sector_base(lane_ctx, caps, lane_ctx->dst_sector) + caps->erase_unit; + entry_count = fee_cache_export_ckpt(&entries[0], FEE_CACHE_MAX_ENTRIES); + total_cursor = (uint32_t)entry_count * 2U; + + for (cursor = (uint32_t)lane_ctx->gc_cursor; cursor < total_cursor; ++cursor) + { + uint16_t entry_idx = (uint16_t)(cursor / 2U); + rt_bool_t use_prev = ((cursor & 1UL) == 0UL) ? RT_TRUE : RT_FALSE; + uint32_t src_addr = FEE_INVALID_ADDR; + + if (entries[entry_idx].lane != lane) + { + continue; + } + + if (use_prev != RT_FALSE) + { + if (((entries[entry_idx].flags & FEE_GC_PREV_VALID_FLAG) != 0UL) && + fee_gc_addr_in_sector(src_base, src_limit, entries[entry_idx].prev_addr)) + { + src_addr = entries[entry_idx].prev_addr; + } + } + else if ((entries[entry_idx].cur_addr != FEE_INVALID_ADDR) && + (entries[entry_idx].cur_addr != entries[entry_idx].prev_addr) && + fee_gc_addr_in_sector(src_base, src_limit, entries[entry_idx].cur_addr)) + { + src_addr = entries[entry_idx].cur_addr; + } + + if (src_addr == FEE_INVALID_ADDR) + { + continue; + } + + if (fee_gc_copy_one_cache_record((uint16_t)entries[entry_idx].block_id, src_addr, dst_limit, + &lane_ctx->gc_write_offset, caps) != FEE_E_OK) + { + return FEE_E_NOT_OK; + } + + lane_ctx->gc_cursor = (uint16_t)(cursor + 1U); + *copied = RT_TRUE; + return FEE_E_OK; + } + + lane_ctx->gc_cursor = (uint16_t)total_cursor; + *copied = RT_FALSE; + return FEE_E_OK; +} + +static uint8_t fee_gc_choose_dst_sector(const fee_lane_ctx_t *lane_ctx) +{ + uint8_t idx; + + if 
((lane_ctx->spare_sector < lane_ctx->sector_count) && + (lane_ctx->spare_sector != lane_ctx->active_sector)) + { + return lane_ctx->spare_sector; + } + + for (idx = 0U; idx < lane_ctx->sector_count; ++idx) + { + if (idx != lane_ctx->active_sector) + { + return idx; + } + } + + return lane_ctx->active_sector; +} + +static fee_ret_t fee_gc_step_lane(uint8_t lane) +{ + fee_lane_ctx_t *lane_ctx = &g_fee_ctx.lane[lane]; + fee_flash_caps_t caps; + fee_ret_t ret; + + if (fee_gc_lane_supports_reclaim(lane) == RT_FALSE) + { + return FEE_E_NOT_OK; + } + + ret = fee_port_get_caps(&caps); + if (ret != FEE_E_OK) + { + return ret; + } + + if (lane_ctx->gc_state == (uint8_t)FEE_GC_IDLE) + { + if ((lane_ctx->gc_requested == 0U) && (lane_ctx->gc_force == 0U)) + { + return FEE_E_OK; + } + + lane_ctx->gc_old_sector = lane_ctx->active_sector; + lane_ctx->dst_sector = fee_gc_choose_dst_sector(lane_ctx); + if (lane_ctx->dst_sector == lane_ctx->active_sector) + { + return FEE_E_NOT_OK; + } + + lane_ctx->gc_state = (uint8_t)FEE_GC_PREPARE_DST; + lane_ctx->gc_cursor = 0U; + lane_ctx->gc_write_offset = FEE_INVALID_ADDR; + return FEE_E_OK; + } + + if (lane_ctx->gc_state == (uint8_t)FEE_GC_PREPARE_DST) + { + uint32_t dst_base = fee_gc_sector_base(lane_ctx, &caps, lane_ctx->dst_sector); + + ret = fee_gc_write_sector_header(lane, lane_ctx->dst_sector, (uint8_t)FEE_SECTOR_GC_DST, + lane_ctx->active_generation + 1U, &caps, RT_TRUE); + if (ret != FEE_E_OK) + { + return ret; + } + + lane_ctx->gc_write_offset = fee_gc_sector_data_start(dst_base, &caps); + lane_ctx->gc_state = (uint8_t)FEE_GC_COPY_ONE; + return FEE_E_OK; + } + + if (lane_ctx->gc_state == (uint8_t)FEE_GC_COPY_ONE) + { + rt_bool_t copied = RT_FALSE; + + ret = fee_gc_copy_one_live_record(lane, &caps, &copied); + if (ret != FEE_E_OK) + { + return ret; + } + + if (copied == RT_FALSE) + { + lane_ctx->gc_state = (uint8_t)FEE_GC_SWITCH_ACTIVE; + } + + return FEE_E_OK; + } + + if (lane_ctx->gc_state == (uint8_t)FEE_GC_SWITCH_ACTIVE) + { + 
uint32_t dst_base = fee_gc_sector_base(lane_ctx, &caps, lane_ctx->dst_sector); + + ret = fee_gc_write_sector_header(lane, lane_ctx->dst_sector, (uint8_t)FEE_SECTOR_ACTIVE, + lane_ctx->active_generation + 1U, &caps, RT_FALSE); + if (ret != FEE_E_OK) + { + return ret; + } + + lane_ctx->active_sector = lane_ctx->dst_sector; + lane_ctx->base_addr = dst_base; + lane_ctx->data_start = fee_gc_sector_data_start(dst_base, &caps); + lane_ctx->limit_addr = dst_base + caps.erase_unit; + lane_ctx->free_offset = lane_ctx->gc_write_offset; + lane_ctx->scan_start = lane_ctx->free_offset; + lane_ctx->active_generation += 1U; + lane_ctx->spare_sector = lane_ctx->gc_old_sector; + g_fee_ctx.checkpoint_dirty = 1U; + g_fee_ctx.checkpoint_requested = 1U; + lane_ctx->gc_state = (uint8_t)FEE_GC_ERASE_OLD; + return FEE_E_OK; + } + + if (lane_ctx->gc_state == (uint8_t)FEE_GC_ERASE_OLD) + { + ret = fee_port_erase(fee_gc_sector_base(lane_ctx, &caps, lane_ctx->gc_old_sector), caps.erase_unit); + if (ret != FEE_E_OK) + { + return ret; + } + + lane_ctx->dst_sector = lane_ctx->spare_sector; + lane_ctx->gc_state = (uint8_t)FEE_GC_IDLE; + lane_ctx->gc_cursor = 0U; + lane_ctx->gc_requested = 0U; + lane_ctx->gc_force = 0U; + lane_ctx->gc_write_offset = FEE_INVALID_ADDR; + return FEE_E_OK; + } + + return FEE_E_NOT_OK; +} + +fee_ret_t fee_gc_reclaim_sync(uint8_t lane) +{ + uint16_t step_budget = (uint16_t)(FEE_CACHE_MAX_ENTRIES * 2U + 8U); + fee_ret_t ret = FEE_E_OK; + + if (fee_gc_lane_supports_reclaim(lane) == RT_FALSE) + { + return FEE_E_NOT_OK; + } + + g_fee_ctx.lane[lane].gc_requested = 1U; + g_fee_ctx.lane[lane].gc_force = 1U; + + while (step_budget > 0U) + { + ret = fee_gc_step_lane(lane); + if (ret != FEE_E_OK) + { + return ret; + } + + if (g_fee_ctx.lane[lane].gc_state == (uint8_t)FEE_GC_IDLE) + { + return FEE_E_OK; + } + + --step_budget; + } + + return FEE_E_BUSY; +} + +rt_bool_t fee_gc_lane_blocks_io(uint8_t lane) +{ + if ((lane >= (uint8_t)FEE_LANE_COUNT) || (lane <= (uint8_t)FEE_LANE_META)) 
+ { + return RT_FALSE; + } + + if ((g_fee_ctx.lane[lane].gc_state != (uint8_t)FEE_GC_IDLE) && + (g_fee_ctx.lane[lane].gc_state != (uint8_t)FEE_GC_ERASE_OLD)) + { + return RT_TRUE; + } + + return RT_FALSE; +} + +rt_bool_t fee_gc_has_background_work(void) +{ + uint8_t lane; + + for (lane = (uint8_t)FEE_LANE_FAST; lane <= (uint8_t)FEE_LANE_BULK; ++lane) + { + if ((g_fee_ctx.lane[lane].gc_state != (uint8_t)FEE_GC_IDLE) || + (g_fee_ctx.lane[lane].gc_requested != 0U) || + (g_fee_ctx.lane[lane].gc_force != 0U)) + { + return RT_TRUE; + } + } + + return RT_FALSE; +} + +rt_bool_t fee_gc_allows_checkpoint(void) +{ + uint8_t lane; + + for (lane = (uint8_t)FEE_LANE_FAST; lane <= (uint8_t)FEE_LANE_BULK; ++lane) + { + if ((g_fee_ctx.lane[lane].gc_state == (uint8_t)FEE_GC_PREPARE_DST) || + (g_fee_ctx.lane[lane].gc_state == (uint8_t)FEE_GC_COPY_ONE) || + (g_fee_ctx.lane[lane].gc_state == (uint8_t)FEE_GC_SWITCH_ACTIVE)) + { + return RT_FALSE; + } + } + + return RT_TRUE; +} + +void fee_gc_mainfunction(void) +{ + uint8_t target_lane = (uint8_t)FEE_LANE_COUNT; + + if (fee_recovery_is_full_ready() == RT_FALSE) + { + return; + } + + target_lane = fee_gc_find_active_lane_rr(g_fee_gc_rr_next); + + if (target_lane == (uint8_t)FEE_LANE_COUNT) + { + uint8_t lane; + + for (lane = (uint8_t)FEE_LANE_FAST; lane <= (uint8_t)FEE_LANE_BULK; ++lane) + { + if ((fee_gc_lane_supports_reclaim(lane) != RT_FALSE) && + (g_fee_ctx.lane[lane].gc_force != 0U)) + { + target_lane = lane; + break; + } + } + } + + if (target_lane == (uint8_t)FEE_LANE_COUNT) + { + target_lane = fee_gc_find_requested_lane_rr(g_fee_gc_rr_next); + } + + if (target_lane < (uint8_t)FEE_LANE_COUNT) + { + if (fee_gc_step_lane(target_lane) != FEE_E_OK) + { + g_fee_ctx.job_result = FEE_JOB_FAILED; + } + else + { + g_fee_gc_rr_next = fee_gc_next_business_lane(target_lane); + } + } +} diff --git a/components/custom_fee/fee_internal.h b/components/custom_fee/fee_internal.h new file mode 100644 index 00000000000..661f4f16f8e --- /dev/null +++ 
b/components/custom_fee/fee_internal.h @@ -0,0 +1,144 @@ +#ifndef CUSTOM_FEE_INTERNAL_H +#define CUSTOM_FEE_INTERNAL_H + +#include +#include "fee_api.h" +#include "fee_cfg.h" +#include "fee_port.h" +#include "fee_onflash.h" + +#define FEE_INVALID_ADDR (0xFFFFFFFFUL) +#define FEE_CACHE_MAX_ENTRIES (32U) + +typedef enum +{ + FEE_GC_IDLE = 0, + FEE_GC_PREPARE_DST, + FEE_GC_COPY_ONE, + FEE_GC_SWITCH_ACTIVE, + FEE_GC_ERASE_OLD +} fee_gc_state_t; + +typedef struct +{ + uint8_t lane; + uint32_t cur_addr; + uint32_t prev_addr; + uint32_t seq; + uint16_t len; + uint8_t cur_valid; + uint8_t prev_valid; + uint8_t cur_sector; + uint8_t prev_sector; +} fee_cache_entry_t; + +typedef struct +{ + uint8_t active_sector; + uint8_t dst_sector; + uint8_t spare_sector; + uint8_t gc_old_sector; + uint8_t sector_count; + uint16_t gc_cursor; + uint8_t gc_state; + uint8_t gc_requested; + uint8_t gc_force; + uint32_t range_base; + uint32_t range_limit; + uint32_t base_addr; + uint32_t data_start; + uint32_t limit_addr; + uint32_t scan_start; + uint32_t free_offset; + uint32_t gc_write_offset; + uint32_t active_generation; + uint32_t gc_start_threshold; + uint32_t gc_force_threshold; + uint32_t dirty_record_count; + uint32_t dirty_bytes; +} fee_lane_ctx_t; + +typedef struct +{ + fee_lane_ctx_t lane[FEE_LANE_COUNT]; + fee_status_t status; + fee_job_result_t job_result; + fee_init_state_t init_state; + uint32_t checkpoint_generation; + uint8_t checkpoint_dirty; + uint8_t checkpoint_requested; + uint8_t checkpoint_force; +} fee_super_ctx_t; + +typedef struct +{ + uint16_t block_id; + uint16_t offset; + uint16_t len; + const uint8_t *src; + uint8_t *dst; +} fee_request_t; + +typedef enum +{ + FEE_REQ_NONE = 0, + FEE_REQ_WRITE, + FEE_REQ_INVALIDATE, + FEE_REQ_ROLLBACK +} fee_request_type_t; + +typedef struct +{ + uint32_t block_id; + uint32_t lane; + uint32_t flags; + uint32_t len; + uint32_t cur_addr; + uint32_t prev_addr; + uint32_t seq; + uint32_t reserved; +} fee_ckpt_cache_entry_t; + +extern 
fee_super_ctx_t g_fee_ctx; + +void fee_core_reset_context(void); +fee_ret_t fee_core_init(void); +fee_ret_t fee_core_read(uint16_t block_id, uint16_t offset, uint8_t *dst, uint16_t len); +fee_ret_t fee_core_write(uint16_t block_id, const uint8_t *src, uint16_t len); +fee_ret_t fee_core_invalidate(uint16_t block_id); +fee_ret_t fee_core_rollback(uint16_t block_id); +void fee_core_mainfunction(void); + +fee_ret_t fee_sched_submit_read(uint16_t block_id, uint16_t offset, uint8_t *dst, uint16_t len); +fee_ret_t fee_sched_submit_write(uint16_t block_id, const uint8_t *src, uint16_t len); +fee_ret_t fee_sched_submit_invalidate(uint16_t block_id); +fee_ret_t fee_sched_submit_rollback(uint16_t block_id); +void fee_sched_mainfunction(void); +rt_bool_t fee_sched_has_pending_work(void); + +fee_ret_t fee_recovery_start(void); +fee_ret_t fee_recovery_step(void); +rt_bool_t fee_recovery_can_read_block(uint16_t block_id); +rt_bool_t fee_recovery_is_full_ready(void); + +void fee_gc_mainfunction(void); +fee_ret_t fee_gc_reclaim_sync(uint8_t lane); +rt_bool_t fee_gc_lane_blocks_io(uint8_t lane); +rt_bool_t fee_gc_has_background_work(void); +rt_bool_t fee_gc_allows_checkpoint(void); +void fee_cache_init(void); +fee_cache_entry_t *fee_cache_lookup(uint16_t block_id); +void fee_cache_update_data(uint16_t block_id, uint8_t lane, uint32_t addr, uint16_t len, uint32_t seq); +void fee_cache_update_tombstone(uint16_t block_id, uint8_t lane, uint32_t addr, uint32_t seq); +void fee_cache_relocate_address(uint16_t block_id, uint32_t old_addr, uint32_t new_addr); +uint16_t fee_cache_export_ckpt(fee_ckpt_cache_entry_t *entries, uint16_t max_entries); +void fee_cache_import_ckpt(const fee_ckpt_cache_entry_t *entries, uint16_t entry_count); +void fee_ckpt_init(void); +fee_ret_t fee_ckpt_restore(void); +fee_ret_t fee_ckpt_flush(void); +rt_bool_t fee_ckpt_has_restored_image(void); +void fee_lane_fast_init(void); +void fee_lane_log_init(void); +void fee_lane_bulk_init(void); + +#endif diff --git 
a/components/custom_fee/fee_lane_bulk.c b/components/custom_fee/fee_lane_bulk.c new file mode 100644 index 00000000000..c3f70aba91a --- /dev/null +++ b/components/custom_fee/fee_lane_bulk.c @@ -0,0 +1,5 @@ +#include "fee_internal.h" + +void fee_lane_bulk_init(void) +{ +} diff --git a/components/custom_fee/fee_lane_fast.c b/components/custom_fee/fee_lane_fast.c new file mode 100644 index 00000000000..69eaf6d801a --- /dev/null +++ b/components/custom_fee/fee_lane_fast.c @@ -0,0 +1,5 @@ +#include "fee_internal.h" + +void fee_lane_fast_init(void) +{ +} diff --git a/components/custom_fee/fee_lane_log.c b/components/custom_fee/fee_lane_log.c new file mode 100644 index 00000000000..4babf0868f2 --- /dev/null +++ b/components/custom_fee/fee_lane_log.c @@ -0,0 +1,5 @@ +#include "fee_internal.h" + +void fee_lane_log_init(void) +{ +} diff --git a/components/custom_fee/fee_onflash.c b/components/custom_fee/fee_onflash.c new file mode 100644 index 00000000000..52b6453e838 --- /dev/null +++ b/components/custom_fee/fee_onflash.c @@ -0,0 +1,294 @@ +#include "fee_onflash.h" +#include + +static uint32_t fee_onflash_crc32_update(uint32_t crc, const uint8_t *data, uint32_t len) +{ + uint32_t i; + + if (data == RT_NULL) + { + return crc; + } + + for (i = 0U; i < len; ++i) + { + crc = (crc << 5) - crc + data[i]; + } + + return crc; +} + +static uint32_t fee_onflash_crc32_accumulate(const uint8_t *data, uint32_t len) +{ + return fee_onflash_crc32_update(0U, data, len); +} + +static rt_bool_t fee_onflash_is_sector_state_valid(uint8_t state) +{ + if ((state == (uint8_t)FEE_SECTOR_PREPARE) || + (state == (uint8_t)FEE_SECTOR_ACTIVE) || + (state == (uint8_t)FEE_SECTOR_GC_DST) || + (state == (uint8_t)FEE_SECTOR_OLD_PENDING_ERASE) || + (state == (uint8_t)FEE_SECTOR_BAD)) + { + return RT_TRUE; + } + + return RT_FALSE; +} + +static uint32_t fee_onflash_sector_header_crc(const fee_sector_header_t *header) +{ + const uint8_t *raw; + uint32_t prefix_len; + + if (header == RT_NULL) + { + return 0U; + 
    }

    /* Checksum every header byte that precedes the hdr_crc field itself;
     * hdr_crc and the fields after it are excluded from the digest. */
    raw = (const uint8_t *)header;
    prefix_len = (uint32_t)((const uint8_t *)&header->hdr_crc - raw);
    return fee_onflash_crc32_accumulate(raw, prefix_len);
}

/* Checksum of a record header up to (not including) its hdr_crc field.
 * Returns 0 for a NULL header so callers always get a defined value.
 * NOTE(review): despite the name, fee_onflash_crc32_update computes a
 * 31-multiplier rolling hash ((crc << 5) - crc + byte), not CRC-32; it is
 * deterministic and part of the on-flash format, but the name misleads. */
static uint32_t fee_onflash_record_header_crc(const fee_record_header_t *header)
{
    const uint8_t *raw;
    uint32_t prefix_len;

    if (header == RT_NULL)
    {
        return 0U;
    }

    raw = (const uint8_t *)header;
    prefix_len = (uint32_t)((const uint8_t *)&header->hdr_crc - raw);
    return fee_onflash_crc32_accumulate(raw, prefix_len);
}

/* Checksum protecting the commit tail's own metadata: data_crc then
 * tail_flags are folded in field by field.  tail_crc and commit_marker are
 * excluded because they are written after this value is computed. */
static uint32_t fee_onflash_commit_tail_crc(const fee_commit_tail_t *tail)
{
    uint32_t crc;

    if (tail == RT_NULL)
    {
        return 0U;
    }

    crc = fee_onflash_crc32_update(0U, (const uint8_t *)&tail->data_crc, (uint32_t)sizeof(tail->data_crc));
    crc = fee_onflash_crc32_update(crc, (const uint8_t *)&tail->tail_flags, (uint32_t)sizeof(tail->tail_flags));
    return crc;
}

/* Round value up to the next multiple of align.  align == 0 means
 * "no alignment" and returns value unchanged. */
uint32_t fee_onflash_align_up(uint32_t value, uint32_t align)
{
    if (align == 0U)
    {
        return value;
    }

    return ((value + align - 1U) / align) * align;
}

/* Total on-flash footprint of one record: header + payload (padded to the
 * config alignment unit) + commit tail, then rounded up to the per-block
 * record alignment (falling back to FEE_CFG_ALIGN_UNIT when it is 0). */
uint32_t fee_onflash_calc_record_span(const fee_block_cfg_t *cfg, uint16_t data_len)
{
    uint32_t align;
    uint32_t payload_len;
    uint32_t span;

    if (cfg == RT_NULL)
    {
        return 0U;
    }

    align = (cfg->record_align == 0U) ?
FEE_CFG_ALIGN_UNIT : cfg->record_align;
    payload_len = fee_onflash_align_up((uint32_t)data_len, FEE_CFG_ALIGN_UNIT);
    span = (uint32_t)sizeof(fee_record_header_t) + payload_len + (uint32_t)sizeof(fee_commit_tail_t);

    return fee_onflash_align_up(span, align);
}

/* Fill in an in-RAM sector header image.  The caller writes it to flash.
 * hdr_crc covers the fields before it; commit_marker is set last so a header
 * without the marker can be recognized as an incomplete write. */
fee_ret_t fee_onflash_encode_sector_header(fee_sector_header_t *header,
    uint8_t lane_id, uint8_t state, uint32_t generation, uint32_t data_start, uint32_t data_end)
{
    if (header == RT_NULL)
    {
        return FEE_E_PARAM;
    }

    /* Reject unknown sector states and empty/inverted data windows. */
    if ((fee_onflash_is_sector_state_valid(state) == RT_FALSE) || (data_start >= data_end))
    {
        return FEE_E_PARAM;
    }

    (void)memset(header, 0, sizeof(*header));
    header->magic = FEE_SECTOR_MAGIC;
    header->format_version = FEE_CFG_FORMAT_VERSION;
    header->lane_id = lane_id;
    header->state = state;
    header->generation = generation;
    header->data_start = data_start;
    header->data_end = data_end;
    header->hdr_seq = generation;      /* seq mirrors generation here */
    header->hdr_crc = fee_onflash_sector_header_crc(header);
    header->commit_marker = FEE_COMMIT_MARKER;

    return FEE_E_OK;
}

/* Fill in an in-RAM record header.  data_len/record_type are not validated
 * here; fee_onflash_validate_record_header() enforces them on the read path. */
fee_ret_t fee_onflash_encode_record_header(fee_record_header_t *header,
    uint16_t block_id, uint8_t record_type, uint16_t data_len, uint32_t seq)
{
    if (header == RT_NULL)
    {
        return FEE_E_PARAM;
    }

    (void)memset(header, 0, sizeof(*header));
    header->magic = FEE_RECORD_MAGIC;
    header->block_id = block_id;
    header->record_type = record_type;
    header->flags = 0x01U;             /* only bits 0-2 are legal per validate */
    header->data_len = data_len;
    header->header_len = (uint16_t)sizeof(*header);
    header->seq = seq;
    header->hdr_crc = fee_onflash_record_header_crc(header);

    return FEE_E_OK;
}

/* Build the commit tail for a payload: data_crc over the payload bytes,
 * tail_crc over (data_crc, tail_flags), then the commit marker. */
fee_ret_t fee_onflash_encode_commit_tail(fee_commit_tail_t *tail, const uint8_t *data, uint16_t len)
{
    if (tail == RT_NULL)
    {
        return FEE_E_PARAM;
    }

    (void)memset(tail, 0, sizeof(*tail));
    tail->data_crc = fee_onflash_crc32_accumulate(data, len);
    tail->tail_flags = 0U;
    tail->tail_crc = fee_onflash_commit_tail_crc(tail);
    tail->commit_marker =
FEE_COMMIT_MARKER;

    return FEE_E_OK;
}

/* A record is committed iff its tail carries the commit marker; a torn write
 * that stopped before the marker is treated as never having happened. */
rt_bool_t fee_onflash_is_record_committed(const fee_commit_tail_t *tail)
{
    if (tail == RT_NULL)
    {
        return RT_FALSE;
    }

    return (tail->commit_marker == FEE_COMMIT_MARKER) ? RT_TRUE : RT_FALSE;
}

/* Full consistency check of a sector header read from flash: magic, format
 * version, known state byte, sane data window, header CRC, commit marker. */
rt_bool_t fee_onflash_validate_sector_header(const fee_sector_header_t *header)
{
    if (header == RT_NULL)
    {
        return RT_FALSE;
    }

    if (header->magic != FEE_SECTOR_MAGIC)
    {
        return RT_FALSE;
    }

    if (header->format_version != FEE_CFG_FORMAT_VERSION)
    {
        return RT_FALSE;
    }

    if (fee_onflash_is_sector_state_valid(header->state) == RT_FALSE)
    {
        return RT_FALSE;
    }

    if ((header->data_start == 0U) || (header->data_start >= header->data_end))
    {
        return RT_FALSE;
    }

    if (header->hdr_crc != fee_onflash_sector_header_crc(header))
    {
        return RT_FALSE;
    }

    if (header->commit_marker != FEE_COMMIT_MARKER)
    {
        return RT_FALSE;
    }

    return RT_TRUE;
}

/* Validate a record header against the config entry it claims to belong to:
 * magic/id/size/CRC, flag bits, record type, and the type-specific length
 * rule (DATA: 1..max_len, TOMBSTONE: exactly 0). */
rt_bool_t fee_onflash_validate_record_header(const fee_record_header_t *header, const fee_block_cfg_t *cfg)
{
    if ((header == RT_NULL) || (cfg == RT_NULL))
    {
        return RT_FALSE;
    }

    if ((header->magic != FEE_RECORD_MAGIC) ||
        (header->block_id != cfg->block_id) ||
        (header->header_len != (uint16_t)sizeof(*header)) ||
        (header->hdr_crc != fee_onflash_record_header_crc(header)))
    {
        return RT_FALSE;
    }

    /* Only the low three flag bits are defined; anything above is corruption
     * (or a future format this version does not understand). */
    if ((header->flags & 0xF8U) != 0U)
    {
        return RT_FALSE;
    }

    if ((header->record_type != (uint8_t)FEE_RECORD_DATA) &&
        (header->record_type != (uint8_t)FEE_RECORD_TOMBSTONE))
    {
        return RT_FALSE;
    }

    if (header->record_type == (uint8_t)FEE_RECORD_DATA)
    {
        if ((header->data_len == 0U) || (header->data_len > cfg->max_len))
        {
            return RT_FALSE;
        }
    }
    else if (header->data_len != 0U)
    {
        return RT_FALSE;
    }

    return RT_TRUE;
}

/* Validate a commit tail: marker present and tail_crc consistent with the
 * (data_crc, tail_flags) pair it protects. */
rt_bool_t fee_onflash_validate_commit_tail(const fee_commit_tail_t *tail)
{
    if (tail == RT_NULL)
    {
        return RT_FALSE;
    }

    if (tail->commit_marker !=
FEE_COMMIT_MARKER) + { + return RT_FALSE; + } + + return (tail->tail_crc == fee_onflash_commit_tail_crc(tail)) ? RT_TRUE : RT_FALSE; +} + +rt_bool_t fee_onflash_validate_payload_crc(const fee_commit_tail_t *tail, const uint8_t *data, uint16_t len) +{ + uint32_t payload_crc; + + if (tail == RT_NULL) + { + return RT_FALSE; + } + + payload_crc = fee_onflash_crc32_accumulate(data, len); + return (tail->data_crc == payload_crc) ? RT_TRUE : RT_FALSE; +} diff --git a/components/custom_fee/fee_onflash.h b/components/custom_fee/fee_onflash.h new file mode 100644 index 00000000000..ed3bdbb5cb6 --- /dev/null +++ b/components/custom_fee/fee_onflash.h @@ -0,0 +1,81 @@ +#ifndef CUSTOM_FEE_ONFLASH_H +#define CUSTOM_FEE_ONFLASH_H + +#include +#include "fee_cfg.h" +#include "fee_api.h" + +#define FEE_SECTOR_MAGIC (0x46454553UL) +#define FEE_RECORD_MAGIC (0x46454552UL) +#define FEE_COMMIT_MARKER (0x434F4D4DUL) + +typedef enum +{ + FEE_SECTOR_ERASED = 0xFF, + FEE_SECTOR_PREPARE = 0x11, + FEE_SECTOR_ACTIVE = 0x22, + FEE_SECTOR_GC_DST = 0x33, + FEE_SECTOR_OLD_PENDING_ERASE = 0x44, + FEE_SECTOR_BAD = 0x55 +} fee_sector_state_t; + +typedef enum +{ + FEE_RECORD_DATA = 0xD1, + FEE_RECORD_TOMBSTONE = 0xD2, + FEE_RECORD_CKPT_CHUNK = 0xC1 +} fee_record_type_t; + +typedef struct +{ + uint32_t magic; + uint16_t format_version; + uint8_t lane_id; + uint8_t state; + uint32_t generation; + uint32_t erase_count; + uint32_t data_start; + uint32_t data_end; + uint32_t hdr_seq; + uint32_t hdr_crc; + uint32_t commit_marker; + uint32_t reserved[7]; +} fee_sector_header_t; + +typedef struct +{ + uint32_t magic; + uint16_t block_id; + uint8_t record_type; + uint8_t flags; + uint16_t data_len; + uint16_t header_len; + uint32_t seq; + uint32_t generation; + uint32_t prev_addr_hint; + uint32_t hdr_crc; + uint32_t reserved; +} fee_record_header_t; + +typedef struct +{ + uint32_t data_crc; + uint32_t tail_crc; + uint32_t tail_flags; + uint32_t commit_marker; +} fee_commit_tail_t; + +uint32_t 
fee_onflash_align_up(uint32_t value, uint32_t align); +uint32_t fee_onflash_calc_record_span(const fee_block_cfg_t *cfg, uint16_t data_len); +fee_ret_t fee_onflash_encode_sector_header(fee_sector_header_t *header, + uint8_t lane_id, uint8_t state, uint32_t generation, uint32_t data_start, uint32_t data_end); +fee_ret_t fee_onflash_encode_record_header(fee_record_header_t *header, + uint16_t block_id, uint8_t record_type, uint16_t data_len, uint32_t seq); +fee_ret_t fee_onflash_encode_commit_tail(fee_commit_tail_t *tail, const uint8_t *data, uint16_t len); +rt_bool_t fee_onflash_is_record_committed(const fee_commit_tail_t *tail); +rt_bool_t fee_onflash_validate_sector_header(const fee_sector_header_t *header); +rt_bool_t fee_onflash_validate_record_header(const fee_record_header_t *header, const fee_block_cfg_t *cfg); +rt_bool_t fee_onflash_validate_commit_tail(const fee_commit_tail_t *tail); +rt_bool_t fee_onflash_validate_payload_crc(const fee_commit_tail_t *tail, const uint8_t *data, uint16_t len); + +#endif diff --git a/components/custom_fee/fee_port.c b/components/custom_fee/fee_port.c new file mode 100644 index 00000000000..7e701c891d8 --- /dev/null +++ b/components/custom_fee/fee_port.c @@ -0,0 +1,290 @@ +#include "fee_flash_drv.h" +#include + +#ifndef CUSTOM_FEE_MOCK_FLASH_SIZE +#define CUSTOM_FEE_MOCK_FLASH_SIZE 0xA0000U +#endif + +static uint8_t g_fee_mock_flash[CUSTOM_FEE_MOCK_FLASH_SIZE]; +static fee_flash_caps_t g_fee_mock_caps = +{ + CUSTOM_FEE_MOCK_FLASH_SIZE, + 1U, + 8U, + 0xE000U, + 512U, + 0U, + 0U +}; +static fee_status_t g_fee_mock_status = FEE_STATUS_UNINIT; +static fee_job_result_t g_fee_mock_job_result = FEE_JOB_NONE; +static uint8_t g_fee_mock_initialized = 0U; +static uint8_t g_fee_mock_debug_active = 0U; +static fee_port_debug_stats_t g_fee_mock_stats; + +static fee_ret_t fee_port_check_range(uint32_t addr, uint32_t len) +{ + if ((addr + len) > (uint32_t)sizeof(g_fee_mock_flash)) + { + return FEE_E_PARAM; + } + + return FEE_E_OK; +} + 
+static fee_ret_t fee_port_check_alignment(uint32_t addr, uint32_t len, uint32_t align) +{ + if ((align == 0U) || ((addr % align) != 0U) || ((len % align) != 0U)) + { + return FEE_E_PARAM; + } + + return FEE_E_OK; +} + +fee_ret_t fee_port_init(void) +{ + return fee_flash_driver_init(); +} + +fee_ret_t fee_port_get_caps(fee_flash_caps_t *caps) +{ + return fee_flash_driver_get_caps(caps); +} + +fee_ret_t fee_port_read(uint32_t addr, uint8_t *dst, uint32_t len) +{ + return fee_flash_driver_read(addr, dst, len); +} + +fee_ret_t fee_port_write(uint32_t addr, const uint8_t *src, uint32_t len) +{ + return fee_flash_driver_write(addr, src, len); +} + +fee_ret_t fee_port_erase(uint32_t addr, uint32_t len) +{ + return fee_flash_driver_erase(addr, len); +} + +void fee_port_mainfunction(void) +{ + fee_flash_driver_mainfunction(); +} + +fee_status_t fee_port_get_status(void) +{ + return fee_flash_driver_get_status(); +} + +fee_job_result_t fee_port_get_job_result(void) +{ + return fee_flash_driver_get_job_result(); +} + +fee_ret_t fee_port_debug_reset_stats(void) +{ + return fee_flash_driver_debug_reset_stats(); +} + +fee_ret_t fee_port_debug_get_stats(fee_port_debug_stats_t *stats) +{ + return fee_flash_driver_debug_get_stats(stats); +} + +fee_ret_t fee_port_debug_get_storage(const uint8_t **storage, uint32_t *size) +{ + return fee_flash_driver_debug_get_storage(storage, size); +} + +rt_weak fee_ret_t fee_flash_driver_init(void) +{ + if (g_fee_mock_initialized == 0U) + { + (void)memset(g_fee_mock_flash, 0xFF, sizeof(g_fee_mock_flash)); + g_fee_mock_initialized = 1U; + } + + g_fee_mock_debug_active = 1U; + g_fee_mock_stats.init_calls++; + g_fee_mock_status = FEE_STATUS_IDLE; + g_fee_mock_job_result = FEE_JOB_OK; + return FEE_E_OK; +} + +rt_weak fee_ret_t fee_flash_driver_get_caps(fee_flash_caps_t *caps) +{ + if (caps == RT_NULL) + { + return FEE_E_PARAM; + } + + *caps = g_fee_mock_caps; + return FEE_E_OK; +} + +rt_weak fee_ret_t fee_flash_driver_read(uint32_t addr, uint8_t 
*dst, uint32_t len) +{ + fee_ret_t ret; + + if ((dst == RT_NULL) || (len == 0U)) + { + return FEE_E_PARAM; + } + + ret = fee_port_check_range(addr, len); + if (ret != FEE_E_OK) + { + return ret; + } + + ret = fee_port_check_alignment(addr, len, g_fee_mock_caps.read_unit); + if (ret != FEE_E_OK) + { + return ret; + } + + g_fee_mock_debug_active = 1U; + g_fee_mock_stats.read_calls++; + g_fee_mock_stats.read_bytes += len; + g_fee_mock_status = FEE_STATUS_BUSY; + (void)memcpy(dst, &g_fee_mock_flash[addr], len); + g_fee_mock_status = FEE_STATUS_IDLE; + g_fee_mock_job_result = FEE_JOB_OK; + + return FEE_E_OK; +} + +rt_weak fee_ret_t fee_flash_driver_write(uint32_t addr, const uint8_t *src, uint32_t len) +{ + fee_ret_t ret; + + if ((src == RT_NULL) || (len == 0U)) + { + return FEE_E_PARAM; + } + + ret = fee_port_check_range(addr, len); + if (ret != FEE_E_OK) + { + return ret; + } + + ret = fee_port_check_alignment(addr, len, g_fee_mock_caps.program_unit); + if (ret != FEE_E_OK) + { + return ret; + } + + g_fee_mock_debug_active = 1U; + g_fee_mock_stats.write_calls++; + g_fee_mock_stats.write_bytes += len; + g_fee_mock_status = FEE_STATUS_BUSY; + { + uint32_t idx; + + for (idx = 0U; idx < len; ++idx) + { + g_fee_mock_flash[addr + idx] &= src[idx]; + } + } + g_fee_mock_status = FEE_STATUS_IDLE; + g_fee_mock_job_result = FEE_JOB_OK; + + return FEE_E_OK; +} + +rt_weak fee_ret_t fee_flash_driver_erase(uint32_t addr, uint32_t len) +{ + fee_ret_t ret; + + if (len == 0U) + { + return FEE_E_PARAM; + } + + ret = fee_port_check_range(addr, len); + if (ret != FEE_E_OK) + { + return ret; + } + + ret = fee_port_check_alignment(addr, len, g_fee_mock_caps.erase_unit); + if (ret != FEE_E_OK) + { + return ret; + } + + g_fee_mock_debug_active = 1U; + g_fee_mock_stats.erase_calls++; + g_fee_mock_stats.erase_bytes += len; + g_fee_mock_status = FEE_STATUS_BUSY; + (void)memset(&g_fee_mock_flash[addr], 0xFF, len); + g_fee_mock_status = FEE_STATUS_IDLE; + g_fee_mock_job_result = FEE_JOB_OK; + + 
return FEE_E_OK; +} + +rt_weak void fee_flash_driver_mainfunction(void) +{ + g_fee_mock_stats.poll_calls++; +} + +rt_weak fee_status_t fee_flash_driver_get_status(void) +{ + return g_fee_mock_status; +} + +rt_weak fee_job_result_t fee_flash_driver_get_job_result(void) +{ + return g_fee_mock_job_result; +} + +rt_weak fee_ret_t fee_flash_driver_debug_reset_stats(void) +{ + if (g_fee_mock_debug_active == 0U) + { + (void)memset(&g_fee_mock_stats, 0, sizeof(g_fee_mock_stats)); + return FEE_E_NOT_OK; + } + + (void)memset(&g_fee_mock_stats, 0, sizeof(g_fee_mock_stats)); + return FEE_E_OK; +} + +rt_weak fee_ret_t fee_flash_driver_debug_get_stats(fee_port_debug_stats_t *stats) +{ + if (stats == RT_NULL) + { + return FEE_E_PARAM; + } + + if (g_fee_mock_debug_active == 0U) + { + (void)memset(stats, 0, sizeof(*stats)); + return FEE_E_NOT_OK; + } + + *stats = g_fee_mock_stats; + return FEE_E_OK; +} + +rt_weak fee_ret_t fee_flash_driver_debug_get_storage(const uint8_t **storage, uint32_t *size) +{ + if ((storage == RT_NULL) || (size == RT_NULL)) + { + return FEE_E_PARAM; + } + + if (g_fee_mock_debug_active == 0U) + { + *storage = RT_NULL; + *size = 0U; + return FEE_E_NOT_OK; + } + + *storage = &g_fee_mock_flash[0]; + *size = (uint32_t)sizeof(g_fee_mock_flash); + return FEE_E_OK; +} diff --git a/components/custom_fee/fee_port.h b/components/custom_fee/fee_port.h new file mode 100644 index 00000000000..eeef1346d44 --- /dev/null +++ b/components/custom_fee/fee_port.h @@ -0,0 +1,42 @@ +#ifndef CUSTOM_FEE_PORT_H +#define CUSTOM_FEE_PORT_H + +#include +#include "fee_api.h" + +typedef struct +{ + uint32_t total_size; + uint16_t read_unit; + uint16_t program_unit; + uint32_t erase_unit; + uint16_t preferred_chunk; + uint8_t supports_suspend; + uint8_t supports_compare; +} fee_flash_caps_t; + +typedef struct +{ + uint32_t init_calls; + uint32_t poll_calls; + uint32_t read_calls; + uint32_t read_bytes; + uint32_t write_calls; + uint32_t write_bytes; + uint32_t erase_calls; + uint32_t 
erase_bytes; +} fee_port_debug_stats_t; + +fee_ret_t fee_port_init(void); +fee_ret_t fee_port_get_caps(fee_flash_caps_t *caps); +fee_ret_t fee_port_read(uint32_t addr, uint8_t *dst, uint32_t len); +fee_ret_t fee_port_write(uint32_t addr, const uint8_t *src, uint32_t len); +fee_ret_t fee_port_erase(uint32_t addr, uint32_t len); +void fee_port_mainfunction(void); +fee_status_t fee_port_get_status(void); +fee_job_result_t fee_port_get_job_result(void); +fee_ret_t fee_port_debug_reset_stats(void); +fee_ret_t fee_port_debug_get_stats(fee_port_debug_stats_t *stats); +fee_ret_t fee_port_debug_get_storage(const uint8_t **storage, uint32_t *size); + +#endif diff --git a/components/custom_fee/fee_recovery.c b/components/custom_fee/fee_recovery.c new file mode 100644 index 00000000000..d70ae3fef8b --- /dev/null +++ b/components/custom_fee/fee_recovery.c @@ -0,0 +1,671 @@ +#include "fee_internal.h" + +static fee_flash_caps_t g_fee_caps; +static uint8_t g_fee_tail_scan_lane = (uint8_t)FEE_LANE_FAST; + +static rt_bool_t fee_recovery_is_business_lane(uint8_t lane) +{ + if ((lane == (uint8_t)FEE_LANE_FAST) || + (lane == (uint8_t)FEE_LANE_NORMAL) || + (lane == (uint8_t)FEE_LANE_BULK)) + { + return RT_TRUE; + } + + return RT_FALSE; +} + +static void fee_recovery_mark_checkpoint_stale(void) +{ + g_fee_ctx.checkpoint_dirty = 1U; + g_fee_ctx.checkpoint_requested = 1U; +} + +static uint32_t fee_recovery_sector_data_start(uint32_t sector_base) +{ + return sector_base + fee_onflash_align_up((uint32_t)sizeof(fee_sector_header_t), g_fee_caps.program_unit); +} + +static uint32_t fee_recovery_lane_sector_base(uint8_t lane, uint8_t sector_idx) +{ + return g_fee_ctx.lane[lane].range_base + ((uint32_t)sector_idx * g_fee_caps.erase_unit); +} + +static void fee_recovery_set_lane_range(uint8_t lane, uint32_t range_base, uint8_t sector_count) +{ + fee_lane_ctx_t *lane_ctx = &g_fee_ctx.lane[lane]; + uint32_t max_span; + + lane_ctx->range_base = range_base; + lane_ctx->range_limit = range_base + 
((uint32_t)sector_count * g_fee_caps.erase_unit); + lane_ctx->sector_count = sector_count; + lane_ctx->active_sector = 0U; + lane_ctx->dst_sector = 0U; + lane_ctx->spare_sector = (sector_count > 1U) ? 1U : 0U; + lane_ctx->gc_old_sector = 0U; + lane_ctx->base_addr = range_base; + lane_ctx->data_start = fee_recovery_sector_data_start(range_base); + lane_ctx->limit_addr = range_base + g_fee_caps.erase_unit; + lane_ctx->scan_start = FEE_INVALID_ADDR; + lane_ctx->gc_write_offset = FEE_INVALID_ADDR; + lane_ctx->active_generation = 0U; + lane_ctx->gc_cursor = 0U; + lane_ctx->gc_state = (uint8_t)FEE_GC_IDLE; + lane_ctx->gc_requested = 0U; + lane_ctx->gc_force = 0U; + + max_span = fee_cfg_get_lane_max_span(lane); + if (max_span == 0U) + { + max_span = FEE_CFG_ALIGN_UNIT; + } + + if ((lane != (uint8_t)FEE_LANE_META) && (sector_count > 1U)) + { + lane_ctx->gc_start_threshold = max_span * 2U; + lane_ctx->gc_force_threshold = max_span; + } + else + { + lane_ctx->gc_start_threshold = 0U; + lane_ctx->gc_force_threshold = 0U; + } +} + +static fee_ret_t fee_recovery_assign_layout(void) +{ + uint32_t range_base = 0U; + uint8_t lane; + + if (g_fee_caps.total_size < (g_fee_caps.erase_unit * fee_cfg_get_total_sector_count())) + { + return FEE_E_NOT_OK; + } + + for (lane = (uint8_t)FEE_LANE_META; lane < (uint8_t)FEE_LANE_COUNT; ++lane) + { + uint8_t sector_count = fee_cfg_get_lane_sector_count(lane); + + if ((sector_count == 0U) || (sector_count > FEE_CFG_MAX_LANE_SECTOR_COUNT)) + { + return FEE_E_NOT_OK; + } + + fee_recovery_set_lane_range(lane, range_base, sector_count); + range_base += (uint32_t)sector_count * g_fee_caps.erase_unit; + } + + return FEE_E_OK; +} + +static fee_ret_t fee_recovery_format_lane_sector(uint8_t lane, uint8_t sector_idx, + uint8_t state, uint32_t generation, rt_bool_t erase_first) +{ + fee_sector_header_t header; + uint32_t sector_base = fee_recovery_lane_sector_base(lane, sector_idx); + uint32_t data_start = fee_recovery_sector_data_start(sector_base); + 
uint32_t data_end = sector_base + g_fee_caps.erase_unit; + fee_ret_t ret; + + ret = fee_onflash_encode_sector_header(&header, lane, state, generation, data_start, data_end); + if (ret != FEE_E_OK) + { + return ret; + } + + if ((erase_first != RT_FALSE) && (fee_port_erase(sector_base, g_fee_caps.erase_unit) != FEE_E_OK)) + { + return FEE_E_NOT_OK; + } + + if (fee_port_write(sector_base, (const uint8_t *)&header, (uint32_t)sizeof(header)) != FEE_E_OK) + { + return FEE_E_NOT_OK; + } + + return FEE_E_OK; +} + +static rt_bool_t fee_recovery_is_valid_lane_header(uint8_t lane, const fee_sector_header_t *header, + uint32_t sector_base) +{ + if (!fee_onflash_validate_sector_header(header)) + { + return RT_FALSE; + } + + if ((header->lane_id != lane) || + (header->data_start != fee_recovery_sector_data_start(sector_base)) || + (header->data_end != (sector_base + g_fee_caps.erase_unit))) + { + return RT_FALSE; + } + + if ((header->state != (uint8_t)FEE_SECTOR_ACTIVE) && + (header->state != (uint8_t)FEE_SECTOR_GC_DST) && + (header->state != (uint8_t)FEE_SECTOR_OLD_PENDING_ERASE)) + { + return RT_FALSE; + } + + return RT_TRUE; +} + +static uint8_t fee_recovery_pick_lane_sector(uint8_t lane, uint8_t exclude0, uint8_t exclude1, + const rt_bool_t *valid) +{ + fee_lane_ctx_t *lane_ctx = &g_fee_ctx.lane[lane]; + uint8_t idx; + + for (idx = 0U; idx < lane_ctx->sector_count; ++idx) + { + if ((idx == exclude0) || (idx == exclude1)) + { + continue; + } + + if ((valid != RT_NULL) && (valid[idx] == RT_FALSE)) + { + return idx; + } + } + + for (idx = 0U; idx < lane_ctx->sector_count; ++idx) + { + if ((idx != exclude0) && (idx != exclude1)) + { + return idx; + } + } + + return exclude0; +} + +static void fee_recovery_select_active_sector(uint8_t lane, uint8_t active_sector, uint8_t dst_sector, + uint8_t spare_sector, uint32_t generation) +{ + fee_lane_ctx_t *lane_ctx = &g_fee_ctx.lane[lane]; + uint32_t sector_base = fee_recovery_lane_sector_base(lane, active_sector); + uint32_t data_start = 
fee_recovery_sector_data_start(sector_base); + uint32_t data_end = sector_base + g_fee_caps.erase_unit; + + lane_ctx->active_sector = active_sector; + lane_ctx->dst_sector = dst_sector; + lane_ctx->spare_sector = spare_sector; + lane_ctx->gc_old_sector = active_sector; + lane_ctx->base_addr = sector_base; + lane_ctx->data_start = data_start; + lane_ctx->limit_addr = data_end; + lane_ctx->active_generation = generation; + lane_ctx->gc_cursor = 0U; + lane_ctx->gc_state = (uint8_t)FEE_GC_IDLE; + lane_ctx->gc_write_offset = FEE_INVALID_ADDR; + + if ((lane_ctx->free_offset < data_start) || (lane_ctx->free_offset > data_end)) + { + lane_ctx->free_offset = data_start; + } + + if ((lane_ctx->scan_start < data_start) || (lane_ctx->scan_start > data_end)) + { + lane_ctx->scan_start = lane_ctx->free_offset; + } +} + +static fee_ret_t fee_recovery_open_single_sector_lane(uint8_t lane) +{ + fee_sector_header_t header; + fee_lane_ctx_t *lane_ctx = &g_fee_ctx.lane[lane]; + uint32_t sector_base = fee_recovery_lane_sector_base(lane, 0U); + fee_ret_t ret; + + ret = fee_port_read(sector_base, (uint8_t *)&header, (uint32_t)sizeof(header)); + if (ret != FEE_E_OK) + { + return ret; + } + + if (fee_recovery_is_valid_lane_header(lane, &header, sector_base) == RT_FALSE) + { + ret = fee_recovery_format_lane_sector(lane, 0U, (uint8_t)FEE_SECTOR_ACTIVE, 1U, RT_TRUE); + if (ret != FEE_E_OK) + { + return ret; + } + + lane_ctx->free_offset = fee_recovery_sector_data_start(sector_base); + lane_ctx->scan_start = lane_ctx->free_offset; + fee_recovery_select_active_sector(lane, 0U, 0U, 0U, 1U); + fee_recovery_mark_checkpoint_stale(); + return FEE_E_OK; + } + + if ((fee_ckpt_has_restored_image() != RT_FALSE) && + ((lane_ctx->active_sector != 0U) || (lane_ctx->dst_sector != 0U) || (lane_ctx->spare_sector != 0U) || + (lane_ctx->active_generation != header.generation))) + { + fee_recovery_mark_checkpoint_stale(); + } + + fee_recovery_select_active_sector(lane, 0U, 0U, 0U, header.generation); + return 
FEE_E_OK; +} + +static fee_ret_t fee_recovery_open_multi_sector_lane(uint8_t lane) +{ + fee_lane_ctx_t *lane_ctx = &g_fee_ctx.lane[lane]; + fee_sector_header_t headers[FEE_CFG_MAX_LANE_SECTOR_COUNT]; + rt_bool_t valid[FEE_CFG_MAX_LANE_SECTOR_COUNT]; + uint8_t idx; + uint8_t selected_active = 0xFFU; + uint8_t selected_gc_dst = 0xFFU; + uint8_t ckpt_active = lane_ctx->active_sector; + uint8_t ckpt_dst = lane_ctx->dst_sector; + uint8_t ckpt_spare = lane_ctx->spare_sector; + uint32_t ckpt_generation = lane_ctx->active_generation; + uint8_t active_sector; + uint8_t dst_sector; + uint8_t spare_sector; + fee_ret_t ret; + + if (lane_ctx->sector_count > FEE_CFG_MAX_LANE_SECTOR_COUNT) + { + return FEE_E_NOT_OK; + } + + for (idx = 0U; idx < lane_ctx->sector_count; ++idx) + { + uint32_t sector_base = fee_recovery_lane_sector_base(lane, idx); + + ret = fee_port_read(sector_base, (uint8_t *)&headers[idx], (uint32_t)sizeof(headers[idx])); + if (ret != FEE_E_OK) + { + return ret; + } + + valid[idx] = fee_recovery_is_valid_lane_header(lane, &headers[idx], sector_base); + if (valid[idx] == RT_FALSE) + { + continue; + } + + if (headers[idx].state == (uint8_t)FEE_SECTOR_ACTIVE) + { + if ((selected_active == 0xFFU) || (headers[idx].generation > headers[selected_active].generation)) + { + selected_active = idx; + } + } + else if ((selected_gc_dst == 0xFFU) || (headers[idx].generation > headers[selected_gc_dst].generation)) + { + selected_gc_dst = idx; + } + } + + if ((selected_active == 0xFFU) && (selected_gc_dst == 0xFFU)) + { + ret = fee_recovery_format_lane_sector(lane, 0U, + (uint8_t)FEE_SECTOR_ACTIVE, 1U, RT_TRUE); + if (ret != FEE_E_OK) + { + return ret; + } + + for (idx = 1U; idx < lane_ctx->sector_count; ++idx) + { + ret = fee_port_erase(fee_recovery_lane_sector_base(lane, idx), g_fee_caps.erase_unit); + if (ret != FEE_E_OK) + { + return ret; + } + } + + lane_ctx->free_offset = fee_recovery_sector_data_start(fee_recovery_lane_sector_base(lane, 0U)); + lane_ctx->scan_start = 
lane_ctx->free_offset; + dst_sector = (lane_ctx->sector_count > 1U) ? 1U : 0U; + spare_sector = (lane_ctx->sector_count > 2U) ? 2U : dst_sector; + fee_recovery_select_active_sector(lane, 0U, dst_sector, spare_sector, 1U); + fee_recovery_mark_checkpoint_stale(); + return FEE_E_OK; + } + + active_sector = (selected_active != 0xFFU) ? selected_active : selected_gc_dst; + dst_sector = (selected_gc_dst != 0xFFU) ? selected_gc_dst : + fee_recovery_pick_lane_sector(lane, active_sector, 0xFFU, &valid[0]); + if (dst_sector == active_sector) + { + dst_sector = fee_recovery_pick_lane_sector(lane, active_sector, 0xFFU, RT_NULL); + } + + spare_sector = fee_recovery_pick_lane_sector(lane, active_sector, dst_sector, &valid[0]); + if (spare_sector == active_sector) + { + spare_sector = dst_sector; + } + + if ((fee_ckpt_has_restored_image() != RT_FALSE) && + ((ckpt_active != active_sector) || (ckpt_dst != dst_sector) || + (ckpt_spare != spare_sector) || (ckpt_generation != headers[active_sector].generation))) + { + fee_recovery_mark_checkpoint_stale(); + } + + fee_recovery_select_active_sector(lane, active_sector, dst_sector, spare_sector, + headers[active_sector].generation); + + if ((selected_active != 0xFFU) && (selected_gc_dst != 0xFFU) && (selected_gc_dst != selected_active)) + { + lane_ctx->gc_requested = 1U; + } + + return FEE_E_OK; +} + +static fee_ret_t fee_recovery_open_lane(uint8_t lane) +{ + fee_lane_ctx_t *lane_ctx = &g_fee_ctx.lane[lane]; + + if (lane_ctx->sector_count > 1U) + { + return fee_recovery_open_multi_sector_lane(lane); + } + + return fee_recovery_open_single_sector_lane(lane); +} + +static fee_ret_t fee_recovery_open_all_lanes(void) +{ + uint8_t lane; + + for (lane = (uint8_t)FEE_LANE_FAST; lane <= (uint8_t)FEE_LANE_BULK; ++lane) + { + fee_ret_t ret = fee_recovery_open_lane(lane); + + if (ret != FEE_E_OK) + { + return ret; + } + } + + return FEE_E_OK; +} + +static rt_bool_t fee_recovery_has_valid_current_copy(uint16_t block_id) +{ + fee_cache_entry_t *entry 
= fee_cache_lookup(block_id); + + if (entry == RT_NULL) + { + return RT_FALSE; + } + + if ((entry->cur_valid == 0U) || (entry->cur_addr == FEE_INVALID_ADDR)) + { + return RT_FALSE; + } + + return RT_TRUE; +} + +static rt_bool_t fee_recovery_boot_critical_ready(void) +{ + const fee_block_cfg_t *table = fee_cfg_get_block_table(); + uint16_t count = fee_cfg_get_block_count(); + uint16_t idx; + + for (idx = 0U; idx < count; ++idx) + { + if (table[idx].boot_critical == 0U) + { + continue; + } + + if (fee_recovery_has_valid_current_copy(table[idx].block_id) == RT_FALSE) + { + return RT_FALSE; + } + } + + return RT_TRUE; +} + +static fee_ret_t fee_recovery_scan_lane_records(uint8_t lane, rt_bool_t *lane_done) +{ + fee_lane_ctx_t *lane_ctx = &g_fee_ctx.lane[lane]; + fee_record_header_t header; + fee_commit_tail_t tail; + const fee_block_cfg_t *cfg; + uint32_t addr; + uint32_t start_addr; + uint32_t next_addr; + uint32_t step_records; + uint32_t scanned_records; + uint32_t tail_addr; + fee_ret_t ret; + + if (lane_done == RT_NULL) + { + return FEE_E_PARAM; + } + + addr = lane_ctx->scan_start; + start_addr = addr; + step_records = 0U; + scanned_records = 0U; + *lane_done = RT_TRUE; + + while ((addr + (uint32_t)sizeof(header) + (uint32_t)sizeof(tail)) <= lane_ctx->limit_addr) + { + if (step_records >= FEE_CFG_RECOVERY_TAIL_RECORDS_PER_STEP) + { + *lane_done = RT_FALSE; + break; + } + + ret = fee_port_read(addr, (uint8_t *)&header, (uint32_t)sizeof(header)); + if (ret != FEE_E_OK) + { + return ret; + } + + if (header.magic != FEE_RECORD_MAGIC) + { + break; + } + + cfg = fee_cfg_find_block(header.block_id); + if ((cfg == RT_NULL) || (cfg->lane_type != lane) || + (fee_onflash_validate_record_header(&header, cfg) == RT_FALSE)) + { + break; + } + + next_addr = addr + fee_onflash_calc_record_span(cfg, header.data_len); + if ((next_addr <= addr) || (next_addr > lane_ctx->limit_addr)) + { + break; + } + + tail_addr = addr + (uint32_t)sizeof(header) + + 
fee_onflash_align_up((uint32_t)header.data_len, g_fee_caps.program_unit); + ret = fee_port_read(tail_addr, (uint8_t *)&tail, (uint32_t)sizeof(tail)); + if (ret != FEE_E_OK) + { + return ret; + } + + if (fee_onflash_validate_commit_tail(&tail) == RT_FALSE) + { + break; + } + + if (header.record_type == FEE_RECORD_DATA) + { + fee_cache_update_data(header.block_id, cfg->lane_type, addr, header.data_len, header.seq); + } + else if (header.record_type == FEE_RECORD_TOMBSTONE) + { + fee_cache_update_tombstone(header.block_id, cfg->lane_type, addr, header.seq); + } + + addr = next_addr; + ++step_records; + ++scanned_records; + } + + lane_ctx->free_offset = addr; + lane_ctx->scan_start = addr; + + if (addr > start_addr) + { + /* Rebuild post-checkpoint tail accounting so the next boot remains bounded. */ + lane_ctx->dirty_record_count += scanned_records; + lane_ctx->dirty_bytes += (addr - start_addr); + g_fee_ctx.checkpoint_dirty = 1U; + g_fee_ctx.checkpoint_requested = 1U; + } + + return FEE_E_OK; +} + +fee_ret_t fee_recovery_start(void) +{ + fee_ret_t ret; + + g_fee_ctx.init_state = FEE_INIT_META_SCAN; + g_fee_ctx.status = FEE_STATUS_BUSY_INTERNAL; + + ret = fee_port_get_caps(&g_fee_caps); + if (ret != FEE_E_OK) + { + g_fee_ctx.init_state = FEE_INIT_FAILED; + return ret; + } + + ret = fee_recovery_assign_layout(); + if (ret != FEE_E_OK) + { + g_fee_ctx.init_state = FEE_INIT_FAILED; + return ret; + } + + g_fee_tail_scan_lane = (uint8_t)FEE_LANE_FAST; + + return FEE_E_OK; +} + +fee_ret_t fee_recovery_step(void) +{ + fee_ret_t ret; + rt_bool_t ckpt_restored = RT_FALSE; + + if (g_fee_ctx.init_state == FEE_INIT_META_SCAN) + { + ret = fee_ckpt_restore(); + if (ret == FEE_E_OK) + { + ckpt_restored = RT_TRUE; + } + + ret = fee_recovery_open_all_lanes(); + if (ret != FEE_E_OK) + { + g_fee_ctx.init_state = FEE_INIT_FAILED; + return ret; + } + + if ((ckpt_restored != RT_FALSE) && (fee_recovery_boot_critical_ready() != RT_FALSE)) + { + g_fee_ctx.init_state = FEE_INIT_CKPT_READY; + } 
+ else + { + g_fee_ctx.init_state = FEE_INIT_TAIL_SCAN; + } + + return FEE_E_OK; + } + + if (g_fee_ctx.init_state == FEE_INIT_CKPT_READY) + { + g_fee_ctx.init_state = FEE_INIT_TAIL_SCAN; + return FEE_E_OK; + } + + if (g_fee_ctx.init_state == FEE_INIT_TAIL_SCAN) + { + rt_bool_t lane_done = RT_FALSE; + + while ((g_fee_tail_scan_lane < (uint8_t)FEE_LANE_COUNT) && + (fee_recovery_is_business_lane(g_fee_tail_scan_lane) == RT_FALSE)) + { + ++g_fee_tail_scan_lane; + } + + if (g_fee_tail_scan_lane >= (uint8_t)FEE_LANE_COUNT) + { + g_fee_ctx.init_state = FEE_INIT_FULL_READY; + g_fee_ctx.status = FEE_STATUS_IDLE; + g_fee_ctx.job_result = FEE_JOB_OK; + return FEE_E_OK; + } + + ret = fee_recovery_scan_lane_records(g_fee_tail_scan_lane, &lane_done); + if (ret != FEE_E_OK) + { + g_fee_ctx.init_state = FEE_INIT_FAILED; + return ret; + } + + if (lane_done != RT_FALSE) + { + ++g_fee_tail_scan_lane; + } + + if (g_fee_tail_scan_lane >= (uint8_t)FEE_LANE_COUNT) + { + g_fee_ctx.init_state = FEE_INIT_FULL_READY; + g_fee_ctx.status = FEE_STATUS_IDLE; + g_fee_ctx.job_result = FEE_JOB_OK; + } + + return FEE_E_OK; + } + + return FEE_E_OK; +} + +rt_bool_t fee_recovery_can_read_block(uint16_t block_id) +{ + if (fee_cfg_find_block(block_id) == RT_NULL) + { + return RT_FALSE; + } + + if (g_fee_ctx.init_state == FEE_INIT_FULL_READY) + { + return RT_TRUE; + } + + if (g_fee_ctx.init_state == FEE_INIT_CKPT_READY) + { + if (fee_cfg_is_boot_critical(block_id) == RT_FALSE) + { + return RT_FALSE; + } + + return fee_recovery_has_valid_current_copy(block_id); + } + + if (g_fee_ctx.init_state == FEE_INIT_TAIL_SCAN) + { + return fee_recovery_has_valid_current_copy(block_id); + } + + return RT_FALSE; +} + +rt_bool_t fee_recovery_is_full_ready(void) +{ + return (g_fee_ctx.init_state == FEE_INIT_FULL_READY) ? 
RT_TRUE : RT_FALSE; +} diff --git a/components/custom_fee/fee_sched.c b/components/custom_fee/fee_sched.c new file mode 100644 index 00000000000..e4d0a855535 --- /dev/null +++ b/components/custom_fee/fee_sched.c @@ -0,0 +1,332 @@ +#include "fee_internal.h" + +typedef struct +{ + uint8_t used; + uint8_t type; + fee_request_t request; +} fee_sched_slot_t; + +static fee_sched_slot_t g_fee_sched_urgent[FEE_CFG_MAX_PENDING_REQUESTS]; +static fee_sched_slot_t g_fee_sched_normal[FEE_CFG_MAX_PENDING_REQUESTS]; + +static void fee_sched_clear_queue(fee_sched_slot_t *queue, uint16_t queue_len) +{ + if ((queue == RT_NULL) || (queue_len == 0U)) + { + return; + } + + (void)memset(queue, 0, (rt_size_t)queue_len * sizeof(queue[0])); +} + +static rt_bool_t fee_sched_queue_has_work(const fee_sched_slot_t *queue, uint16_t queue_len) +{ + uint16_t idx; + + if ((queue == RT_NULL) || (queue_len == 0U)) + { + return RT_FALSE; + } + + for (idx = 0U; idx < queue_len; ++idx) + { + if (queue[idx].used != 0U) + { + return RT_TRUE; + } + } + + return RT_FALSE; +} + +static fee_ret_t fee_sched_queue_push(fee_sched_slot_t *queue, uint16_t queue_len, + fee_request_type_t type, const fee_request_t *request) +{ + uint16_t idx; + + if ((queue == RT_NULL) || (request == RT_NULL) || (type == FEE_REQ_NONE)) + { + return FEE_E_PARAM; + } + + for (idx = 0U; idx < queue_len; ++idx) + { + if (queue[idx].used == 0U) + { + queue[idx].used = 1U; + queue[idx].type = (uint8_t)type; + queue[idx].request = *request; + return FEE_E_OK; + } + } + + return FEE_E_BUSY; +} + +static rt_bool_t fee_sched_queue_pop(fee_sched_slot_t *queue, uint16_t queue_len, fee_sched_slot_t *slot) +{ + uint16_t idx; + + if ((queue == RT_NULL) || (slot == RT_NULL) || (queue_len == 0U)) + { + return RT_FALSE; + } + + if (queue[0].used == 0U) + { + return RT_FALSE; + } + + *slot = queue[0]; + + for (idx = 1U; idx < queue_len; ++idx) + { + queue[idx - 1U] = queue[idx]; + } + + (void)memset(&queue[queue_len - 1U], 0, sizeof(queue[0])); + 
return RT_TRUE; +} + +static fee_ret_t fee_sched_queue_requeue(fee_sched_slot_t *queue, uint16_t queue_len, + const fee_sched_slot_t *slot) +{ + if ((slot == RT_NULL) || (slot->used == 0U)) + { + return FEE_E_PARAM; + } + + return fee_sched_queue_push(queue, queue_len, (fee_request_type_t)slot->type, &slot->request); +} + +static fee_ret_t fee_sched_enqueue_request(fee_request_type_t type, const fee_request_t *request) +{ + const fee_block_cfg_t *cfg; + fee_sched_slot_t *queue; + + if (request == RT_NULL) + { + return FEE_E_PARAM; + } + + cfg = fee_cfg_find_block(request->block_id); + if (cfg == RT_NULL) + { + return FEE_E_PARAM; + } + + queue = (cfg->lane_type == (uint8_t)FEE_LANE_FAST) ? &g_fee_sched_urgent[0] : &g_fee_sched_normal[0]; + return fee_sched_queue_push(queue, FEE_CFG_MAX_PENDING_REQUESTS, type, request); +} + +static fee_ret_t fee_sched_dispatch_one(const fee_sched_slot_t *slot) +{ + if (slot == RT_NULL) + { + return FEE_E_PARAM; + } + + if (slot->type == (uint8_t)FEE_REQ_WRITE) + { + return fee_core_write(slot->request.block_id, slot->request.src, slot->request.len); + } + + if (slot->type == (uint8_t)FEE_REQ_INVALIDATE) + { + return fee_core_invalidate(slot->request.block_id); + } + + if (slot->type == (uint8_t)FEE_REQ_ROLLBACK) + { + return fee_core_rollback(slot->request.block_id); + } + + return FEE_E_PARAM; +} + +static fee_ret_t fee_sched_try_dispatch_queue(fee_sched_slot_t *queue, uint16_t queue_len, + rt_bool_t *made_progress) +{ + fee_sched_slot_t slot; + uint16_t attempts = 0U; + fee_ret_t ret; + + if (made_progress != RT_NULL) + { + *made_progress = RT_FALSE; + } + + while (attempts < queue_len) + { + if (fee_sched_queue_pop(queue, queue_len, &slot) == RT_FALSE) + { + return FEE_E_OK; + } + + ret = fee_sched_dispatch_one(&slot); + if (ret == FEE_E_BUSY) + { + if (fee_sched_queue_requeue(queue, queue_len, &slot) != FEE_E_OK) + { + return FEE_E_NOT_OK; + } + + ++attempts; + continue; + } + + if (ret != FEE_E_OK) + { + return ret; + } + + if 
(made_progress != RT_NULL) + { + *made_progress = RT_TRUE; + } + + return FEE_E_OK; + } + + return FEE_E_OK; +} + +fee_ret_t fee_sched_submit_read(uint16_t block_id, uint16_t offset, uint8_t *dst, uint16_t len) +{ + return fee_core_read(block_id, offset, dst, len); +} + +fee_ret_t fee_sched_submit_write(uint16_t block_id, const uint8_t *src, uint16_t len) +{ + fee_request_t request; + fee_ret_t ret; + + request.block_id = block_id; + request.offset = 0U; + request.len = len; + request.src = src; + request.dst = RT_NULL; + + ret = fee_sched_enqueue_request(FEE_REQ_WRITE, &request); + if (ret != FEE_E_OK) + { + return ret; + } + + g_fee_ctx.status = FEE_STATUS_BUSY_INTERNAL; + g_fee_ctx.job_result = FEE_JOB_PENDING; + return FEE_E_OK; +} + +fee_ret_t fee_sched_submit_invalidate(uint16_t block_id) +{ + fee_request_t request; + fee_ret_t ret; + + request.block_id = block_id; + request.offset = 0U; + request.len = 0U; + request.src = RT_NULL; + request.dst = RT_NULL; + + ret = fee_sched_enqueue_request(FEE_REQ_INVALIDATE, &request); + if (ret != FEE_E_OK) + { + return ret; + } + + g_fee_ctx.status = FEE_STATUS_BUSY_INTERNAL; + g_fee_ctx.job_result = FEE_JOB_PENDING; + return FEE_E_OK; +} + +fee_ret_t fee_sched_submit_rollback(uint16_t block_id) +{ + fee_request_t request; + fee_ret_t ret; + + request.block_id = block_id; + request.offset = 0U; + request.len = 0U; + request.src = RT_NULL; + request.dst = RT_NULL; + + ret = fee_sched_enqueue_request(FEE_REQ_ROLLBACK, &request); + if (ret != FEE_E_OK) + { + return ret; + } + + g_fee_ctx.status = FEE_STATUS_BUSY_INTERNAL; + g_fee_ctx.job_result = FEE_JOB_PENDING; + return FEE_E_OK; +} + +void fee_sched_mainfunction(void) +{ + fee_ret_t ret; + rt_bool_t dispatched = RT_FALSE; + rt_bool_t has_work = RT_FALSE; + + if (g_fee_ctx.init_state == FEE_INIT_FAILED) + { + fee_sched_clear_queue(&g_fee_sched_urgent[0], FEE_CFG_MAX_PENDING_REQUESTS); + fee_sched_clear_queue(&g_fee_sched_normal[0], FEE_CFG_MAX_PENDING_REQUESTS); + return; 
+ } + + if (g_fee_ctx.init_state != FEE_INIT_FULL_READY) + { + has_work = fee_sched_has_pending_work(); + if (has_work != RT_FALSE) + { + g_fee_ctx.status = FEE_STATUS_BUSY_INTERNAL; + g_fee_ctx.job_result = FEE_JOB_PENDING; + } + return; + } + + g_fee_ctx.status = FEE_STATUS_BUSY_INTERNAL; + g_fee_ctx.job_result = FEE_JOB_PENDING; + + ret = fee_sched_try_dispatch_queue(&g_fee_sched_urgent[0], FEE_CFG_MAX_PENDING_REQUESTS, &dispatched); + if (ret != FEE_E_OK) + { + g_fee_ctx.job_result = FEE_JOB_FAILED; + g_fee_ctx.status = FEE_STATUS_BUSY_INTERNAL; + return; + } + + if (dispatched == RT_FALSE) + { + ret = fee_sched_try_dispatch_queue(&g_fee_sched_normal[0], FEE_CFG_MAX_PENDING_REQUESTS, &dispatched); + if (ret != FEE_E_OK) + { + g_fee_ctx.job_result = FEE_JOB_FAILED; + g_fee_ctx.status = FEE_STATUS_BUSY_INTERNAL; + return; + } + } + + has_work = fee_sched_has_pending_work(); + if (dispatched == RT_FALSE) + { + g_fee_ctx.job_result = (has_work != RT_FALSE) ? FEE_JOB_PENDING : FEE_JOB_OK; + g_fee_ctx.status = (has_work != RT_FALSE) ? FEE_STATUS_BUSY_INTERNAL : FEE_STATUS_IDLE; + return; + } + + g_fee_ctx.job_result = (has_work != RT_FALSE) ? FEE_JOB_PENDING : FEE_JOB_OK; + g_fee_ctx.status = (has_work != RT_FALSE) ? 
FEE_STATUS_BUSY_INTERNAL : FEE_STATUS_IDLE; +} + +rt_bool_t fee_sched_has_pending_work(void) +{ + if (fee_sched_queue_has_work(&g_fee_sched_urgent[0], FEE_CFG_MAX_PENDING_REQUESTS) != RT_FALSE) + { + return RT_TRUE; + } + + return fee_sched_queue_has_work(&g_fee_sched_normal[0], FEE_CFG_MAX_PENDING_REQUESTS); +} diff --git a/components/custom_fee/fee_test.c b/components/custom_fee/fee_test.c new file mode 100644 index 00000000000..18344e82b47 --- /dev/null +++ b/components/custom_fee/fee_test.c @@ -0,0 +1,1309 @@ +#include +#include "rtthread.h" +#include "finsh.h" +#include "fee_api.h" +#include "fee_cfg.h" +#include "fee_port.h" +#include "fee_internal.h" +#include "fee_onflash.h" + +#define FEE_TEST_POLL_BUDGET (20000U) +#define FEE_TEST_GC_WRITE_COUNT (160U) +#define FEE_TEST_LAYOUT_MAX_RECORDS (12U) +#define FEE_TEST_FLASH_WINDOW_BYTES (96U) +#define FEE_TEST_CKPT_MAGIC (0x4645434BUL) + +typedef struct +{ + rt_tick_t tick; + uint32_t ms; +} fee_test_stamp_t; + +typedef struct +{ + rt_tick_t ticks; + uint32_t ms; +} fee_test_duration_t; + +typedef struct +{ + uint32_t free_offset; + uint32_t active_generation; + uint8_t active_sector; + uint8_t dst_sector; + uint8_t spare_sector; + uint8_t sector_count; + uint32_t reserved; +} fee_test_ckpt_lane_state_t; + +typedef struct +{ + fee_test_ckpt_lane_state_t lane[FEE_LANE_COUNT]; + uint32_t entry_count; + fee_ckpt_cache_entry_t entries[FEE_CACHE_MAX_ENTRIES]; +} fee_test_ckpt_payload_t; + +typedef struct +{ + uint32_t magic; + uint32_t format_version; + uint32_t generation; + fee_test_ckpt_payload_t payload; + uint32_t payload_crc; + uint32_t commit_marker; + uint32_t reserved[4]; +} fee_test_ckpt_image_t; + +static uint32_t fee_test_crc32_accumulate(const uint8_t *data, uint32_t len) +{ + uint32_t crc = 0U; + uint32_t idx; + + if (data == RT_NULL) + { + return 0U; + } + + for (idx = 0U; idx < len; ++idx) + { + crc = (crc << 5) - crc + data[idx]; + } + + return crc; +} + +static uint32_t 
fee_test_ckpt_payload_crc(const fee_test_ckpt_image_t *image) +{ + return fee_test_crc32_accumulate((const uint8_t *)&image->payload, + (uint32_t)sizeof(image->payload)); +} + +static rt_bool_t fee_test_ckpt_is_valid(const fee_test_ckpt_image_t *image) +{ + if (image == RT_NULL) + { + return RT_FALSE; + } + + if ((image->magic != FEE_TEST_CKPT_MAGIC) || + (image->format_version != FEE_CFG_FORMAT_VERSION) || + (image->commit_marker != FEE_COMMIT_MARKER)) + { + return RT_FALSE; + } + + if (image->payload.entry_count > FEE_CACHE_MAX_ENTRIES) + { + return RT_FALSE; + } + + return (image->payload_crc == fee_test_ckpt_payload_crc(image)) ? RT_TRUE : RT_FALSE; +} + +static fee_test_stamp_t fee_test_stamp_now(void) +{ + fee_test_stamp_t stamp; + + stamp.tick = rt_tick_get(); + stamp.ms = rt_tick_get_millisecond(); + return stamp; +} + +static fee_test_duration_t fee_test_elapsed(fee_test_stamp_t start, fee_test_stamp_t end) +{ + fee_test_duration_t duration; + + duration.ticks = end.tick - start.tick; + duration.ms = end.ms - start.ms; + return duration; +} + +static int fee_test_wait_full_ready(uint32_t budget, uint32_t *loops) +{ + uint32_t used_loops = 0U; + + while (budget > 0U) + { + fee_init_state_t state = fee_get_init_state(); + + if (state == FEE_INIT_FULL_READY) + { + if (loops != RT_NULL) + { + *loops = used_loops; + } + return 0; + } + + if (state == FEE_INIT_FAILED) + { + break; + } + + fee_mainfunction(); + ++used_loops; + --budget; + } + + if (loops != RT_NULL) + { + *loops = used_loops; + } + + return -1; +} + +static int fee_test_wait_idle(uint32_t budget, uint32_t *loops) +{ + uint32_t used_loops = 0U; + + while (budget > 0U) + { + fee_job_result_t job_result; + + fee_mainfunction(); + ++used_loops; + job_result = fee_get_job_result(); + + if ((fee_get_init_state() == FEE_INIT_FULL_READY) && + (fee_get_memif_status() == FEE_STATUS_IDLE) && + (job_result != FEE_JOB_PENDING)) + { + if (loops != RT_NULL) + { + *loops = used_loops; + } + return (job_result == 
FEE_JOB_FAILED) ? -1 : 0; + } + + if ((fee_get_init_state() == FEE_INIT_FAILED) || (job_result == FEE_JOB_FAILED)) + { + break; + } + + --budget; + } + + if (loops != RT_NULL) + { + *loops = used_loops; + } + + return -1; +} + +static void fee_test_fill_pattern(uint8_t *buf, uint16_t len, uint8_t seed) +{ + uint16_t idx; + + for (idx = 0U; idx < len; ++idx) + { + buf[idx] = (uint8_t)(seed + (uint8_t)(idx * 3U)); + } +} + +static int fee_test_expect_ret(const char *step, fee_ret_t actual, fee_ret_t expected) +{ + if (actual != expected) + { + rt_kprintf("custom_fee_test: %s failed, ret=%u expected=%u\n", + step, (unsigned)actual, (unsigned)expected); + return -1; + } + + return 0; +} + +static const char *fee_test_lane_name(uint8_t lane) +{ + if (lane == (uint8_t)FEE_LANE_META) + { + return "meta"; + } + + if (lane == (uint8_t)FEE_LANE_FAST) + { + return "fast"; + } + + if (lane == (uint8_t)FEE_LANE_NORMAL) + { + return "normal"; + } + + if (lane == (uint8_t)FEE_LANE_BULK) + { + return "bulk"; + } + + return "unknown"; +} + +static const char *fee_test_sector_state_name(uint8_t state) +{ + if (state == (uint8_t)FEE_SECTOR_PREPARE) + { + return "PREPARE"; + } + + if (state == (uint8_t)FEE_SECTOR_ACTIVE) + { + return "ACTIVE"; + } + + if (state == (uint8_t)FEE_SECTOR_GC_DST) + { + return "GC_DST"; + } + + if (state == (uint8_t)FEE_SECTOR_OLD_PENDING_ERASE) + { + return "OLD_PENDING_ERASE"; + } + + if (state == (uint8_t)FEE_SECTOR_BAD) + { + return "BAD"; + } + + if (state == (uint8_t)FEE_SECTOR_ERASED) + { + return "ERASED"; + } + + return "UNKNOWN"; +} + +static const char *fee_test_record_type_name(uint8_t type) +{ + if (type == (uint8_t)FEE_RECORD_DATA) + { + return "DATA"; + } + + if (type == (uint8_t)FEE_RECORD_TOMBSTONE) + { + return "TOMBSTONE"; + } + + if (type == (uint8_t)FEE_RECORD_CKPT_CHUNK) + { + return "CKPT_CHUNK"; + } + + return "UNKNOWN"; +} + +static void fee_test_print_buffer(const char *label, const uint8_t *buf, uint16_t len) +{ + uint16_t idx; + 
+ if ((label == RT_NULL) || (buf == RT_NULL)) + { + return; + } + + rt_kprintf("custom_fee_diag_test: %s len=%u\n", label, (unsigned)len); + for (idx = 0U; idx < len; idx = (uint16_t)(idx + 16U)) + { + uint16_t line_len = (uint16_t)(len - idx); + uint16_t jdx; + + if (line_len > 16U) + { + line_len = 16U; + } + + rt_kprintf("custom_fee_diag_test: %04u:", (unsigned)idx); + for (jdx = 0U; jdx < line_len; ++jdx) + { + rt_kprintf(" %02x", buf[idx + jdx]); + } + rt_kprintf("\n"); + } +} + +static rt_bool_t fee_test_fetch_debug_stats(fee_port_debug_stats_t *stats) +{ + if (stats == RT_NULL) + { + return RT_FALSE; + } + + if (fee_port_debug_get_stats(stats) != FEE_E_OK) + { + (void)memset(stats, 0, sizeof(*stats)); + return RT_FALSE; + } + + return RT_TRUE; +} + +static void fee_test_print_stats(const char *label, const fee_test_duration_t *duration, + const fee_port_debug_stats_t *stats, rt_bool_t stats_valid, uint32_t wait_loops) +{ + if ((label == RT_NULL) || (duration == RT_NULL)) + { + return; + } + + if (stats_valid == RT_FALSE) + { + rt_kprintf("custom_fee_diag_test: %s time=%u ms ticks=%lu wait_loops=%u driver[unavailable]\n", + label, (unsigned)duration->ms, (unsigned long)duration->ticks, (unsigned)wait_loops); + return; + } + + rt_kprintf( + "custom_fee_diag_test: %s time=%u ms ticks=%lu wait_loops=%u " + "driver[init=%u read=%u/%uB write=%u/%uB erase=%u/%uB poll=%u]\n", + label, + (unsigned)duration->ms, + (unsigned long)duration->ticks, + (unsigned)wait_loops, + (unsigned)stats->init_calls, + (unsigned)stats->read_calls, + (unsigned)stats->read_bytes, + (unsigned)stats->write_calls, + (unsigned)stats->write_bytes, + (unsigned)stats->erase_calls, + (unsigned)stats->erase_bytes, + (unsigned)stats->poll_calls); +} + +static rt_bool_t fee_test_storage_copy(const uint8_t *storage, uint32_t flash_size, + uint32_t addr, void *dst, uint32_t len) +{ + if ((storage == RT_NULL) || (dst == RT_NULL) || (len == 0U)) + { + return RT_FALSE; + } + + if ((addr > flash_size) || 
(len > (flash_size - addr))) + { + return RT_FALSE; + } + + (void)memcpy(dst, &storage[addr], len); + return RT_TRUE; +} + +static rt_bool_t fee_test_storage_is_erased(const uint8_t *storage, uint32_t flash_size, + uint32_t addr, uint32_t len) +{ + uint32_t idx; + + if ((storage == RT_NULL) || (len == 0U)) + { + return RT_FALSE; + } + + if ((addr > flash_size) || (len > (flash_size - addr))) + { + return RT_FALSE; + } + + for (idx = 0U; idx < len; ++idx) + { + if (storage[addr + idx] != 0xFFU) + { + return RT_FALSE; + } + } + + return RT_TRUE; +} + +static void fee_test_dump_hex_window(const char *label, const uint8_t *storage, + uint32_t flash_size, uint32_t addr, uint32_t len) +{ + uint32_t idx; + + if ((label == RT_NULL) || (storage == RT_NULL) || (len == 0U)) + { + return; + } + + if (addr >= flash_size) + { + return; + } + + if (len > (flash_size - addr)) + { + len = flash_size - addr; + } + + rt_kprintf("custom_fee_diag_test: %s addr=0x%08x len=%u\n", + label, (unsigned)addr, (unsigned)len); + for (idx = 0U; idx < len; idx += 16U) + { + uint32_t line_len = len - idx; + uint32_t jdx; + + if (line_len > 16U) + { + line_len = 16U; + } + + rt_kprintf("custom_fee_diag_test: 0x%08x:", (unsigned)(addr + idx)); + for (jdx = 0U; jdx < line_len; ++jdx) + { + rt_kprintf(" %02x", storage[addr + idx + jdx]); + } + rt_kprintf("\n"); + } +} + +static void fee_test_dump_record_raw(const uint8_t *storage, uint32_t flash_size, + uint32_t addr, const fee_record_header_t *record_header, uint32_t stored_len, uint32_t tail_addr) +{ + uint32_t payload_dump_len; + + if ((storage == RT_NULL) || (record_header == RT_NULL)) + { + return; + } + + fee_test_dump_hex_window("record header raw", storage, flash_size, addr, + (uint32_t)sizeof(fee_record_header_t)); + + payload_dump_len = record_header->data_len; + if (payload_dump_len > 64U) + { + payload_dump_len = 64U; + } + + if (payload_dump_len > 0U) + { + fee_test_dump_hex_window("record payload raw", storage, flash_size, + addr + 
(uint32_t)sizeof(fee_record_header_t), payload_dump_len); + } + + if (stored_len > payload_dump_len) + { + rt_kprintf("custom_fee_diag_test: record padding bytes=%u\n", + (unsigned)(stored_len - payload_dump_len)); + } + + fee_test_dump_hex_window("record tail raw", storage, flash_size, tail_addr, + (uint32_t)sizeof(fee_commit_tail_t)); +} + +static void fee_test_dump_checkpoint_layout(const uint8_t *storage, uint32_t flash_size, + const fee_flash_caps_t *caps) +{ + uint32_t idx; + uint32_t active_meta_base = 0U; + uint32_t active_meta_generation = 0U; + rt_bool_t active_meta_found = RT_FALSE; + + if ((storage == RT_NULL) || (caps == RT_NULL)) + { + return; + } + + for (idx = 0U; idx < FEE_CFG_META_SECTOR_COUNT; ++idx) + { + fee_test_ckpt_image_t image; + uint32_t base = idx * caps->erase_unit; + rt_bool_t valid; + uint8_t lane; + + if (fee_test_storage_is_erased(storage, flash_size, base, 64U) != RT_FALSE) + { + rt_kprintf("custom_fee_diag_test: meta[%u] base=0x%08x ERASED\n", + (unsigned)idx, (unsigned)base); + continue; + } + + if (fee_test_storage_copy(storage, flash_size, base, &image, (uint32_t)sizeof(image)) == RT_FALSE) + { + rt_kprintf("custom_fee_diag_test: meta[%u] base=0x%08x unreadable\n", + (unsigned)idx, (unsigned)base); + continue; + } + + valid = fee_test_ckpt_is_valid(&image); + rt_kprintf( + "custom_fee_diag_test: meta[%u] base=0x%08x valid=%u generation=%lu entries=%u commit=0x%08x\n", + (unsigned)idx, + (unsigned)base, + (unsigned)valid, + (unsigned long)image.generation, + (unsigned)image.payload.entry_count, + (unsigned)image.commit_marker); + + if (valid != RT_FALSE) + { + if ((active_meta_found == RT_FALSE) || (image.generation >= active_meta_generation)) + { + active_meta_base = base; + active_meta_generation = image.generation; + active_meta_found = RT_TRUE; + } + + for (lane = (uint8_t)FEE_LANE_FAST; lane <= (uint8_t)FEE_LANE_BULK; ++lane) + { + const fee_test_ckpt_lane_state_t *state = &image.payload.lane[lane]; + + rt_kprintf( + 
"custom_fee_diag_test: meta[%u] lane=%s active=%u dst=%u spare=%u " + "gen=%lu free=0x%08x\n", + (unsigned)idx, + fee_test_lane_name(lane), + (unsigned)state->active_sector, + (unsigned)state->dst_sector, + (unsigned)state->spare_sector, + (unsigned long)state->active_generation, + (unsigned)state->free_offset); + } + } + + } + + if (active_meta_found != RT_FALSE) + { + fee_test_dump_hex_window("meta active raw", storage, flash_size, active_meta_base, 64U); + } +} + +static void fee_test_dump_business_lane_layout(uint8_t lane, const uint8_t *storage, + uint32_t flash_size, const fee_flash_caps_t *caps) +{ + const fee_lane_ctx_t *lane_ctx = &g_fee_ctx.lane[lane]; + uint8_t sector_idx; + uint32_t active_window_len; + + rt_kprintf( + "custom_fee_diag_test: lane=%s range=[0x%08x,0x%08x) active=%u dst=%u spare=%u " + "old=%u free=0x%08x limit=0x%08x dirty_records=%lu dirty_bytes=%lu\n", + fee_test_lane_name(lane), + (unsigned)lane_ctx->range_base, + (unsigned)lane_ctx->range_limit, + (unsigned)lane_ctx->active_sector, + (unsigned)lane_ctx->dst_sector, + (unsigned)lane_ctx->spare_sector, + (unsigned)lane_ctx->gc_old_sector, + (unsigned)lane_ctx->free_offset, + (unsigned)lane_ctx->limit_addr, + (unsigned long)lane_ctx->dirty_record_count, + (unsigned long)lane_ctx->dirty_bytes); + + for (sector_idx = 0U; sector_idx < lane_ctx->sector_count; ++sector_idx) + { + fee_sector_header_t sector_header; + uint32_t sector_base = lane_ctx->range_base + ((uint32_t)sector_idx * caps->erase_unit); + uint32_t addr; + uint32_t record_count = 0U; + uint32_t printed_records = 0U; + + if (fee_test_storage_is_erased(storage, flash_size, sector_base, + (uint32_t)sizeof(fee_sector_header_t)) != RT_FALSE) + { + rt_kprintf("custom_fee_diag_test: sector=%u base=0x%08x ERASED\n", + (unsigned)sector_idx, (unsigned)sector_base); + continue; + } + + if (fee_test_storage_copy(storage, flash_size, sector_base, + §or_header, (uint32_t)sizeof(sector_header)) == RT_FALSE) + { + 
rt_kprintf("custom_fee_diag_test: sector=%u base=0x%08x unreadable\n", + (unsigned)sector_idx, (unsigned)sector_base); + continue; + } + + if (!fee_onflash_validate_sector_header(§or_header)) + { + if (sector_header.magic == FEE_SECTOR_MAGIC) + { + rt_kprintf( + "custom_fee_diag_test: sector=%u base=0x%08x header-valid=0 " + "state=%s generation=%lu data=[0x%08x,0x%08x)\n", + (unsigned)sector_idx, + (unsigned)sector_base, + fee_test_sector_state_name(sector_header.state), + (unsigned long)sector_header.generation, + (unsigned)sector_header.data_start, + (unsigned)sector_header.data_end); + } + else + { + rt_kprintf("custom_fee_diag_test: sector=%u base=0x%08x raw/unknown\n", + (unsigned)sector_idx, (unsigned)sector_base); + } + fee_test_dump_hex_window("sector raw", storage, flash_size, sector_base, 64U); + continue; + } + + rt_kprintf( + "custom_fee_diag_test: sector=%u base=0x%08x state=%s generation=%lu " + "data=[0x%08x,0x%08x)\n", + (unsigned)sector_idx, + (unsigned)sector_base, + fee_test_sector_state_name(sector_header.state), + (unsigned long)sector_header.generation, + (unsigned)sector_header.data_start, + (unsigned)sector_header.data_end); + + addr = sector_header.data_start; + while ((addr + (uint32_t)sizeof(fee_record_header_t) + + (uint32_t)sizeof(fee_commit_tail_t)) <= sector_header.data_end) + { + fee_record_header_t record_header; + fee_commit_tail_t tail; + const fee_block_cfg_t *cfg; + uint32_t stored_len; + uint32_t next_addr; + uint32_t tail_addr; + rt_bool_t committed; + + if (fee_test_storage_copy(storage, flash_size, addr, &record_header, + (uint32_t)sizeof(record_header)) == RT_FALSE) + { + break; + } + + if (record_header.magic != FEE_RECORD_MAGIC) + { + break; + } + + cfg = fee_cfg_find_block(record_header.block_id); + if ((cfg == RT_NULL) || (cfg->lane_type != lane) || + (fee_onflash_validate_record_header(&record_header, cfg) == RT_FALSE)) + { + break; + } + + stored_len = fee_onflash_align_up((uint32_t)record_header.data_len, 
caps->program_unit); + tail_addr = addr + (uint32_t)sizeof(record_header) + stored_len; + if (fee_test_storage_copy(storage, flash_size, tail_addr, &tail, + (uint32_t)sizeof(tail)) == RT_FALSE) + { + break; + } + + committed = fee_onflash_validate_commit_tail(&tail); + if (printed_records < FEE_TEST_LAYOUT_MAX_RECORDS) + { + rt_kprintf( + "custom_fee_diag_test: record=%lu addr=0x%08x block=%u type=%s " + "seq=%lu len=%u committed=%u prev=0x%08x\n", + (unsigned long)record_count, + (unsigned)addr, + (unsigned)record_header.block_id, + fee_test_record_type_name(record_header.record_type), + (unsigned long)record_header.seq, + (unsigned)record_header.data_len, + (unsigned)committed, + (unsigned)record_header.prev_addr_hint); + if (record_count == 0U) + { + fee_test_dump_record_raw(storage, flash_size, addr, &record_header, + stored_len, tail_addr); + } + ++printed_records; + } + + next_addr = addr + fee_onflash_calc_record_span(cfg, record_header.data_len); + if ((next_addr <= addr) || (next_addr > sector_header.data_end)) + { + break; + } + + addr = next_addr; + ++record_count; + } + + if (record_count > printed_records) + { + rt_kprintf("custom_fee_diag_test: ... 
%lu more record(s) omitted\n", + (unsigned long)(record_count - printed_records)); + } + + rt_kprintf("custom_fee_diag_test: sector=%u stop_addr=0x%08x record_count=%lu\n", + (unsigned)sector_idx, (unsigned)addr, (unsigned long)record_count); + } + + active_window_len = 0U; + if ((lane_ctx->base_addr < lane_ctx->free_offset) && (lane_ctx->base_addr < flash_size)) + { + active_window_len = lane_ctx->free_offset - lane_ctx->base_addr; + if (active_window_len > FEE_TEST_FLASH_WINDOW_BYTES) + { + active_window_len = FEE_TEST_FLASH_WINDOW_BYTES; + } + } + + if (active_window_len > 0U) + { + fee_test_dump_hex_window("active sector raw", storage, flash_size, + lane_ctx->base_addr, active_window_len); + } +} + +static void fee_test_dump_flash_layout(const char *stage) +{ + fee_flash_caps_t caps; + const uint8_t *storage = RT_NULL; + uint32_t flash_size = 0U; + uint8_t lane; + + if ((stage == RT_NULL) || (fee_port_get_caps(&caps) != FEE_E_OK)) + { + rt_kprintf("custom_fee_diag_test: flash layout unavailable\n"); + return; + } + + if (fee_port_debug_get_storage(&storage, &flash_size) != FEE_E_OK) + { + rt_kprintf("custom_fee_diag_test: flash layout unavailable for current driver\n"); + return; + } + + rt_kprintf( + "custom_fee_diag_test: flash layout [%s] total=0x%08x erase=0x%08x read_unit=%u " + "program_unit=%u\n", + stage, + (unsigned)caps.total_size, + (unsigned)caps.erase_unit, + (unsigned)caps.read_unit, + (unsigned)caps.program_unit); + + fee_test_dump_checkpoint_layout(storage, flash_size, &caps); + + for (lane = (uint8_t)FEE_LANE_FAST; lane <= (uint8_t)FEE_LANE_BULK; ++lane) + { + fee_test_dump_business_lane_layout(lane, storage, flash_size, &caps); + } +} + +static int fee_test_reset_backend(void) +{ + fee_flash_caps_t caps; + uint32_t used_bytes; + uint32_t idx; + fee_ret_t ret; + + ret = fee_port_init(); + if (ret != FEE_E_OK) + { + rt_kprintf("custom_fee_test: fee_port_init failed ret=%u\n", (unsigned)ret); + return -1; + } + + ret = fee_port_get_caps(&caps); + 
if (ret != FEE_E_OK) + { + rt_kprintf("custom_fee_test: fee_port_get_caps failed ret=%u\n", (unsigned)ret); + return -1; + } + + used_bytes = fee_cfg_get_total_sector_count() * caps.erase_unit; + if (used_bytes > caps.total_size) + { + rt_kprintf("custom_fee_test: backend too small, used=0x%08x total=0x%08x\n", + (unsigned)used_bytes, (unsigned)caps.total_size); + return -1; + } + + for (idx = 0U; idx < fee_cfg_get_total_sector_count(); ++idx) + { + ret = fee_port_erase(idx * caps.erase_unit, caps.erase_unit); + if (ret != FEE_E_OK) + { + rt_kprintf("custom_fee_test: erase sector=%u failed ret=%u\n", + (unsigned)idx, (unsigned)ret); + return -1; + } + } + + return 0; +} + +static int fee_test_measure_init(const char *label, rt_bool_t verbose) +{ + fee_test_stamp_t start; + fee_test_stamp_t end; + fee_test_duration_t duration; + fee_port_debug_stats_t stats; + rt_bool_t stats_valid = RT_FALSE; + uint32_t wait_loops = 0U; + + (void)fee_port_debug_reset_stats(); + start = fee_test_stamp_now(); + + if (fee_test_expect_ret(label, fee_init(), FEE_E_OK) != 0) + { + return -1; + } + + if (fee_test_wait_full_ready(FEE_TEST_POLL_BUDGET, &wait_loops) != 0) + { + rt_kprintf("custom_fee_test: %s did not reach FULL_READY\n", label); + return -1; + } + + end = fee_test_stamp_now(); + duration = fee_test_elapsed(start, end); + + if (verbose != RT_FALSE) + { + stats_valid = fee_test_fetch_debug_stats(&stats); + fee_test_print_stats(label, &duration, &stats, stats_valid, wait_loops); + } + + return 0; +} + +static int fee_test_measure_write(uint16_t block_id, const uint8_t *src, uint16_t len, + const char *label, rt_bool_t verbose) +{ + fee_test_stamp_t start; + fee_test_stamp_t end; + fee_test_duration_t duration; + fee_port_debug_stats_t stats; + rt_bool_t stats_valid = RT_FALSE; + uint32_t wait_loops = 0U; + + (void)fee_port_debug_reset_stats(); + start = fee_test_stamp_now(); + + if (fee_test_expect_ret(label, fee_write(block_id, src, len), FEE_E_OK) != 0) + { + return -1; + } + 
+ if (fee_test_wait_idle(FEE_TEST_POLL_BUDGET, &wait_loops) != 0) + { + rt_kprintf("custom_fee_test: %s did not complete\n", label); + return -1; + } + + end = fee_test_stamp_now(); + duration = fee_test_elapsed(start, end); + + if (verbose != RT_FALSE) + { + stats_valid = fee_test_fetch_debug_stats(&stats); + fee_test_print_stats(label, &duration, &stats, stats_valid, wait_loops); + fee_test_print_buffer(label, src, len); + } + + return 0; +} + +static int fee_test_measure_read(uint16_t block_id, const uint8_t *expect, uint16_t len, + const char *label, rt_bool_t verbose) +{ + uint8_t readback[FEE_CFG_MAX_BLOCK_LEN]; + fee_ret_t ret; + fee_test_stamp_t start; + fee_test_stamp_t end; + fee_test_duration_t duration; + fee_port_debug_stats_t stats; + rt_bool_t stats_valid = RT_FALSE; + + if (len > (uint16_t)sizeof(readback)) + { + rt_kprintf("custom_fee_test: readback len too large: %u\n", (unsigned)len); + return -1; + } + + (void)memset(readback, 0, sizeof(readback)); + (void)fee_port_debug_reset_stats(); + start = fee_test_stamp_now(); + ret = fee_read(block_id, 0U, readback, len); + end = fee_test_stamp_now(); + + if (ret != FEE_E_OK) + { + rt_kprintf("custom_fee_test: %s ret=%u\n", label, (unsigned)ret); + return -1; + } + + if (memcmp(readback, expect, len) != 0) + { + rt_kprintf("custom_fee_test: readback mismatch on block=%u for %s\n", + (unsigned)block_id, label); + return -1; + } + + duration = fee_test_elapsed(start, end); + if (verbose != RT_FALSE) + { + stats_valid = fee_test_fetch_debug_stats(&stats); + fee_test_print_stats(label, &duration, &stats, stats_valid, 0U); + fee_test_print_buffer(label, readback, len); + } + + return 0; +} + +static int fee_test_measure_action(fee_ret_t actual, const char *label, rt_bool_t verbose) +{ + fee_test_stamp_t start; + fee_test_stamp_t end; + fee_test_duration_t duration; + fee_port_debug_stats_t stats; + rt_bool_t stats_valid = RT_FALSE; + uint32_t wait_loops = 0U; + + (void)fee_port_debug_reset_stats(); + start = 
fee_test_stamp_now(); + + if (fee_test_expect_ret(label, actual, FEE_E_OK) != 0) + { + return -1; + } + + if (fee_test_wait_idle(FEE_TEST_POLL_BUDGET, &wait_loops) != 0) + { + rt_kprintf("custom_fee_test: %s did not complete\n", label); + return -1; + } + + end = fee_test_stamp_now(); + duration = fee_test_elapsed(start, end); + + if (verbose != RT_FALSE) + { + stats_valid = fee_test_fetch_debug_stats(&stats); + fee_test_print_stats(label, &duration, &stats, stats_valid, wait_loops); + } + + return 0; +} + +static int fee_test_measure_failed_read(uint16_t block_id, uint16_t len, fee_ret_t expected, + const char *label, rt_bool_t verbose) +{ + uint8_t readback[FEE_CFG_MAX_BLOCK_LEN]; + fee_ret_t ret; + fee_test_stamp_t start; + fee_test_stamp_t end; + fee_test_duration_t duration; + fee_port_debug_stats_t stats; + rt_bool_t stats_valid = RT_FALSE; + + if (len > (uint16_t)sizeof(readback)) + { + rt_kprintf("custom_fee_test: failed-read len too large: %u\n", (unsigned)len); + return -1; + } + + (void)memset(readback, 0, sizeof(readback)); + (void)fee_port_debug_reset_stats(); + start = fee_test_stamp_now(); + ret = fee_read(block_id, 0U, readback, len); + end = fee_test_stamp_now(); + + if (fee_test_expect_ret(label, ret, expected) != 0) + { + return -1; + } + + duration = fee_test_elapsed(start, end); + if (verbose != RT_FALSE) + { + stats_valid = fee_test_fetch_debug_stats(&stats); + fee_test_print_stats(label, &duration, &stats, stats_valid, 0U); + } + + return 0; +} + +static int fee_test_run_gc_bench(uint8_t *fast_buf, uint16_t len, rt_bool_t verbose) +{ + rt_tick_t total_ticks = 0U; + uint32_t total_ms = 0U; + rt_tick_t gc_total_ticks = 0U; + uint32_t gc_total_ms = 0U; + rt_tick_t gc_max_ticks = 0U; + uint32_t gc_events = 0U; + uint32_t idx; + + for (idx = 0U; idx < FEE_TEST_GC_WRITE_COUNT; ++idx) + { + fee_test_stamp_t start; + fee_test_stamp_t end; + fee_test_duration_t duration; + fee_port_debug_stats_t stats; + rt_bool_t stats_valid = RT_FALSE; + uint32_t 
wait_loops = 0U; + uint8_t before_sector; + uint8_t after_sector; + uint32_t before_generation; + uint32_t after_generation; + uint32_t before_free_offset; + uint32_t after_free_offset; + rt_bool_t gc_switched; + + fee_test_fill_pattern(fast_buf, len, (uint8_t)(0x60U + idx)); + before_sector = g_fee_ctx.lane[FEE_LANE_FAST].active_sector; + before_generation = g_fee_ctx.lane[FEE_LANE_FAST].active_generation; + before_free_offset = g_fee_ctx.lane[FEE_LANE_FAST].free_offset; + (void)fee_port_debug_reset_stats(); + start = fee_test_stamp_now(); + + if (fee_test_expect_ret("gc stress write", fee_write(1U, fast_buf, len), FEE_E_OK) != 0) + { + rt_kprintf("custom_fee_test: gc stress failed at iter=%u\n", (unsigned)idx); + return -1; + } + + if (fee_test_wait_idle(FEE_TEST_POLL_BUDGET, &wait_loops) != 0) + { + rt_kprintf("custom_fee_test: gc stress did not complete at iter=%u\n", (unsigned)idx); + return -1; + } + + end = fee_test_stamp_now(); + duration = fee_test_elapsed(start, end); + total_ticks += duration.ticks; + total_ms += duration.ms; + after_sector = g_fee_ctx.lane[FEE_LANE_FAST].active_sector; + after_generation = g_fee_ctx.lane[FEE_LANE_FAST].active_generation; + after_free_offset = g_fee_ctx.lane[FEE_LANE_FAST].free_offset; + gc_switched = ((after_sector != before_sector) || (after_generation != before_generation)) ? 
+ RT_TRUE : RT_FALSE; + + if (verbose != RT_FALSE) + { + stats_valid = fee_test_fetch_debug_stats(&stats); + if (gc_switched != RT_FALSE) + { + ++gc_events; + gc_total_ticks += duration.ticks; + gc_total_ms += duration.ms; + if (duration.ticks > gc_max_ticks) + { + gc_max_ticks = duration.ticks; + } + } + + if (stats_valid != RT_FALSE) + { + rt_kprintf( + "custom_fee_diag_test: gc_write[%03u] time=%u ms ticks=%lu wait_loops=%u " + "fast_lane[sector=%u->%u gen=%lu->%lu free=0x%08x->0x%08x gc=%u] " + "driver[read=%u/%uB write=%u/%uB erase=%u/%uB poll=%u]\n", + (unsigned)idx, + (unsigned)duration.ms, + (unsigned long)duration.ticks, + (unsigned)wait_loops, + (unsigned)before_sector, + (unsigned)after_sector, + (unsigned long)before_generation, + (unsigned long)after_generation, + (unsigned)before_free_offset, + (unsigned)after_free_offset, + (unsigned)gc_switched, + (unsigned)stats.read_calls, + (unsigned)stats.read_bytes, + (unsigned)stats.write_calls, + (unsigned)stats.write_bytes, + (unsigned)stats.erase_calls, + (unsigned)stats.erase_bytes, + (unsigned)stats.poll_calls); + } + else + { + rt_kprintf( + "custom_fee_diag_test: gc_write[%03u] time=%u ms ticks=%lu wait_loops=%u " + "fast_lane[sector=%u->%u gen=%lu->%lu free=0x%08x->0x%08x gc=%u] " + "driver[unavailable]\n", + (unsigned)idx, + (unsigned)duration.ms, + (unsigned long)duration.ticks, + (unsigned)wait_loops, + (unsigned)before_sector, + (unsigned)after_sector, + (unsigned long)before_generation, + (unsigned long)after_generation, + (unsigned)before_free_offset, + (unsigned)after_free_offset, + (unsigned)gc_switched); + } + } + } + + if (verbose != RT_FALSE) + { + rt_kprintf( + "custom_fee_diag_test: gc summary writes=%u total_time=%u ms total_ticks=%lu " + "gc_events=%u gc_time=%u ms gc_ticks=%lu gc_max_ticks=%lu\n", + (unsigned)FEE_TEST_GC_WRITE_COUNT, + (unsigned)total_ms, + (unsigned long)total_ticks, + (unsigned)gc_events, + (unsigned)gc_total_ms, + (unsigned long)gc_total_ticks, + (unsigned 
long)gc_max_ticks); + } + + return 0; +} + +static int fee_test_run(rt_bool_t verbose) +{ + uint8_t fast_a[32]; + uint8_t fast_b[32]; + uint8_t normal_a[64]; + fee_block_status_t status; + + fee_test_fill_pattern(fast_a, (uint16_t)sizeof(fast_a), 0x11U); + fee_test_fill_pattern(fast_b, (uint16_t)sizeof(fast_b), 0x51U); + fee_test_fill_pattern(normal_a, (uint16_t)sizeof(normal_a), 0x21U); + + if (fee_test_reset_backend() != 0) + { + return -1; + } + + if (fee_test_measure_init("init", verbose) != 0) + { + return -1; + } + + if (fee_test_measure_write(1U, fast_a, (uint16_t)sizeof(fast_a), + "write block1 fast_a", verbose) != 0) + { + return -1; + } + + if (fee_test_measure_read(1U, fast_a, (uint16_t)sizeof(fast_a), + "read block1 fast_a", verbose) != 0) + { + return -1; + } + + if (fee_test_measure_write(1U, fast_b, (uint16_t)sizeof(fast_b), + "write block1 fast_b", verbose) != 0) + { + return -1; + } + + if (fee_test_measure_read(1U, fast_b, (uint16_t)sizeof(fast_b), + "read block1 fast_b", verbose) != 0) + { + return -1; + } + + if (fee_test_measure_action(fee_rollback(1U), "rollback block1", verbose) != 0) + { + return -1; + } + + if (fee_test_measure_read(1U, fast_a, (uint16_t)sizeof(fast_a), + "read block1 rollback", verbose) != 0) + { + return -1; + } + + if (fee_test_measure_write(2U, normal_a, (uint16_t)sizeof(normal_a), + "write block2 normal_a", verbose) != 0) + { + return -1; + } + + if (fee_test_measure_read(2U, normal_a, (uint16_t)sizeof(normal_a), + "read block2 normal_a", verbose) != 0) + { + return -1; + } + + if (fee_test_measure_init("re-init", verbose) != 0) + { + return -1; + } + + if (fee_test_measure_read(1U, fast_a, (uint16_t)sizeof(fast_a), + "read block1 after re-init", verbose) != 0) + { + return -1; + } + + if (fee_test_measure_read(2U, normal_a, (uint16_t)sizeof(normal_a), + "read block2 after re-init", verbose) != 0) + { + return -1; + } + + if (fee_test_run_gc_bench(fast_b, (uint16_t)sizeof(fast_b), verbose) != 0) + { + return -1; + } + 
+ fee_test_fill_pattern(fast_b, (uint16_t)sizeof(fast_b), + (uint8_t)(0x60U + (FEE_TEST_GC_WRITE_COUNT - 1U))); + if (fee_test_measure_read(1U, fast_b, (uint16_t)sizeof(fast_b), + "read block1 after gc", verbose) != 0) + { + return -1; + } + + if (fee_test_measure_action(fee_invalidate(1U), "invalidate block1", verbose) != 0) + { + return -1; + } + + if (fee_test_expect_ret("get_status", fee_get_status(1U, &status), FEE_E_OK) != 0) + { + return -1; + } + + if (status != FEE_BLOCK_STATUS_INVALIDATED) + { + rt_kprintf("custom_fee_test: expected INVALIDATED, got %u\n", (unsigned)status); + return -1; + } + + if (verbose != RT_FALSE) + { + rt_kprintf("custom_fee_diag_test: block1 status after invalidate=%u\n", (unsigned)status); + } + + if (fee_test_measure_failed_read(1U, (uint16_t)sizeof(fast_a), FEE_E_NOT_OK, + "read invalidated block1", verbose) != 0) + { + return -1; + } + + if (verbose != RT_FALSE) + { + fee_test_dump_flash_layout("final"); + } + + return 0; +} + +int custom_fee_test(void) +{ + rt_kprintf("custom_fee_test: start\n"); + + if (fee_test_run(RT_FALSE) != 0) + { + return -1; + } + + rt_kprintf("custom_fee_test: PASS\n"); + return 0; +} +MSH_CMD_EXPORT(custom_fee_test, custom fee smoke test); + +int custom_fee_diag_test(void) +{ + rt_kprintf("custom_fee_diag_test: start\n"); + + if (fee_test_run(RT_TRUE) != 0) + { + return -1; + } + + rt_kprintf("custom_fee_diag_test: PASS\n"); + return 0; +} +MSH_CMD_EXPORT(custom_fee_diag_test, custom fee diagnostic test);