From 375b8e9a25d44922ef4b0666cfeece0f57c15607 Mon Sep 17 00:00:00 2001 From: Yujong Lee Date: Mon, 26 Jan 2026 11:31:58 +0900 Subject: [PATCH] save --- Cargo.lock | 3 + .../journey-fixing-the-data-layer.mdx | 16 + plugins/fs-db/AGENTS.md | 5 + plugins/fs-db/Cargo.toml | 3 + plugins/fs-db/DECISIONS.md | 113 ++++++ plugins/fs-db/SPEC.md | 29 ++ plugins/fs-db/js/bindings.gen.ts | 128 +++--- plugins/fs-db/src/error.rs | 6 + plugins/fs-db/src/ext.rs | 1 + plugins/fs-db/src/lib.rs | 22 +- plugins/fs-db/src/migrations.rs | 56 ++- plugins/fs-db/src/runner.rs | 200 +++++++++ plugins/fs-db/src/tests/fixtures.rs | 380 ++++++++++++++++++ plugins/fs-db/src/tests/migration_tests.rs | 231 +++++++++++ plugins/fs-db/src/tests/mod.rs | 2 + plugins/fs-db/src/types/data/mod.rs | 5 + plugins/fs-db/src/types/data/session.rs | 141 +++++++ plugins/fs-db/src/types/data/transcript.rs | 176 ++++++++ plugins/fs-db/src/types/document.rs | 89 ++++ plugins/fs-db/src/types/migration.rs | 87 ++++ plugins/fs-db/src/types/mod.rs | 11 + plugins/fs-db/src/types/schema.rs | 9 + plugins/fs-db/src/types/version.rs | 89 ++++ 23 files changed, 1734 insertions(+), 68 deletions(-) create mode 100644 apps/web/content/articles/journey-fixing-the-data-layer.mdx create mode 100644 plugins/fs-db/AGENTS.md create mode 100644 plugins/fs-db/DECISIONS.md create mode 100644 plugins/fs-db/SPEC.md create mode 100644 plugins/fs-db/src/runner.rs create mode 100644 plugins/fs-db/src/tests/fixtures.rs create mode 100644 plugins/fs-db/src/tests/migration_tests.rs create mode 100644 plugins/fs-db/src/tests/mod.rs create mode 100644 plugins/fs-db/src/types/data/mod.rs create mode 100644 plugins/fs-db/src/types/data/session.rs create mode 100644 plugins/fs-db/src/types/data/transcript.rs create mode 100644 plugins/fs-db/src/types/document.rs create mode 100644 plugins/fs-db/src/types/migration.rs create mode 100644 plugins/fs-db/src/types/mod.rs create mode 100644 plugins/fs-db/src/types/schema.rs create mode 100644 plugins/fs-db/src/types/version.rs diff --git a/Cargo.lock b/Cargo.lock index 3453ea88e5..fd1ce58842 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -16749,13 +16749,16 @@ dependencies = [ name = "tauri-plugin-fs-db" version = "0.1.0" dependencies = [ + "semver 1.0.27", "serde", + "serde_json", "specta", "specta-typescript", "tauri", "tauri-plugin", "tauri-plugin-settings", "tauri-specta", + "tempfile", "thiserror 2.0.17", "tokio", "uuid", diff --git a/apps/web/content/articles/journey-fixing-the-data-layer.mdx b/apps/web/content/articles/journey-fixing-the-data-layer.mdx new file mode 100644 index 0000000000..3acb27142a --- /dev/null +++ b/apps/web/content/articles/journey-fixing-the-data-layer.mdx @@ -0,0 +1,16 @@ +--- +meta_title: "Messy journey of fixing the data layer of Hyprnote" +meta_description: "TOOD" +author: "Yujong Lee" +coverImage: "/api/images/blog/journey-fixing-the-data-layer/cover.png" +published: true +date: "2026-01-25" +--- + +## How did we get here? + +## The problems + +## The solutions + +## The results diff --git a/plugins/fs-db/AGENTS.md b/plugins/fs-db/AGENTS.md new file mode 100644 index 0000000000..bd5dda5e55 --- /dev/null +++ b/plugins/fs-db/AGENTS.md @@ -0,0 +1,5 @@ +We are working on designing `plugins/fs-db/src`. Still very WIP. + +See `plugins/fs-db/SPEC.md` and `plugins/fs-db/DECISIONS.md`. + +Your job is have conversation with me, incrementally form a better spec, document decisions. 
diff --git a/plugins/fs-db/Cargo.toml b/plugins/fs-db/Cargo.toml index c39b236c75..69d6ae6d2a 100644 --- a/plugins/fs-db/Cargo.toml +++ b/plugins/fs-db/Cargo.toml @@ -12,6 +12,7 @@ tauri-plugin = { workspace = true, features = ["build"] } [dev-dependencies] specta-typescript = { workspace = true } +tempfile = { workspace = true } tokio = { workspace = true, features = ["macros"] } [dependencies] @@ -20,7 +21,9 @@ tauri-plugin-settings = { workspace = true } tauri-specta = { workspace = true, features = ["derive", "typescript"] } serde = { workspace = true } +serde_json = { workspace = true } specta = { workspace = true } +semver = "1" thiserror = { workspace = true } uuid = { workspace = true } diff --git a/plugins/fs-db/DECISIONS.md b/plugins/fs-db/DECISIONS.md new file mode 100644 index 0000000000..e92a828e35 --- /dev/null +++ b/plugins/fs-db/DECISIONS.md @@ -0,0 +1,113 @@ +--- +Note that DECISIONS file is not for writing open questions. This is only for documenting the decisions we agreed. +--- + +## Migration Model + +**Decision**: Operation-based migrations + +Each migration is a function that receives `base_dir: &Path` and performs whatever operations needed: + +```rust +pub struct Migration { + pub name: &'static str, + pub from: SchemaVersion, + pub to: SchemaVersion, + pub run: fn(&Path) -> Result<()>, +} +``` + +**Rationale**: +- Migrations are diverse (SQLite extraction vs file rename vs frontmatter transform) +- Simple and flexible - no artificial constraints +- Can add structure later if patterns emerge + +## Migration Ordering + +**Decision**: Use `semver::Version` for app version-based migrations + +Use the `semver` crate. Migrations are tied to actual Hyprnote app versions, not arbitrary schema numbers. Not every app version has a migration - only versions that need data structure changes (checkpoints). + +```rust +// https://docs.rs/semver/latest/semver/ +use semver::Version; + +Migration::new( + "extract_from_sqlite", + Version::parse("1.0.1").unwrap(), + Version::parse("1.0.2").unwrap(), + |base_dir| { ... } +) +``` + +## App Version Source + +**Decision**: Get current app version from tauri's `Config.version` + +See https://docs.rs/tauri/latest/tauri/struct.Config.html#structfield.version + +## Fresh Install vs Migration Decision Logic + +``` +if .schema/version exists: + current_version = parse(.schema/version) + run migrations where from >= current_version +else if db.sqlite exists: + # Old user before version tracking + current_version = "1.0.1" # or earliest known version + run migrations where from >= current_version +else: + # Fresh install, no data to migrate + write app_version to .schema/version + skip all migrations +``` + +## Runner Algorithm + +1. Determine `current_version` using decision logic above +2. Get `app_version` from tauri Config (https://docs.rs/tauri/latest/tauri/struct.Config.html#structfield.version) +3. Collect all migrations where `from >= current_version && to <= app_version` +4. Sort by `from` using semver's `Ord` +5. Run in order +6. 
Write `app_version` to `.schema/version`

## Testing

Tests should make migration behavior explicit:

```rust
#[test]
fn fresh_install_skips_all_migrations() {
    // Given: empty base_dir (no .schema/version, no db.sqlite)
    // When: app v1.0.3 runs
    // Then: no migrations run, .schema/version written as "1.0.3"
}

#[test]
fn old_user_without_version_file_but_has_sqlite() {
    // Given: db.sqlite exists, no .schema/version
    // When: app v1.0.3 runs
    // Then: treat as v1.0.1, run all migrations from 1.0.1
}

#[test]
fn user_on_1_0_1_upgrading_to_1_0_3() {
    // Given: .schema/version = "1.0.1"
    // When: app v1.0.3 runs
    // Then: migrations 1.0.1→1.0.2 and 1.0.2→1.0.3 are applied
}

#[test]
fn user_on_1_0_2_upgrading_to_1_0_3() {
    // Given: .schema/version = "1.0.2"
    // When: app v1.0.3 runs
    // Then: only migration 1.0.2→1.0.3 is applied
}

#[test]
fn user_already_on_latest() {
    // Given: .schema/version = "1.0.3"
    // When: app v1.0.3 runs
    // Then: no migrations run
}
```
diff --git a/plugins/fs-db/SPEC.md b/plugins/fs-db/SPEC.md
new file mode 100644
index 0000000000..5f6b8b285c
--- /dev/null
+++ b/plugins/fs-db/SPEC.md
@@ -0,0 +1,29 @@
+---
+Note that the SPEC file is not an implementation plan.
+Use it for requirement understanding, not planning.
+Ask follow-up questions if needed, and update it once I have explicitly agreed.
+---
+
+## Where we're heading
+
+Mostly two things.
+
+1. Lazy loading for part of the data.
+   - We are already doing a kind of file-system-based storage (see apps/desktop/src/store/tinybase), similar to Obsidian but a bit more complex because our data is richer and more relational.
+   - For performance reasons, we are migrating from "load everything into TinyBase" to "load only metadata (for listing sessions etc.) and load detailed data on demand".
+   - **Scope**: Session is the priority (includes transcripts, enhanced_notes). Other entities (template, chat_shortcut, etc.) are not a priority for lazy loading.
+
+2. SQLite-like migration support for the filesystem structure (plugins/fs-db).
+   - Migrations run in the `setup` of `plugins/fs-db/src/lib.rs`. Resolve past<>current app version and apply migrations sequentially.
+   - **Version tracking**: Global `.schema/version` file.
+   - We need tests to ensure the user's data is properly migrated to the latest structure when they do an OTA update.
+   - See https://github.com/fastrepl/hyprnote-data. Feel free to clone it into `/tmp/hyprnote-data` and inspect it.
+   - We might bring `plugins/importer/src/sources/hyprnote/v1_sqlite` into an fs-db migration. That is the level of flexibility we need.
+   - **SQLite migration**: Users on old versions use `apps/desktop/src/store/tinybase/persister/local` (SQLite). We need a migration path from SQLite to the filesystem-first structure (see the sketch below).
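+
+To illustrate the level of flexibility required, such a migration could be registered as just another operation-based entry. This is only a rough sketch of the requirement, not part of the implementation: `extract_sqlite_to_fs` is a placeholder (the real body would reuse something like `plugins/importer/src/sources/hyprnote/v1_sqlite`), and the version numbers are examples.
+
+```rust
+use std::path::Path;
+
+use semver::Version;
+
+use crate::{Migration, Result};
+
+// Placeholder: pull sessions out of the legacy db.sqlite / TinyBase data and
+// write them out as sessions/<id>/_meta.json + transcript.json files.
+fn extract_sqlite_to_fs(base_dir: &Path) -> Result<()> {
+    let sqlite_path = base_dir.join("db.sqlite");
+    if !sqlite_path.exists() {
+        // Nothing to extract; this user never had the SQLite-era layout.
+        return Ok(());
+    }
+    // Actual extraction logic would go here, reusing the importer code above.
+    Ok(())
+}
+
+pub fn sqlite_extraction_migration() -> Migration {
+    Migration::new(
+        "extract_from_sqlite",
+        Version::parse("1.0.0").unwrap(),
+        Version::parse("1.0.1").unwrap(),
+        extract_sqlite_to_fs,
+    )
+}
+```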
+ +## Migration capabilities needed + +Migrations need to support: +- Filesystem-level: rename, move, delete files/folders +- File-level: frontmatter transform, field addition/deletion (for md files) +- Data extraction: SQLite/TinyBase JSON → filesystem structure (for v0 → v1) diff --git a/plugins/fs-db/js/bindings.gen.ts b/plugins/fs-db/js/bindings.gen.ts index cbd010cb25..dc2c3cebed 100644 --- a/plugins/fs-db/js/bindings.gen.ts +++ b/plugins/fs-db/js/bindings.gen.ts @@ -1,86 +1,90 @@ // @ts-nocheck -/** tauri-specta globals **/ -import { - Channel as TAURI_CHANNEL, - invoke as TAURI_INVOKE, -} from "@tauri-apps/api/core"; -import * as TAURI_API_EVENT from "@tauri-apps/api/event"; -import { type WebviewWindow as __WebviewWindow__ } from "@tauri-apps/api/webviewWindow"; // This file was generated by [tauri-specta](https://github.com/oscartbeaumont/tauri-specta). Do not edit this file manually. /** user-defined commands **/ + export const commands = { - async ping(payload: PingRequest): Promise> { +async ping(payload: PingRequest) : Promise> { try { - return { - status: "ok", - data: await TAURI_INVOKE("plugin:fs-db|ping", { payload }), - }; - } catch (e) { - if (e instanceof Error) throw e; - else return { status: "error", error: e as any }; - } - }, -}; + return { status: "ok", data: await TAURI_INVOKE("plugin:fs-db|ping", { payload }) }; +} catch (e) { + if(e instanceof Error) throw e; + else return { status: "error", error: e as any }; +} +} +} /** user-defined events **/ + + /** user-defined constants **/ + + /** user-defined types **/ -export type PingRequest = { value: string | null }; -export type PingResponse = { value: string | null }; +export type PingRequest = { value: string | null } +export type PingResponse = { value: string | null } + +/** tauri-specta globals **/ + +import { + invoke as TAURI_INVOKE, + Channel as TAURI_CHANNEL, +} from "@tauri-apps/api/core"; +import * as TAURI_API_EVENT from "@tauri-apps/api/event"; +import { type WebviewWindow as __WebviewWindow__ } from "@tauri-apps/api/webviewWindow"; type __EventObj__ = { - listen: ( - cb: TAURI_API_EVENT.EventCallback, - ) => ReturnType>; - once: ( - cb: TAURI_API_EVENT.EventCallback, - ) => ReturnType>; - emit: null extends T - ? (payload?: T) => ReturnType - : (payload: T) => ReturnType; + listen: ( + cb: TAURI_API_EVENT.EventCallback, + ) => ReturnType>; + once: ( + cb: TAURI_API_EVENT.EventCallback, + ) => ReturnType>; + emit: null extends T + ? 
(payload?: T) => ReturnType + : (payload: T) => ReturnType; }; export type Result = - | { status: "ok"; data: T } - | { status: "error"; error: E }; + | { status: "ok"; data: T } + | { status: "error"; error: E }; function __makeEvents__>( - mappings: Record, + mappings: Record, ) { - return new Proxy( - {} as unknown as { - [K in keyof T]: __EventObj__ & { - (handle: __WebviewWindow__): __EventObj__; - }; - }, - { - get: (_, event) => { - const name = mappings[event as keyof T]; - - return new Proxy((() => {}) as any, { - apply: (_, __, [window]: [__WebviewWindow__]) => ({ - listen: (arg: any) => window.listen(name, arg), - once: (arg: any) => window.once(name, arg), - emit: (arg: any) => window.emit(name, arg), - }), - get: (_, command: keyof __EventObj__) => { - switch (command) { - case "listen": - return (arg: any) => TAURI_API_EVENT.listen(name, arg); - case "once": - return (arg: any) => TAURI_API_EVENT.once(name, arg); - case "emit": - return (arg: any) => TAURI_API_EVENT.emit(name, arg); - } - }, - }); - }, - }, - ); + return new Proxy( + {} as unknown as { + [K in keyof T]: __EventObj__ & { + (handle: __WebviewWindow__): __EventObj__; + }; + }, + { + get: (_, event) => { + const name = mappings[event as keyof T]; + + return new Proxy((() => {}) as any, { + apply: (_, __, [window]: [__WebviewWindow__]) => ({ + listen: (arg: any) => window.listen(name, arg), + once: (arg: any) => window.once(name, arg), + emit: (arg: any) => window.emit(name, arg), + }), + get: (_, command: keyof __EventObj__) => { + switch (command) { + case "listen": + return (arg: any) => TAURI_API_EVENT.listen(name, arg); + case "once": + return (arg: any) => TAURI_API_EVENT.once(name, arg); + case "emit": + return (arg: any) => TAURI_API_EVENT.emit(name, arg); + } + }, + }); + }, + }, + ); } diff --git a/plugins/fs-db/src/error.rs b/plugins/fs-db/src/error.rs index 53206e2ee5..945faa33bf 100644 --- a/plugins/fs-db/src/error.rs +++ b/plugins/fs-db/src/error.rs @@ -6,6 +6,12 @@ pub type Result = std::result::Result; pub enum Error { #[error(transparent)] Io(#[from] std::io::Error), + #[error(transparent)] + Json(#[from] serde_json::Error), + #[error("version parse error: {0}")] + VersionParse(String), + #[error("migration failed")] + Migration, } impl Serialize for Error { diff --git a/plugins/fs-db/src/ext.rs b/plugins/fs-db/src/ext.rs index 90ed3ffb22..12882896ab 100644 --- a/plugins/fs-db/src/ext.rs +++ b/plugins/fs-db/src/ext.rs @@ -1,4 +1,5 @@ pub struct FsDb<'a, R: tauri::Runtime, M: tauri::Manager> { + #[allow(dead_code)] manager: &'a M, _runtime: std::marker::PhantomData R>, } diff --git a/plugins/fs-db/src/lib.rs b/plugins/fs-db/src/lib.rs index fa15513a13..b5ac24c7aa 100644 --- a/plugins/fs-db/src/lib.rs +++ b/plugins/fs-db/src/lib.rs @@ -3,9 +3,20 @@ mod error; mod ext; pub mod migrations; mod models; +pub mod runner; +pub mod types; + +#[cfg(test)] +mod tests; pub use error::{Error, Result}; pub use ext::*; +pub use runner::MigrationRunner; +pub use types::*; + +pub mod version { + pub use crate::types::{Version, read_current_version, write_version}; +} const PLUGIN_NAME: &str = "fs-db"; @@ -25,9 +36,16 @@ pub fn init() -> tauri::plugin::TauriPlugin { .invoke_handler(specta_builder.invoke_handler()) .setup(|app, _api| { use tauri_plugin_settings::SettingsPluginExt; + + let app_version = app + .config() + .version + .as_ref() + .and_then(|v| semver::Version::parse(v).ok()) + .unwrap_or_else(|| semver::Version::new(0, 0, 0)); + if let Ok(base_dir) = app.settings().content_base() { - 
migrations::move_uuid_folders_to_sessions(&base_dir)?; - migrations::rename_transcript(&base_dir)?; + migrations::run_migrations(&base_dir, app_version)?; } Ok(()) }) diff --git a/plugins/fs-db/src/migrations.rs b/plugins/fs-db/src/migrations.rs index 5e5cefdedc..6d671975bd 100644 --- a/plugins/fs-db/src/migrations.rs +++ b/plugins/fs-db/src/migrations.rs @@ -1,19 +1,24 @@ use std::path::Path; +use semver::Version; use uuid::Uuid; -use crate::Error; +use crate::{Migration, MigrationRunner, Result}; + +fn v(s: &str) -> Version { + Version::parse(s).unwrap() +} fn is_uuid(name: &str) -> bool { Uuid::try_parse(name).is_ok() } -pub fn rename_transcript(base_dir: &Path) -> Result<(), Error> { +pub fn rename_transcript(base_dir: &Path) -> Result<()> { if !base_dir.exists() { return Ok(()); } - fn rename_recursively(dir: &Path) -> Result<(), Error> { + fn rename_recursively(dir: &Path) -> Result<()> { let entries = std::fs::read_dir(dir)?; for entry in entries { @@ -38,7 +43,7 @@ pub fn rename_transcript(base_dir: &Path) -> Result<(), Error> { rename_recursively(base_dir) } -pub fn move_uuid_folders_to_sessions(base_dir: &Path) -> Result<(), Error> { +pub fn move_uuid_folders_to_sessions(base_dir: &Path) -> Result<()> { let sessions_dir = base_dir.join("sessions"); if !base_dir.exists() { @@ -76,3 +81,46 @@ pub fn move_uuid_folders_to_sessions(base_dir: &Path) -> Result<(), Error> { Ok(()) } + +fn migration_1_0_1_to_1_0_2(base_dir: &Path) -> Result<()> { + move_uuid_folders_to_sessions(base_dir)?; + rename_transcript(base_dir)?; + Ok(()) +} + +pub fn all_migrations() -> Vec { + vec![Migration::new( + "extract_from_legacy_structure", + v("1.0.1"), + v("1.0.2"), + migration_1_0_1_to_1_0_2, + )] +} + +pub fn run_migrations(base_dir: &Path, app_version: Version) -> Result<()> { + let migrations = all_migrations(); + let runner = MigrationRunner::new(base_dir, app_version, &migrations); + let _report = runner.run()?; + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn all_migrations_are_sorted_by_from_version() { + let migrations = all_migrations(); + for window in migrations.windows(2) { + assert!(window[0].from <= window[1].from); + } + } + + #[test] + fn migration_chain_is_continuous() { + let migrations = all_migrations(); + for window in migrations.windows(2) { + assert_eq!(window[0].to, window[1].from); + } + } +} diff --git a/plugins/fs-db/src/runner.rs b/plugins/fs-db/src/runner.rs new file mode 100644 index 0000000000..b7c3154c4a --- /dev/null +++ b/plugins/fs-db/src/runner.rs @@ -0,0 +1,200 @@ +use std::path::Path; + +use semver::Version; + +use crate::{Migration, MigrationReport, Result, version}; + +pub struct MigrationRunner<'a> { + base_dir: &'a Path, + app_version: Version, + migrations: &'a [Migration], +} + +impl<'a> MigrationRunner<'a> { + pub fn new(base_dir: &'a Path, app_version: Version, migrations: &'a [Migration]) -> Self { + Self { + base_dir, + app_version, + migrations, + } + } + + pub fn run(&self) -> Result { + let current_version = version::read_current_version(self.base_dir)?; + + match current_version { + None => { + version::write_version(self.base_dir, &self.app_version)?; + Ok(MigrationReport { + from_version: self.app_version.to_string(), + to_version: self.app_version.to_string(), + migrations_applied: Vec::new(), + }) + } + Some(current) => { + let pending = self.collect_pending_migrations(¤t); + let mut applied = Vec::new(); + + for migration in pending { + migration.run(self.base_dir)?; + applied.push(format!( + "{}: {} -> {}", + 
migration.name, migration.from, migration.to + )); + } + + version::write_version(self.base_dir, &self.app_version)?; + + Ok(MigrationReport { + from_version: current.to_string(), + to_version: self.app_version.to_string(), + migrations_applied: applied, + }) + } + } + } + + fn collect_pending_migrations(&self, current_version: &Version) -> Vec<&Migration> { + let mut pending: Vec<_> = self + .migrations + .iter() + .filter(|m| m.from >= *current_version && m.to <= self.app_version) + .collect(); + pending.sort_by(|a, b| a.from.cmp(&b.from)); + pending + } +} + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::TempDir; + + fn noop(_base_dir: &Path) -> Result<()> { + Ok(()) + } + + fn v(s: &str) -> Version { + Version::parse(s).unwrap() + } + + #[test] + fn fresh_install_skips_all_migrations() { + let temp = TempDir::new().unwrap(); + let migrations = [ + Migration::new("m1", v("1.0.1"), v("1.0.2"), noop), + Migration::new("m2", v("1.0.2"), v("1.0.3"), noop), + ]; + + let runner = MigrationRunner::new(temp.path(), v("1.0.3"), &migrations); + let report = runner.run().unwrap(); + + assert_eq!(report.migrations_applied.len(), 0); + assert_eq!(report.to_version, "1.0.3"); + + let stored = version::read_current_version(temp.path()).unwrap(); + assert_eq!(stored, Some(v("1.0.3"))); + } + + #[test] + fn old_user_without_version_file_but_has_sqlite() { + let temp = TempDir::new().unwrap(); + std::fs::write(temp.path().join("db.sqlite"), "").unwrap(); + + let migrations = [ + Migration::new("m1", v("1.0.1"), v("1.0.2"), noop), + Migration::new("m2", v("1.0.2"), v("1.0.3"), noop), + ]; + + let runner = MigrationRunner::new(temp.path(), v("1.0.3"), &migrations); + let report = runner.run().unwrap(); + + assert_eq!(report.from_version, "1.0.1"); + assert_eq!(report.migrations_applied.len(), 2); + } + + #[test] + fn user_on_1_0_1_upgrading_to_1_0_3() { + let temp = TempDir::new().unwrap(); + version::write_version(temp.path(), &v("1.0.1")).unwrap(); + + let migrations = [ + Migration::new("m1", v("1.0.1"), v("1.0.2"), noop), + Migration::new("m2", v("1.0.2"), v("1.0.3"), noop), + ]; + + let runner = MigrationRunner::new(temp.path(), v("1.0.3"), &migrations); + let report = runner.run().unwrap(); + + assert_eq!(report.from_version, "1.0.1"); + assert_eq!(report.to_version, "1.0.3"); + assert_eq!(report.migrations_applied.len(), 2); + } + + #[test] + fn user_on_1_0_2_upgrading_to_1_0_3() { + let temp = TempDir::new().unwrap(); + version::write_version(temp.path(), &v("1.0.2")).unwrap(); + + let migrations = [ + Migration::new("m1", v("1.0.1"), v("1.0.2"), noop), + Migration::new("m2", v("1.0.2"), v("1.0.3"), noop), + ]; + + let runner = MigrationRunner::new(temp.path(), v("1.0.3"), &migrations); + let report = runner.run().unwrap(); + + assert_eq!(report.from_version, "1.0.2"); + assert_eq!(report.to_version, "1.0.3"); + assert_eq!(report.migrations_applied.len(), 1); + assert!(report.migrations_applied[0].contains("m2")); + } + + #[test] + fn user_already_on_latest() { + let temp = TempDir::new().unwrap(); + version::write_version(temp.path(), &v("1.0.3")).unwrap(); + + let migrations = [ + Migration::new("m1", v("1.0.1"), v("1.0.2"), noop), + Migration::new("m2", v("1.0.2"), v("1.0.3"), noop), + ]; + + let runner = MigrationRunner::new(temp.path(), v("1.0.3"), &migrations); + let report = runner.run().unwrap(); + + assert_eq!(report.from_version, "1.0.3"); + assert_eq!(report.to_version, "1.0.3"); + assert_eq!(report.migrations_applied.len(), 0); + } + + #[test] + fn migrations_run_in_order() 
{ + use std::sync::atomic::{AtomicUsize, Ordering}; + static COUNTER: AtomicUsize = AtomicUsize::new(0); + static ORDER: [AtomicUsize; 2] = [AtomicUsize::new(0), AtomicUsize::new(0)]; + + fn first(_: &Path) -> Result<()> { + ORDER[0].store(COUNTER.fetch_add(1, Ordering::SeqCst), Ordering::SeqCst); + Ok(()) + } + + fn second(_: &Path) -> Result<()> { + ORDER[1].store(COUNTER.fetch_add(1, Ordering::SeqCst), Ordering::SeqCst); + Ok(()) + } + + let temp = TempDir::new().unwrap(); + version::write_version(temp.path(), &v("1.0.1")).unwrap(); + + let migrations = [ + Migration::new("second", v("1.0.2"), v("1.0.3"), second), + Migration::new("first", v("1.0.1"), v("1.0.2"), first), + ]; + + let runner = MigrationRunner::new(temp.path(), v("1.0.3"), &migrations); + runner.run().unwrap(); + + assert!(ORDER[0].load(Ordering::SeqCst) < ORDER[1].load(Ordering::SeqCst)); + } +} diff --git a/plugins/fs-db/src/tests/fixtures.rs b/plugins/fs-db/src/tests/fixtures.rs new file mode 100644 index 0000000000..8192ed953f --- /dev/null +++ b/plugins/fs-db/src/tests/fixtures.rs @@ -0,0 +1,380 @@ +use std::fs; +use std::path::Path; + +use semver::Version; +use tempfile::TempDir; + +use crate::version; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[allow(non_camel_case_types)] +pub enum AppVersion { + V1_0_1, + V1_0_2, +} + +impl AppVersion { + pub fn to_semver(&self) -> Version { + match self { + AppVersion::V1_0_1 => Version::parse("1.0.1").unwrap(), + AppVersion::V1_0_2 => Version::parse("1.0.2").unwrap(), + } + } + + pub fn uses_underscore_transcript(&self) -> bool { + matches!(self, AppVersion::V1_0_1) + } + + pub fn sessions_in_root(&self) -> bool { + matches!(self, AppVersion::V1_0_1) + } +} + +pub struct VersionedFixture { + pub version: AppVersion, + temp_dir: TempDir, +} + +impl VersionedFixture { + pub fn new(version: AppVersion) -> std::io::Result { + let temp_dir = TempDir::new()?; + version::write_version(temp_dir.path(), &version.to_semver()) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))?; + Ok(Self { version, temp_dir }) + } + + pub fn new_fresh() -> std::io::Result { + let temp_dir = TempDir::new()?; + Ok(Self { + version: AppVersion::V1_0_2, + temp_dir, + }) + } + + pub fn new_with_sqlite() -> std::io::Result { + let temp_dir = TempDir::new()?; + fs::write(temp_dir.path().join("db.sqlite"), "")?; + Ok(Self { + version: AppVersion::V1_0_1, + temp_dir, + }) + } + + pub fn path(&self) -> &Path { + self.temp_dir.path() + } + + pub fn file_exists(&self, relative_path: &str) -> bool { + self.path().join(relative_path).exists() + } + + #[allow(dead_code)] + pub fn read_file(&self, relative_path: &str) -> std::io::Result { + fs::read_to_string(self.path().join(relative_path)) + } + + #[allow(dead_code)] + pub fn read_json(&self, relative_path: &str) -> std::io::Result { + let content = self.read_file(relative_path)?; + serde_json::from_str(&content) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e)) + } + + pub fn read_version(&self) -> std::io::Result { + fs::read_to_string(self.path().join(".schema/version")) + } + + pub fn with_session(&self, session: SessionFixture) -> std::io::Result<&Self> { + session.write_to(self.path(), self.version)?; + Ok(self) + } +} + +#[derive(Debug, Clone)] +pub struct ParticipantFixture { + pub id: String, + pub human_id: String, + pub source: String, +} + +impl ParticipantFixture { + #[allow(dead_code)] + pub fn new(id: &str, human_id: &str) -> Self { + Self { + id: id.to_string(), + human_id: human_id.to_string(), + 
source: "manual".to_string(), + } + } +} + +#[derive(Debug, Clone)] +pub struct WordFixture { + pub id: String, + pub text: String, + pub start_ms: u64, + pub end_ms: u64, + pub confidence: f64, + pub speaker: u32, +} + +impl WordFixture { + pub fn new(id: &str, text: &str, start_ms: u64, end_ms: u64) -> Self { + Self { + id: id.to_string(), + text: text.to_string(), + start_ms, + end_ms, + confidence: 0.95, + speaker: 0, + } + } +} + +#[derive(Debug, Clone)] +pub struct TranscriptFixture { + pub id: String, + pub session_id: String, + pub started_at: u64, + pub ended_at: Option, + pub words: Vec, +} + +impl TranscriptFixture { + pub fn new(id: &str, session_id: &str) -> Self { + Self { + id: id.to_string(), + session_id: session_id.to_string(), + started_at: 0, + ended_at: None, + words: Vec::new(), + } + } + + pub fn with_words(mut self, words: Vec) -> Self { + self.words = words; + self + } + + fn to_json(&self) -> serde_json::Value { + serde_json::json!({ + "transcripts": [{ + "id": self.id, + "user_id": "local", + "created_at": "2026-01-25T00:00:00Z", + "session_id": self.session_id, + "started_at": self.started_at, + "ended_at": self.ended_at, + "words": self.words.iter().map(|w| serde_json::json!({ + "id": w.id, + "transcript_id": self.id, + "text": w.text, + "start_ms": w.start_ms, + "end_ms": w.end_ms, + "confidence": w.confidence, + "speaker": w.speaker + })).collect::>(), + "speaker_hints": [] + }] + }) + } +} + +#[derive(Debug, Clone)] +pub struct SessionFixture { + pub id: String, + pub title: String, + pub created_at: String, + pub user_id: String, + pub event_id: Option, + pub participants: Vec, + pub transcript: Option, +} + +impl SessionFixture { + pub fn new(id: &str, title: &str) -> Self { + Self { + id: id.to_string(), + title: title.to_string(), + created_at: "2026-01-25T00:00:00Z".to_string(), + user_id: "local".to_string(), + event_id: None, + participants: Vec::new(), + transcript: None, + } + } + + pub fn with_transcript(mut self, transcript: TranscriptFixture) -> Self { + self.transcript = Some(transcript); + self + } + + pub fn sample_meeting() -> Self { + Self::new("e780bc6c-d209-47f0-8c13-8dd90d94ca5b", "Team Standup") + } + + pub fn sample_with_transcript() -> Self { + let session_id = "bb385a22-64ce-476c-882a-4d18b6706483"; + let transcript = TranscriptFixture::new("t-001", session_id).with_words(vec![ + WordFixture::new("w-001", "Hello", 0, 500), + WordFixture::new("w-002", "world", 500, 1000), + ]); + + Self::new(session_id, "Meeting with Transcript").with_transcript(transcript) + } + + pub fn write_to(&self, base_dir: &Path, version: AppVersion) -> std::io::Result<()> { + let session_dir = if version.sessions_in_root() { + base_dir.join(&self.id) + } else { + base_dir.join("sessions").join(&self.id) + }; + + fs::create_dir_all(&session_dir)?; + + let meta = self.to_meta_json(); + fs::write( + session_dir.join("_meta.json"), + serde_json::to_string_pretty(&meta).unwrap(), + )?; + + if let Some(ref transcript) = self.transcript { + let transcript_filename = if version.uses_underscore_transcript() { + "_transcript.json" + } else { + "transcript.json" + }; + fs::write( + session_dir.join(transcript_filename), + serde_json::to_string_pretty(&transcript.to_json()).unwrap(), + )?; + } + + Ok(()) + } + + fn to_meta_json(&self) -> serde_json::Value { + let mut meta = serde_json::json!({ + "id": self.id, + "user_id": self.user_id, + "created_at": self.created_at, + "title": self.title, + "participants": self.participants.iter().map(|p| serde_json::json!({ + "id": 
p.id, + "user_id": "local", + "created_at": self.created_at, + "session_id": self.id, + "human_id": p.human_id, + "source": p.source + })).collect::>() + }); + + if let Some(ref event_id) = self.event_id { + meta["event_id"] = serde_json::json!(event_id); + } + + meta + } +} + +#[allow(dead_code)] +pub struct TestFixture { + pub temp_dir: TempDir, +} + +#[allow(dead_code)] +impl TestFixture { + pub fn new() -> std::io::Result { + let temp_dir = TempDir::new()?; + Ok(Self { temp_dir }) + } + + pub fn path(&self) -> &Path { + self.temp_dir.path() + } + + pub fn set_version(&self, version: &str) -> std::io::Result<()> { + let v = Version::parse(version).unwrap(); + version::write_version(self.path(), &v) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string())) + } + + pub fn read_version(&self) -> std::io::Result { + fs::read_to_string(self.path().join(".schema/version")) + } + + pub fn file_exists(&self, relative_path: &str) -> bool { + self.path().join(relative_path).exists() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn versioned_fixture_v1_0_1() -> std::io::Result<()> { + let fixture = VersionedFixture::new(AppVersion::V1_0_1)?; + assert_eq!(fixture.read_version()?, "1.0.1"); + Ok(()) + } + + #[test] + fn versioned_fixture_v1_0_2() -> std::io::Result<()> { + let fixture = VersionedFixture::new(AppVersion::V1_0_2)?; + assert_eq!(fixture.read_version()?, "1.0.2"); + Ok(()) + } + + #[test] + fn fresh_fixture_has_no_version() -> std::io::Result<()> { + let fixture = VersionedFixture::new_fresh()?; + assert!(!fixture.file_exists(".schema/version")); + Ok(()) + } + + #[test] + fn sqlite_fixture_has_db_file() -> std::io::Result<()> { + let fixture = VersionedFixture::new_with_sqlite()?; + assert!(fixture.file_exists("db.sqlite")); + assert!(!fixture.file_exists(".schema/version")); + Ok(()) + } + + #[test] + fn session_fixture_v1_0_1_uses_underscore_transcript() -> std::io::Result<()> { + let fixture = VersionedFixture::new(AppVersion::V1_0_1)?; + let session = SessionFixture::sample_with_transcript(); + fixture.with_session(session)?; + + assert!(fixture.file_exists("bb385a22-64ce-476c-882a-4d18b6706483/_transcript.json")); + assert!(!fixture.file_exists("bb385a22-64ce-476c-882a-4d18b6706483/transcript.json")); + Ok(()) + } + + #[test] + fn session_fixture_v1_0_1_sessions_in_root() -> std::io::Result<()> { + let fixture = VersionedFixture::new(AppVersion::V1_0_1)?; + let session = SessionFixture::sample_meeting(); + fixture.with_session(session)?; + + assert!(fixture.file_exists("e780bc6c-d209-47f0-8c13-8dd90d94ca5b/_meta.json")); + assert!(!fixture.file_exists("sessions/e780bc6c-d209-47f0-8c13-8dd90d94ca5b/_meta.json")); + Ok(()) + } + + #[test] + fn session_fixture_v1_0_2_uses_sessions_folder() -> std::io::Result<()> { + let fixture = VersionedFixture::new(AppVersion::V1_0_2)?; + let session = SessionFixture::sample_with_transcript(); + fixture.with_session(session)?; + + assert!( + fixture.file_exists("sessions/bb385a22-64ce-476c-882a-4d18b6706483/transcript.json") + ); + assert!( + !fixture.file_exists("sessions/bb385a22-64ce-476c-882a-4d18b6706483/_transcript.json") + ); + Ok(()) + } +} diff --git a/plugins/fs-db/src/tests/migration_tests.rs b/plugins/fs-db/src/tests/migration_tests.rs new file mode 100644 index 0000000000..0647da5fdb --- /dev/null +++ b/plugins/fs-db/src/tests/migration_tests.rs @@ -0,0 +1,231 @@ +use semver::Version; + +use crate::{Migration, MigrationRunner, migrations, version}; + +use super::fixtures::{AppVersion, 
SessionFixture, VersionedFixture}; + +fn v(s: &str) -> Version { + Version::parse(s).unwrap() +} + +fn noop(_: &std::path::Path) -> crate::Result<()> { + Ok(()) +} + +#[test] +fn fresh_install_skips_all_migrations() { + let fixture = VersionedFixture::new_fresh().unwrap(); + let migrations = [ + Migration::new("m1", v("1.0.1"), v("1.0.2"), noop), + Migration::new("m2", v("1.0.2"), v("1.0.3"), noop), + ]; + + let runner = MigrationRunner::new(fixture.path(), v("1.0.3"), &migrations); + let report = runner.run().unwrap(); + + assert_eq!(report.migrations_applied.len(), 0); + assert_eq!(report.to_version, "1.0.3"); + + let stored = fixture.read_version().unwrap(); + assert_eq!(stored, "1.0.3"); +} + +#[test] +fn old_user_without_version_file_but_has_sqlite() { + let fixture = VersionedFixture::new_with_sqlite().unwrap(); + let migrations = [ + Migration::new("m1", v("1.0.1"), v("1.0.2"), noop), + Migration::new("m2", v("1.0.2"), v("1.0.3"), noop), + ]; + + let runner = MigrationRunner::new(fixture.path(), v("1.0.3"), &migrations); + let report = runner.run().unwrap(); + + assert_eq!(report.from_version, "1.0.1"); + assert_eq!(report.migrations_applied.len(), 2); +} + +#[test] +fn user_on_1_0_1_upgrading_to_1_0_3() { + let fixture = VersionedFixture::new(AppVersion::V1_0_1).unwrap(); + let migrations = [ + Migration::new("m1", v("1.0.1"), v("1.0.2"), noop), + Migration::new("m2", v("1.0.2"), v("1.0.3"), noop), + ]; + + let runner = MigrationRunner::new(fixture.path(), v("1.0.3"), &migrations); + let report = runner.run().unwrap(); + + assert_eq!(report.from_version, "1.0.1"); + assert_eq!(report.to_version, "1.0.3"); + assert_eq!(report.migrations_applied.len(), 2); +} + +#[test] +fn user_on_1_0_2_upgrading_to_1_0_3() { + let fixture = VersionedFixture::new(AppVersion::V1_0_2).unwrap(); + let migrations = [ + Migration::new("m1", v("1.0.1"), v("1.0.2"), noop), + Migration::new("m2", v("1.0.2"), v("1.0.3"), noop), + ]; + + let runner = MigrationRunner::new(fixture.path(), v("1.0.3"), &migrations); + let report = runner.run().unwrap(); + + assert_eq!(report.from_version, "1.0.2"); + assert_eq!(report.to_version, "1.0.3"); + assert_eq!(report.migrations_applied.len(), 1); + assert!(report.migrations_applied[0].contains("m2")); +} + +#[test] +fn user_already_on_latest() { + let fixture = VersionedFixture::new(AppVersion::V1_0_2).unwrap(); + version::write_version(fixture.path(), &v("1.0.3")).unwrap(); + + let migrations = [ + Migration::new("m1", v("1.0.1"), v("1.0.2"), noop), + Migration::new("m2", v("1.0.2"), v("1.0.3"), noop), + ]; + + let runner = MigrationRunner::new(fixture.path(), v("1.0.3"), &migrations); + let report = runner.run().unwrap(); + + assert_eq!(report.from_version, "1.0.3"); + assert_eq!(report.to_version, "1.0.3"); + assert_eq!(report.migrations_applied.len(), 0); +} + +#[test] +fn migrate_v1_0_1_rename_transcript() { + let fixture = VersionedFixture::new(AppVersion::V1_0_1).unwrap(); + let session = SessionFixture::sample_with_transcript(); + let session_id = session.id.clone(); + fixture.with_session(session).unwrap(); + + assert!(fixture.file_exists(&format!("{}/_transcript.json", session_id))); + assert!(!fixture.file_exists(&format!("{}/transcript.json", session_id))); + + migrations::rename_transcript(fixture.path()).unwrap(); + + assert!(!fixture.file_exists(&format!("{}/_transcript.json", session_id))); + assert!(fixture.file_exists(&format!("{}/transcript.json", session_id))); +} + +#[test] +fn migrate_v1_0_1_move_uuid_folders_to_sessions() { + let fixture = 
VersionedFixture::new(AppVersion::V1_0_1).unwrap(); + let session = SessionFixture::sample_meeting(); + let session_id = session.id.clone(); + fixture.with_session(session).unwrap(); + + assert!(fixture.file_exists(&format!("{}/_meta.json", session_id))); + assert!(!fixture.file_exists(&format!("sessions/{}/_meta.json", session_id))); + + migrations::move_uuid_folders_to_sessions(fixture.path()).unwrap(); + + assert!(!fixture.file_exists(&format!("{}/_meta.json", session_id))); + assert!(fixture.file_exists(&format!("sessions/{}/_meta.json", session_id))); +} + +#[test] +fn migrate_v1_0_1_full_migration() { + let fixture = VersionedFixture::new(AppVersion::V1_0_1).unwrap(); + + let session_with_transcript = SessionFixture::sample_with_transcript(); + let transcript_session_id = session_with_transcript.id.clone(); + fixture.with_session(session_with_transcript).unwrap(); + + let simple_session = SessionFixture::sample_meeting(); + let simple_session_id = simple_session.id.clone(); + fixture.with_session(simple_session).unwrap(); + + assert!(fixture.file_exists(&format!("{}/_transcript.json", transcript_session_id))); + assert!(fixture.file_exists(&format!("{}/_meta.json", simple_session_id))); + assert!(!fixture.file_exists("sessions")); + + migrations::move_uuid_folders_to_sessions(fixture.path()).unwrap(); + migrations::rename_transcript(fixture.path()).unwrap(); + + assert!(fixture.file_exists(&format!("sessions/{}/_meta.json", transcript_session_id))); + assert!(fixture.file_exists(&format!( + "sessions/{}/transcript.json", + transcript_session_id + ))); + assert!(!fixture.file_exists(&format!( + "sessions/{}/_transcript.json", + transcript_session_id + ))); + assert!(fixture.file_exists(&format!("sessions/{}/_meta.json", simple_session_id))); +} + +#[test] +fn migrate_v1_0_1_handles_both_transcript_formats() { + let fixture = VersionedFixture::new(AppVersion::V1_0_1).unwrap(); + let session = SessionFixture::sample_with_transcript(); + let session_id = session.id.clone(); + fixture.with_session(session).unwrap(); + + std::fs::write( + fixture.path().join(&session_id).join("transcript.json"), + r#"{"transcripts":[]}"#, + ) + .unwrap(); + + assert!(fixture.file_exists(&format!("{}/_transcript.json", session_id))); + assert!(fixture.file_exists(&format!("{}/transcript.json", session_id))); + + migrations::rename_transcript(fixture.path()).unwrap(); + + assert!(!fixture.file_exists(&format!("{}/_transcript.json", session_id))); + assert!(fixture.file_exists(&format!("{}/transcript.json", session_id))); +} + +#[test] +fn migrate_v1_0_2_no_changes_needed() { + let fixture = VersionedFixture::new(AppVersion::V1_0_2).unwrap(); + let session = SessionFixture::sample_with_transcript(); + let session_id = session.id.clone(); + fixture.with_session(session).unwrap(); + + assert!(fixture.file_exists(&format!("sessions/{}/transcript.json", session_id))); + assert!(!fixture.file_exists(&format!("sessions/{}/_transcript.json", session_id))); + + migrations::rename_transcript(fixture.path()).unwrap(); + migrations::move_uuid_folders_to_sessions(fixture.path()).unwrap(); + + assert!(fixture.file_exists(&format!("sessions/{}/transcript.json", session_id))); +} + +#[test] +fn migrate_v1_0_1_to_1_0_2_with_runner() { + let fixture = VersionedFixture::new(AppVersion::V1_0_1).unwrap(); + let session = SessionFixture::sample_with_transcript(); + fixture.with_session(session).unwrap(); + + let migrations = migrations::all_migrations(); + let runner = MigrationRunner::new(fixture.path(), v("1.0.2"), 
&migrations); + let report = runner.run().unwrap(); + + assert_eq!(report.from_version, "1.0.1"); + assert_eq!(report.to_version, "1.0.2"); + assert_eq!(report.migrations_applied.len(), 1); + + let version = fixture.read_version().unwrap(); + assert_eq!(version, "1.0.2"); +} + +#[test] +fn migrate_v1_0_2_already_current() { + let fixture = VersionedFixture::new(AppVersion::V1_0_2).unwrap(); + let session = SessionFixture::sample_with_transcript(); + fixture.with_session(session).unwrap(); + + let migrations = migrations::all_migrations(); + let runner = MigrationRunner::new(fixture.path(), v("1.0.2"), &migrations); + let report = runner.run().unwrap(); + + assert_eq!(report.from_version, "1.0.2"); + assert_eq!(report.to_version, "1.0.2"); + assert_eq!(report.migrations_applied.len(), 0); +} diff --git a/plugins/fs-db/src/tests/mod.rs b/plugins/fs-db/src/tests/mod.rs new file mode 100644 index 0000000000..79d38cf6f3 --- /dev/null +++ b/plugins/fs-db/src/tests/mod.rs @@ -0,0 +1,2 @@ +mod fixtures; +mod migration_tests; diff --git a/plugins/fs-db/src/types/data/mod.rs b/plugins/fs-db/src/types/data/mod.rs new file mode 100644 index 0000000000..43797b62e9 --- /dev/null +++ b/plugins/fs-db/src/types/data/mod.rs @@ -0,0 +1,5 @@ +mod session; +mod transcript; + +pub use session::SessionMeta; +pub use transcript::TranscriptFile; diff --git a/plugins/fs-db/src/types/data/session.rs b/plugins/fs-db/src/types/data/session.rs new file mode 100644 index 0000000000..13012aced6 --- /dev/null +++ b/plugins/fs-db/src/types/data/session.rs @@ -0,0 +1,141 @@ +use serde::{Deserialize, Serialize}; +use specta::Type; + +#[allow(dead_code)] +pub mod v1 { + use super::*; + + #[derive(Debug, Clone, Serialize, Deserialize)] + pub struct Participant { + pub id: String, + pub user_id: String, + pub created_at: String, + pub session_id: String, + pub human_id: String, + pub source: String, + } + + #[derive(Debug, Clone, Serialize, Deserialize)] + pub struct SessionMeta { + pub id: String, + pub user_id: String, + pub created_at: String, + pub title: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub event_id: Option, + #[serde(default)] + pub participants: Vec, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub tags: Vec, + } + + impl SessionMeta { + pub fn migrate(self) -> super::v2::SessionMeta { + super::v2::SessionMeta { + id: self.id, + user_id: self.user_id, + created_at: self.created_at, + title: self.title, + event_id: self.event_id, + participants: self + .participants + .into_iter() + .map(|p| super::v2::Participant { + id: p.id, + user_id: p.user_id, + created_at: p.created_at, + session_id: p.session_id, + human_id: p.human_id, + source: p.source, + }) + .collect(), + tags: self.tags, + schema_version: 1, + } + } + } +} + +pub mod v2 { + use super::*; + + #[derive(Debug, Clone, Serialize, Deserialize, Type)] + pub struct Participant { + pub id: String, + pub user_id: String, + pub created_at: String, + pub session_id: String, + pub human_id: String, + pub source: String, + } + + #[derive(Debug, Clone, Serialize, Deserialize, Type)] + pub struct SessionMeta { + pub id: String, + pub user_id: String, + pub created_at: String, + pub title: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub event_id: Option, + #[serde(default)] + pub participants: Vec, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub tags: Vec, + #[serde(rename = "_schema_version")] + pub schema_version: u32, + } +} + +pub use v2::SessionMeta; + +#[cfg(test)] +mod tests { + use 
super::*; + + #[test] + fn migrate_v1_to_v2() { + let v1_meta = v1::SessionMeta { + id: "test-id".to_string(), + user_id: "local".to_string(), + created_at: "2026-01-25T00:00:00Z".to_string(), + title: "Test Session".to_string(), + event_id: None, + participants: vec![], + tags: vec![], + }; + + let v2_meta = v1_meta.migrate(); + assert_eq!(v2_meta.id, "test-id"); + assert_eq!(v2_meta.schema_version, 1); + } + + #[test] + fn deserialize_v1_json() { + let json = r#"{ + "id": "test-id", + "user_id": "local", + "created_at": "2026-01-25T00:00:00Z", + "title": "Test Session", + "participants": [] + }"#; + + let meta: v1::SessionMeta = serde_json::from_str(json).unwrap(); + assert_eq!(meta.id, "test-id"); + } + + #[test] + fn deserialize_v2_json() { + let json = r#"{ + "id": "test-id", + "user_id": "local", + "created_at": "2026-01-25T00:00:00Z", + "title": "Test Session", + "participants": [], + "_schema_version": 1 + }"#; + + let meta: v2::SessionMeta = serde_json::from_str(json).unwrap(); + assert_eq!(meta.id, "test-id"); + assert_eq!(meta.schema_version, 1); + } +} diff --git a/plugins/fs-db/src/types/data/transcript.rs b/plugins/fs-db/src/types/data/transcript.rs new file mode 100644 index 0000000000..f4da6e4b6b --- /dev/null +++ b/plugins/fs-db/src/types/data/transcript.rs @@ -0,0 +1,176 @@ +use serde::{Deserialize, Serialize}; +use specta::Type; + +#[allow(dead_code)] +pub mod v1 { + use super::*; + + #[derive(Debug, Clone, Serialize, Deserialize)] + pub struct Word { + pub id: String, + pub transcript_id: String, + pub text: String, + pub start_ms: u64, + pub end_ms: u64, + pub confidence: f64, + pub speaker: u32, + } + + #[derive(Debug, Clone, Serialize, Deserialize)] + pub struct SpeakerHint { + pub id: String, + pub transcript_id: String, + pub speaker: u32, + pub human_id: String, + } + + #[derive(Debug, Clone, Serialize, Deserialize)] + pub struct Transcript { + pub id: String, + pub user_id: String, + pub created_at: String, + pub session_id: String, + pub started_at: u64, + #[serde(skip_serializing_if = "Option::is_none")] + pub ended_at: Option, + #[serde(default)] + pub words: Vec, + #[serde(default)] + pub speaker_hints: Vec, + } + + #[derive(Debug, Clone, Serialize, Deserialize)] + pub struct TranscriptFile { + pub transcripts: Vec, + } + + impl TranscriptFile { + pub fn migrate(self) -> super::v2::TranscriptFile { + super::v2::TranscriptFile { + transcripts: self + .transcripts + .into_iter() + .map(|t| super::v2::Transcript { + id: t.id, + user_id: t.user_id, + created_at: t.created_at, + session_id: t.session_id, + started_at: t.started_at, + ended_at: t.ended_at, + words: t + .words + .into_iter() + .map(|w| super::v2::Word { + id: w.id, + transcript_id: w.transcript_id, + text: w.text, + start_ms: w.start_ms, + end_ms: w.end_ms, + confidence: w.confidence, + speaker: w.speaker, + }) + .collect(), + speaker_hints: t + .speaker_hints + .into_iter() + .map(|h| super::v2::SpeakerHint { + id: h.id, + transcript_id: h.transcript_id, + speaker: h.speaker, + human_id: h.human_id, + }) + .collect(), + }) + .collect(), + } + } + } +} + +pub mod v2 { + use super::*; + + #[derive(Debug, Clone, Serialize, Deserialize, Type)] + pub struct Word { + pub id: String, + pub transcript_id: String, + pub text: String, + pub start_ms: u64, + pub end_ms: u64, + pub confidence: f64, + pub speaker: u32, + } + + #[derive(Debug, Clone, Serialize, Deserialize, Type)] + pub struct SpeakerHint { + pub id: String, + pub transcript_id: String, + pub speaker: u32, + pub human_id: String, + } + + 
#[derive(Debug, Clone, Serialize, Deserialize, Type)] + pub struct Transcript { + pub id: String, + pub user_id: String, + pub created_at: String, + pub session_id: String, + pub started_at: u64, + #[serde(skip_serializing_if = "Option::is_none")] + pub ended_at: Option, + #[serde(default)] + pub words: Vec, + #[serde(default)] + pub speaker_hints: Vec, + } + + #[derive(Debug, Clone, Serialize, Deserialize, Type)] + pub struct TranscriptFile { + pub transcripts: Vec, + } +} + +pub use v2::TranscriptFile; + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn migrate_v1_to_v2() { + let v1_file = v1::TranscriptFile { + transcripts: vec![v1::Transcript { + id: "t-001".to_string(), + user_id: "local".to_string(), + created_at: "2026-01-25T00:00:00Z".to_string(), + session_id: "s-001".to_string(), + started_at: 0, + ended_at: Some(1000), + words: vec![], + speaker_hints: vec![], + }], + }; + + let v2_file = v1_file.migrate(); + assert_eq!(v2_file.transcripts.len(), 1); + assert_eq!(v2_file.transcripts[0].id, "t-001"); + } + + #[test] + fn deserialize_v1_json() { + let json = r#"{ + "transcripts": [{ + "id": "t-001", + "user_id": "local", + "created_at": "2026-01-25T00:00:00Z", + "session_id": "s-001", + "started_at": 0, + "words": [], + "speaker_hints": [] + }] + }"#; + + let file: v1::TranscriptFile = serde_json::from_str(json).unwrap(); + assert_eq!(file.transcripts[0].id, "t-001"); + } +} diff --git a/plugins/fs-db/src/types/document.rs b/plugins/fs-db/src/types/document.rs new file mode 100644 index 0000000000..a2ad189a7e --- /dev/null +++ b/plugins/fs-db/src/types/document.rs @@ -0,0 +1,89 @@ +use std::collections::HashMap; + +use serde::{Deserialize, Serialize}; +use serde_json::Value; +use specta::Type; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Type)] +pub struct Document { + pub id: String, + pub metadata: HashMap, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub body: Option, +} + +impl Document { + pub fn new(id: impl Into) -> Self { + Self { + id: id.into(), + metadata: HashMap::new(), + body: None, + } + } + + pub fn with_metadata(mut self, key: impl Into, value: impl Into) -> Self { + self.metadata.insert(key.into(), value.into()); + self + } + + pub fn with_body(mut self, body: impl Into) -> Self { + self.body = Some(body.into()); + self + } + + pub fn get(&self, key: &str) -> Option { + self.metadata + .get(key) + .and_then(|v| serde_json::from_value(v.clone()).ok()) + } + + pub fn set(&mut self, key: impl Into, value: impl Into) { + self.metadata.insert(key.into(), value.into()); + } + + pub fn remove(&mut self, key: &str) -> Option { + self.metadata.remove(key) + } + + pub fn rename_field(&mut self, from: &str, to: impl Into) -> bool { + if let Some(value) = self.metadata.remove(from) { + self.metadata.insert(to.into(), value); + true + } else { + false + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn document_builder() { + let doc = Document::new("123") + .with_metadata("name", "Alice") + .with_metadata("age", 30) + .with_body("Hello world"); + + assert_eq!(doc.id, "123"); + assert_eq!(doc.get::("name"), Some("Alice".to_string())); + assert_eq!(doc.get::("age"), Some(30)); + assert_eq!(doc.body, Some("Hello world".to_string())); + } + + #[test] + fn document_rename_field() { + let mut doc = Document::new("123").with_metadata("email", "a@b.com"); + + assert!(doc.rename_field("email", "emails")); + assert!(doc.metadata.get("email").is_none()); + assert_eq!(doc.get::("emails"), Some("a@b.com".to_string())); + } + 
+ #[test] + fn document_rename_nonexistent() { + let mut doc = Document::new("123"); + assert!(!doc.rename_field("missing", "new")); + } +} diff --git a/plugins/fs-db/src/types/migration.rs b/plugins/fs-db/src/types/migration.rs new file mode 100644 index 0000000000..8dc1ded327 --- /dev/null +++ b/plugins/fs-db/src/types/migration.rs @@ -0,0 +1,87 @@ +use std::path::Path; + +use semver::Version; + +use crate::Result; + +pub struct Migration { + pub name: &'static str, + pub from: Version, + pub to: Version, + pub run: fn(&Path) -> Result<()>, +} + +impl Migration { + pub fn new( + name: &'static str, + from: Version, + to: Version, + run: fn(&Path) -> Result<()>, + ) -> Self { + Self { + name, + from, + to, + run, + } + } + + pub fn run(&self, base_dir: &Path) -> Result<()> { + (self.run)(base_dir) + } +} + +impl std::fmt::Debug for Migration { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Migration") + .field("name", &self.name) + .field("from", &self.from) + .field("to", &self.to) + .finish() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn noop(_base_dir: &Path) -> Result<()> { + Ok(()) + } + + #[test] + fn migration_creation() { + let m = Migration::new( + "test_migration", + Version::parse("1.0.1").unwrap(), + Version::parse("1.0.2").unwrap(), + noop, + ); + + assert_eq!(m.name, "test_migration"); + assert_eq!(m.from, Version::parse("1.0.1").unwrap()); + assert_eq!(m.to, Version::parse("1.0.2").unwrap()); + } + + #[test] + fn migration_run() { + use std::sync::atomic::{AtomicBool, Ordering}; + static CALLED: AtomicBool = AtomicBool::new(false); + + fn test_run(_base_dir: &Path) -> Result<()> { + CALLED.store(true, Ordering::SeqCst); + Ok(()) + } + + let m = Migration::new( + "test", + Version::parse("1.0.1").unwrap(), + Version::parse("1.0.2").unwrap(), + test_run, + ); + + let temp = tempfile::TempDir::new().unwrap(); + m.run(temp.path()).unwrap(); + assert!(CALLED.load(Ordering::SeqCst)); + } +} diff --git a/plugins/fs-db/src/types/mod.rs b/plugins/fs-db/src/types/mod.rs new file mode 100644 index 0000000000..8fb3d290b3 --- /dev/null +++ b/plugins/fs-db/src/types/mod.rs @@ -0,0 +1,11 @@ +pub mod data; +mod document; +mod migration; +mod schema; +mod version; + +pub use data::{SessionMeta, TranscriptFile}; +pub use document::*; +pub use migration::*; +pub use schema::*; +pub use version::{Version, read_current_version, write_version}; diff --git a/plugins/fs-db/src/types/schema.rs b/plugins/fs-db/src/types/schema.rs new file mode 100644 index 0000000000..cbfddaee61 --- /dev/null +++ b/plugins/fs-db/src/types/schema.rs @@ -0,0 +1,9 @@ +use serde::{Deserialize, Serialize}; +use specta::Type; + +#[derive(Debug, Clone, Serialize, Deserialize, Type)] +pub struct MigrationReport { + pub from_version: String, + pub to_version: String, + pub migrations_applied: Vec, +} diff --git a/plugins/fs-db/src/types/version.rs b/plugins/fs-db/src/types/version.rs new file mode 100644 index 0000000000..f8d2006c08 --- /dev/null +++ b/plugins/fs-db/src/types/version.rs @@ -0,0 +1,89 @@ +use std::path::Path; + +use crate::Result; + +pub use semver::Version; + +const VERSION_FILE: &str = ".schema/version"; +const SQLITE_FILE: &str = "db.sqlite"; +const LEGACY_VERSION: &str = "1.0.1"; + +pub fn read_current_version(base_dir: &Path) -> Result> { + let version_path = base_dir.join(VERSION_FILE); + let sqlite_path = base_dir.join(SQLITE_FILE); + + if version_path.exists() { + let content = std::fs::read_to_string(&version_path)?; + let version = 
Version::parse(content.trim()) + .map_err(|_| crate::Error::VersionParse(content.clone()))?; + Ok(Some(version)) + } else if sqlite_path.exists() { + let version = Version::parse(LEGACY_VERSION).unwrap(); + Ok(Some(version)) + } else { + Ok(None) + } +} + +pub fn write_version(base_dir: &Path, version: &Version) -> Result<()> { + let version_path = base_dir.join(VERSION_FILE); + if let Some(parent) = version_path.parent() { + std::fs::create_dir_all(parent)?; + } + std::fs::write(&version_path, version.to_string())?; + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::TempDir; + + #[test] + fn fresh_install_returns_none() { + let temp = TempDir::new().unwrap(); + let result = read_current_version(temp.path()).unwrap(); + assert!(result.is_none()); + } + + #[test] + fn existing_version_file_returns_version() { + let temp = TempDir::new().unwrap(); + std::fs::create_dir_all(temp.path().join(".schema")).unwrap(); + std::fs::write(temp.path().join(VERSION_FILE), "1.0.2").unwrap(); + + let result = read_current_version(temp.path()).unwrap(); + assert_eq!(result, Some(Version::parse("1.0.2").unwrap())); + } + + #[test] + fn sqlite_without_version_returns_legacy() { + let temp = TempDir::new().unwrap(); + std::fs::write(temp.path().join(SQLITE_FILE), "").unwrap(); + + let result = read_current_version(temp.path()).unwrap(); + assert_eq!(result, Some(Version::parse(LEGACY_VERSION).unwrap())); + } + + #[test] + fn write_and_read_version() { + let temp = TempDir::new().unwrap(); + let version = Version::parse("1.2.3").unwrap(); + + write_version(temp.path(), &version).unwrap(); + let result = read_current_version(temp.path()).unwrap(); + + assert_eq!(result, Some(version)); + } + + #[test] + fn version_ordering() { + let v1 = Version::parse("1.0.1").unwrap(); + let v2 = Version::parse("1.0.2").unwrap(); + let v3 = Version::parse("1.0.10").unwrap(); + + assert!(v1 < v2); + assert!(v2 < v3); + assert!(v1 < v3); + } +}