Skip to content
Open
24 changes: 24 additions & 0 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -95,6 +95,30 @@ jobs:
with:
github-token: ${{ secrets.GITHUB_TOKEN }}

# Apply every migration, roll each one back, and re-apply it
# (npm run migration:test-idempotency), diffing a schema dump taken before
# and after the cycle. Any difference means a migration is not idempotent.
- name: Verify migration idempotency
  run: |
    # --restrict-key pins pg_dump's \restrict token so the two dumps are
    # byte-comparable; migration bookkeeping rows are excluded because the
    # reset/re-run cycle legitimately rewrites them.
    pg_dump "$DATABASE_URL" \
      --exclude-table-data=storage.migrations \
      --restrict-key=test \
      > before.sql

    npm run migration:test-idempotency

    pg_dump "$DATABASE_URL" \
      --exclude-table-data=storage.migrations \
      --restrict-key=test \
      > after.sql

    # -u prints the actual schema delta in the job log on failure; the brace
    # group (not a subshell) reports to stderr and fails the step explicitly.
    diff -u before.sql after.sql || { echo 'Schema mismatch!' >&2; exit 1; }
  env:
    PGRST_JWT_SECRET: ${{ secrets.PGRST_JWT_SECRET }}
    DATABASE_URL: postgresql://postgres:postgres@127.0.0.1/postgres
    # Environment variables are strings; quote boolean-looking values so no
    # YAML tooling retypes them before they reach the process environment.
    DB_INSTALL_ROLES: "true"
    ENABLE_DEFAULT_METRICS: "false"
    PG_QUEUE_ENABLE: "false"
    MULTI_TENANT: "false"

- name: Ensure OrioleDB migration compatibility
run: |
npm run infra:restart:oriole
Expand Down
1 change: 0 additions & 1 deletion migrations/tenant/00010-search-files-search-function.sql
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
drop function storage.search;

create or replace function storage.search (
prefix text,
Expand Down
24 changes: 16 additions & 8 deletions migrations/tenant/0002-storage-schema.sql
Original file line number Diff line number Diff line change
Expand Up @@ -18,11 +18,23 @@ BEGIN
END IF;

-- Install ROLES
EXECUTE 'CREATE ROLE ' || anon_role || ' NOLOGIN NOINHERIT';
EXECUTE 'CREATE ROLE ' || authenticated_role || ' NOLOGIN NOINHERIT';
EXECUTE 'CREATE ROLE ' || service_role || ' NOLOGIN NOINHERIT bypassrls';
IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = anon_role) THEN
EXECUTE 'CREATE ROLE ' || anon_role || ' NOLOGIN NOINHERIT';
END IF;

IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = authenticated_role ) THEN
EXECUTE 'CREATE ROLE ' || authenticated_role || ' NOLOGIN NOINHERIT';
END IF;

IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = service_role) THEN
EXECUTE 'CREATE ROLE ' || service_role || ' NOLOGIN NOINHERIT bypassrls';
END IF;

IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'authenticator') THEN
EXECUTE 'CREATE USER authenticator NOINHERIT';
END IF;


create user authenticator noinherit;
EXECUTE 'grant ' || anon_role || ' to authenticator';
EXECUTE 'grant ' || authenticated_role || ' to authenticator';
EXECUTE 'grant ' || service_role || ' to authenticator';
Expand Down Expand Up @@ -70,7 +82,6 @@ CREATE INDEX IF NOT EXISTS name_prefix_search ON storage.objects(name text_patte

ALTER TABLE storage.objects ENABLE ROW LEVEL SECURITY;

drop function if exists storage.foldername;
CREATE OR REPLACE FUNCTION storage.foldername(name text)
RETURNS text[]
LANGUAGE plpgsql
Expand All @@ -83,7 +94,6 @@ BEGIN
END
$function$;

drop function if exists storage.filename;
CREATE OR REPLACE FUNCTION storage.filename(name text)
RETURNS text
LANGUAGE plpgsql
Expand All @@ -96,7 +106,6 @@ BEGIN
END
$function$;

drop function if exists storage.extension;
CREATE OR REPLACE FUNCTION storage.extension(name text)
RETURNS text
LANGUAGE plpgsql
Expand All @@ -113,7 +122,6 @@ END
$function$;

-- @todo can this query be optimised further?
drop function if exists storage.search;
CREATE OR REPLACE FUNCTION storage.search(prefix text, bucketname text, limits int DEFAULT 100, levels int DEFAULT 1, offsets int DEFAULT 0)
RETURNS TABLE (
name text,
Expand Down
2 changes: 1 addition & 1 deletion migrations/tenant/0006-change-column-name-in-get-size.sql
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
DROP FUNCTION storage.get_size_by_bucket();
DROP FUNCTION IF EXISTS storage.get_size_by_bucket();
CREATE OR REPLACE FUNCTION storage.get_size_by_bucket()
RETURNS TABLE (
size BIGINT,
Expand Down
1 change: 0 additions & 1 deletion migrations/tenant/0009-fix-search-function.sql
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
drop function if exists storage.search;
CREATE OR REPLACE FUNCTION storage.search(prefix text, bucketname text, limits int DEFAULT 100, levels int DEFAULT 1, offsets int DEFAULT 0)
RETURNS TABLE (
name text,
Expand Down
5 changes: 4 additions & 1 deletion migrations/tenant/0038-iceberg-catalog-flag-on-buckets.sql
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,9 @@ DO $$
updated_at timestamptz NOT NULL default now()
);

CREATE UNIQUE INDEX IF NOT EXISTS idx_iceberg_namespaces_bucket_id ON storage.iceberg_namespaces (bucket_id, name);
IF EXISTS (SELECT 1 FROM information_schema.columns WHERE table_name = 'iceberg_namespaces' AND column_name = 'bucket_id') THEN
CREATE UNIQUE INDEX IF NOT EXISTS idx_iceberg_namespaces_bucket_id ON storage.iceberg_namespaces (bucket_id, name);
END IF;

CREATE TABLE IF NOT EXISTS storage.iceberg_tables (
id uuid primary key default gen_random_uuid(),
Expand All @@ -52,6 +54,7 @@ DO $$
updated_at timestamptz NOT NULL default now()
);

DROP INDEX IF EXISTS idx_iceberg_tables_namespace_id;
CREATE UNIQUE INDEX idx_iceberg_tables_namespace_id ON storage.iceberg_tables (namespace_id, name);

ALTER TABLE storage.iceberg_namespaces ENABLE ROW LEVEL SECURITY;
Expand Down
1 change: 0 additions & 1 deletion migrations/tenant/0039-add-search-v2-sort-support.sql
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
DROP FUNCTION IF EXISTS storage.search_v2;
CREATE OR REPLACE FUNCTION storage.search_v2 (
prefix text,
bucket_name text,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -240,6 +240,10 @@ BEGIN
END;
$$;

DROP TRIGGER IF EXISTS objects_delete_cleanup ON storage.objects;
DROP TRIGGER IF EXISTS prefixes_delete_cleanup ON storage.prefixes;
DROP TRIGGER IF EXISTS objects_update_cleanup ON storage.objects;

-- Trigger bindings
CREATE TRIGGER objects_delete_cleanup
AFTER DELETE ON storage.objects
Expand Down
24 changes: 22 additions & 2 deletions migrations/tenant/0048-iceberg-catalog-ids.sql
Original file line number Diff line number Diff line change
Expand Up @@ -54,8 +54,28 @@ DO $$
ALTER TABLE storage.iceberg_tables RENAME COLUMN bucket_id to bucket_name;
END IF;

ALTER TABLE storage.iceberg_namespaces ADD COLUMN IF NOT EXISTS catalog_id uuid NULL REFERENCES storage.buckets_analytics(id) ON DELETE CASCADE;
ALTER TABLE storage.iceberg_tables ADD COLUMN IF NOT EXISTS catalog_id uuid NULL REFERENCES storage.buckets_analytics(id) ON DELETE CASCADE;
ALTER TABLE storage.iceberg_namespaces ADD COLUMN IF NOT EXISTS catalog_id uuid NULL;
ALTER TABLE storage.iceberg_tables ADD COLUMN IF NOT EXISTS catalog_id uuid NULL;

IF NOT EXISTS (
SELECT 1 FROM information_schema.table_constraints
WHERE table_schema = 'storage'
AND table_name = 'iceberg_namespaces'
AND constraint_name = 'iceberg_namespaces_catalog_id_fkey'
) THEN
ALTER TABLE storage.iceberg_namespaces ADD CONSTRAINT iceberg_namespaces_catalog_id_fkey
FOREIGN KEY (catalog_id) REFERENCES storage.buckets_analytics(id) ON DELETE CASCADE;
END IF;

IF NOT EXISTS (
SELECT 1 FROM information_schema.table_constraints
WHERE table_schema = 'storage'
AND table_name = 'iceberg_tables'
AND constraint_name = 'iceberg_tables_catalog_id_fkey'
) THEN
ALTER TABLE storage.iceberg_tables ADD CONSTRAINT iceberg_tables_catalog_id_fkey
FOREIGN KEY (catalog_id) REFERENCES storage.buckets_analytics(id) ON DELETE CASCADE;
END IF;

CREATE UNIQUE INDEX IF NOT EXISTS idx_iceberg_namespaces_bucket_id ON storage.iceberg_namespaces (catalog_id, name);
CREATE UNIQUE INDEX IF NOT EXISTS idx_iceberg_tables_namespace_id ON storage.iceberg_tables (catalog_id, namespace_id, name);
Expand Down
1 change: 1 addition & 0 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
"format": "prettier -c --write src/**",
"lint": "prettier -v && prettier -c src/**",
"migration:run": "tsx src/scripts/migrate-call.ts",
"migration:test-idempotency": "tsx src/scripts/test-migration-idempotency.ts",
"migrations:types": "tsx src/scripts/migrations-types.ts",
"docs:export": "tsx ./src/scripts/export-docs.ts",
"test:dummy-data": "tsx -r dotenv/config ./src/test/db/import-dummy-data.ts",
Expand Down
12 changes: 7 additions & 5 deletions src/internal/database/migrations/migrate.ts
Original file line number Diff line number Diff line change
Expand Up @@ -358,7 +358,7 @@ export async function runMigrationsOnTenant({
}

export async function resetMigration(options: {
tenantId: string
tenantId?: string
untilMigration: keyof typeof DBMigration
markCompletedTillMigration?: keyof typeof DBMigration
databaseUrl: string
Expand Down Expand Up @@ -447,10 +447,12 @@ export async function resetMigration(options: {
}
}

await updateTenantMigrationsState(options.tenantId, {
migration: latestRunMigration,
state: TenantMigrationStatus.COMPLETED,
})
if (options.tenantId) {
await updateTenantMigrationsState(options.tenantId, {
migration: latestRunMigration,
state: TenantMigrationStatus.COMPLETED,
})
}

await pgClient.query(`COMMIT`)

Expand Down
1 change: 1 addition & 0 deletions src/internal/database/migrations/types.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
export const DBMigration = {
'create-migrations-table': 0,
initialmigration: 1,
'search-files-search-function': 2,
'storage-schema': 3,
Expand Down
17 changes: 12 additions & 5 deletions src/scripts/migrations-types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -6,17 +6,24 @@ function main() {
const migrationsPath = path.join(__dirname, '..', '..', 'migrations', 'tenant', '*.sql')
const files = glob.sync(migrationsPath).sort()

const migrations = files.map((file, index) => {
const migrations = [
// this migration is hardcoded by the postgres migrations library
{
file: 'create-migrations-table',
index: 0,
},
]

files.forEach((file, index) => {
const fileName = file
.split(path.sep)
.pop()
?.replace(/[0-9]+-/, '')
.replace('.sql', '')

return {
file: fileName,
migrations.push({
file: fileName || '',
index: index + 1,
}
})
})

const migrationsEnum = migrations.map((migration) => {
Expand Down
39 changes: 39 additions & 0 deletions src/scripts/test-migration-idempotency.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
import dotenv from 'dotenv'
dotenv.config()

import { resetMigration, runMigrationsOnTenant } from '@internal/database/migrations'
import { DBMigration } from '@internal/database/migrations/types'
import { getConfig } from '../config'

/**
 * Verifies that every tenant migration is idempotent: for each migration in
 * order, it is applied, rolled back to the previous migration, and applied
 * again. The calling CI job diffs a schema dump taken before and after this
 * script runs; any difference means a migration does not re-apply cleanly.
 */
async function main(): Promise<void> {
  const { databaseURL, dbMigrationFreezeAt } = getConfig()
  const migrations = Object.keys(DBMigration) as (keyof typeof DBMigration)[]

  // Index 0 ('create-migrations-table') is created implicitly by the
  // migrations library, so the loop starts from the first real migration
  // and rolls back to this sentinel on the first iteration.
  let previousMigration: keyof typeof DBMigration = 'create-migrations-table'

  for (const migration of migrations.slice(1)) {
    console.log(`Running migration ${migration}`)
    await runMigrationsOnTenant({
      databaseUrl: databaseURL,
      upToMigration: migration,
    })

    console.log(`Resetting migration ${migration}`)
    await resetMigration({
      databaseUrl: databaseURL,
      untilMigration: previousMigration,
    })

    console.log(`Rerunning migration ${migration}`)
    await runMigrationsOnTenant({
      databaseUrl: databaseURL,
      upToMigration: migration,
    })

    // Stop once the configured freeze point is reached, if any.
    if (dbMigrationFreezeAt === migration) {
      break
    }

    previousMigration = migration
  }
}

main().catch((err) => {
  // An explicit catch guarantees the failure is logged and the process
  // exits non-zero, so the CI step fails loudly on every Node version
  // rather than relying on unhandled-rejection semantics.
  console.error(err)
  process.exit(1)
})
Loading